[llvm] 70b8b73 - [CSKY] Fix the btsti16 instruction missing in generic processor

Zi Xuan Wu via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 27 02:39:26 PDT 2022


Author: Zi Xuan Wu (Zeson)
Date: 2022-07-27T17:39:15+08:00
New Revision: 70b8b738c5794799e9807549e5058d9570176918

URL: https://github.com/llvm/llvm-project/commit/70b8b738c5794799e9807549e5058d9570176918
DIFF: https://github.com/llvm/llvm-project/commit/70b8b738c5794799e9807549e5058d9570176918.diff

LOG: [CSKY] Fix the btsti16 instruction missing in generic processor

Normally, the generic processor does not have any SubtargetFeature, and it
can only generate the most basic instructions, which have no Predicates
guarding them.

But it needs to enable the predicate for the btsti16 instruction, as it is one of the most basic instructions.
Otherwise, the generic processor can't finish the codegen process. So add the FeatureBTST16 SubtargetFeature to the generic ProcessorModel.

Added: 
    

Modified: 
    llvm/lib/Target/CSKY/CSKY.td
    llvm/test/CodeGen/CSKY/atomic-rmw.ll
    llvm/test/CodeGen/CSKY/base-i.ll
    llvm/test/CodeGen/CSKY/br.ll
    llvm/test/CodeGen/CSKY/cmp-i.ll
    llvm/test/CodeGen/CSKY/fpu/br-d.ll
    llvm/test/CodeGen/CSKY/fpu/br-f.ll
    llvm/test/CodeGen/CSKY/fpu/select.ll
    llvm/test/CodeGen/CSKY/select.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/CSKY/CSKY.td b/llvm/lib/Target/CSKY/CSKY.td
index a8db9151e127b..e5ac106c9b59a 100644
--- a/llvm/lib/Target/CSKY/CSKY.td
+++ b/llvm/lib/Target/CSKY/CSKY.td
@@ -373,7 +373,9 @@ include "CSKYInstrInfo.td"
 // CSKY processors supported.
 //===----------------------------------------------------------------------===//
 
-def : ProcessorModel<"generic", NoSchedModel, []>;
+// btsti16 is one of most basic instructions should be enable
+// even in generic processor to avoid failure codegen.
+def : ProcessorModel<"generic", NoSchedModel, [FeatureBTST16]>;
 
 // CK801 series
 class CK801<string n, SchedMachineModel m, list<SubtargetFeature> f,

diff  --git a/llvm/test/CodeGen/CSKY/atomic-rmw.ll b/llvm/test/CodeGen/CSKY/atomic-rmw.ll
index b2f8467738a69..c9fd90bb8c347 100644
--- a/llvm/test/CodeGen/CSKY/atomic-rmw.ll
+++ b/llvm/test/CodeGen/CSKY/atomic-rmw.ll
@@ -17,7 +17,6 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI0_0:
 ; CSKY-NEXT:    .long __atomic_exchange_1
-;
   %1 = atomicrmw xchg i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -37,7 +36,6 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI1_0:
 ; CSKY-NEXT:    .long __atomic_exchange_1
-;
   %1 = atomicrmw xchg i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -57,7 +55,6 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI2_0:
 ; CSKY-NEXT:    .long __atomic_exchange_1
-;
   %1 = atomicrmw xchg i8* %a, i8 %b release
   ret i8 %1
 }
@@ -77,7 +74,6 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI3_0:
 ; CSKY-NEXT:    .long __atomic_exchange_1
-;
   %1 = atomicrmw xchg i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -97,7 +93,6 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI4_0:
 ; CSKY-NEXT:    .long __atomic_exchange_1
-;
   %1 = atomicrmw xchg i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -117,7 +112,6 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI5_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_1
-;
   %1 = atomicrmw add i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -137,7 +131,6 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI6_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_1
-;
   %1 = atomicrmw add i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -157,7 +150,6 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI7_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_1
-;
   %1 = atomicrmw add i8* %a, i8 %b release
   ret i8 %1
 }
@@ -177,7 +169,6 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI8_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_1
-;
   %1 = atomicrmw add i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -197,7 +188,6 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI9_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_1
-;
   %1 = atomicrmw add i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -217,7 +207,6 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI10_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_1
-;
   %1 = atomicrmw sub i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -237,7 +226,6 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI11_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_1
-;
   %1 = atomicrmw sub i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -257,7 +245,6 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI12_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_1
-;
   %1 = atomicrmw sub i8* %a, i8 %b release
   ret i8 %1
 }
@@ -277,7 +264,6 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI13_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_1
-;
   %1 = atomicrmw sub i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -297,7 +283,6 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI14_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_1
-;
   %1 = atomicrmw sub i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -317,7 +302,6 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI15_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_1
-;
   %1 = atomicrmw and i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -337,7 +321,6 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI16_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_1
-;
   %1 = atomicrmw and i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -357,7 +340,6 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI17_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_1
-;
   %1 = atomicrmw and i8* %a, i8 %b release
   ret i8 %1
 }
@@ -377,7 +359,6 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI18_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_1
-;
   %1 = atomicrmw and i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -397,7 +378,6 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI19_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_1
-;
   %1 = atomicrmw and i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -417,7 +397,6 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI20_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_1
-;
   %1 = atomicrmw nand i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -437,7 +416,6 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI21_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_1
-;
   %1 = atomicrmw nand i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -457,7 +435,6 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI22_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_1
-;
   %1 = atomicrmw nand i8* %a, i8 %b release
   ret i8 %1
 }
@@ -477,7 +454,6 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI23_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_1
-;
   %1 = atomicrmw nand i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -497,7 +473,6 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI24_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_1
-;
   %1 = atomicrmw nand i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -517,7 +492,6 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI25_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_1
-;
   %1 = atomicrmw or i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -537,7 +511,6 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI26_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_1
-;
   %1 = atomicrmw or i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -557,7 +530,6 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI27_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_1
-;
   %1 = atomicrmw or i8* %a, i8 %b release
   ret i8 %1
 }
@@ -577,7 +549,6 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI28_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_1
-;
   %1 = atomicrmw or i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -597,7 +568,6 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI29_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_1
-;
   %1 = atomicrmw or i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -617,7 +587,6 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI30_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_1
-;
   %1 = atomicrmw xor i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -637,7 +606,6 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI31_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_1
-;
   %1 = atomicrmw xor i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -657,7 +625,6 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI32_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_1
-;
   %1 = atomicrmw xor i8* %a, i8 %b release
   ret i8 %1
 }
@@ -677,7 +644,6 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI33_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_1
-;
   %1 = atomicrmw xor i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -697,7 +663,6 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI34_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_1
-;
   %1 = atomicrmw xor i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -706,10 +671,10 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i8_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -735,10 +700,10 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -746,7 +711,6 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI35_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw max i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -755,10 +719,10 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i8_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -784,10 +748,10 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -795,7 +759,6 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI36_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw max i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -804,10 +767,10 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i8_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -833,10 +796,10 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -844,7 +807,6 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI37_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw max i8* %a, i8 %b release
   ret i8 %1
 }
@@ -853,10 +815,10 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i8_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -882,10 +844,10 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -893,7 +855,6 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI38_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw max i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -902,10 +863,10 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i8_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -931,10 +892,10 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -942,7 +903,6 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI39_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw max i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -951,10 +911,10 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i8_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -980,10 +940,10 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -991,7 +951,6 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI40_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw min i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1000,10 +959,10 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i8_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1029,10 +988,10 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1040,7 +999,6 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI41_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw min i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1049,10 +1007,10 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i8_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1078,10 +1036,10 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1089,7 +1047,6 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI42_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw min i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1098,10 +1055,10 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i8_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1127,10 +1084,10 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1138,7 +1095,6 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI43_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw min i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1147,10 +1103,10 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i8_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1176,10 +1132,10 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1187,7 +1143,6 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI44_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw min i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1196,10 +1151,10 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i8_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1225,10 +1180,10 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1236,7 +1191,6 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI45_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umax i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1245,10 +1199,10 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i8_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1274,10 +1228,10 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1285,7 +1239,6 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI46_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umax i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1294,10 +1247,10 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i8_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1323,10 +1276,10 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1334,7 +1287,6 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI47_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umax i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1343,10 +1295,10 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i8_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1372,10 +1324,10 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1383,7 +1335,6 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI48_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umax i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1392,10 +1343,10 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i8_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1421,10 +1372,10 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1432,7 +1383,6 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI49_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umax i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1441,10 +1391,10 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i8_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1470,10 +1420,10 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1481,7 +1431,6 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI50_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umin i8* %a, i8 %b monotonic
   ret i8 %1
 }
@@ -1490,10 +1439,10 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i8_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1519,10 +1468,10 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1530,7 +1479,6 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI51_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umin i8* %a, i8 %b acquire
   ret i8 %1
 }
@@ -1539,10 +1487,10 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i8_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1568,10 +1516,10 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1579,7 +1527,6 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI52_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umin i8* %a, i8 %b release
   ret i8 %1
 }
@@ -1588,10 +1535,10 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i8_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1617,10 +1564,10 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1628,7 +1575,6 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI53_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umin i8* %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -1637,10 +1583,10 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i8_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -1666,10 +1612,10 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -1677,7 +1623,6 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI54_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_1
-;
   %1 = atomicrmw umin i8* %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -1697,7 +1642,6 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI55_0:
 ; CSKY-NEXT:    .long __atomic_exchange_2
-;
   %1 = atomicrmw xchg i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -1717,7 +1661,6 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI56_0:
 ; CSKY-NEXT:    .long __atomic_exchange_2
-;
   %1 = atomicrmw xchg i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -1737,7 +1680,6 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI57_0:
 ; CSKY-NEXT:    .long __atomic_exchange_2
-;
   %1 = atomicrmw xchg i16* %a, i16 %b release
   ret i16 %1
 }
@@ -1757,7 +1699,6 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI58_0:
 ; CSKY-NEXT:    .long __atomic_exchange_2
-;
   %1 = atomicrmw xchg i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -1777,7 +1718,6 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI59_0:
 ; CSKY-NEXT:    .long __atomic_exchange_2
-;
   %1 = atomicrmw xchg i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -1797,7 +1737,6 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI60_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_2
-;
   %1 = atomicrmw add i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -1817,7 +1756,6 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI61_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_2
-;
   %1 = atomicrmw add i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -1837,7 +1775,6 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI62_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_2
-;
   %1 = atomicrmw add i16* %a, i16 %b release
   ret i16 %1
 }
@@ -1857,7 +1794,6 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI63_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_2
-;
   %1 = atomicrmw add i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -1877,7 +1813,6 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI64_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_2
-;
   %1 = atomicrmw add i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -1897,7 +1832,6 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI65_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_2
-;
   %1 = atomicrmw sub i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -1917,7 +1851,6 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI66_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_2
-;
   %1 = atomicrmw sub i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -1937,7 +1870,6 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI67_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_2
-;
   %1 = atomicrmw sub i16* %a, i16 %b release
   ret i16 %1
 }
@@ -1957,7 +1889,6 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI68_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_2
-;
   %1 = atomicrmw sub i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -1977,7 +1908,6 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI69_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_2
-;
   %1 = atomicrmw sub i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -1997,7 +1927,6 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI70_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_2
-;
   %1 = atomicrmw and i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2017,7 +1946,6 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI71_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_2
-;
   %1 = atomicrmw and i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2037,7 +1965,6 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI72_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_2
-;
   %1 = atomicrmw and i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2057,7 +1984,6 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI73_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_2
-;
   %1 = atomicrmw and i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2077,7 +2003,6 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI74_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_2
-;
   %1 = atomicrmw and i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2097,7 +2022,6 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI75_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_2
-;
   %1 = atomicrmw nand i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2117,7 +2041,6 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI76_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_2
-;
   %1 = atomicrmw nand i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2137,7 +2060,6 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI77_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_2
-;
   %1 = atomicrmw nand i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2157,7 +2079,6 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI78_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_2
-;
   %1 = atomicrmw nand i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2177,7 +2098,6 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI79_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_2
-;
   %1 = atomicrmw nand i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2197,7 +2117,6 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI80_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_2
-;
   %1 = atomicrmw or i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2217,7 +2136,6 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI81_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_2
-;
   %1 = atomicrmw or i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2237,7 +2155,6 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI82_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_2
-;
   %1 = atomicrmw or i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2257,7 +2174,6 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI83_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_2
-;
   %1 = atomicrmw or i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2277,7 +2193,6 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI84_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_2
-;
   %1 = atomicrmw or i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2297,7 +2212,6 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI85_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_2
-;
   %1 = atomicrmw xor i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2317,7 +2231,6 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI86_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_2
-;
   %1 = atomicrmw xor i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2337,7 +2250,6 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI87_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_2
-;
   %1 = atomicrmw xor i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2357,7 +2269,6 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI88_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_2
-;
   %1 = atomicrmw xor i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2377,7 +2288,6 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI89_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_2
-;
   %1 = atomicrmw xor i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2386,10 +2296,10 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i16_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2415,10 +2325,10 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2426,7 +2336,6 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI90_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw max i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2435,10 +2344,10 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i16_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2464,10 +2373,10 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2475,7 +2384,6 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI91_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw max i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2484,10 +2392,10 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i16_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2513,10 +2421,10 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2524,7 +2432,6 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI92_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw max i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2533,10 +2440,10 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i16_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2562,10 +2469,10 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2573,7 +2480,6 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI93_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw max i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2582,10 +2488,10 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i16_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2611,10 +2517,10 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2622,7 +2528,6 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI94_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw max i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2631,10 +2536,10 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i16_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2660,10 +2565,10 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2671,7 +2576,6 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI95_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw min i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2680,10 +2584,10 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i16_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2709,10 +2613,10 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2720,7 +2624,6 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI96_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw min i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2729,10 +2632,10 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i16_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2758,10 +2661,10 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2769,7 +2672,6 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI97_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw min i16* %a, i16 %b release
   ret i16 %1
 }
@@ -2778,10 +2680,10 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i16_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2807,10 +2709,10 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2818,7 +2720,6 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI98_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw min i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2827,10 +2728,10 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i16_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2856,10 +2757,10 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2867,7 +2768,6 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI99_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw min i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -2876,10 +2776,10 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i16_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2905,10 +2805,10 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2916,7 +2816,6 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI100_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umax i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -2925,10 +2824,10 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i16_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -2954,10 +2853,10 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -2965,7 +2864,6 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI101_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umax i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -2974,10 +2872,10 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i16_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3003,10 +2901,10 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3014,7 +2912,6 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI102_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umax i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3023,10 +2920,10 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i16_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3052,10 +2949,10 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3063,7 +2960,6 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI103_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umax i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3072,10 +2968,10 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i16_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3101,10 +2997,10 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3112,7 +3008,6 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI104_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umax i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3121,10 +3016,10 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i16_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3150,10 +3045,10 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3161,7 +3056,6 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI105_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umin i16* %a, i16 %b monotonic
   ret i16 %1
 }
@@ -3170,10 +3064,10 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i16_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3199,10 +3093,10 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3210,7 +3104,6 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI106_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umin i16* %a, i16 %b acquire
   ret i16 %1
 }
@@ -3219,10 +3112,10 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i16_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3248,10 +3141,10 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3259,7 +3152,6 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI107_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umin i16* %a, i16 %b release
   ret i16 %1
 }
@@ -3268,10 +3160,10 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i16_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3297,10 +3189,10 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3308,7 +3200,6 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI108_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umin i16* %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -3317,10 +3208,10 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i16_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -3346,10 +3237,10 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -3357,7 +3248,6 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI109_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_2
-;
   %1 = atomicrmw umin i16* %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3377,7 +3267,6 @@ define i32 @atomicrmw_xchg_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI110_0:
 ; CSKY-NEXT:    .long __atomic_exchange_4
-;
   %1 = atomicrmw xchg i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3397,7 +3286,6 @@ define i32 @atomicrmw_xchg_i32_acquire(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI111_0:
 ; CSKY-NEXT:    .long __atomic_exchange_4
-;
   %1 = atomicrmw xchg i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -3417,7 +3305,6 @@ define i32 @atomicrmw_xchg_i32_release(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI112_0:
 ; CSKY-NEXT:    .long __atomic_exchange_4
-;
   %1 = atomicrmw xchg i32* %a, i32 %b release
   ret i32 %1
 }
@@ -3437,7 +3324,6 @@ define i32 @atomicrmw_xchg_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI113_0:
 ; CSKY-NEXT:    .long __atomic_exchange_4
-;
   %1 = atomicrmw xchg i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3457,7 +3343,6 @@ define i32 @atomicrmw_xchg_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI114_0:
 ; CSKY-NEXT:    .long __atomic_exchange_4
-;
   %1 = atomicrmw xchg i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -3477,7 +3362,6 @@ define i32 @atomicrmw_add_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI115_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_4
-;
   %1 = atomicrmw add i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3497,7 +3381,6 @@ define i32 @atomicrmw_add_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI116_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_4
-;
   %1 = atomicrmw add i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -3517,7 +3400,6 @@ define i32 @atomicrmw_add_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI117_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_4
-;
   %1 = atomicrmw add i32* %a, i32 %b release
   ret i32 %1
 }
@@ -3537,7 +3419,6 @@ define i32 @atomicrmw_add_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI118_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_4
-;
   %1 = atomicrmw add i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3557,7 +3438,6 @@ define i32 @atomicrmw_add_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI119_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_4
-;
   %1 = atomicrmw add i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -3577,7 +3457,6 @@ define i32 @atomicrmw_sub_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI120_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_4
-;
   %1 = atomicrmw sub i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3597,7 +3476,6 @@ define i32 @atomicrmw_sub_i32_acquire(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI121_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_4
-;
   %1 = atomicrmw sub i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -3617,7 +3495,6 @@ define i32 @atomicrmw_sub_i32_release(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI122_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_4
-;
   %1 = atomicrmw sub i32* %a, i32 %b release
   ret i32 %1
 }
@@ -3637,7 +3514,6 @@ define i32 @atomicrmw_sub_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI123_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_4
-;
   %1 = atomicrmw sub i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3657,7 +3533,6 @@ define i32 @atomicrmw_sub_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI124_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_4
-;
   %1 = atomicrmw sub i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -3677,7 +3552,6 @@ define i32 @atomicrmw_and_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI125_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_4
-;
   %1 = atomicrmw and i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3697,7 +3571,6 @@ define i32 @atomicrmw_and_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI126_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_4
-;
   %1 = atomicrmw and i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -3717,7 +3590,6 @@ define i32 @atomicrmw_and_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI127_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_4
-;
   %1 = atomicrmw and i32* %a, i32 %b release
   ret i32 %1
 }
@@ -3737,7 +3609,6 @@ define i32 @atomicrmw_and_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI128_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_4
-;
   %1 = atomicrmw and i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3757,7 +3628,6 @@ define i32 @atomicrmw_and_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI129_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_4
-;
   %1 = atomicrmw and i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -3777,7 +3647,6 @@ define i32 @atomicrmw_nand_i32_monotonic(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI130_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_4
-;
   %1 = atomicrmw nand i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3797,7 +3666,6 @@ define i32 @atomicrmw_nand_i32_acquire(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI131_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_4
-;
   %1 = atomicrmw nand i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -3817,7 +3685,6 @@ define i32 @atomicrmw_nand_i32_release(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI132_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_4
-;
   %1 = atomicrmw nand i32* %a, i32 %b release
   ret i32 %1
 }
@@ -3837,7 +3704,6 @@ define i32 @atomicrmw_nand_i32_acq_rel(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI133_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_4
-;
   %1 = atomicrmw nand i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3857,7 +3723,6 @@ define i32 @atomicrmw_nand_i32_seq_cst(i32* %a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI134_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_4
-;
   %1 = atomicrmw nand i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -3877,7 +3742,6 @@ define i32 @atomicrmw_or_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI135_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_4
-;
   %1 = atomicrmw or i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3897,7 +3761,6 @@ define i32 @atomicrmw_or_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI136_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_4
-;
   %1 = atomicrmw or i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -3917,7 +3780,6 @@ define i32 @atomicrmw_or_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI137_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_4
-;
   %1 = atomicrmw or i32* %a, i32 %b release
   ret i32 %1
 }
@@ -3937,7 +3799,6 @@ define i32 @atomicrmw_or_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI138_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_4
-;
   %1 = atomicrmw or i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3957,7 +3818,6 @@ define i32 @atomicrmw_or_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI139_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_4
-;
   %1 = atomicrmw or i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -3977,7 +3837,6 @@ define i32 @atomicrmw_xor_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI140_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_4
-;
   %1 = atomicrmw xor i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -3997,7 +3856,6 @@ define i32 @atomicrmw_xor_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI141_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_4
-;
   %1 = atomicrmw xor i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -4017,7 +3875,6 @@ define i32 @atomicrmw_xor_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI142_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_4
-;
   %1 = atomicrmw xor i32* %a, i32 %b release
   ret i32 %1
 }
@@ -4037,7 +3894,6 @@ define i32 @atomicrmw_xor_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI143_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_4
-;
   %1 = atomicrmw xor i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -4057,7 +3913,6 @@ define i32 @atomicrmw_xor_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI144_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_4
-;
   %1 = atomicrmw xor i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -4066,9 +3921,9 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i32_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4092,9 +3947,9 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4102,7 +3957,6 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI145_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw max i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -4111,9 +3965,9 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i32_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4137,9 +3991,9 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4147,7 +4001,6 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI146_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw max i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -4156,9 +4009,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i32_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4182,9 +4035,9 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4192,7 +4045,6 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI147_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw max i32* %a, i32 %b release
   ret i32 %1
 }
@@ -4201,9 +4053,9 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i32_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4227,9 +4079,9 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4237,7 +4089,6 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI148_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw max i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -4246,9 +4097,9 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i32_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4272,9 +4123,9 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4282,7 +4133,6 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI149_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw max i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -4291,9 +4141,9 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i32_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4317,9 +4167,9 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4327,7 +4177,6 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI150_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw min i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -4336,9 +4185,9 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i32_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4362,9 +4211,9 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4372,7 +4221,6 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI151_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw min i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -4381,9 +4229,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i32_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4407,9 +4255,9 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4417,7 +4265,6 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI152_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw min i32* %a, i32 %b release
   ret i32 %1
 }
@@ -4426,9 +4273,9 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i32_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4452,9 +4299,9 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4462,7 +4309,6 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI153_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw min i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -4471,9 +4317,9 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i32_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4497,9 +4343,9 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4507,7 +4353,6 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI154_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw min i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -4516,9 +4361,9 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i32_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4542,9 +4387,9 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4552,7 +4397,6 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI155_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umax i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -4561,9 +4405,9 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i32_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4587,9 +4431,9 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4597,7 +4441,6 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI156_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umax i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -4606,9 +4449,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i32_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4632,9 +4475,9 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4642,7 +4485,6 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI157_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umax i32* %a, i32 %b release
   ret i32 %1
 }
@@ -4651,9 +4493,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i32_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4677,9 +4519,9 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4687,7 +4529,6 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI158_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umax i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -4696,9 +4537,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i32_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4722,9 +4563,9 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4732,7 +4573,6 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI159_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umax i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -4741,9 +4581,9 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i32_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4767,9 +4607,9 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4777,7 +4617,6 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI160_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umin i32* %a, i32 %b monotonic
   ret i32 %1
 }
@@ -4786,9 +4625,9 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i32_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4812,9 +4651,9 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4822,7 +4661,6 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI161_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umin i32* %a, i32 %b acquire
   ret i32 %1
 }
@@ -4831,9 +4669,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i32_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4857,9 +4695,9 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4867,7 +4705,6 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI162_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umin i32* %a, i32 %b release
   ret i32 %1
 }
@@ -4876,9 +4713,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i32_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4902,9 +4739,9 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4912,7 +4749,6 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI163_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umin i32* %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -4921,9 +4757,9 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i32_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 16
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 8
 ; CSKY-NEXT:    mov16 l0, a1
@@ -4947,9 +4783,9 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 8
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 16
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -4957,7 +4793,6 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI164_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_4
-;
   %1 = atomicrmw umin i32* %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -4977,7 +4812,6 @@ define i64 @atomicrmw_xchg_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI165_0:
 ; CSKY-NEXT:    .long __atomic_exchange_8
-;
   %1 = atomicrmw xchg i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -4997,7 +4831,6 @@ define i64 @atomicrmw_xchg_i64_acquire(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI166_0:
 ; CSKY-NEXT:    .long __atomic_exchange_8
-;
   %1 = atomicrmw xchg i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5017,7 +4850,6 @@ define i64 @atomicrmw_xchg_i64_release(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI167_0:
 ; CSKY-NEXT:    .long __atomic_exchange_8
-;
   %1 = atomicrmw xchg i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5037,7 +4869,6 @@ define i64 @atomicrmw_xchg_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI168_0:
 ; CSKY-NEXT:    .long __atomic_exchange_8
-;
   %1 = atomicrmw xchg i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5057,7 +4888,6 @@ define i64 @atomicrmw_xchg_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI169_0:
 ; CSKY-NEXT:    .long __atomic_exchange_8
-;
   %1 = atomicrmw xchg i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5077,7 +4907,6 @@ define i64 @atomicrmw_add_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI170_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_8
-;
   %1 = atomicrmw add i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5097,7 +4926,6 @@ define i64 @atomicrmw_add_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI171_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_8
-;
   %1 = atomicrmw add i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5117,7 +4945,6 @@ define i64 @atomicrmw_add_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI172_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_8
-;
   %1 = atomicrmw add i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5137,7 +4964,6 @@ define i64 @atomicrmw_add_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI173_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_8
-;
   %1 = atomicrmw add i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5157,7 +4983,6 @@ define i64 @atomicrmw_add_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI174_0:
 ; CSKY-NEXT:    .long __atomic_fetch_add_8
-;
   %1 = atomicrmw add i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5177,7 +5002,6 @@ define i64 @atomicrmw_sub_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI175_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_8
-;
   %1 = atomicrmw sub i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5197,7 +5021,6 @@ define i64 @atomicrmw_sub_i64_acquire(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI176_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_8
-;
   %1 = atomicrmw sub i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5217,7 +5040,6 @@ define i64 @atomicrmw_sub_i64_release(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI177_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_8
-;
   %1 = atomicrmw sub i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5237,7 +5059,6 @@ define i64 @atomicrmw_sub_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI178_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_8
-;
   %1 = atomicrmw sub i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5257,7 +5078,6 @@ define i64 @atomicrmw_sub_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI179_0:
 ; CSKY-NEXT:    .long __atomic_fetch_sub_8
-;
   %1 = atomicrmw sub i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5277,7 +5097,6 @@ define i64 @atomicrmw_and_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI180_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_8
-;
   %1 = atomicrmw and i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5297,7 +5116,6 @@ define i64 @atomicrmw_and_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI181_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_8
-;
   %1 = atomicrmw and i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5317,7 +5135,6 @@ define i64 @atomicrmw_and_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI182_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_8
-;
   %1 = atomicrmw and i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5337,7 +5154,6 @@ define i64 @atomicrmw_and_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI183_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_8
-;
   %1 = atomicrmw and i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5357,7 +5173,6 @@ define i64 @atomicrmw_and_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI184_0:
 ; CSKY-NEXT:    .long __atomic_fetch_and_8
-;
   %1 = atomicrmw and i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5377,7 +5192,6 @@ define i64 @atomicrmw_nand_i64_monotonic(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI185_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_8
-;
   %1 = atomicrmw nand i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5397,7 +5211,6 @@ define i64 @atomicrmw_nand_i64_acquire(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI186_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_8
-;
   %1 = atomicrmw nand i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5417,7 +5230,6 @@ define i64 @atomicrmw_nand_i64_release(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI187_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_8
-;
   %1 = atomicrmw nand i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5437,7 +5249,6 @@ define i64 @atomicrmw_nand_i64_acq_rel(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI188_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_8
-;
   %1 = atomicrmw nand i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5457,7 +5268,6 @@ define i64 @atomicrmw_nand_i64_seq_cst(i64* %a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI189_0:
 ; CSKY-NEXT:    .long __atomic_fetch_nand_8
-;
   %1 = atomicrmw nand i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5477,7 +5287,6 @@ define i64 @atomicrmw_or_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI190_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_8
-;
   %1 = atomicrmw or i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5497,7 +5306,6 @@ define i64 @atomicrmw_or_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI191_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_8
-;
   %1 = atomicrmw or i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5517,7 +5325,6 @@ define i64 @atomicrmw_or_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI192_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_8
-;
   %1 = atomicrmw or i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5537,7 +5344,6 @@ define i64 @atomicrmw_or_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI193_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_8
-;
   %1 = atomicrmw or i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5557,7 +5363,6 @@ define i64 @atomicrmw_or_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI194_0:
 ; CSKY-NEXT:    .long __atomic_fetch_or_8
-;
   %1 = atomicrmw or i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5577,7 +5382,6 @@ define i64 @atomicrmw_xor_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI195_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_8
-;
   %1 = atomicrmw xor i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5597,7 +5401,6 @@ define i64 @atomicrmw_xor_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI196_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_8
-;
   %1 = atomicrmw xor i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5617,7 +5420,6 @@ define i64 @atomicrmw_xor_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI197_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_8
-;
   %1 = atomicrmw xor i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5637,7 +5439,6 @@ define i64 @atomicrmw_xor_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI198_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_8
-;
   %1 = atomicrmw xor i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5657,7 +5458,6 @@ define i64 @atomicrmw_xor_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI199_0:
 ; CSKY-NEXT:    .long __atomic_fetch_xor_8
-;
   %1 = atomicrmw xor i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -5666,10 +5466,10 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i64_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
 ; CSKY-NEXT:    mov16 l0, a2
@@ -5689,12 +5489,12 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmphs16 l1, a0
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -5713,10 +5513,10 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -5724,7 +5524,6 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI200_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw max i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5733,10 +5532,10 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i64_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
 ; CSKY-NEXT:    mov16 l0, a2
@@ -5756,12 +5555,12 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmphs16 l1, a0
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -5780,10 +5579,10 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -5791,7 +5590,6 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI201_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw max i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -5800,10 +5598,10 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i64_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
@@ -5825,12 +5623,12 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmphs16 l1, a0
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -5850,10 +5648,10 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -5861,7 +5659,6 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI202_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw max i64* %a, i64 %b release
   ret i64 %1
 }
@@ -5870,10 +5667,10 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i64_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
@@ -5895,12 +5692,12 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmphs16 l1, a0
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -5920,10 +5717,10 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -5931,7 +5728,6 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI203_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw max i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -5940,10 +5736,10 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_max_i64_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
 ; CSKY-NEXT:    mov16 l0, a2
@@ -5963,12 +5759,12 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmphs16 l1, a0
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -5987,10 +5783,10 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -5998,7 +5794,6 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI204_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw max i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -6007,10 +5802,10 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i64_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6030,12 +5825,12 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmplt16 l0, a1
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a2, a3
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6054,10 +5849,10 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6065,7 +5860,6 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI205_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw min i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -6074,10 +5868,10 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i64_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6097,12 +5891,12 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmplt16 l0, a1
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a2, a3
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6121,10 +5915,10 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6132,7 +5926,6 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI206_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw min i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -6141,10 +5934,10 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i64_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
@@ -6166,12 +5959,12 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmplt16 l0, a1
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a2, a3
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6191,10 +5984,10 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6202,7 +5995,6 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI207_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw min i64* %a, i64 %b release
   ret i64 %1
 }
@@ -6211,10 +6003,10 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i64_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
@@ -6236,12 +6028,12 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmplt16 l0, a1
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a2, a3
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6261,10 +6053,10 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6272,7 +6064,6 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI208_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw min i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -6281,10 +6072,10 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_min_i64_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 28
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6304,12 +6095,12 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    cmplt16 l0, a1
 ; CSKY-NEXT:    mvcv16 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 16)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a2, a3
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6328,10 +6119,10 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 28
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6339,7 +6130,6 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI209_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw min i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -6348,10 +6138,10 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i64_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 24
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6372,7 +6162,7 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    ld32.w t0, (sp, 12)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6391,10 +6181,10 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6402,7 +6192,6 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI210_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umax i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -6411,10 +6200,10 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i64_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 24
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6435,7 +6224,7 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    ld32.w t0, (sp, 12)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6454,10 +6243,10 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6465,7 +6254,6 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI211_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umax i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -6474,10 +6262,10 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i64_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 24
@@ -6500,7 +6288,7 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    ld32.w t0, (sp, 12)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6520,10 +6308,10 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6531,7 +6319,6 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI212_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umax i64* %a, i64 %b release
   ret i64 %1
 }
@@ -6540,10 +6327,10 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i64_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 24
@@ -6566,7 +6353,7 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    ld32.w t0, (sp, 12)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6586,10 +6373,10 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6597,7 +6384,6 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI213_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umax i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -6606,10 +6392,10 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umax_i64_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 24
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6630,7 +6416,7 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    ld32.w t0, (sp, 12)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6649,10 +6435,10 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6660,7 +6446,6 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI214_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umax i64* %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -6669,10 +6454,10 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i64_monotonic:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 32
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6693,15 +6478,15 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    st16.w a2, (sp, 20)
 ; CSKY-NEXT:    ld16.w a2, (sp, 16)
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 20)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6720,10 +6505,10 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 32
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6731,7 +6516,6 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI215_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umin i64* %a, i64 %b monotonic
   ret i64 %1
 }
@@ -6740,10 +6524,10 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i64_acquire:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 32
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6764,15 +6548,15 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    st16.w a2, (sp, 20)
 ; CSKY-NEXT:    ld16.w a2, (sp, 16)
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 20)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6791,10 +6575,10 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 32
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6802,7 +6586,6 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI216_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umin i64* %a, i64 %b acquire
   ret i64 %1
 }
@@ -6811,10 +6594,10 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i64_release:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 32
@@ -6837,15 +6620,15 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    st16.w a2, (sp, 20)
 ; CSKY-NEXT:    ld16.w a2, (sp, 16)
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 20)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6865,10 +6648,10 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 32
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6876,7 +6659,6 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI217_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umin i64* %a, i64 %b release
   ret i64 %1
 }
@@ -6885,10 +6667,10 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i64_acq_rel:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 24
-; CSKY-NEXT:    st16.w l3, (sp, 20)
-; CSKY-NEXT:    st16.w l2, (sp, 16)
-; CSKY-NEXT:    st16.w l1, (sp, 12)
-; CSKY-NEXT:    st16.w l0, (sp, 8)
+; CSKY-NEXT:    st16.w l3, (sp, 20) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 8) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w l4, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 32
@@ -6911,15 +6693,15 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    st16.w a2, (sp, 20)
 ; CSKY-NEXT:    ld16.w a2, (sp, 16)
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 20)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -6939,10 +6721,10 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    addi16 sp, sp, 32
 ; CSKY-NEXT:    ld32.w l4, (sp, 0) # 4-byte Folded Reload
 ; CSKY-NEXT:    ld32.w lr, (sp, 4) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 8)
-; CSKY-NEXT:    ld16.w l1, (sp, 12)
-; CSKY-NEXT:    ld16.w l2, (sp, 16)
-; CSKY-NEXT:    ld16.w l3, (sp, 20)
+; CSKY-NEXT:    ld16.w l0, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 16) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 20) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 24
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -6950,7 +6732,6 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI218_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umin i64* %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -6959,10 +6740,10 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-LABEL: atomicrmw_umin_i64_seq_cst:
 ; CSKY:       # %bb.0:
 ; CSKY-NEXT:    subi16 sp, sp, 20
-; CSKY-NEXT:    st16.w l3, (sp, 16)
-; CSKY-NEXT:    st16.w l2, (sp, 12)
-; CSKY-NEXT:    st16.w l1, (sp, 8)
-; CSKY-NEXT:    st16.w l0, (sp, 4)
+; CSKY-NEXT:    st16.w l3, (sp, 16) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l2, (sp, 12) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l1, (sp, 8) # 4-byte Folded Spill
+; CSKY-NEXT:    st16.w l0, (sp, 4) # 4-byte Folded Spill
 ; CSKY-NEXT:    st32.w lr, (sp, 0) # 4-byte Folded Spill
 ; CSKY-NEXT:    subi16 sp, sp, 32
 ; CSKY-NEXT:    mov16 l0, a2
@@ -6983,15 +6764,15 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    st16.w a2, (sp, 20)
 ; CSKY-NEXT:    ld16.w a2, (sp, 16)
-; CSKY-NEXT:    btsti32 a2, 0
+; CSKY-NEXT:    btsti16 a2, 0
 ; CSKY-NEXT:    mvc32 a2
 ; CSKY-NEXT:    ld16.w a3, (sp, 12)
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mvc32 a3
 ; CSKY-NEXT:    ld32.w t0, (sp, 20)
 ; CSKY-NEXT:    btsti32 t0, 0
 ; CSKY-NEXT:    movf32 a3, a2
-; CSKY-NEXT:    btsti32 a3, 0
+; CSKY-NEXT:    btsti16 a3, 0
 ; CSKY-NEXT:    mov16 a2, l1
 ; CSKY-NEXT:    movt32 a2, a0
 ; CSKY-NEXT:    mov16 a3, l0
@@ -7010,10 +6791,10 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:  # %bb.2: # %atomicrmw.end
 ; CSKY-NEXT:    addi16 sp, sp, 32
 ; CSKY-NEXT:    ld32.w lr, (sp, 0) # 4-byte Folded Reload
-; CSKY-NEXT:    ld16.w l0, (sp, 4)
-; CSKY-NEXT:    ld16.w l1, (sp, 8)
-; CSKY-NEXT:    ld16.w l2, (sp, 12)
-; CSKY-NEXT:    ld16.w l3, (sp, 16)
+; CSKY-NEXT:    ld16.w l0, (sp, 4) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l1, (sp, 8) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l2, (sp, 12) # 4-byte Folded Reload
+; CSKY-NEXT:    ld16.w l3, (sp, 16) # 4-byte Folded Reload
 ; CSKY-NEXT:    addi16 sp, sp, 20
 ; CSKY-NEXT:    rts16
 ; CSKY-NEXT:    .p2align 1
@@ -7021,7 +6802,6 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind {
 ; CSKY-NEXT:    .p2align 2
 ; CSKY-NEXT:  .LCPI219_0:
 ; CSKY-NEXT:    .long __atomic_compare_exchange_8
-;
   %1 = atomicrmw umin i64* %a, i64 %b seq_cst
   ret i64 %1
 }

diff  --git a/llvm/test/CodeGen/CSKY/base-i.ll b/llvm/test/CodeGen/CSKY/base-i.ll
index 3848053bde079..132a39f92c4b4 100644
--- a/llvm/test/CodeGen/CSKY/base-i.ll
+++ b/llvm/test/CodeGen/CSKY/base-i.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -csky-no-aliases -mattr=+e2 -mattr=+2e3 < %s -mtriple=csky | FileCheck %s
-; RUN: llc -verify-machineinstrs -csky-no-aliases -mattr=+btst16 < %s -mtriple=csky | FileCheck %s --check-prefix=GENERIC
+; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky | FileCheck %s --check-prefix=GENERIC
 
 define i32 @addRR(i32 %x, i32 %y) {
 ; CHECK-LABEL: addRR:
@@ -279,9 +279,9 @@ define i64 @SUB_LONG(i64 %x, i64 %y) {
 ; CHECK-NEXT:    setc32
 ; CHECK-NEXT:    subc32 a0, a2, a0
 ; CHECK-NEXT:    mvcv16 a2
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    mvcv16 a2
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    subc32 a1, a3, a1
 ; CHECK-NEXT:    rts16
 ;

diff  --git a/llvm/test/CodeGen/CSKY/br.ll b/llvm/test/CodeGen/CSKY/br.ll
index 688b40128a2a3..d4a1e578362cc 100644
--- a/llvm/test/CodeGen/CSKY/br.ll
+++ b/llvm/test/CodeGen/CSKY/br.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+2e3 | FileCheck %s
-; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+btst16 | FileCheck %s --check-prefix=GENERIC
+; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky  | FileCheck %s --check-prefix=GENERIC
 
 ;EQ
 define i32 @brRR_eq(i32 %x, i32 %y) {
@@ -1390,15 +1390,15 @@ define i64 @brRR_i64_ugt(i64 %x, i64 %y) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 12)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB35_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1473,9 +1473,9 @@ define i64 @brRI_i64_ugt(i64 %x) {
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    movi16 a1, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB36_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1580,9 +1580,9 @@ define i64 @brRR_i64_uge(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a2, a0
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB38_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1650,9 +1650,9 @@ define i64 @brRI_i64_uge(i64 %x) {
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    movi16 a1, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB39_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1717,15 +1717,15 @@ define i64 @brRR_i64_ult(i64 %x, i64 %y) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 12)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB40_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1796,7 +1796,7 @@ define i64 @brRI_i64_ult(i64 %x) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB41_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1861,9 +1861,9 @@ define i64 @brRR_i64_ule(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a0, a2
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB42_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -1927,7 +1927,7 @@ define i64 @brRI_i64_ule(i64 %x) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB43_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2034,12 +2034,12 @@ define i64 @brRR_i64_sgt(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmplt16 a1, a3
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB45_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2106,12 +2106,12 @@ define i64 @brRI_i64_sgt(i64 %x) {
 ; CHECK-NEXT:    cmphsi16 a0, 11
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB46_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2179,12 +2179,12 @@ define i64 @brR0_i64_sgt(i64 %x) {
 ; CHECK-NEXT:    cmpnei16 a0, 0
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB47_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2252,12 +2252,12 @@ define i64 @brRR_i64_sge(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a2, a0
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB48_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2329,12 +2329,12 @@ define i64 @brRI_i64_sge(i64 %x) {
 ; CHECK-NEXT:    cmphsi16 a0, 10
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB49_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2443,12 +2443,12 @@ define i64 @brRR_i64_slt(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmplt16 a3, a1
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB51_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2516,12 +2516,12 @@ define i64 @brRI_i64_slt(i64 %x) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB52_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2640,12 +2640,12 @@ define i64 @brRR_i64_sle(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a0, a2
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB54_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2718,12 +2718,12 @@ define i64 @brRI_i64_sle(i64 %x) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB55_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -2793,15 +2793,15 @@ define i64 @brR0_i64_sle(i64 %x) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 12)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    bt32 .LBB56_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -5500,7 +5500,7 @@ define i1 @brRI_i1_eq(i1 %x) {
 ; CHECK-LABEL: brRI_i1_eq:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi32 a0, a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bf32 .LBB117_2
 ; CHECK-NEXT:  # %bb.1: # %label2
 ; CHECK-NEXT:    movi16 a0, 0
@@ -5539,7 +5539,7 @@ define i1 @brR0_i1_eq(i1 %x) {
 ; CHECK-LABEL: brR0_i1_eq:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi32 a0, a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bf32 .LBB118_2
 ; CHECK-NEXT:  # %bb.1: # %label2
 ; CHECK-NEXT:    movi16 a0, 0
@@ -5864,7 +5864,7 @@ define i1 @brRI_i1_uge(i1 %x) {
 ; CHECK-LABEL: brRI_i1_uge:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movi16 a0, 0
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB126_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -5948,7 +5948,7 @@ define i1 @brRI_i1_ult(i1 %x) {
 ; CHECK-LABEL: brRI_i1_ult:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movi16 a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB128_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6033,7 +6033,7 @@ define i1 @brRI_i1_ule(i1 %x) {
 ; CHECK-LABEL: brRI_i1_ule:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi32 a0, a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB130_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6074,7 +6074,7 @@ define i1 @brR0_i1_ule(i1 %x) {
 ; CHECK-LABEL: brR0_i1_ule:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi32 a0, a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB131_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6160,7 +6160,7 @@ define i1 @brRI_i1_sgt(i1 %x) {
 ; CHECK-LABEL: brRI_i1_sgt:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movi16 a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB133_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6200,7 +6200,7 @@ define i1 @brR0_i1_sgt(i1 %x) {
 ; CHECK-LABEL: brR0_i1_sgt:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movi16 a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB134_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6285,7 +6285,7 @@ define i1 @brRI_i1_sge(i1 %x) {
 ; CHECK-LABEL: brRI_i1_sge:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi32 a0, a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB136_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6326,7 +6326,7 @@ define i1 @brR0_i1_sge(i1 %x) {
 ; CHECK-LABEL: brR0_i1_sge:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    andi32 a0, a0, 1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB137_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6537,7 +6537,7 @@ define i1 @brRI_i1_sle(i1 %x) {
 ; CHECK-LABEL: brRI_i1_sle:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movi16 a0, 0
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB142_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1
@@ -6577,7 +6577,7 @@ define i1 @brR0_i1_sle(i1 %x) {
 ; CHECK-LABEL: brR0_i1_sle:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movi16 a0, 0
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB143_2
 ; CHECK-NEXT:  # %bb.1: # %label1
 ; CHECK-NEXT:    movi16 a0, 1

diff  --git a/llvm/test/CodeGen/CSKY/cmp-i.ll b/llvm/test/CodeGen/CSKY/cmp-i.ll
index 09cbdaf214d14..ceb78aba6fc73 100644
--- a/llvm/test/CodeGen/CSKY/cmp-i.ll
+++ b/llvm/test/CodeGen/CSKY/cmp-i.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+2e3 | FileCheck %s
-; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+btst16 | FileCheck %s --check-prefix=GENERIC
+; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky  | FileCheck %s --check-prefix=GENERIC
 
 ;eq
 define i1 @icmpRR_eq(i32 %x, i32 %y) {
@@ -687,7 +687,7 @@ define i1 @ICMP_LONG_ugt(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a0, a2
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 8
@@ -1016,13 +1016,13 @@ define i1 @ICMP_LONG_uge(i64 %x, i64 %y) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 12)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 16
 ; CHECK-NEXT:    rts16
@@ -1345,7 +1345,7 @@ define i1 @ICMP_LONG_ult(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a2, a0
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 8
@@ -1658,13 +1658,13 @@ define i1 @ICMP_LONG_ule(i64 %x, i64 %y) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 12)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 16
 ; CHECK-NEXT:    rts16
@@ -1718,7 +1718,7 @@ define i1 @ICMP_LONG_I_ule(i64 %x) {
 ; CHECK-NEXT:    mvcv16 a1
 ; CHECK-NEXT:    movi16 a0, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 8
 ; CHECK-NEXT:    rts16
@@ -2007,10 +2007,10 @@ define i1 @ICMP_LONG_sgt(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a0, a2
 ; CHECK-NEXT:    mvcv16 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16
@@ -2061,10 +2061,10 @@ define i1 @ICMP_LONG_I_sgt(i64 %x) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 4)
 ; CHECK-NEXT:    ld16.w a0, (sp, 8)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movf32 a0, a2
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16
@@ -2340,10 +2340,10 @@ define i1 @ICMP_LONG_sge(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmplt16 a3, a1
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16
@@ -2389,13 +2389,13 @@ define i1 @ICMP_LONG_I_sge(i64 %x) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 12)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 16
 ; CHECK-NEXT:    rts16
@@ -2671,10 +2671,10 @@ define i1 @ICMP_LONG_slt(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmphs16 a2, a0
 ; CHECK-NEXT:    mvcv16 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16
@@ -2724,10 +2724,10 @@ define i1 @ICMP_LONG_I_slt(i64 %x) {
 ; CHECK-NEXT:    cmpnei16 a0, 0
 ; CHECK-NEXT:    mvcv16 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16
@@ -2998,10 +2998,10 @@ define i1 @ICMP_LONG_sle(i64 %x, i64 %y) {
 ; CHECK-NEXT:    cmplt16 a1, a3
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16
@@ -3046,10 +3046,10 @@ define i1 @ICMP_LONG_I_sle(i64 %x) {
 ; CHECK-NEXT:    cmphsi16 a0, 2
 ; CHECK-NEXT:    mvcv16 a1
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 sp, sp, 12
 ; CHECK-NEXT:    rts16

diff  --git a/llvm/test/CodeGen/CSKY/fpu/br-d.ll b/llvm/test/CodeGen/CSKY/fpu/br-d.ll
index 9e7d804c0f642..95e7d07231439 100644
--- a/llvm/test/CodeGen/CSKY/fpu/br-d.ll
+++ b/llvm/test/CodeGen/CSKY/fpu/br-d.ll
@@ -139,7 +139,7 @@ define i32 @brRR_one(double %x, double %y) {
 ; CHECK-DF-NEXT:    fcmpned vr1, vr0
 ; CHECK-DF-NEXT:    mvcv16 a1
 ; CHECK-DF-NEXT:    or16 a0, a1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB3_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -155,7 +155,7 @@ define i32 @brRR_one(double %x, double %y) {
 ; CHECK-DF2-NEXT:    fcmpne.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvcv16 a1
 ; CHECK-DF2-NEXT:    or16 a0, a1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB3_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -184,7 +184,7 @@ define i32 @brRI_one(double %x) {
 ; CHECK-DF-NEXT:    fcmpned vr0, vr1
 ; CHECK-DF-NEXT:    mvcv16 a1
 ; CHECK-DF-NEXT:    or16 a0, a1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB4_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -206,7 +206,7 @@ define i32 @brRI_one(double %x) {
 ; CHECK-DF2-NEXT:    fcmpne.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvcv16 a1
 ; CHECK-DF2-NEXT:    or16 a0, a1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB4_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -240,7 +240,7 @@ define i32 @brR0_one(double %x) {
 ; CHECK-DF-NEXT:    fcmpned vr0, vr1
 ; CHECK-DF-NEXT:    mvcv16 a1
 ; CHECK-DF-NEXT:    or16 a0, a1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB5_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -262,7 +262,7 @@ define i32 @brR0_one(double %x) {
 ; CHECK-DF2-NEXT:    fcmpne.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvcv16 a1
 ; CHECK-DF2-NEXT:    or16 a0, a1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB5_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -797,7 +797,7 @@ define i32 @brRR_ogt(double %x, double %y) {
 ; CHECK-DF-NEXT:    fcmpltd vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB18_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -811,7 +811,7 @@ define i32 @brRR_ogt(double %x, double %y) {
 ; CHECK-DF2-NEXT:    fcmplt.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB18_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -838,7 +838,7 @@ define i32 @brRI_ogt(double %x) {
 ; CHECK-DF-NEXT:    fcmpltd vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB19_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -858,7 +858,7 @@ define i32 @brRI_ogt(double %x) {
 ; CHECK-DF2-NEXT:    fcmplt.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB19_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -890,7 +890,7 @@ define i32 @brR0_ogt(double %x) {
 ; CHECK-DF-NEXT:    fcmpltd vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB20_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -910,7 +910,7 @@ define i32 @brR0_ogt(double %x) {
 ; CHECK-DF2-NEXT:    fcmplt.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB20_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -941,7 +941,7 @@ define i32 @brRR_oge(double %x, double %y) {
 ; CHECK-DF-NEXT:    fcmphsd vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB21_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -955,7 +955,7 @@ define i32 @brRR_oge(double %x, double %y) {
 ; CHECK-DF2-NEXT:    fcmphs.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB21_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -982,7 +982,7 @@ define i32 @brRI_oge(double %x) {
 ; CHECK-DF-NEXT:    fcmphsd vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB22_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1002,7 +1002,7 @@ define i32 @brRI_oge(double %x) {
 ; CHECK-DF2-NEXT:    fcmphs.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB22_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1034,7 +1034,7 @@ define i32 @brR0_oge(double %x) {
 ; CHECK-DF-NEXT:    fcmphsd vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB23_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1054,7 +1054,7 @@ define i32 @brR0_oge(double %x) {
 ; CHECK-DF2-NEXT:    fcmphs.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB23_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1085,7 +1085,7 @@ define i32 @brRR_olt(double %x, double %y) {
 ; CHECK-DF-NEXT:    fcmpltd vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB24_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1099,7 +1099,7 @@ define i32 @brRR_olt(double %x, double %y) {
 ; CHECK-DF2-NEXT:    fcmplt.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB24_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1126,7 +1126,7 @@ define i32 @brRI_olt(double %x) {
 ; CHECK-DF-NEXT:    fcmpltd vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB25_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1146,7 +1146,7 @@ define i32 @brRI_olt(double %x) {
 ; CHECK-DF2-NEXT:    fcmplt.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB25_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1178,7 +1178,7 @@ define i32 @brR0_olt(double %x) {
 ; CHECK-DF-NEXT:    fcmpltd vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB26_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1198,7 +1198,7 @@ define i32 @brR0_olt(double %x) {
 ; CHECK-DF2-NEXT:    fcmplt.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB26_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1229,7 +1229,7 @@ define i32 @brRR_ole(double %x, double %y) {
 ; CHECK-DF-NEXT:    fcmphsd vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB27_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1243,7 +1243,7 @@ define i32 @brRR_ole(double %x, double %y) {
 ; CHECK-DF2-NEXT:    fcmphs.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB27_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1270,7 +1270,7 @@ define i32 @brRI_ole(double %x) {
 ; CHECK-DF-NEXT:    fcmphsd vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB28_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1290,7 +1290,7 @@ define i32 @brRI_ole(double %x) {
 ; CHECK-DF2-NEXT:    fcmphs.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB28_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1322,7 +1322,7 @@ define i32 @brR0_ole(double %x) {
 ; CHECK-DF-NEXT:    fcmphsd vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a0
 ; CHECK-DF-NEXT:    xori32 a0, a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB29_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1342,7 +1342,7 @@ define i32 @brR0_ole(double %x) {
 ; CHECK-DF2-NEXT:    fcmphs.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a0
 ; CHECK-DF2-NEXT:    xori32 a0, a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB29_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1371,7 +1371,7 @@ define i32 @brRR_false(double %x, double %y) {
 ; CHECK-DF-LABEL: brRR_false:
 ; CHECK-DF:       # %bb.0: # %entry
 ; CHECK-DF-NEXT:    movi16 a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB30_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1383,7 +1383,7 @@ define i32 @brRR_false(double %x, double %y) {
 ; CHECK-DF2-LABEL: brRR_false:
 ; CHECK-DF2:       # %bb.0: # %entry
 ; CHECK-DF2-NEXT:    movi16 a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB30_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1406,7 +1406,7 @@ define i32 @brRI_false(double %x) {
 ; CHECK-DF-LABEL: brRI_false:
 ; CHECK-DF:       # %bb.0: # %entry
 ; CHECK-DF-NEXT:    movi16 a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB31_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1418,7 +1418,7 @@ define i32 @brRI_false(double %x) {
 ; CHECK-DF2-LABEL: brRI_false:
 ; CHECK-DF2:       # %bb.0: # %entry
 ; CHECK-DF2-NEXT:    movi16 a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB31_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1441,7 +1441,7 @@ define i32 @brR0_false(double %x) {
 ; CHECK-DF-LABEL: brR0_false:
 ; CHECK-DF:       # %bb.0: # %entry
 ; CHECK-DF-NEXT:    movi16 a0, 1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB32_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1453,7 +1453,7 @@ define i32 @brR0_false(double %x) {
 ; CHECK-DF2-LABEL: brR0_false:
 ; CHECK-DF2:       # %bb.0: # %entry
 ; CHECK-DF2-NEXT:    movi16 a0, 1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB32_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1583,7 +1583,7 @@ define i32 @brRR_ueq(double %x, double %y) {
 ; CHECK-DF-NEXT:    fcmpned vr1, vr0
 ; CHECK-DF-NEXT:    mvc32 a1
 ; CHECK-DF-NEXT:    and16 a0, a1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bf32 .LBB36_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label2
 ; CHECK-DF-NEXT:    movi16 a0, 0
@@ -1599,7 +1599,7 @@ define i32 @brRR_ueq(double %x, double %y) {
 ; CHECK-DF2-NEXT:    fcmpne.64 vr1, vr0
 ; CHECK-DF2-NEXT:    mvc32 a1
 ; CHECK-DF2-NEXT:    and16 a0, a1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bf32 .LBB36_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label2
 ; CHECK-DF2-NEXT:    movi16 a0, 0
@@ -1628,7 +1628,7 @@ define i32 @brRI_ueq(double %x) {
 ; CHECK-DF-NEXT:    fcmpned vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a1
 ; CHECK-DF-NEXT:    and16 a0, a1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bf32 .LBB37_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label2
 ; CHECK-DF-NEXT:    movi16 a0, 0
@@ -1650,7 +1650,7 @@ define i32 @brRI_ueq(double %x) {
 ; CHECK-DF2-NEXT:    fcmpne.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a1
 ; CHECK-DF2-NEXT:    and16 a0, a1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bf32 .LBB37_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label2
 ; CHECK-DF2-NEXT:    movi16 a0, 0
@@ -1684,7 +1684,7 @@ define i32 @brR0_ueq(double %x) {
 ; CHECK-DF-NEXT:    fcmpned vr0, vr1
 ; CHECK-DF-NEXT:    mvc32 a1
 ; CHECK-DF-NEXT:    and16 a0, a1
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bf32 .LBB38_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label2
 ; CHECK-DF-NEXT:    movi16 a0, 0
@@ -1706,7 +1706,7 @@ define i32 @brR0_ueq(double %x) {
 ; CHECK-DF2-NEXT:    fcmpne.64 vr0, vr1
 ; CHECK-DF2-NEXT:    mvc32 a1
 ; CHECK-DF2-NEXT:    and16 a0, a1
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bf32 .LBB38_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label2
 ; CHECK-DF2-NEXT:    movi16 a0, 0
@@ -1961,7 +1961,7 @@ define i32 @brRR_true(double %x, double %y) {
 ; CHECK-DF-LABEL: brRR_true:
 ; CHECK-DF:       # %bb.0: # %entry
 ; CHECK-DF-NEXT:    movi16 a0, 0
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB45_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -1973,7 +1973,7 @@ define i32 @brRR_true(double %x, double %y) {
 ; CHECK-DF2-LABEL: brRR_true:
 ; CHECK-DF2:       # %bb.0: # %entry
 ; CHECK-DF2-NEXT:    movi16 a0, 0
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB45_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -1996,7 +1996,7 @@ define i32 @brRI_true(double %x) {
 ; CHECK-DF-LABEL: brRI_true:
 ; CHECK-DF:       # %bb.0: # %entry
 ; CHECK-DF-NEXT:    movi16 a0, 0
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB46_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -2008,7 +2008,7 @@ define i32 @brRI_true(double %x) {
 ; CHECK-DF2-LABEL: brRI_true:
 ; CHECK-DF2:       # %bb.0: # %entry
 ; CHECK-DF2-NEXT:    movi16 a0, 0
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB46_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1
@@ -2031,7 +2031,7 @@ define i32 @brR0_true(double %x) {
 ; CHECK-DF-LABEL: brR0_true:
 ; CHECK-DF:       # %bb.0: # %entry
 ; CHECK-DF-NEXT:    movi16 a0, 0
-; CHECK-DF-NEXT:    btsti32 a0, 0
+; CHECK-DF-NEXT:    btsti16 a0, 0
 ; CHECK-DF-NEXT:    bt32 .LBB47_2
 ; CHECK-DF-NEXT:  # %bb.1: # %label1
 ; CHECK-DF-NEXT:    movi16 a0, 1
@@ -2043,7 +2043,7 @@ define i32 @brR0_true(double %x) {
 ; CHECK-DF2-LABEL: brR0_true:
 ; CHECK-DF2:       # %bb.0: # %entry
 ; CHECK-DF2-NEXT:    movi16 a0, 0
-; CHECK-DF2-NEXT:    btsti32 a0, 0
+; CHECK-DF2-NEXT:    btsti16 a0, 0
 ; CHECK-DF2-NEXT:    bt32 .LBB47_2
 ; CHECK-DF2-NEXT:  # %bb.1: # %label1
 ; CHECK-DF2-NEXT:    movi16 a0, 1

diff  --git a/llvm/test/CodeGen/CSKY/fpu/br-f.ll b/llvm/test/CodeGen/CSKY/fpu/br-f.ll
index eb13f20187430..cf9617db059c1 100644
--- a/llvm/test/CodeGen/CSKY/fpu/br-f.ll
+++ b/llvm/test/CodeGen/CSKY/fpu/br-f.ll
@@ -113,7 +113,7 @@ define i32 @brRR_one(float %x, float %y) {
 ; CHECK-SF-NEXT:    fcmpnes vr1, vr0
 ; CHECK-SF-NEXT:    mvcv16 a1
 ; CHECK-SF-NEXT:    or16 a0, a1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB3_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -129,7 +129,7 @@ define i32 @brRR_one(float %x, float %y) {
 ; CHECK-SF2-NEXT:    fcmpne.32 vr1, vr0
 ; CHECK-SF2-NEXT:    mvcv16 a1
 ; CHECK-SF2-NEXT:    or16 a0, a1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB3_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -157,7 +157,7 @@ define i32 @brRI_one(float %x) {
 ; CHECK-SF-NEXT:    fcmpuos vr0, vr0
 ; CHECK-SF-NEXT:    mvc32 a1
 ; CHECK-SF-NEXT:    or16 a0, a1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB4_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -175,7 +175,7 @@ define i32 @brRI_one(float %x) {
 ; CHECK-SF2-NEXT:    fcmpuo.32 vr0, vr0
 ; CHECK-SF2-NEXT:    mvc32 a1
 ; CHECK-SF2-NEXT:    or16 a0, a1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB4_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -201,7 +201,7 @@ define i32 @brR0_one(float %x) {
 ; CHECK-SF-NEXT:    fcmpznes vr0
 ; CHECK-SF-NEXT:    mvcv16 a1
 ; CHECK-SF-NEXT:    or16 a0, a1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB5_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -217,7 +217,7 @@ define i32 @brR0_one(float %x) {
 ; CHECK-SF2-NEXT:    fcmpnez.32 vr0
 ; CHECK-SF2-NEXT:    mvcv16 a1
 ; CHECK-SF2-NEXT:    or16 a0, a1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB5_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -646,7 +646,7 @@ define i32 @brRR_ogt(float %x, float %y) {
 ; CHECK-SF-NEXT:    fcmplts vr0, vr1
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB18_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -660,7 +660,7 @@ define i32 @brRR_ogt(float %x, float %y) {
 ; CHECK-SF2-NEXT:    fcmplt.32 vr0, vr1
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB18_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -686,7 +686,7 @@ define i32 @brRI_ogt(float %x) {
 ; CHECK-SF-NEXT:    fcmplts vr1, vr0
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB19_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -702,7 +702,7 @@ define i32 @brRI_ogt(float %x) {
 ; CHECK-SF2-NEXT:    fcmplt.32 vr1, vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB19_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -726,7 +726,7 @@ define i32 @brR0_ogt(float %x) {
 ; CHECK-SF-NEXT:    fcmpzlss vr0
 ; CHECK-SF-NEXT:    mvcv16 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB20_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -740,7 +740,7 @@ define i32 @brR0_ogt(float %x) {
 ; CHECK-SF2-NEXT:    fcmphz.32 vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB20_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -765,7 +765,7 @@ define i32 @brRR_oge(float %x, float %y) {
 ; CHECK-SF-NEXT:    fcmphss vr1, vr0
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB21_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -779,7 +779,7 @@ define i32 @brRR_oge(float %x, float %y) {
 ; CHECK-SF2-NEXT:    fcmphs.32 vr1, vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB21_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -805,7 +805,7 @@ define i32 @brRI_oge(float %x) {
 ; CHECK-SF-NEXT:    fcmphss vr0, vr1
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB22_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -821,7 +821,7 @@ define i32 @brRI_oge(float %x) {
 ; CHECK-SF2-NEXT:    fcmphs.32 vr0, vr1
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB22_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -845,7 +845,7 @@ define i32 @brR0_oge(float %x) {
 ; CHECK-SF-NEXT:    fcmpzhss vr0
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB23_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -859,7 +859,7 @@ define i32 @brR0_oge(float %x) {
 ; CHECK-SF2-NEXT:    fcmphsz.32 vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB23_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -884,7 +884,7 @@ define i32 @brRR_olt(float %x, float %y) {
 ; CHECK-SF-NEXT:    fcmplts vr1, vr0
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB24_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -898,7 +898,7 @@ define i32 @brRR_olt(float %x, float %y) {
 ; CHECK-SF2-NEXT:    fcmplt.32 vr1, vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB24_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -924,7 +924,7 @@ define i32 @brRI_olt(float %x) {
 ; CHECK-SF-NEXT:    fcmplts vr0, vr1
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB25_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -940,7 +940,7 @@ define i32 @brRI_olt(float %x) {
 ; CHECK-SF2-NEXT:    fcmplt.32 vr0, vr1
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB25_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -964,7 +964,7 @@ define i32 @brR0_olt(float %x) {
 ; CHECK-SF-NEXT:    fcmpzhss vr0
 ; CHECK-SF-NEXT:    mvcv16 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB26_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -978,7 +978,7 @@ define i32 @brR0_olt(float %x) {
 ; CHECK-SF2-NEXT:    fcmpltz.32 vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB26_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1003,7 +1003,7 @@ define i32 @brRR_ole(float %x, float %y) {
 ; CHECK-SF-NEXT:    fcmphss vr0, vr1
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB27_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1017,7 +1017,7 @@ define i32 @brRR_ole(float %x, float %y) {
 ; CHECK-SF2-NEXT:    fcmphs.32 vr0, vr1
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB27_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1043,7 +1043,7 @@ define i32 @brRI_ole(float %x) {
 ; CHECK-SF-NEXT:    fcmphss vr1, vr0
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB28_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1059,7 +1059,7 @@ define i32 @brRI_ole(float %x) {
 ; CHECK-SF2-NEXT:    fcmphs.32 vr1, vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB28_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1083,7 +1083,7 @@ define i32 @brR0_ole(float %x) {
 ; CHECK-SF-NEXT:    fcmpzlss vr0
 ; CHECK-SF-NEXT:    mvc32 a0
 ; CHECK-SF-NEXT:    xori32 a0, a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB29_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1097,7 +1097,7 @@ define i32 @brR0_ole(float %x) {
 ; CHECK-SF2-NEXT:    fcmplsz.32 vr0
 ; CHECK-SF2-NEXT:    mvc32 a0
 ; CHECK-SF2-NEXT:    xori32 a0, a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB29_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1120,7 +1120,7 @@ define i32 @brRR_false(float %x, float %y) {
 ; CHECK-SF-LABEL: brRR_false:
 ; CHECK-SF:       # %bb.0: # %entry
 ; CHECK-SF-NEXT:    movi16 a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB30_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1132,7 +1132,7 @@ define i32 @brRR_false(float %x, float %y) {
 ; CHECK-SF2-LABEL: brRR_false:
 ; CHECK-SF2:       # %bb.0: # %entry
 ; CHECK-SF2-NEXT:    movi16 a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB30_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1154,7 +1154,7 @@ define i32 @brRI_false(float %x) {
 ; CHECK-SF-LABEL: brRI_false:
 ; CHECK-SF:       # %bb.0: # %entry
 ; CHECK-SF-NEXT:    movi16 a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB31_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1166,7 +1166,7 @@ define i32 @brRI_false(float %x) {
 ; CHECK-SF2-LABEL: brRI_false:
 ; CHECK-SF2:       # %bb.0: # %entry
 ; CHECK-SF2-NEXT:    movi16 a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB31_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1188,7 +1188,7 @@ define i32 @brR0_false(float %x) {
 ; CHECK-SF-LABEL: brR0_false:
 ; CHECK-SF:       # %bb.0: # %entry
 ; CHECK-SF-NEXT:    movi16 a0, 1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB32_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1200,7 +1200,7 @@ define i32 @brR0_false(float %x) {
 ; CHECK-SF2-LABEL: brR0_false:
 ; CHECK-SF2:       # %bb.0: # %entry
 ; CHECK-SF2-NEXT:    movi16 a0, 1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB32_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1326,7 +1326,7 @@ define i32 @brRR_ueq(float %x, float %y) {
 ; CHECK-SF-NEXT:    fcmpnes vr1, vr0
 ; CHECK-SF-NEXT:    mvc32 a1
 ; CHECK-SF-NEXT:    and16 a0, a1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bf32 .LBB36_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label2
 ; CHECK-SF-NEXT:    movi16 a0, 0
@@ -1342,7 +1342,7 @@ define i32 @brRR_ueq(float %x, float %y) {
 ; CHECK-SF2-NEXT:    fcmpne.32 vr1, vr0
 ; CHECK-SF2-NEXT:    mvc32 a1
 ; CHECK-SF2-NEXT:    and16 a0, a1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bf32 .LBB36_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label2
 ; CHECK-SF2-NEXT:    movi16 a0, 0
@@ -1370,7 +1370,7 @@ define i32 @brRI_ueq(float %x) {
 ; CHECK-SF-NEXT:    fcmpuos vr0, vr0
 ; CHECK-SF-NEXT:    mvcv16 a1
 ; CHECK-SF-NEXT:    and16 a0, a1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bf32 .LBB37_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label2
 ; CHECK-SF-NEXT:    movi16 a0, 0
@@ -1388,7 +1388,7 @@ define i32 @brRI_ueq(float %x) {
 ; CHECK-SF2-NEXT:    fcmpuo.32 vr0, vr0
 ; CHECK-SF2-NEXT:    mvcv16 a1
 ; CHECK-SF2-NEXT:    and16 a0, a1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bf32 .LBB37_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label2
 ; CHECK-SF2-NEXT:    movi16 a0, 0
@@ -1414,7 +1414,7 @@ define i32 @brR0_ueq(float %x) {
 ; CHECK-SF-NEXT:    fcmpznes vr0
 ; CHECK-SF-NEXT:    mvc32 a1
 ; CHECK-SF-NEXT:    and16 a0, a1
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bf32 .LBB38_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label2
 ; CHECK-SF-NEXT:    movi16 a0, 0
@@ -1432,7 +1432,7 @@ define i32 @brR0_ueq(float %x) {
 ; CHECK-SF2-NEXT:    fcmpuo.32 vr0, vr0
 ; CHECK-SF2-NEXT:    mvcv16 a1
 ; CHECK-SF2-NEXT:    and16 a0, a1
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bf32 .LBB38_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label2
 ; CHECK-SF2-NEXT:    movi16 a0, 0
@@ -1653,7 +1653,7 @@ define i32 @brRR_true(float %x, float %y) {
 ; CHECK-SF-LABEL: brRR_true:
 ; CHECK-SF:       # %bb.0: # %entry
 ; CHECK-SF-NEXT:    movi16 a0, 0
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB45_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1665,7 +1665,7 @@ define i32 @brRR_true(float %x, float %y) {
 ; CHECK-SF2-LABEL: brRR_true:
 ; CHECK-SF2:       # %bb.0: # %entry
 ; CHECK-SF2-NEXT:    movi16 a0, 0
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB45_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1687,7 +1687,7 @@ define i32 @brRI_true(float %x) {
 ; CHECK-SF-LABEL: brRI_true:
 ; CHECK-SF:       # %bb.0: # %entry
 ; CHECK-SF-NEXT:    movi16 a0, 0
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB46_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1699,7 +1699,7 @@ define i32 @brRI_true(float %x) {
 ; CHECK-SF2-LABEL: brRI_true:
 ; CHECK-SF2:       # %bb.0: # %entry
 ; CHECK-SF2-NEXT:    movi16 a0, 0
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB46_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1
@@ -1721,7 +1721,7 @@ define i32 @brR0_true(float %x) {
 ; CHECK-SF-LABEL: brR0_true:
 ; CHECK-SF:       # %bb.0: # %entry
 ; CHECK-SF-NEXT:    movi16 a0, 0
-; CHECK-SF-NEXT:    btsti32 a0, 0
+; CHECK-SF-NEXT:    btsti16 a0, 0
 ; CHECK-SF-NEXT:    bt32 .LBB47_2
 ; CHECK-SF-NEXT:  # %bb.1: # %label1
 ; CHECK-SF-NEXT:    movi16 a0, 1
@@ -1733,7 +1733,7 @@ define i32 @brR0_true(float %x) {
 ; CHECK-SF2-LABEL: brR0_true:
 ; CHECK-SF2:       # %bb.0: # %entry
 ; CHECK-SF2-NEXT:    movi16 a0, 0
-; CHECK-SF2-NEXT:    btsti32 a0, 0
+; CHECK-SF2-NEXT:    btsti16 a0, 0
 ; CHECK-SF2-NEXT:    bt32 .LBB47_2
 ; CHECK-SF2-NEXT:  # %bb.1: # %label1
 ; CHECK-SF2-NEXT:    movi16 a0, 1

diff --git a/llvm/test/CodeGen/CSKY/fpu/select.ll b/llvm/test/CodeGen/CSKY/fpu/select.ll
index 1e84718114d88..7d64dc535fe50 100644
--- a/llvm/test/CodeGen/CSKY/fpu/select.ll
+++ b/llvm/test/CodeGen/CSKY/fpu/select.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+2e3,+hard-float,+fpuv2_sf,+fpuv2_df -float-abi=hard | FileCheck %s
 ; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+2e3,+hard-float,+fpuv3_sf,+fpuv3_df -float-abi=hard | FileCheck %s --check-prefix=CHECK-DF3
-; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+btst16,+hard-float,+fpuv2_sf,+fpuv2_df -float-abi=hard | FileCheck %s --check-prefix=GENERIC
+; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+hard-float,+fpuv2_sf,+fpuv2_df -float-abi=hard | FileCheck %s --check-prefix=GENERIC
 
 define float @selectRR_eq_float(i1 %x, float %n, float %m) {
 ; CHECK-LABEL: selectRR_eq_float:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    fmovs vr1, vr0
@@ -16,7 +16,7 @@ define float @selectRR_eq_float(i1 %x, float %n, float %m) {
 ;
 ; CHECK-DF3-LABEL: selectRR_eq_float:
 ; CHECK-DF3:       # %bb.0: # %entry
-; CHECK-DF3-NEXT:    btsti32 a0, 0
+; CHECK-DF3-NEXT:    btsti16 a0, 0
 ; CHECK-DF3-NEXT:    fsel.32 vr0, vr1, vr0
 ; CHECK-DF3-NEXT:    rts16
 ;
@@ -41,7 +41,7 @@ entry:
 define double @selectRR_eq_double(i1 %x, double %n, double %m) {
 ; CHECK-LABEL: selectRR_eq_double:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    bt32 .LBB1_2
 ; CHECK-NEXT:  # %bb.1: # %entry
 ; CHECK-NEXT:    fmovd vr1, vr0
@@ -51,7 +51,7 @@ define double @selectRR_eq_double(i1 %x, double %n, double %m) {
 ;
 ; CHECK-DF3-LABEL: selectRR_eq_double:
 ; CHECK-DF3:       # %bb.0: # %entry
-; CHECK-DF3-NEXT:    btsti32 a0, 0
+; CHECK-DF3-NEXT:    btsti16 a0, 0
 ; CHECK-DF3-NEXT:    fsel.64 vr0, vr1, vr0
 ; CHECK-DF3-NEXT:    rts16
 ;
@@ -72,4 +72,3 @@ entry:
   %ret = select i1 %x, double %m, double %n
   ret double %ret
 }
-

diff --git a/llvm/test/CodeGen/CSKY/select.ll b/llvm/test/CodeGen/CSKY/select.ll
index 8940ba6712d43..2416f8e488d08 100644
--- a/llvm/test/CodeGen/CSKY/select.ll
+++ b/llvm/test/CodeGen/CSKY/select.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+2e3 | FileCheck %s
-; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky -mattr=+btst16 | FileCheck %s --check-prefix=GENERIC
+; RUN: llc -verify-machineinstrs -csky-no-aliases < %s -mtriple=csky  | FileCheck %s --check-prefix=GENERIC
 
 define i32 @selectRR_eq_i32(i32 %x, i32 %y, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectRR_eq_i32:
@@ -109,7 +109,7 @@ entry:
 define i32 @selectC_eq_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_eq_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -301,7 +301,7 @@ define i64 @selectC_eq_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_eq_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -487,7 +487,7 @@ entry:
 define i16 @selectC_eq_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_eq_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -618,7 +618,7 @@ entry:
 define i8 @selectC_eq_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_eq_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -646,7 +646,7 @@ define i1 @selectRR_eq_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_eq_i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xor16 a0, a1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a3, a2
 ; CHECK-NEXT:    mov16 a0, a3
 ; CHECK-NEXT:    rts16
@@ -674,7 +674,7 @@ entry:
 define i1 @selectRI_eq_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRI_eq_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a1
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -701,7 +701,7 @@ entry:
 define i1 @selectRX_eq_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRX_eq_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -728,7 +728,7 @@ entry:
 define i1 @selectC_eq_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_eq_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -865,7 +865,7 @@ entry:
 define i32 @selectC_ne_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_ne_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -1063,7 +1063,7 @@ define i64 @selectC_ne_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_ne_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -1255,7 +1255,7 @@ entry:
 define i16 @selectC_ne_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_ne_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -1392,7 +1392,7 @@ entry:
 define i8 @selectC_ne_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_ne_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -1420,7 +1420,7 @@ define i1 @selectRR_ne_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_ne_i1:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    xor16 a0, a1
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a3
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -1448,7 +1448,7 @@ entry:
 define i1 @selectRI_ne_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRI_ne_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -1475,7 +1475,7 @@ entry:
 define i1 @selectRX_ne_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRX_ne_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a1
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -1502,7 +1502,7 @@ entry:
 define i1 @selectC_ne_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_ne_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -1635,7 +1635,7 @@ entry:
 define i32 @selectC_ugt_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_ugt_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -1672,11 +1672,11 @@ define i64 @selectRR_ugt_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmphs16 a0, a2
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    addi16 a2, sp, 8
 ; CHECK-NEXT:    addi16 a0, sp, 16
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a0
 ; CHECK-NEXT:    ld16.w a0, (a2, 0)
 ; CHECK-NEXT:    ld16.w a1, (a2, 4)
@@ -1740,7 +1740,7 @@ define i64 @selectRI_ugt_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -1812,7 +1812,7 @@ define i64 @selectRX_ugt_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -1883,7 +1883,7 @@ define i64 @selectC_ugt_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_ugt_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -2072,7 +2072,7 @@ entry:
 define i16 @selectC_ugt_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_ugt_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -2206,7 +2206,7 @@ entry:
 define i8 @selectC_ugt_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_ugt_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -2233,9 +2233,9 @@ entry:
 define i1 @selectRR_ugt_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_ugt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a3, a2
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a3
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -2271,7 +2271,7 @@ entry:
 define i1 @selectRI_ugt_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRI_ugt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -2318,7 +2318,7 @@ entry:
 define i1 @selectC_ugt_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_ugt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -2453,7 +2453,7 @@ entry:
 define i32 @selectC_uge_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_uge_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -2492,17 +2492,17 @@ define i64 @selectRR_uge_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 12)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    addi16 a2, sp, 16
 ; CHECK-NEXT:    addi16 a0, sp, 24
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a0
 ; CHECK-NEXT:    ld16.w a0, (a2, 0)
 ; CHECK-NEXT:    ld16.w a1, (a2, 4)
@@ -2573,7 +2573,7 @@ define i64 @selectRI_uge_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -2645,7 +2645,7 @@ define i64 @selectRX_uge_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -2716,7 +2716,7 @@ define i64 @selectC_uge_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_uge_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -2907,7 +2907,7 @@ entry:
 define i16 @selectC_uge_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_uge_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3043,7 +3043,7 @@ entry:
 define i8 @selectC_uge_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_uge_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3070,10 +3070,10 @@ entry:
 define i1 @selectRR_uge_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_uge_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mov16 a0, a3
 ; CHECK-NEXT:    movt32 a0, a2
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a0, a3
 ; CHECK-NEXT:    rts16
 ;
@@ -3124,7 +3124,7 @@ entry:
 define i1 @selectRX_uge_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRX_uge_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3151,7 +3151,7 @@ entry:
 define i1 @selectC_uge_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_uge_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3282,7 +3282,7 @@ entry:
 define i32 @selectC_ult_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_ult_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3319,11 +3319,11 @@ define i64 @selectRR_ult_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmphs16 a2, a0
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    addi16 a2, sp, 8
 ; CHECK-NEXT:    addi16 a0, sp, 16
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a0
 ; CHECK-NEXT:    ld16.w a0, (a2, 0)
 ; CHECK-NEXT:    ld16.w a1, (a2, 4)
@@ -3387,10 +3387,10 @@ define i64 @selectRI_ult_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    movi16 a1, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 0)
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -3454,7 +3454,7 @@ define i64 @selectRX_ult_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    movi16 a1, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -3524,7 +3524,7 @@ define i64 @selectC_ult_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_ult_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -3710,7 +3710,7 @@ entry:
 define i16 @selectC_ult_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_ult_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3841,7 +3841,7 @@ entry:
 define i8 @selectC_ult_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_ult_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -3868,9 +3868,9 @@ entry:
 define i1 @selectRR_ult_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_ult_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a3, a2
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a3
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -3928,7 +3928,7 @@ entry:
 define i1 @selectRX_ult_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRX_ult_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a1
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -3955,7 +3955,7 @@ entry:
 define i1 @selectC_ult_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_ult_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4087,7 +4087,7 @@ entry:
 define i32 @selectC_ule_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_ule_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4126,17 +4126,17 @@ define i64 @selectRR_ule_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    st16.w a0, (sp, 8)
 ; CHECK-NEXT:    ld16.w a0, (sp, 4)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mvc32 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 12)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    addi16 a2, sp, 16
 ; CHECK-NEXT:    addi16 a0, sp, 24
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a0
 ; CHECK-NEXT:    ld16.w a0, (a2, 0)
 ; CHECK-NEXT:    ld16.w a1, (a2, 4)
@@ -4207,10 +4207,10 @@ define i64 @selectRI_ule_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    movi16 a1, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 4)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    ld16.w a2, (sp, 0)
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -4274,7 +4274,7 @@ define i64 @selectRX_ule_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmpnei16 a1, 0
 ; CHECK-NEXT:    movi16 a1, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -4344,7 +4344,7 @@ define i64 @selectC_ule_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_ule_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -4532,7 +4532,7 @@ entry:
 define i16 @selectC_ule_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_ule_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4665,7 +4665,7 @@ entry:
 define i8 @selectC_ule_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_ule_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4692,10 +4692,10 @@ entry:
 define i1 @selectRR_ule_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_ule_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mov16 a1, a3
 ; CHECK-NEXT:    movt32 a1, a2
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4727,7 +4727,7 @@ entry:
 define i1 @selectRI_ule_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRI_ule_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a1
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -4774,7 +4774,7 @@ entry:
 define i1 @selectC_ule_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_ule_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4913,7 +4913,7 @@ entry:
 define i32 @selectC_sgt_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_sgt_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -4951,14 +4951,14 @@ define i64 @selectRR_sgt_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmphs16 a0, a2
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    addi16 a2, sp, 12
 ; CHECK-NEXT:    addi16 a0, sp, 20
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a0
 ; CHECK-NEXT:    ld16.w a0, (a2, 0)
 ; CHECK-NEXT:    ld16.w a1, (a2, 4)
@@ -5029,12 +5029,12 @@ define i64 @selectRI_sgt_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 4)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -5119,12 +5119,12 @@ define i64 @selectRX_sgt_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 4)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -5202,7 +5202,7 @@ define i64 @selectC_sgt_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_sgt_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -5355,7 +5355,7 @@ entry:
 define i16 @selectC_sgt_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_sgt_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -5503,7 +5503,7 @@ entry:
 define i8 @selectC_sgt_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_sgt_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -5530,9 +5530,9 @@ entry:
 define i1 @selectRR_sgt_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_sgt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a3, a2
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a3
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -5590,7 +5590,7 @@ entry:
 define i1 @selectRX_sgt_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRX_sgt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a1
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -5617,7 +5617,7 @@ entry:
 define i1 @selectC_sgt_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_sgt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -5754,7 +5754,7 @@ entry:
 define i32 @selectC_sge_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_sge_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -5792,14 +5792,14 @@ define i64 @selectRR_sge_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmplt16 a3, a1
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 a1, sp, 12
 ; CHECK-NEXT:    addi16 a2, sp, 20
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    ld16.w a0, (a1, 0)
 ; CHECK-NEXT:    ld16.w a1, (a1, 4)
@@ -5865,12 +5865,12 @@ define i64 @selectRI_sge_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 4)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -5955,12 +5955,12 @@ define i64 @selectRX_sge_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 4)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -6038,7 +6038,7 @@ define i64 @selectC_sge_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_sge_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -6189,7 +6189,7 @@ entry:
 define i16 @selectC_sge_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_sge_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -6335,7 +6335,7 @@ entry:
 define i8 @selectC_sge_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_sge_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -6362,10 +6362,10 @@ entry:
 define i1 @selectRR_sge_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_sge_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mov16 a1, a3
 ; CHECK-NEXT:    movt32 a1, a2
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -6397,7 +6397,7 @@ entry:
 define i1 @selectRI_sge_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRI_sge_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a2, a1
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -6444,7 +6444,7 @@ entry:
 define i1 @selectC_sge_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_sge_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -6581,7 +6581,7 @@ entry:
 define i32 @selectC_slt_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_slt_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -6619,14 +6619,14 @@ define i64 @selectRR_slt_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmphs16 a2, a0
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a1, a0
 ; CHECK-NEXT:    addi16 a2, sp, 12
 ; CHECK-NEXT:    addi16 a0, sp, 20
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a0
 ; CHECK-NEXT:    ld16.w a0, (a2, 0)
 ; CHECK-NEXT:    ld16.w a1, (a2, 4)
@@ -6696,12 +6696,12 @@ define i64 @selectRI_slt_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmphsi16 a0, 10
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 8)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -6782,12 +6782,12 @@ define i64 @selectRX_slt_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 4)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -6865,7 +6865,7 @@ define i64 @selectC_slt_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_slt_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -7015,7 +7015,7 @@ entry:
 define i16 @selectC_slt_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_slt_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -7161,7 +7161,7 @@ entry:
 define i8 @selectC_slt_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_slt_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -7188,9 +7188,9 @@ entry:
 define i1 @selectRR_slt_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_slt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a3, a2
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, a3
 ; CHECK-NEXT:    mov16 a0, a2
 ; CHECK-NEXT:    rts16
@@ -7226,7 +7226,7 @@ entry:
 define i1 @selectRI_slt_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRI_slt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -7273,7 +7273,7 @@ entry:
 define i1 @selectC_slt_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_slt_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -7407,7 +7407,7 @@ entry:
 define i32 @selectC_sle_i32(i1 %c, i32 %n, i32 %m) {
 ; CHECK-LABEL: selectC_sle_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -7445,14 +7445,14 @@ define i64 @selectRR_sle_i64(i64 %x, i64 %y, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmplt16 a1, a3
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w a2, (sp, 8)
-; CHECK-NEXT:    btsti32 a2, 0
+; CHECK-NEXT:    btsti16 a2, 0
 ; CHECK-NEXT:    movf32 a0, a1
 ; CHECK-NEXT:    addi16 a1, sp, 12
 ; CHECK-NEXT:    addi16 a2, sp, 20
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    ld16.w a0, (a1, 0)
 ; CHECK-NEXT:    ld16.w a1, (a1, 4)
@@ -7517,12 +7517,12 @@ define i64 @selectRI_sle_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    cmphsi16 a0, 11
 ; CHECK-NEXT:    mvcv16 a0
 ; CHECK-NEXT:    ld16.w a1, (sp, 4)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 8)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -7603,12 +7603,12 @@ define i64 @selectRX_sle_i64(i64 %x, i64 %n, i64 %m) {
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    st16.w a1, (sp, 4)
 ; CHECK-NEXT:    ld16.w a1, (sp, 8)
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    mvc32 a1
 ; CHECK-NEXT:    ld16.w l0, (sp, 4)
-; CHECK-NEXT:    btsti32 l0, 0
+; CHECK-NEXT:    btsti16 l0, 0
 ; CHECK-NEXT:    movf32 a1, a0
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a2, t1
 ; CHECK-NEXT:    movt32 a3, t0
 ; CHECK-NEXT:    mov16 a0, a2
@@ -7686,7 +7686,7 @@ define i64 @selectC_sle_i64(i1 %c, i64 %n, i64 %m) {
 ; CHECK-LABEL: selectC_sle_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    ld32.w t0, (sp, 0)
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a3
 ; CHECK-NEXT:    movt32 a2, t0
 ; CHECK-NEXT:    mov16 a0, a1
@@ -7834,7 +7834,7 @@ entry:
 define i16 @selectC_sle_i16(i1 %c, i16 %n, i16 %m) {
 ; CHECK-LABEL: selectC_sle_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -7978,7 +7978,7 @@ entry:
 define i8 @selectC_sle_i8(i1 %c, i8 %n, i8 %m) {
 ; CHECK-LABEL: selectC_sle_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -8005,10 +8005,10 @@ entry:
 define i1 @selectRR_sle_i1(i1 %x, i1 %y, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRR_sle_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    mov16 a0, a3
 ; CHECK-NEXT:    movt32 a0, a2
-; CHECK-NEXT:    btsti32 a1, 0
+; CHECK-NEXT:    btsti16 a1, 0
 ; CHECK-NEXT:    movt32 a0, a3
 ; CHECK-NEXT:    rts16
 ;
@@ -8059,7 +8059,7 @@ entry:
 define i1 @selectRX_sle_i1(i1 %x, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectRX_sle_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16
@@ -8086,7 +8086,7 @@ entry:
 define i1 @selectC_sle_i1(i1 %c, i1 %n, i1 %m) {
 ; CHECK-LABEL: selectC_sle_i1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    btsti32 a0, 0
+; CHECK-NEXT:    btsti16 a0, 0
 ; CHECK-NEXT:    movt32 a1, a2
 ; CHECK-NEXT:    mov16 a0, a1
 ; CHECK-NEXT:    rts16


        


More information about the llvm-commits mailing list