[llvm] e497b12 - [NFC][AArch64][ARM][Thumb][Hexagon] Autogenerate some tests

Roman Lebedev via llvm-commits <llvm-commits at lists.llvm.org>
Sun Jun 20 04:13:16 PDT 2021


Author: Roman Lebedev
Date: 2021-06-20T14:12:45+03:00
New Revision: e497b12a69604b6d691312a30f6b86da4f18f7f8

URL: https://github.com/llvm/llvm-project/commit/e497b12a69604b6d691312a30f6b86da4f18f7f8
DIFF: https://github.com/llvm/llvm-project/commit/e497b12a69604b6d691312a30f6b86da4f18f7f8.diff

LOG: [NFC][AArch64][ARM][Thumb][Hexagon] Autogenerate some tests

All of these tests (and some others) are affected by D104597,
but their check lines were written manually, which rather complicates
verifying the effect that change has on them.
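
(For context: the check lines added below are the kind produced by the
utils/update_llc_test_checks.py utility mentioned in the NOTE lines. A
typical invocation is sketched here; the build/bin/llc path is an
illustrative assumption, not part of this commit:

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/AArch64/addsub.ll

This regenerates the test's FileCheck assertions in place from llc's output.)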

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/addsub.ll
    llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
    llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
    llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
    llvm/test/CodeGen/AArch64/cond-br-tuning.ll
    llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
    llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
    llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
    llvm/test/CodeGen/ARM/ifcvt-callback.ll
    llvm/test/CodeGen/ARM/ifcvt1.ll
    llvm/test/CodeGen/ARM/ifcvt3.ll
    llvm/test/CodeGen/ARM/load-global2.ll
    llvm/test/CodeGen/ARM/smml.ll
    llvm/test/CodeGen/ARM/speculation-hardening-sls.ll
    llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll
    llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll
    llvm/test/CodeGen/Thumb2/tpsoft.ll
    llvm/test/CodeGen/Thumb2/v8_IT_4.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index c0235cd5d9ef3..65f5a8b140183 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-linux-gnu | FileCheck %s
 
 ; Note that this should be refactored (for efficiency if nothing else)
@@ -11,13 +12,23 @@
 ; Add pure 12-bit immediates:
 define void @add_small() {
 ; CHECK-LABEL: add_small:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :got:var_i32
+; CHECK-NEXT:    adrp x9, :got:var_i64
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var_i32]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var_i64]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr x11, [x9]
+; CHECK-NEXT:    add w10, w10, #4095 // =4095
+; CHECK-NEXT:    add x11, x11, #52 // =52
+; CHECK-NEXT:    str w10, [x8]
+; CHECK-NEXT:    str x11, [x9]
+; CHECK-NEXT:    ret
 
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4095
   %val32 = load i32, i32* @var_i32
   %newval32 = add i32 %val32, 4095
   store i32 %newval32, i32* @var_i32
 
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #52
   %val64 = load i64, i64* @var_i64
   %newval64 = add i64 %val64, 52
   store i64 %newval64, i64* @var_i64
@@ -36,19 +47,23 @@ define void @add_small() {
 ; xC = add xA, #12 ; <- xA implicitly zero extend wA.
 define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) {
 ; CHECK-LABEL: add_small_imm:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldrb w8, [x0]
+; CHECK-NEXT:    add w9, w8, w2
+; CHECK-NEXT:    add x8, x8, #12 // =12
+; CHECK-NEXT:    str w9, [x3]
+; CHECK-NEXT:    str x8, [x1]
+; CHECK-NEXT:    ret
 entry:
 
-; CHECK: ldrb w[[LOAD32:[0-9]+]], [x0]
   %t = load i8, i8* %p
   %promoted = zext i8 %t to i64
   %zextt = zext i8 %t to i32
   %add = add nuw i32 %zextt, %b
 
-; CHECK: add [[ADD2:x[0-9]+]], x[[LOAD32]], #12
   %add2 = add nuw i64 %promoted, 12
   store i32 %add, i32* %addr
 
-; CHECK: str [[ADD2]], [x1]
   store i64 %add2, i64* %q
   ret void
 }
@@ -56,13 +71,23 @@ entry:
 ; Add 12-bit immediates, shifted left by 12 bits
 define void @add_med() {
 ; CHECK-LABEL: add_med:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :got:var_i32
+; CHECK-NEXT:    adrp x9, :got:var_i64
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var_i32]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var_i64]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr x11, [x9]
+; CHECK-NEXT:    add w10, w10, #3567, lsl #12 // =14610432
+; CHECK-NEXT:    add x11, x11, #4095, lsl #12 // =16773120
+; CHECK-NEXT:    str w10, [x8]
+; CHECK-NEXT:    str x11, [x9]
+; CHECK-NEXT:    ret
 
-; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
   %val32 = load i32, i32* @var_i32
   %newval32 = add i32 %val32, 14610432 ; =0xdef000
   store i32 %newval32, i32* @var_i32
 
-; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}}
   %val64 = load i64, i64* @var_i64
   %newval64 = add i64 %val64, 16773120 ; =0xfff000
   store i64 %newval64, i64* @var_i64
@@ -73,13 +98,23 @@ define void @add_med() {
 ; Subtract 12-bit immediates
 define void @sub_small() {
 ; CHECK-LABEL: sub_small:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :got:var_i32
+; CHECK-NEXT:    adrp x9, :got:var_i64
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var_i32]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var_i64]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr x11, [x9]
+; CHECK-NEXT:    sub w10, w10, #4095 // =4095
+; CHECK-NEXT:    sub x11, x11, #52 // =52
+; CHECK-NEXT:    str w10, [x8]
+; CHECK-NEXT:    str x11, [x9]
+; CHECK-NEXT:    ret
 
-; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4095
   %val32 = load i32, i32* @var_i32
   %newval32 = sub i32 %val32, 4095
   store i32 %newval32, i32* @var_i32
 
-; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, #52
   %val64 = load i64, i64* @var_i64
   %newval64 = sub i64 %val64, 52
   store i64 %newval64, i64* @var_i64
@@ -90,13 +125,23 @@ define void @sub_small() {
 ; Subtract 12-bit immediates, shifted left by 12 bits
 define void @sub_med() {
 ; CHECK-LABEL: sub_med:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :got:var_i32
+; CHECK-NEXT:    adrp x9, :got:var_i64
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var_i32]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var_i64]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr x11, [x9]
+; CHECK-NEXT:    sub w10, w10, #3567, lsl #12 // =14610432
+; CHECK-NEXT:    sub x11, x11, #4095, lsl #12 // =16773120
+; CHECK-NEXT:    str w10, [x8]
+; CHECK-NEXT:    str x11, [x9]
+; CHECK-NEXT:    ret
 
-; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
   %val32 = load i32, i32* @var_i32
   %newval32 = sub i32 %val32, 14610432 ; =0xdef000
   store i32 %newval32, i32* @var_i32
 
-; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}}
   %val64 = load i64, i64* @var_i64
   %newval64 = sub i64 %val64, 16773120 ; =0xfff000
   store i64 %newval64, i64* @var_i64
@@ -106,41 +151,65 @@ define void @sub_med() {
 
 define void @testing() {
 ; CHECK-LABEL: testing:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :got:var_i32
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var_i32]
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    cmp w9, #4095 // =4095
+; CHECK-NEXT:    b.ne .LBB5_6
+; CHECK-NEXT:  // %bb.1: // %test2
+; CHECK-NEXT:    adrp x10, :got:var2_i32
+; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var2_i32]
+; CHECK-NEXT:    add w11, w9, #1 // =1
+; CHECK-NEXT:    str w11, [x8]
+; CHECK-NEXT:    ldr w10, [x10]
+; CHECK-NEXT:    cmp w10, #3567, lsl #12 // =14610432
+; CHECK-NEXT:    b.lo .LBB5_6
+; CHECK-NEXT:  // %bb.2: // %test3
+; CHECK-NEXT:    add w11, w9, #2 // =2
+; CHECK-NEXT:    cmp w9, #123 // =123
+; CHECK-NEXT:    str w11, [x8]
+; CHECK-NEXT:    b.lt .LBB5_6
+; CHECK-NEXT:  // %bb.3: // %test4
+; CHECK-NEXT:    add w11, w9, #3 // =3
+; CHECK-NEXT:    cmp w10, #321 // =321
+; CHECK-NEXT:    str w11, [x8]
+; CHECK-NEXT:    b.gt .LBB5_6
+; CHECK-NEXT:  // %bb.4: // %test5
+; CHECK-NEXT:    add w11, w9, #4 // =4
+; CHECK-NEXT:    cmn w10, #444 // =444
+; CHECK-NEXT:    str w11, [x8]
+; CHECK-NEXT:    b.gt .LBB5_6
+; CHECK-NEXT:  // %bb.5: // %test6
+; CHECK-NEXT:    add w9, w9, #5 // =5
+; CHECK-NEXT:    str w9, [x8]
+; CHECK-NEXT:  .LBB5_6: // %ret
+; CHECK-NEXT:    ret
   %val = load i32, i32* @var_i32
   %val2 = load i32, i32* @var2_i32
 
-; CHECK: cmp {{w[0-9]+}}, #4095
-; CHECK: b.ne [[RET:.?LBB[0-9]+_[0-9]+]]
   %cmp_pos_small = icmp ne i32 %val, 4095
   br i1 %cmp_pos_small, label %ret, label %test2
 
 test2:
-; CHECK: cmp {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
-; CHECK: b.lo [[RET]]
   %newval2 = add i32 %val, 1
   store i32 %newval2, i32* @var_i32
   %cmp_pos_big = icmp ult i32 %val2, 14610432
   br i1 %cmp_pos_big, label %ret, label %test3
 
 test3:
-; CHECK: cmp {{w[0-9]+}}, #123
-; CHECK: b.lt [[RET]]
   %newval3 = add i32 %val, 2
   store i32 %newval3, i32* @var_i32
   %cmp_pos_slt = icmp slt i32 %val, 123
   br i1 %cmp_pos_slt, label %ret, label %test4
 
 test4:
-; CHECK: cmp {{w[0-9]+}}, #321
-; CHECK: b.gt [[RET]]
   %newval4 = add i32 %val, 3
   store i32 %newval4, i32* @var_i32
   %cmp_pos_sgt = icmp sgt i32 %val2, 321
   br i1 %cmp_pos_sgt, label %ret, label %test5
 
 test5:
-; CHECK: cmn {{w[0-9]+}}, #444
-; CHECK: b.gt [[RET]]
   %newval5 = add i32 %val, 4
   store i32 %newval5, i32* @var_i32
   %cmp_neg_uge = icmp sgt i32 %val2, -444

diff --git a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
index 7135dff7f5732..3385fdaf9d9aa 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
@@ -1,19 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-apple-darwin -aarch64-bcc-offset-bits=4 -align-all-nofallthru-blocks=4 < %s | FileCheck %s
 
 ; Long branch is assumed because the block has a higher alignment
 ; requirement than the function.
 
-; CHECK-LABEL: invert_bcc_block_align_higher_func:
-; CHECK: b.eq [[JUMP_BB1:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: b [[JUMP_BB2:LBB[0-9]+_[0-9]+]]
-
-; CHECK: [[JUMP_BB1]]:
-; CHECK: ret
-; CHECK: .p2align 4
-
-; CHECK: [[JUMP_BB2]]:
-; CHECK: ret
 define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y) align 4 #0 {
+; CHECK-LABEL: invert_bcc_block_align_higher_func:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    b.eq LBB0_1
+; CHECK-NEXT:    b LBB0_2
+; CHECK-NEXT:  LBB0_1: ; %bb1
+; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    str w8, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  LBB0_2: ; %bb2
+; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    str w8, [x8]
+; CHECK-NEXT:    ret
   %1 = icmp eq i32 %x, %y
   br i1 %1, label %bb1, label %bb2
 

diff --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
index 0604c2cc881d0..f719c9448f39b 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
@@ -1,23 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-apple-darwin -aarch64-bcc-offset-bits=3 < %s | FileCheck %s
 
-; CHECK-LABEL: invert_bcc:
-; CHECK:      fcmp s0, s1
-; CHECK-NEXT: b.ne [[JUMP_BB1:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: b [[BB1:LBB[0-9]+_[0-9]+]]
-
-; CHECK-NEXT: [[JUMP_BB1]]:
-; CHECK-NEXT: b.vc [[BB2:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: b [[BB1]]
-
-; CHECK: [[BB2]]: ; %bb2
-; CHECK: mov w{{[0-9]+}}, #9
-; CHECK: ret
-
-; CHECK: [[BB1]]: ; %bb1
-; CHECK: mov w{{[0-9]+}}, #42
-; CHECK: ret
-
 define i32 @invert_bcc(float %x, float %y) #0 {
+; CHECK-LABEL: invert_bcc:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    b.ne LBB0_3
+; CHECK-NEXT:    b LBB0_2
+; CHECK-NEXT:  LBB0_3:
+; CHECK-NEXT:    b.vc LBB0_1
+; CHECK-NEXT:    b LBB0_2
+; CHECK-NEXT:  LBB0_1: ; %bb2
+; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ; InlineAsm Start
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    str w8, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB0_2: ; %bb1
+; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    str w8, [x8]
+; CHECK-NEXT:    ret
   %1 = fcmp ueq float %x, %y
   br i1 %1, label %bb1, label %bb2
 
@@ -36,24 +42,26 @@ bb1:
 
 declare i32 @foo() #0
 
-; CHECK-LABEL: _block_split:
-; CHECK: cmp w0, #5
-; CHECK-NEXT: b.ne [[LOR_LHS_FALSE_BB:LBB[0-9]+_[0-9]+]]
-; CHECK-NEXT: b [[IF_THEN_BB:LBB[0-9]+_[0-9]+]]
-
-; CHECK: [[LOR_LHS_FALSE_BB]]:
-; CHECK: cmp w{{[0-9]+}}, #16
-; CHECK-NEXT: b.le [[IF_THEN_BB]]
-; CHECK-NEXT: b [[IF_END_BB:LBB[0-9]+_[0-9]+]]
-
-; CHECK: [[IF_THEN_BB]]:
-; CHECK: bl _foo
-; CHECK-NOT: b L
-
-; CHECK: [[IF_END_BB]]:
-; CHECK: mov{{.*}}, #7
-; CHECK: ret
 define i32 @block_split(i32 %a, i32 %b) #0 {
+; CHECK-LABEL: block_split:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    cmp w0, #5 ; =5
+; CHECK-NEXT:    b.ne LBB1_1
+; CHECK-NEXT:    b LBB1_2
+; CHECK-NEXT:  LBB1_1: ; %lor.lhs.false
+; CHECK-NEXT:    lsl w8, w1, #1
+; CHECK-NEXT:    cmp w1, #7 ; =7
+; CHECK-NEXT:    csinc w8, w8, w1, lt
+; CHECK-NEXT:    cmp w8, #16 ; =16
+; CHECK-NEXT:    b.le LBB1_2
+; CHECK-NEXT:    b LBB1_3
+; CHECK-NEXT:  LBB1_2: ; %if.then
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    bl _foo
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:  LBB1_3: ; %if.end
+; CHECK-NEXT:    mov w0, #7
+; CHECK-NEXT:    ret
 entry:
   %cmp = icmp eq i32 %a, 5
   br i1 %cmp, label %if.then, label %lor.lhs.false

diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index e7c6e3b5ef7b1..9f415858acd2e 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -debugify-and-strip-all-safe < %s -mtriple=aarch64-linux-gnu | FileCheck %s
 
 ; marked as external to prevent possible optimizations
@@ -8,12 +9,40 @@
 
 ; (a > 10 && b == c) || (a >= 10 && b == d)
 define i32 @combine_gt_ge_10() #0 {
-; CHECK-LABEL: combine_gt_ge_10
-; CHECK: cmp
-; CHECK: b.le
-; CHECK: ret
-; CHECK-NOT: cmp
-; CHECK: b.lt
+; CHECK-LABEL: combine_gt_ge_10:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #10 // =10
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    b.le .LBB0_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x10, :got:c
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    ldr x10, [x10, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x10]
+; CHECK-NEXT:    cmp w9, w10
+; CHECK-NEXT:    b.ne .LBB0_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_3: // %lor.lhs.false
+; CHECK-NEXT:    b.lt .LBB0_6
+; CHECK-NEXT:  .LBB0_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB0_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, 10
@@ -45,12 +74,42 @@ return:                                           ; preds = %if.end, %land.lhs.t
 
 ; (a > 5 && b == c) || (a < 5 && b == d)
 define i32 @combine_gt_lt_5() #0 {
-; CHECK-LABEL: combine_gt_lt_5
-; CHECK: cmp
-; CHECK: b.le
-; CHECK: ret
-; CHECK-NOT: cmp
-; CHECK: b.ge
+; CHECK-LABEL: combine_gt_lt_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5 // =5
+; CHECK-NEXT:    b.le .LBB1_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB1_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_3: // %lor.lhs.false
+; CHECK-NEXT:    b.ge .LBB1_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB1_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, 5
@@ -82,12 +141,40 @@ return:                                           ; preds = %if.end, %land.lhs.t
 
 ; (a < 5 && b == c) || (a <= 5 && b == d)
 define i32 @combine_lt_ge_5() #0 {
-; CHECK-LABEL: combine_lt_ge_5
-; CHECK: cmp
-; CHECK: b.ge
-; CHECK: ret
-; CHECK-NOT: cmp
-; CHECK: b.gt
+; CHECK-LABEL: combine_lt_ge_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5 // =5
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    b.ge .LBB2_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x10, :got:c
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    ldr x10, [x10, :got_lo12:c]
+; CHECK-NEXT:    ldr w10, [x10]
+; CHECK-NEXT:    cmp w9, w10
+; CHECK-NEXT:    b.ne .LBB2_4
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_3: // %lor.lhs.false
+; CHECK-NEXT:    b.gt .LBB2_6
+; CHECK-NEXT:  .LBB2_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB2_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp slt i32 %0, 5
@@ -119,12 +206,42 @@ return:                                           ; preds = %if.end, %land.lhs.t
 
 ; (a < 5 && b == c) || (a > 5 && b == d)
 define i32 @combine_lt_gt_5() #0 {
-; CHECK-LABEL: combine_lt_gt_5
-; CHECK: cmp
-; CHECK: b.ge
-; CHECK: ret
-; CHECK-NOT: cmp
-; CHECK: b.le
+; CHECK-LABEL: combine_lt_gt_5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #5 // =5
+; CHECK-NEXT:    b.ge .LBB3_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB3_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_3: // %lor.lhs.false
+; CHECK-NEXT:    b.le .LBB3_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB3_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp slt i32 %0, 5
@@ -156,12 +273,42 @@ return:                                           ; preds = %if.end, %land.lhs.t
 
 ; (a > -5 && b == c) || (a < -5 && b == d)
 define i32 @combine_gt_lt_n5() #0 {
-; CHECK-LABEL: combine_gt_lt_n5
-; CHECK: cmn
-; CHECK: b.le
-; CHECK: ret
-; CHECK-NOT: cmn
-; CHECK: b.ge
+; CHECK-LABEL: combine_gt_lt_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #5 // =5
+; CHECK-NEXT:    b.le .LBB4_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB4_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_3: // %lor.lhs.false
+; CHECK-NEXT:    b.ge .LBB4_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB4_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, -5
@@ -193,12 +340,42 @@ return:                                           ; preds = %if.end, %land.lhs.t
 
 ; (a < -5 && b == c) || (a > -5 && b == d)
 define i32 @combine_lt_gt_n5() #0 {
-; CHECK-LABEL: combine_lt_gt_n5
-; CHECK: cmn
-; CHECK: b.ge
-; CHECK: ret
-; CHECK-NOT: cmn
-; CHECK: b.le
+; CHECK-LABEL: combine_lt_gt_n5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #5 // =5
+; CHECK-NEXT:    b.ge .LBB5_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB5_6
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_3: // %lor.lhs.false
+; CHECK-NEXT:    b.le .LBB5_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB5_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp slt i32 %0, -5
@@ -236,6 +413,38 @@ declare %struct.Struct* @Update(%struct.Struct*) #1
 
 ; no checks for this case, it just should be processed without errors
 define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdCall) #0 {
+; CHECK-LABEL: combine_non_adjacent_cmp_br:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x30, [sp, #-48]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x22, x21, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w21, -24
+; CHECK-NEXT:    .cfi_offset w22, -32
+; CHECK-NEXT:    .cfi_offset w30, -48
+; CHECK-NEXT:    ldr x19, [x0]
+; CHECK-NEXT:    mov w20, #24
+; CHECK-NEXT:    adrp x22, glob
+; CHECK-NEXT:    add x21, x19, #2 // =2
+; CHECK-NEXT:  .LBB6_1: // %land.rhs
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr x8, [x20]
+; CHECK-NEXT:    cmp x8, #1 // =1
+; CHECK-NEXT:    b.lt .LBB6_3
+; CHECK-NEXT:  // %bb.2: // %while.body
+; CHECK-NEXT:    // in Loop: Header=BB6_1 Depth=1
+; CHECK-NEXT:    ldr x0, [x22, :lo12:glob]
+; CHECK-NEXT:    bl Update
+; CHECK-NEXT:    sub x21, x21, #2 // =2
+; CHECK-NEXT:    cmp x19, x21
+; CHECK-NEXT:    b.lt .LBB6_1
+; CHECK-NEXT:  .LBB6_3: // %while.end
+; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp], #48 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
 entry:
   %size = getelementptr inbounds %struct.Struct, %struct.Struct* %hdCall, i64 0, i32 0
   %0 = load i64, i64* %size, align 8
@@ -262,11 +471,49 @@ while.end:
 declare void @do_something() #1
 
 define i32 @do_nothing_if_resultant_opcodes_would_differ() #0 {
-; CHECK-LABEL: do_nothing_if_resultant_opcodes_would_differ
-; CHECK: cmn
-; CHECK: b.gt
-; CHECK: cmp
-; CHECK: b.gt
+; CHECK-LABEL: do_nothing_if_resultant_opcodes_would_differ:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w30, -32
+; CHECK-NEXT:    adrp x19, :got:a
+; CHECK-NEXT:    ldr x19, [x19, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x19]
+; CHECK-NEXT:    cmn w8, #2 // =2
+; CHECK-NEXT:    b.gt .LBB7_4
+; CHECK-NEXT:  // %bb.1: // %while.body.preheader
+; CHECK-NEXT:    sub w20, w8, #1 // =1
+; CHECK-NEXT:  .LBB7_2: // %while.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    bl do_something
+; CHECK-NEXT:    adds w20, w20, #1 // =1
+; CHECK-NEXT:    b.mi .LBB7_2
+; CHECK-NEXT:  // %bb.3: // %while.cond.while.end_crit_edge
+; CHECK-NEXT:    ldr w8, [x19]
+; CHECK-NEXT:  .LBB7_4: // %while.end
+; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    b.gt .LBB7_7
+; CHECK-NEXT:  // %bb.5: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB7_7
+; CHECK-NEXT:  // %bb.6:
+; CHECK-NEXT:    mov w0, #123
+; CHECK-NEXT:    b .LBB7_8
+; CHECK-NEXT:  .LBB7_7: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:  .LBB7_8: // %return
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp4 = icmp slt i32 %0, -1
@@ -306,11 +553,47 @@ return:                                           ; preds = %if.end, %land.lhs.t
 }
 
 define i32 @do_nothing_if_compares_can_not_be_adjusted_to_each_other() #0 {
-; CHECK-LABEL: do_nothing_if_compares_can_not_be_adjusted_to_each_other
-; CHECK: cmp
-; CHECK: b.gt
-; CHECK: cmn
-; CHECK: b.lt
+; CHECK-LABEL: do_nothing_if_compares_can_not_be_adjusted_to_each_other:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    b.gt .LBB8_3
+; CHECK-NEXT:  // %bb.1: // %while.body.preheader
+; CHECK-NEXT:    sub w19, w8, #1 // =1
+; CHECK-NEXT:  .LBB8_2: // %while.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    bl do_something
+; CHECK-NEXT:    adds w19, w19, #1 // =1
+; CHECK-NEXT:    b.mi .LBB8_2
+; CHECK-NEXT:  .LBB8_3: // %while.end
+; CHECK-NEXT:    adrp x8, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmn w8, #2 // =2
+; CHECK-NEXT:    b.lt .LBB8_6
+; CHECK-NEXT:  // %bb.4: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB8_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #123
+; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB8_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp4 = icmp slt i32 %0, 1
@@ -356,18 +639,46 @@ return:                                           ; preds = %if.end, %land.lhs.t
 ; b.gt .LBB0_5
 
 define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) {
-
 ; CHECK-LABEL: fcmpri:
-; CHECK: cmp w0, #2
-; CHECK: b.lt .LBB9_3
-; CHECK-NOT: cmp w0, #1
-; CHECK-NOT: b.le .LBB9_3
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str d8, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    .cfi_offset b8, -32
+; CHECK-NEXT:    cmp w0, #2 // =2
+; CHECK-NEXT:    b.lt .LBB9_3
+; CHECK-NEXT:  // %bb.1: // %land.lhs.true
+; CHECK-NEXT:    ldr x8, [x1, #8]
+; CHECK-NEXT:    cbz x8, .LBB9_3
+; CHECK-NEXT:  // %bb.2:
+; CHECK-NEXT:    mov w0, #3
+; CHECK-NEXT:    b .LBB9_4
+; CHECK-NEXT:  .LBB9_3: // %if.end
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    bl zoo
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    mov w0, #-1
+; CHECK-NEXT:    bl yoo
+; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cinc w0, w19, gt
+; CHECK-NEXT:    mov w1, #2
+; CHECK-NEXT:    mov v8.16b, v0.16b
+; CHECK-NEXT:    bl xoo
+; CHECK-NEXT:    fmov d0, #-1.00000000
+; CHECK-NEXT:    fadd d0, d8, d0
+; CHECK-NEXT:    fcmp d8, #0.0
+; CHECK-NEXT:    fcsel d0, d8, d0, gt
+; CHECK-NEXT:    fmov d1, #-2.00000000
+; CHECK-NEXT:    bl woo
+; CHECK-NEXT:    mov w0, #4
+; CHECK-NEXT:  .LBB9_4: // %return
+; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr d8, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
 
 ; CHECK-LABEL-DAG: .LBB9_3
-; CHECK: cmp w19, #0
-; CHECK: fcmp d8, #0.0
-; CHECK-NOT: cmp w19, #1
-; CHECK-NOT: b.ge .LBB9_5
 
 entry:
   %cmp = icmp sgt i32 %argc, 1
@@ -405,9 +716,28 @@ return:                                           ; preds = %land.lhs.true, %con
 
 define void @cmp_shifted(i32 %in, i32 %lhs, i32 %rhs) {
 ; CHECK-LABEL: cmp_shifted:
-; CHECK: cmp w0, #2, lsl #12
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    cmp w0, #2, lsl #12 // =8192
+; CHECK-NEXT:    b.lt .LBB10_2
+; CHECK-NEXT:  // %bb.1: // %true
+; CHECK-NEXT:    mov w0, #128
+; CHECK-NEXT:    b .LBB10_5
+; CHECK-NEXT:  .LBB10_2: // %false
+; CHECK-NEXT:    cmp w0, #1 // =1
+; CHECK-NEXT:    b.lt .LBB10_4
+; CHECK-NEXT:  // %bb.3: // %truer
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    b .LBB10_5
+; CHECK-NEXT:  .LBB10_4: // %falser
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:  .LBB10_5: // %true
+; CHECK-NEXT:    bl zoo
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
 ; [...]
-; CHECK: cmp w0, #1
 
   %tst_low = icmp sgt i32 %in, 8191
   br i1 %tst_low, label %true, label %false
@@ -430,10 +760,46 @@ falser:
 }
 
 define i32 @combine_gt_ge_sel(i64 %v, i64* %p) #0 {
-; CHECK-LABEL: combine_gt_ge_sel
-; CHECK: ldr [[reg1:w[0-9]*]],
-; CHECK: cmp [[reg1]], #0
-; CHECK: csel {{.*}}, gt
+; CHECK-LABEL: combine_gt_ge_sel:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adrp x8, :got:a
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    csel x9, x0, xzr, gt
+; CHECK-NEXT:    str x9, [x1]
+; CHECK-NEXT:    b.le .LBB11_2
+; CHECK-NEXT:  // %bb.1: // %lor.lhs.false
+; CHECK-NEXT:    cmp w8, #2 // =2
+; CHECK-NEXT:    b.ge .LBB11_4
+; CHECK-NEXT:    b .LBB11_6
+; CHECK-NEXT:  .LBB11_2: // %land.lhs.true
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:c
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:c]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB11_4
+; CHECK-NEXT:  // %bb.3:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_4: // %land.lhs.true3
+; CHECK-NEXT:    adrp x8, :got:b
+; CHECK-NEXT:    adrp x9, :got:d
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    b.ne .LBB11_6
+; CHECK-NEXT:  // %bb.5:
+; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_6: // %if.end
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* @a, align 4
   %cmp = icmp sgt i32 %0, 0

diff --git a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
index e0b6a2f050373..f018fecb848c1 100644
--- a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
+++ b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll
@@ -1,14 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -debugify-and-strip-all-safe < %s -O3 -mtriple=aarch64-eabi -verify-machineinstrs | FileCheck %s
 
 target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64-linaro-linux-gnueabi"
 
 ; CMN is an alias of ADDS.
-; CHECK-LABEL: test_add_cbz:
-; CHECK: cmn w0, w1
-; CHECK: b.eq
-; CHECK: ret
+
 define void @test_add_cbz(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_add_cbz:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmn w0, w1
+; CHECK-NEXT:    b.eq .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str wzr, [x2]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: // %L2
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
   %c = add nsw i32 %a, %b
   %d = icmp ne i32 %c, 0
   br i1 %d, label %L1, label %L2
@@ -20,11 +29,17 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_add_cbz_multiple_use:
-; CHECK: adds
-; CHECK: b.eq
-; CHECK: ret
 define void @test_add_cbz_multiple_use(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_add_cbz_multiple_use:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adds w8, w0, w1
+; CHECK-NEXT:    b.eq .LBB1_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str wzr, [x2]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: // %L2
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
   %c = add nsw i32 %a, %b
   %d = icmp ne i32 %c, 0
   br i1 %d, label %L1, label %L2
@@ -36,10 +51,18 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_add_cbz_64:
-; CHECK: cmn x0, x1
-; CHECK: b.eq
 define void @test_add_cbz_64(i64 %a, i64 %b, i64* %ptr) {
+; CHECK-LABEL: test_add_cbz_64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmn x0, x1
+; CHECK-NEXT:    b.eq .LBB2_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str xzr, [x2]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_2: // %L2
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    str x8, [x2]
+; CHECK-NEXT:    ret
   %c = add nsw i64 %a, %b
   %d = icmp ne i64 %c, 0
   br i1 %d, label %L1, label %L2
@@ -51,10 +74,18 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_and_cbz:
-; CHECK: tst w0, #0x6
-; CHECK: b.eq
 define void @test_and_cbz(i32 %a, i32* %ptr) {
+; CHECK-LABEL: test_and_cbz:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    tst w0, #0x6
+; CHECK-NEXT:    b.eq .LBB3_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str wzr, [x1]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_2: // %L2
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    str w8, [x1]
+; CHECK-NEXT:    ret
   %c = and i32 %a, 6
   %d = icmp ne i32 %c, 0
   br i1 %d, label %L1, label %L2
@@ -66,10 +97,18 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_bic_cbnz:
-; CHECK: bics wzr, w1, w0
-; CHECK: b.ne
 define void @test_bic_cbnz(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_bic_cbnz:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bics wzr, w1, w0
+; CHECK-NEXT:    b.ne .LBB4_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str wzr, [x2]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_2: // %L2
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
   %c = and i32 %a, %b
   %d = icmp eq i32 %c, %b
   br i1 %d, label %L1, label %L2
@@ -81,11 +120,15 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_add_tbz:
-; CHECK: adds
-; CHECK: b.pl
-; CHECK: ret
 define void @test_add_tbz(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_add_tbz:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adds w8, w0, w1
+; CHECK-NEXT:    b.pl .LBB5_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:  .LBB5_2: // %L2
+; CHECK-NEXT:    ret
 entry:
   %add = add nsw i32 %a, %b
   %cmp36 = icmp sge i32 %add, 0
@@ -97,11 +140,15 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_subs_tbz:
-; CHECK: subs
-; CHECK: b.pl
-; CHECK: ret
 define void @test_subs_tbz(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_subs_tbz:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    b.pl .LBB6_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:  .LBB6_2: // %L2
+; CHECK-NEXT:    ret
 entry:
   %sub = sub nsw i32 %a, %b
   %cmp36 = icmp sge i32 %sub, 0
@@ -113,11 +160,15 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_add_tbnz
-; CHECK: adds
-; CHECK: b.mi
-; CHECK: ret
 define void @test_add_tbnz(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_add_tbnz:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    adds w8, w0, w1
+; CHECK-NEXT:    b.mi .LBB7_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:  .LBB7_2: // %L2
+; CHECK-NEXT:    ret
 entry:
   %add = add nsw i32 %a, %b
   %cmp36 = icmp slt i32 %add, 0
@@ -129,11 +180,15 @@ L2:
   ret void
 }
 
-; CHECK-LABEL: test_subs_tbnz
-; CHECK: subs
-; CHECK: b.mi
-; CHECK: ret
 define void @test_subs_tbnz(i32 %a, i32 %b, i32* %ptr) {
+; CHECK-LABEL: test_subs_tbnz:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    subs w8, w0, w1
+; CHECK-NEXT:    b.mi .LBB8_2
+; CHECK-NEXT:  // %bb.1: // %L1
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:  .LBB8_2: // %L2
+; CHECK-NEXT:    ret
 entry:
   %sub = sub nsw i32 %a, %b
   %cmp36 = icmp slt i32 %sub, 0
@@ -149,11 +204,22 @@ declare void @foo()
 declare void @bar(i32)
 
 ; Don't transform since the call will clobber the NZCV bits.
-; CHECK-LABEL: test_call_clobber:
-; CHECK: and w[[DST:[0-9]+]], w1, #0x6
-; CHECK: bl bar
-; CHECK: cbnz w[[DST]]
 define void @test_call_clobber(i32 %unused, i32 %a) {
+; CHECK-LABEL: test_call_clobber:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    and w19, w1, #0x6
+; CHECK-NEXT:    mov w0, w19
+; CHECK-NEXT:    bl bar
+; CHECK-NEXT:    cbnz w19, .LBB9_2
+; CHECK-NEXT:  // %bb.1: // %if.end
+; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_2: // %if.then
+; CHECK-NEXT:    bl foo
 entry:
   %c = and i32 %a, 6
   call void @bar(i32 %c)

diff --git a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
index 398bf3c985b5e..a1470b13ef448 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
@@ -1,9 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s
 
-; CHECK-LABEL: test_or
-; CHECK:       cbnz w0, {{LBB[0-9]+_2}}
-; CHECK:       cbz w1, {{LBB[0-9]+_1}}
 define i64 @test_or(i32 %a, i32 %b) {
+; CHECK-LABEL: test_or:
+; CHECK:       ; %bb.0: ; %bb1
+; CHECK-NEXT:    cbnz w0, LBB0_2
+; CHECK-NEXT:  LBB0_1: ; %bb3
+; CHECK-NEXT:    mov x0, xzr
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB0_2: ; %bb1.cond.split
+; CHECK-NEXT:    cbz w1, LBB0_1
+; CHECK-NEXT:  ; %bb.3: ; %bb4
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
 bb1:
   %0 = icmp eq i32 %a, 0
   %1 = icmp eq i32 %b, 0
@@ -18,10 +32,23 @@ bb4:
   ret i64 %2
 }
 
-; CHECK-LABEL: test_or_select
-; CHECK:       cbnz w0, {{LBB[0-9]+_2}}
-; CHECK:       cbz w1, {{LBB[0-9]+_1}}
 define i64 @test_or_select(i32 %a, i32 %b) {
+; CHECK-LABEL: test_or_select:
+; CHECK:       ; %bb.0: ; %bb1
+; CHECK-NEXT:    cbnz w0, LBB1_2
+; CHECK-NEXT:  LBB1_1: ; %bb3
+; CHECK-NEXT:    mov x0, xzr
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB1_2: ; %bb1.cond.split
+; CHECK-NEXT:    cbz w1, LBB1_1
+; CHECK-NEXT:  ; %bb.3: ; %bb4
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
 bb1:
   %0 = icmp eq i32 %a, 0
   %1 = icmp eq i32 %b, 0
@@ -36,10 +63,23 @@ bb4:
   ret i64 %2
 }
 
-; CHECK-LABEL: test_and
-; CHECK:       cbnz w0, {{LBB[0-9]+_2}}
-; CHECK:       cbz w1, {{LBB[0-9]+_1}}
 define i64 @test_and(i32 %a, i32 %b) {
+; CHECK-LABEL: test_and:
+; CHECK:       ; %bb.0: ; %bb1
+; CHECK-NEXT:    cbnz w0, LBB2_2
+; CHECK-NEXT:  LBB2_1: ; %bb3
+; CHECK-NEXT:    mov x0, xzr
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB2_2: ; %bb1.cond.split
+; CHECK-NEXT:    cbz w1, LBB2_1
+; CHECK-NEXT:  ; %bb.3: ; %bb4
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
 bb1:
   %0 = icmp ne i32 %a, 0
   %1 = icmp ne i32 %b, 0
@@ -54,10 +94,23 @@ bb4:
   ret i64 %2
 }
 
-; CHECK-LABEL: test_and_select
-; CHECK:       cbnz w0, {{LBB[0-9]+_2}}
-; CHECK:       cbz w1, {{LBB[0-9]+_1}}
 define i64 @test_and_select(i32 %a, i32 %b) {
+; CHECK-LABEL: test_and_select:
+; CHECK:       ; %bb.0: ; %bb1
+; CHECK-NEXT:    cbnz w0, LBB3_2
+; CHECK-NEXT:  LBB3_1: ; %bb3
+; CHECK-NEXT:    mov x0, xzr
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB3_2: ; %bb1.cond.split
+; CHECK-NEXT:    cbz w1, LBB3_1
+; CHECK-NEXT:  ; %bb.3: ; %bb4
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
 bb1:
   %0 = icmp ne i32 %a, 0
   %1 = icmp ne i32 %b, 0
@@ -74,14 +127,26 @@ bb4:
 
 ; If the branch is unpredictable, don't add another branch.
 
-; CHECK-LABEL: test_or_unpredictable
-; CHECK:       cmp   w0, #0
-; CHECK-NEXT:  cset  w8, eq
-; CHECK-NEXT:  cmp   w1, #0
-; CHECK-NEXT:  cset  w9, eq
-; CHECK-NEXT:  orr   w8, w8, w9
-; CHECK-NEXT:  tbnz w8, #0,
 define i64 @test_or_unpredictable(i32 %a, i32 %b) {
+; CHECK-LABEL: test_or_unpredictable:
+; CHECK:       ; %bb.0: ; %bb1
+; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cset w8, eq
+; CHECK-NEXT:    cmp w1, #0 ; =0
+; CHECK-NEXT:    cset w9, eq
+; CHECK-NEXT:    orr w8, w8, w9
+; CHECK-NEXT:    tbnz w8, #0, LBB4_2
+; CHECK-NEXT:  ; %bb.1: ; %bb4
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB4_2: ; %bb3
+; CHECK-NEXT:    mov x0, xzr
+; CHECK-NEXT:    ret
 bb1:
   %0 = icmp eq i32 %a, 0
   %1 = icmp eq i32 %b, 0
@@ -96,14 +161,26 @@ bb4:
   ret i64 %2
 }
 
-; CHECK-LABEL: test_and_unpredictable
-; CHECK:       cmp   w0, #0
-; CHECK-NEXT:  cset  w8, ne
-; CHECK-NEXT:  cmp   w1, #0
-; CHECK-NEXT:  cset  w9, ne
-; CHECK-NEXT:  and   w8, w8, w9
-; CHECK-NEXT:  tbz w8, #0,
 define i64 @test_and_unpredictable(i32 %a, i32 %b) {
+; CHECK-LABEL: test_and_unpredictable:
+; CHECK:       ; %bb.0: ; %bb1
+; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cset w8, ne
+; CHECK-NEXT:    cmp w1, #0 ; =0
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    and w8, w8, w9
+; CHECK-NEXT:    tbz w8, #0, LBB5_2
+; CHECK-NEXT:  ; %bb.1: ; %bb4
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB5_2: ; %bb3
+; CHECK-NEXT:    mov x0, xzr
+; CHECK-NEXT:    ret
 bb1:
   %0 = icmp ne i32 %a, 0
   %1 = icmp ne i32 %b, 0

diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index 1c15f1521c561..842788a86a4f5 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
 
 @var1_32 = global i32 0
@@ -8,6 +9,54 @@
 
 define void @logical_32bit() minsize {
 ; CHECK-LABEL: logical_32bit:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x19, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w19, -16
+; CHECK-NEXT:    adrp x8, :got:var1_32
+; CHECK-NEXT:    adrp x9, :got:var2_32
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var1_32]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var2_32]
+; CHECK-NEXT:    ldr w10, [x8]
+; CHECK-NEXT:    ldr w9, [x9]
+; CHECK-NEXT:    and w11, w10, w9
+; CHECK-NEXT:    bic w12, w10, w9
+; CHECK-NEXT:    orr w13, w10, w9
+; CHECK-NEXT:    orn w14, w10, w9
+; CHECK-NEXT:    eor w15, w10, w9
+; CHECK-NEXT:    eon w16, w9, w10
+; CHECK-NEXT:    and w17, w10, w9, lsl #31
+; CHECK-NEXT:    bic w18, w10, w9, lsl #31
+; CHECK-NEXT:    orr w0, w10, w9, lsl #31
+; CHECK-NEXT:    orn w1, w10, w9, lsl #31
+; CHECK-NEXT:    eor w2, w10, w9, lsl #31
+; CHECK-NEXT:    eon w3, w10, w9, lsl #31
+; CHECK-NEXT:    bic w4, w10, w9, asr #10
+; CHECK-NEXT:    eor w5, w10, w9, asr #10
+; CHECK-NEXT:    orn w6, w10, w9, lsr #1
+; CHECK-NEXT:    eor w7, w10, w9, lsr #1
+; CHECK-NEXT:    eon w19, w10, w9, ror #20
+; CHECK-NEXT:    and w9, w10, w9, ror #20
+; CHECK-NEXT:    str w11, [x8]
+; CHECK-NEXT:    str w12, [x8]
+; CHECK-NEXT:    str w13, [x8]
+; CHECK-NEXT:    str w14, [x8]
+; CHECK-NEXT:    str w15, [x8]
+; CHECK-NEXT:    str w16, [x8]
+; CHECK-NEXT:    str w17, [x8]
+; CHECK-NEXT:    str w18, [x8]
+; CHECK-NEXT:    str w0, [x8]
+; CHECK-NEXT:    str w1, [x8]
+; CHECK-NEXT:    str w2, [x8]
+; CHECK-NEXT:    str w3, [x8]
+; CHECK-NEXT:    str w4, [x8]
+; CHECK-NEXT:    str w5, [x8]
+; CHECK-NEXT:    str w6, [x8]
+; CHECK-NEXT:    str w7, [x8]
+; CHECK-NEXT:    str w19, [x8]
+; CHECK-NEXT:    str w9, [x8]
+; CHECK-NEXT:    ldr x19, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val1 = load i32, i32* @var1_32
   %val2 = load i32, i32* @var2_32
 
@@ -15,24 +64,18 @@ define void @logical_32bit() minsize {
   %neg_val2 = xor i32 -1, %val2
 
   %and_noshift = and i32 %val1, %val2
-; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   store volatile i32 %and_noshift, i32* @var1_32
   %bic_noshift = and i32 %neg_val2, %val1
-; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   store volatile i32 %bic_noshift, i32* @var1_32
 
   %or_noshift = or i32 %val1, %val2
-; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   store volatile i32 %or_noshift, i32* @var1_32
   %orn_noshift = or i32 %neg_val2, %val1
-; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   store volatile i32 %orn_noshift, i32* @var1_32
 
   %xor_noshift = xor i32 %val1, %val2
-; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   store volatile i32 %xor_noshift, i32* @var1_32
   %xorn_noshift = xor i32 %neg_val2, %val1
-; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}
   store volatile i32 %xorn_noshift, i32* @var1_32
 
   ; Check the maximum shift on each
@@ -40,24 +83,18 @@ define void @logical_32bit() minsize {
   %neg_operand_lsl31 = xor i32 -1, %operand_lsl31
 
   %and_lsl31 = and i32 %val1, %operand_lsl31
-; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
   store volatile i32 %and_lsl31, i32* @var1_32
   %bic_lsl31 = and i32 %val1, %neg_operand_lsl31
-; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
   store volatile i32 %bic_lsl31, i32* @var1_32
 
   %or_lsl31 = or i32 %val1, %operand_lsl31
-; CHECK: orr {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
   store volatile i32 %or_lsl31, i32* @var1_32
   %orn_lsl31 = or i32 %val1, %neg_operand_lsl31
-; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
   store volatile i32 %orn_lsl31, i32* @var1_32
 
   %xor_lsl31 = xor i32 %val1, %operand_lsl31
-; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
   store volatile i32 %xor_lsl31, i32* @var1_32
   %xorn_lsl31 = xor i32 %val1, %neg_operand_lsl31
-; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
   store volatile i32 %xorn_lsl31, i32* @var1_32
 
   ; Check other shifts on a subset
@@ -65,20 +102,16 @@ define void @logical_32bit() minsize {
   %neg_operand_asr10 = xor i32 -1, %operand_asr10
 
   %bic_asr10 = and i32 %val1, %neg_operand_asr10
-; CHECK: bic {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
   store volatile i32 %bic_asr10, i32* @var1_32
   %xor_asr10 = xor i32 %val1, %operand_asr10
-; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #10
   store volatile i32 %xor_asr10, i32* @var1_32
 
   %operand_lsr1 = lshr i32 %val2, 1
   %neg_operand_lsr1 = xor i32 -1, %operand_lsr1
 
   %orn_lsr1 = or i32 %val1, %neg_operand_lsr1
-; CHECK: orn {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
   store volatile i32 %orn_lsr1, i32* @var1_32
   %xor_lsr1 = xor i32 %val1, %operand_lsr1
-; CHECK: eor {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #1
   store volatile i32 %xor_lsr1, i32* @var1_32
 
   %operand_ror20_big = shl i32 %val2, 12
@@ -87,10 +120,8 @@ define void @logical_32bit() minsize {
   %neg_operand_ror20 = xor i32 -1, %operand_ror20
 
   %xorn_ror20 = xor i32 %val1, %neg_operand_ror20
-; CHECK: eon {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
   store volatile i32 %xorn_ror20, i32* @var1_32
   %and_ror20 = and i32 %val1, %operand_ror20
-; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, ror #20
   store volatile i32 %and_ror20, i32* @var1_32
 
   ret void
@@ -98,6 +129,54 @@ define void @logical_32bit() minsize {
 
 define void @logical_64bit() minsize {
 ; CHECK-LABEL: logical_64bit:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x19, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w19, -16
+; CHECK-NEXT:    adrp x8, :got:var1_64
+; CHECK-NEXT:    adrp x9, :got:var2_64
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var1_64]
+; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var2_64]
+; CHECK-NEXT:    ldr x10, [x8]
+; CHECK-NEXT:    ldr x9, [x9]
+; CHECK-NEXT:    and x11, x10, x9
+; CHECK-NEXT:    bic x12, x10, x9
+; CHECK-NEXT:    orr x13, x10, x9
+; CHECK-NEXT:    orn x14, x10, x9
+; CHECK-NEXT:    eor x15, x10, x9
+; CHECK-NEXT:    eon x16, x9, x10
+; CHECK-NEXT:    and x17, x10, x9, lsl #63
+; CHECK-NEXT:    bic x18, x10, x9, lsl #63
+; CHECK-NEXT:    orr x0, x10, x9, lsl #63
+; CHECK-NEXT:    orn x1, x10, x9, lsl #63
+; CHECK-NEXT:    eor x2, x10, x9, lsl #63
+; CHECK-NEXT:    eon x3, x10, x9, lsl #63
+; CHECK-NEXT:    bic x4, x10, x9, asr #10
+; CHECK-NEXT:    eor x5, x10, x9, asr #10
+; CHECK-NEXT:    orn x6, x10, x9, lsr #1
+; CHECK-NEXT:    eor x7, x10, x9, lsr #1
+; CHECK-NEXT:    eon x19, x10, x9, ror #20
+; CHECK-NEXT:    and x9, x10, x9, ror #20
+; CHECK-NEXT:    str x11, [x8]
+; CHECK-NEXT:    str x12, [x8]
+; CHECK-NEXT:    str x13, [x8]
+; CHECK-NEXT:    str x14, [x8]
+; CHECK-NEXT:    str x15, [x8]
+; CHECK-NEXT:    str x16, [x8]
+; CHECK-NEXT:    str x17, [x8]
+; CHECK-NEXT:    str x18, [x8]
+; CHECK-NEXT:    str x0, [x8]
+; CHECK-NEXT:    str x1, [x8]
+; CHECK-NEXT:    str x2, [x8]
+; CHECK-NEXT:    str x3, [x8]
+; CHECK-NEXT:    str x4, [x8]
+; CHECK-NEXT:    str x5, [x8]
+; CHECK-NEXT:    str x6, [x8]
+; CHECK-NEXT:    str x7, [x8]
+; CHECK-NEXT:    str x19, [x8]
+; CHECK-NEXT:    str x9, [x8]
+; CHECK-NEXT:    ldr x19, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %val1 = load i64, i64* @var1_64
   %val2 = load i64, i64* @var2_64
 
@@ -105,24 +184,18 @@ define void @logical_64bit() minsize {
   %neg_val2 = xor i64 -1, %val2
 
   %and_noshift = and i64 %val1, %val2
-; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
   store volatile i64 %and_noshift, i64* @var1_64
   %bic_noshift = and i64 %neg_val2, %val1
-; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
   store volatile i64 %bic_noshift, i64* @var1_64
 
   %or_noshift = or i64 %val1, %val2
-; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
   store volatile i64 %or_noshift, i64* @var1_64
   %orn_noshift = or i64 %neg_val2, %val1
-; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
   store volatile i64 %orn_noshift, i64* @var1_64
 
   %xor_noshift = xor i64 %val1, %val2
-; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
   store volatile i64 %xor_noshift, i64* @var1_64
   %xorn_noshift = xor i64 %neg_val2, %val1
-; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}
   store volatile i64 %xorn_noshift, i64* @var1_64
 
   ; Check the maximum shift on each
@@ -130,24 +203,18 @@ define void @logical_64bit() minsize {
   %neg_operand_lsl63 = xor i64 -1, %operand_lsl63
 
   %and_lsl63 = and i64 %val1, %operand_lsl63
-; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
   store volatile i64 %and_lsl63, i64* @var1_64
   %bic_lsl63 = and i64 %val1, %neg_operand_lsl63
-; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
   store volatile i64 %bic_lsl63, i64* @var1_64
 
   %or_lsl63 = or i64 %val1, %operand_lsl63
-; CHECK: orr {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
   store volatile i64 %or_lsl63, i64* @var1_64
   %orn_lsl63 = or i64 %val1, %neg_operand_lsl63
-; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
   store volatile i64 %orn_lsl63, i64* @var1_64
 
   %xor_lsl63 = xor i64 %val1, %operand_lsl63
-; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
   store volatile i64 %xor_lsl63, i64* @var1_64
   %xorn_lsl63 = xor i64 %val1, %neg_operand_lsl63
-; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
   store volatile i64 %xorn_lsl63, i64* @var1_64
 
   ; Check other shifts on a subset
@@ -155,20 +222,16 @@ define void @logical_64bit() minsize {
   %neg_operand_asr10 = xor i64 -1, %operand_asr10
 
   %bic_asr10 = and i64 %val1, %neg_operand_asr10
-; CHECK: bic {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
   store volatile i64 %bic_asr10, i64* @var1_64
   %xor_asr10 = xor i64 %val1, %operand_asr10
-; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #10
   store volatile i64 %xor_asr10, i64* @var1_64
 
   %operand_lsr1 = lshr i64 %val2, 1
   %neg_operand_lsr1 = xor i64 -1, %operand_lsr1
 
   %orn_lsr1 = or i64 %val1, %neg_operand_lsr1
-; CHECK: orn {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
   store volatile i64 %orn_lsr1, i64* @var1_64
   %xor_lsr1 = xor i64 %val1, %operand_lsr1
-; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #1
   store volatile i64 %xor_lsr1, i64* @var1_64
 
   ; Construct a rotate-right from a bunch of other logical
@@ -180,10 +243,8 @@ define void @logical_64bit() minsize {
   %neg_operand_ror20 = xor i64 -1, %operand_ror20
 
   %xorn_ror20 = xor i64 %val1, %neg_operand_ror20
-; CHECK: eon {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
   store volatile i64 %xorn_ror20, i64* @var1_64
   %and_ror20 = and i64 %val1, %operand_ror20
-; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, ror #20
   store volatile i64 %and_ror20, i64* @var1_64
 
   ret void
@@ -191,26 +252,39 @@ define void @logical_64bit() minsize {
 
 define void @flag_setting() {
 ; CHECK-LABEL: flag_setting:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :got:var1_64
+; CHECK-NEXT:    adrp x10, :got:var2_64
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var1_64]
+; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var2_64]
+; CHECK-NEXT:    ldr x9, [x8]
+; CHECK-NEXT:    ldr x10, [x10]
+; CHECK-NEXT:    tst x9, x10
+; CHECK-NEXT:    b.gt .LBB2_4
+; CHECK-NEXT:  // %bb.1: // %test2
+; CHECK-NEXT:    tst x9, x10, lsl #63
+; CHECK-NEXT:    b.lt .LBB2_4
+; CHECK-NEXT:  // %bb.2: // %test3
+; CHECK-NEXT:    tst x9, x10, asr #12
+; CHECK-NEXT:    b.gt .LBB2_4
+; CHECK-NEXT:  // %bb.3: // %other_exit
+; CHECK-NEXT:    str x9, [x8]
+; CHECK-NEXT:  .LBB2_4: // %ret
+; CHECK-NEXT:    ret
   %val1 = load i64, i64* @var1_64
   %val2 = load i64, i64* @var2_64
 
-; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}
-; CHECK: b.gt .L
   %simple_and = and i64 %val1, %val2
   %tst1 = icmp sgt i64 %simple_and, 0
   br i1 %tst1, label %ret, label %test2, !prof !1
 
 test2:
-; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, lsl #63
-; CHECK: b.lt .L
   %shifted_op = shl i64 %val2, 63
   %shifted_and = and i64 %val1, %shifted_op
   %tst2 = icmp slt i64 %shifted_and, 0
   br i1 %tst2, label %ret, label %test3, !prof !1
 
 test3:
-; CHECK: tst {{x[0-9]+}}, {{x[0-9]+}}, asr #12
-; CHECK: b.gt .L
   %asr_op = ashr i64 %val2, 12
   %asr_and = and i64 %asr_op, %val1
   %tst3 = icmp sgt i64 %asr_and, 0

diff  --git a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
index ab4ad5e2ce93d..9f128f5b52c7a 100644
--- a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
+++ b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -verify-machineinstrs -o - %s | FileCheck %s
 target triple = "arm64--"
 
@@ -8,11 +9,29 @@ target triple = "arm64--"
 ; Writing a stable/simple test is tricky since most tbz instructions are already
 ; formed in SelectionDAG, optimizeCondBranch() only triggers if the and
 ; instruction is in a different block than the conditional jump.
-;
-; CHECK-LABEL: func
-; CHECK-NOT: and
-; CHECK: tbz
+
 define void @func() {
+; CHECK-LABEL: func:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    cbnz w8, .LBB0_3
+; CHECK-NEXT:  // %bb.1: // %b1
+; CHECK-NEXT:    cbz wzr, .LBB0_4
+; CHECK-NEXT:  // %bb.2: // %b3
+; CHECK-NEXT:    ldr w8, [x8]
+; CHECK-NEXT:    tbz w8, #8, .LBB0_5
+; CHECK-NEXT:  .LBB0_3: // %b7
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    b extfunc
+; CHECK-NEXT:  .LBB0_4: // %b2
+; CHECK-NEXT:    bl extfunc
+; CHECK-NEXT:    cbnz w0, .LBB0_3
+; CHECK-NEXT:  .LBB0_5: // %b8
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   %c0 = icmp sgt i64 0, 0
   br i1 %c0, label %b1, label %b6
 

diff  --git a/llvm/test/CodeGen/ARM/ifcvt-callback.ll b/llvm/test/CodeGen/ARM/ifcvt-callback.ll
index a91b84b5ab233..ee3f651136065 100644
--- a/llvm/test/CodeGen/ARM/ifcvt-callback.ll
+++ b/llvm/test/CodeGen/ARM/ifcvt-callback.ll
@@ -1,12 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=thumb-- %s -o - | FileCheck %s
 
 ; This test checks that the if-conversion pass is unconditionally added to the pass
 ; pipeline and is conditionally executed based on the per-function target-cpu
 ; attribute.
- 
-; CHECK: ite eq
 
 define i32 @test_ifcvt(i32 %a, i32 %b) #0 {
+; CHECK-LABEL: test_ifcvt:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    cmp r0, #0
+; CHECK-NEXT:    ite eq
+; CHECK-NEXT:    subeq r0, r1, #1
+; CHECK-NEXT:    addne r0, r1, #1
+; CHECK-NEXT:    bx lr
   %tmp2 = icmp eq i32 %a, 0
   br i1 %tmp2, label %cond_false, label %cond_true
 

diff  --git a/llvm/test/CodeGen/ARM/ifcvt1.ll b/llvm/test/CodeGen/ARM/ifcvt1.ll
index cae2399d87368..6645b76e2f97f 100644
--- a/llvm/test/CodeGen/ARM/ifcvt1.ll
+++ b/llvm/test/CodeGen/ARM/ifcvt1.ll
@@ -1,21 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s -check-prefix=A8
 ; RUN: llc -mtriple=arm-eabi -mcpu=swift %s -o - | FileCheck %s -check-prefix=SWIFT
 
 define i32 @t1(i32 %a, i32 %b) {
 ; A8-LABEL: t1:
+; A8:       @ %bb.0:
+; A8-NEXT:    cmp r0, #0
+; A8-NEXT:    subeq r0, r1, #1
+; A8-NEXT:    addne r0, r1, #1
+; A8-NEXT:    bx lr
+;
 ; SWIFT-LABEL: t1:
+; SWIFT:       @ %bb.0:
+; SWIFT-NEXT:    cmp r0, #0
+; SWIFT-NEXT:    sub r0, r1, #1
+; SWIFT-NEXT:    addne r0, r1, #1
+; SWIFT-NEXT:    bx lr
 	%tmp2 = icmp eq i32 %a, 0
 	br i1 %tmp2, label %cond_false, label %cond_true
 
 cond_true:
-; A8: subeq r0, r1, #1
-; SWIFT: sub r0, r1, #1
 	%tmp5 = add i32 %b, 1
 	ret i32 %tmp5
 
 cond_false:
-; A8: addne r0, r1, #1
-; SWIFT: addne r0, r1, #1
 	%tmp7 = add i32 %b, -1
 	ret i32 %tmp7
 }

diff  --git a/llvm/test/CodeGen/ARM/ifcvt3.ll b/llvm/test/CodeGen/ARM/ifcvt3.ll
index be2a0fcc1d951..dd79287a032ff 100644
--- a/llvm/test/CodeGen/ARM/ifcvt3.ll
+++ b/llvm/test/CodeGen/ARM/ifcvt3.ll
@@ -1,21 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s
 ; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s -check-prefix CHECK-V4-CMP
 ; RUN: llc -mtriple=arm-eabi -mattr=+v4t %s -o - | FileCheck %s -check-prefix CHECK-V4-BX
 
 define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: t1:
-; CHECK: cmp r2, #7
-; CHECK: cmpne r2, #1
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    cmp r2, #7
+; CHECK-NEXT:    cmpne r2, #1
+; CHECK-NEXT:    addne r0, r1, r0
+; CHECK-NEXT:    addeq r0, r0, r1
+; CHECK-NEXT:    addeq r0, r0, #1
+; CHECK-NEXT:    bx lr
+;
+; CHECK-V4-CMP-LABEL: t1:
+; CHECK-V4-CMP:       @ %bb.0:
+; CHECK-V4-CMP-NEXT:    cmp r2, #7
+; CHECK-V4-CMP-NEXT:    cmpne r2, #1
+; CHECK-V4-CMP-NEXT:    addne r0, r1, r0
+; CHECK-V4-CMP-NEXT:    addeq r0, r0, r1
+; CHECK-V4-CMP-NEXT:    addeq r0, r0, #1
+; CHECK-V4-CMP-NEXT:    bx lr
+;
+; CHECK-V4-BX-LABEL: t1:
+; CHECK-V4-BX:       @ %bb.0:
+; CHECK-V4-BX-NEXT:    cmp r2, #7
+; CHECK-V4-BX-NEXT:    cmpne r2, #1
+; CHECK-V4-BX-NEXT:    addne r0, r1, r0
+; CHECK-V4-BX-NEXT:    addeq r0, r0, r1
+; CHECK-V4-BX-NEXT:    addeq r0, r0, #1
+; CHECK-V4-BX-NEXT:    bx lr
 	switch i32 %c, label %cond_next [
 		 i32 1, label %cond_true
 		 i32 7, label %cond_true
 	]
 
 cond_true:
-; CHECK: addne r0
-; CHECK: addeq r0
-; CHECK: addeq r0
-; CHECK: bx
 	%tmp12 = add i32 %a, 1
 	%tmp1518 = add i32 %tmp12, %b
 	ret i32 %tmp1518
@@ -24,10 +44,3 @@ cond_next:
 	%tmp15 = add i32 %b, %a
 	ret i32 %tmp15
 }
-
-; CHECK-V4-CMP: cmpne
-; CHECK-V4-CMP-NOT: cmpne
-
-; CHECK-V4-BX: bx
-; CHECK-V4-BX-NOT: bx
-

diff  --git a/llvm/test/CodeGen/ARM/load-global2.ll b/llvm/test/CodeGen/ARM/load-global2.ll
index 7ffd65ce7eda4..d5d6283a6d6dd 100644
--- a/llvm/test/CodeGen/ARM/load-global2.ll
+++ b/llvm/test/CodeGen/ARM/load-global2.ll
@@ -1,13 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; PR35221. Test that external global address is not reloaded from GOT in each BB.
 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -relocation-model=pic | FileCheck %s -check-prefix=LINUX-PIC
 
 @x = external global i8, align 1
 
 define signext i8 @foo() {
+; LINUX-PIC-LABEL: foo:
+; LINUX-PIC:       @ %bb.0: @ %entry
+; LINUX-PIC-NEXT:    .save {r4, lr}
+; LINUX-PIC-NEXT:    push {r4, lr}
+; LINUX-PIC-NEXT:    ldr r4, .LCPI0_0
+; LINUX-PIC-NEXT:  .LPC0_0:
+; LINUX-PIC-NEXT:    ldr r4, [pc, r4]
+; LINUX-PIC-NEXT:    ldrb r0, [r4]
+; LINUX-PIC-NEXT:    cmp r0, #0
+; LINUX-PIC-NEXT:    movne r0, #0
+; LINUX-PIC-NEXT:    popne {r4, pc}
+; LINUX-PIC-NEXT:  .LBB0_1: @ %bb1
+; LINUX-PIC-NEXT:    bl bar
+; LINUX-PIC-NEXT:    ldrsb r0, [r4]
+; LINUX-PIC-NEXT:    pop {r4, pc}
+; LINUX-PIC-NEXT:    .p2align 2
+; LINUX-PIC-NEXT:  @ %bb.2:
+; LINUX-PIC-NEXT:  .LCPI0_0:
+; LINUX-PIC-NEXT:  .Ltmp0:
+; LINUX-PIC-NEXT:    .long x(GOT_PREL)-((.LPC0_0+8)-.Ltmp0)
 entry:
-; LINUX-PIC:     ldr	r[[A:.]], .LCPI0_0
-; LINUX-PIC:     ldr	r[[B:.]], [pc, r[[A]]]
-; LINUX-PIC:     ldrb	r{{.}}, [r[[B]]]
   %0 = load i8, i8* @x
   %tobool = icmp eq i8 %0, 0
   br i1 %tobool, label %bb1, label %bb2
@@ -15,9 +33,6 @@ entry:
 bb1:
   call void @bar()
 ; No more pc-relative loads! Reuse r[[B]].
-; LINUX-PIC:     bl	bar
-; LINUX-PIC-NOT: ldr{{.*}}[pc,
-; LINUX-PIC:     ldrsb	r{{.}}, [r[[B]]]
   %1 = load i8, i8* @x
   ret i8 %1
 

diff  --git a/llvm/test/CodeGen/ARM/smml.ll b/llvm/test/CodeGen/ARM/smml.ll
index 712aaa4392f60..86373066b9231 100644
--- a/llvm/test/CodeGen/ARM/smml.ll
+++ b/llvm/test/CodeGen/ARM/smml.ll
@@ -1,12 +1,13 @@
-; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V4
-; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
-; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V6
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMB
-; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMB
-; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
-; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
-; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-V4
-; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-THUMBV6T2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V4
+; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V6
+; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V6
+; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMB
+; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6
+; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6T2
+; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6T2
+; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V4-THUMBV7M
+; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6T2
 
 ; Next test would previously trigger an assertion responsible for verification of
 ; call site info state.
@@ -14,11 +15,61 @@
 ; CHECK-CALLSITE: name:  test_used_flags
 ; CHECK-CALLSITE: callSites:
 
-
 define i32 @Test0(i32 %a, i32 %b, i32 %c) nounwind readnone ssp {
+; CHECK-V4-LABEL: Test0:
+; CHECK-V4:       @ %bb.0: @ %entry
+; CHECK-V4-NEXT:    smull r3, r12, r2, r1
+; CHECK-V4-NEXT:    sub r0, r0, r12
+; CHECK-V4-NEXT:    mov pc, lr
+;
+; CHECK-V6-LABEL: Test0:
+; CHECK-V6:       @ %bb.0: @ %entry
+; CHECK-V6-NEXT:    smmul r1, r2, r1
+; CHECK-V6-NEXT:    sub r0, r0, r1
+; CHECK-V6-NEXT:    bx lr
+;
+; CHECK-THUMB-LABEL: Test0:
+; CHECK-THUMB:       @ %bb.0: @ %entry
+; CHECK-THUMB-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-THUMB-NEXT:    push {r4, r5, r7, lr}
+; CHECK-THUMB-NEXT:    movs r5, r1
+; CHECK-THUMB-NEXT:    movs r4, r0
+; CHECK-THUMB-NEXT:    asrs r1, r2, #31
+; CHECK-THUMB-NEXT:    asrs r3, r5, #31
+; CHECK-THUMB-NEXT:    movs r0, r2
+; CHECK-THUMB-NEXT:    movs r2, r5
+; CHECK-THUMB-NEXT:    bl __aeabi_lmul
+; CHECK-THUMB-NEXT:    subs r0, r4, r1
+; CHECK-THUMB-NEXT:    pop {r4, r5, r7}
+; CHECK-THUMB-NEXT:    pop {r1}
+; CHECK-THUMB-NEXT:    bx r1
+;
+; CHECK-THUMBV6-LABEL: Test0:
+; CHECK-THUMBV6:       @ %bb.0: @ %entry
+; CHECK-THUMBV6-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-THUMBV6-NEXT:    push {r4, r5, r7, lr}
+; CHECK-THUMBV6-NEXT:    mov r5, r1
+; CHECK-THUMBV6-NEXT:    mov r4, r0
+; CHECK-THUMBV6-NEXT:    asrs r1, r2, #31
+; CHECK-THUMBV6-NEXT:    asrs r3, r5, #31
+; CHECK-THUMBV6-NEXT:    mov r0, r2
+; CHECK-THUMBV6-NEXT:    mov r2, r5
+; CHECK-THUMBV6-NEXT:    bl __aeabi_lmul
+; CHECK-THUMBV6-NEXT:    subs r0, r4, r1
+; CHECK-THUMBV6-NEXT:    pop {r4, r5, r7, pc}
+;
+; CHECK-THUMBV6T2-LABEL: Test0:
+; CHECK-THUMBV6T2:       @ %bb.0: @ %entry
+; CHECK-THUMBV6T2-NEXT:    smmul r1, r2, r1
+; CHECK-THUMBV6T2-NEXT:    subs r0, r0, r1
+; CHECK-THUMBV6T2-NEXT:    bx lr
+;
+; CHECK-V4-THUMBV7M-LABEL: Test0:
+; CHECK-V4-THUMBV7M:       @ %bb.0: @ %entry
+; CHECK-V4-THUMBV7M-NEXT:    smull r1, r2, r2, r1
+; CHECK-V4-THUMBV7M-NEXT:    subs r0, r0, r2
+; CHECK-V4-THUMBV7M-NEXT:    bx lr
 entry:
-; CHECK-LABEL: Test0
-; CHECK-NOT: smmls
   %conv4 = zext i32 %a to i64
   %conv1 = sext i32 %b to i64
   %conv2 = sext i32 %c to i64
@@ -30,11 +81,63 @@ entry:
 }
 
 define i32 @Test1(i32 %a, i32 %b, i32 %c) {
-;CHECK-LABEL: Test1
-;CHECK-V4-NOT: smmls
-;CHECK-THUMB-NOT: smmls
-;CHECK-V6: smmls r0, [[Rn:r[1-2]]], [[Rm:r[1-2]]], r0
-;CHECK-THUMBV6T2: smmls r0, [[Rn:r[1-2]]], [[Rm:r[1-2]]], r0
+; CHECK-V4-LABEL: Test1:
+; CHECK-V4:       @ %bb.0: @ %entry
+; CHECK-V4-NEXT:    smull r3, r12, r2, r1
+; CHECK-V4-NEXT:    rsbs r1, r3, #0
+; CHECK-V4-NEXT:    sbc r0, r0, r12
+; CHECK-V4-NEXT:    mov pc, lr
+;
+; CHECK-V6-LABEL: Test1:
+; CHECK-V6:       @ %bb.0: @ %entry
+; CHECK-V6-NEXT:    smmls r0, r2, r1, r0
+; CHECK-V6-NEXT:    bx lr
+;
+; CHECK-THUMB-LABEL: Test1:
+; CHECK-THUMB:       @ %bb.0: @ %entry
+; CHECK-THUMB-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-THUMB-NEXT:    push {r4, r5, r7, lr}
+; CHECK-THUMB-NEXT:    movs r5, r1
+; CHECK-THUMB-NEXT:    movs r4, r0
+; CHECK-THUMB-NEXT:    asrs r1, r2, #31
+; CHECK-THUMB-NEXT:    asrs r3, r5, #31
+; CHECK-THUMB-NEXT:    movs r0, r2
+; CHECK-THUMB-NEXT:    movs r2, r5
+; CHECK-THUMB-NEXT:    bl __aeabi_lmul
+; CHECK-THUMB-NEXT:    rsbs r0, r0, #0
+; CHECK-THUMB-NEXT:    sbcs r4, r1
+; CHECK-THUMB-NEXT:    movs r0, r4
+; CHECK-THUMB-NEXT:    pop {r4, r5, r7}
+; CHECK-THUMB-NEXT:    pop {r1}
+; CHECK-THUMB-NEXT:    bx r1
+;
+; CHECK-THUMBV6-LABEL: Test1:
+; CHECK-THUMBV6:       @ %bb.0: @ %entry
+; CHECK-THUMBV6-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-THUMBV6-NEXT:    push {r4, r5, r7, lr}
+; CHECK-THUMBV6-NEXT:    mov r5, r1
+; CHECK-THUMBV6-NEXT:    mov r4, r0
+; CHECK-THUMBV6-NEXT:    asrs r1, r2, #31
+; CHECK-THUMBV6-NEXT:    asrs r3, r5, #31
+; CHECK-THUMBV6-NEXT:    mov r0, r2
+; CHECK-THUMBV6-NEXT:    mov r2, r5
+; CHECK-THUMBV6-NEXT:    bl __aeabi_lmul
+; CHECK-THUMBV6-NEXT:    rsbs r0, r0, #0
+; CHECK-THUMBV6-NEXT:    sbcs r4, r1
+; CHECK-THUMBV6-NEXT:    mov r0, r4
+; CHECK-THUMBV6-NEXT:    pop {r4, r5, r7, pc}
+;
+; CHECK-THUMBV6T2-LABEL: Test1:
+; CHECK-THUMBV6T2:       @ %bb.0: @ %entry
+; CHECK-THUMBV6T2-NEXT:    smmls r0, r2, r1, r0
+; CHECK-THUMBV6T2-NEXT:    bx lr
+;
+; CHECK-V4-THUMBV7M-LABEL: Test1:
+; CHECK-V4-THUMBV7M:       @ %bb.0: @ %entry
+; CHECK-V4-THUMBV7M-NEXT:    smull r1, r2, r2, r1
+; CHECK-V4-THUMBV7M-NEXT:    rsbs r1, r1, #0
+; CHECK-V4-THUMBV7M-NEXT:    sbcs r0, r2
+; CHECK-V4-THUMBV7M-NEXT:    bx lr
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
@@ -49,18 +152,107 @@ entry:
 
 declare void @opaque(i32)
 define void @test_used_flags(i32 %in1, i32 %in2) {
-; CHECK-LABEL: test_used_flags:
-; CHECK-THUMB: movs    r2, #0
-; CHECK-THUMB: rsbs    r0, r0, #0
-; CHECK-THUMB: sbcs    r2, r1
-; CHECK-THUMB: bge
-; CHECK-V6: smull [[PROD_LO:r[0-9]+]], [[PROD_HI:r[0-9]+]], r0, r1
-; CHECK-V6: rsbs {{.*}}, [[PROD_LO]], #0
-; CHECK-V6: rscs {{.*}}, [[PROD_HI]], #0
-; CHECK-THUMBV6T2: smull [[PROD_LO:r[0-9]+]], [[PROD_HI:r[0-9]+]], r0, r1
-; CHECK-THUMBV6T2: movs	[[ZERO:r[0-9]+]], #0
-; CHECK-THUMBV6T2: rsbs	{{.*}}, [[PROD_LO]], #0
-; CHECK-THUMBV6T2: sbcs.w {{.*}}, [[ZERO]], [[PROD_HI]]
+; CHECK-V4-LABEL: test_used_flags:
+; CHECK-V4:       @ %bb.0:
+; CHECK-V4-NEXT:    .save {r11, lr}
+; CHECK-V4-NEXT:    push {r11, lr}
+; CHECK-V4-NEXT:    smull r2, r3, r0, r1
+; CHECK-V4-NEXT:    rsbs r0, r2, #0
+; CHECK-V4-NEXT:    rscs r0, r3, #0
+; CHECK-V4-NEXT:    movge r0, #42
+; CHECK-V4-NEXT:    movlt r0, #56
+; CHECK-V4-NEXT:    bl opaque
+; CHECK-V4-NEXT:    pop {r11, lr}
+; CHECK-V4-NEXT:    mov pc, lr
+;
+; CHECK-V6-LABEL: test_used_flags:
+; CHECK-V6:       @ %bb.0:
+; CHECK-V6-NEXT:    .save {r11, lr}
+; CHECK-V6-NEXT:    push {r11, lr}
+; CHECK-V6-NEXT:    smull r0, r1, r0, r1
+; CHECK-V6-NEXT:    rsbs r0, r0, #0
+; CHECK-V6-NEXT:    rscs r0, r1, #0
+; CHECK-V6-NEXT:    bge .LBB2_2
+; CHECK-V6-NEXT:  @ %bb.1: @ %false
+; CHECK-V6-NEXT:    mov r0, #56
+; CHECK-V6-NEXT:    bl opaque
+; CHECK-V6-NEXT:    pop {r11, pc}
+; CHECK-V6-NEXT:  .LBB2_2: @ %true
+; CHECK-V6-NEXT:    mov r0, #42
+; CHECK-V6-NEXT:    bl opaque
+; CHECK-V6-NEXT:    pop {r11, pc}
+;
+; CHECK-THUMB-LABEL: test_used_flags:
+; CHECK-THUMB:       @ %bb.0:
+; CHECK-THUMB-NEXT:    .save {r7, lr}
+; CHECK-THUMB-NEXT:    push {r7, lr}
+; CHECK-THUMB-NEXT:    movs r2, r1
+; CHECK-THUMB-NEXT:    asrs r1, r0, #31
+; CHECK-THUMB-NEXT:    asrs r3, r2, #31
+; CHECK-THUMB-NEXT:    bl __aeabi_lmul
+; CHECK-THUMB-NEXT:    movs r2, #0
+; CHECK-THUMB-NEXT:    rsbs r0, r0, #0
+; CHECK-THUMB-NEXT:    sbcs r2, r1
+; CHECK-THUMB-NEXT:    bge .LBB2_2
+; CHECK-THUMB-NEXT:  @ %bb.1: @ %false
+; CHECK-THUMB-NEXT:    movs r0, #56
+; CHECK-THUMB-NEXT:    b .LBB2_3
+; CHECK-THUMB-NEXT:  .LBB2_2: @ %true
+; CHECK-THUMB-NEXT:    movs r0, #42
+; CHECK-THUMB-NEXT:  .LBB2_3: @ %true
+; CHECK-THUMB-NEXT:    bl opaque
+; CHECK-THUMB-NEXT:    pop {r7}
+; CHECK-THUMB-NEXT:    pop {r0}
+; CHECK-THUMB-NEXT:    bx r0
+;
+; CHECK-THUMBV6-LABEL: test_used_flags:
+; CHECK-THUMBV6:       @ %bb.0:
+; CHECK-THUMBV6-NEXT:    .save {r7, lr}
+; CHECK-THUMBV6-NEXT:    push {r7, lr}
+; CHECK-THUMBV6-NEXT:    mov r2, r1
+; CHECK-THUMBV6-NEXT:    asrs r1, r0, #31
+; CHECK-THUMBV6-NEXT:    asrs r3, r2, #31
+; CHECK-THUMBV6-NEXT:    bl __aeabi_lmul
+; CHECK-THUMBV6-NEXT:    movs r2, #0
+; CHECK-THUMBV6-NEXT:    rsbs r0, r0, #0
+; CHECK-THUMBV6-NEXT:    sbcs r2, r1
+; CHECK-THUMBV6-NEXT:    bge .LBB2_2
+; CHECK-THUMBV6-NEXT:  @ %bb.1: @ %false
+; CHECK-THUMBV6-NEXT:    movs r0, #56
+; CHECK-THUMBV6-NEXT:    bl opaque
+; CHECK-THUMBV6-NEXT:    pop {r7, pc}
+; CHECK-THUMBV6-NEXT:  .LBB2_2: @ %true
+; CHECK-THUMBV6-NEXT:    movs r0, #42
+; CHECK-THUMBV6-NEXT:    bl opaque
+; CHECK-THUMBV6-NEXT:    pop {r7, pc}
+;
+; CHECK-THUMBV6T2-LABEL: test_used_flags:
+; CHECK-THUMBV6T2:       @ %bb.0:
+; CHECK-THUMBV6T2-NEXT:    .save {r7, lr}
+; CHECK-THUMBV6T2-NEXT:    push {r7, lr}
+; CHECK-THUMBV6T2-NEXT:    smull r0, r1, r0, r1
+; CHECK-THUMBV6T2-NEXT:    movs r2, #0
+; CHECK-THUMBV6T2-NEXT:    rsbs r0, r0, #0
+; CHECK-THUMBV6T2-NEXT:    sbcs.w r0, r2, r1
+; CHECK-THUMBV6T2-NEXT:    ite lt
+; CHECK-THUMBV6T2-NEXT:    movlt r0, #56
+; CHECK-THUMBV6T2-NEXT:    movge r0, #42
+; CHECK-THUMBV6T2-NEXT:    bl opaque
+; CHECK-THUMBV6T2-NEXT:    pop {r7, pc}
+;
+; CHECK-V4-THUMBV7M-LABEL: test_used_flags:
+; CHECK-V4-THUMBV7M:       @ %bb.0:
+; CHECK-V4-THUMBV7M-NEXT:    .save {r7, lr}
+; CHECK-V4-THUMBV7M-NEXT:    push {r7, lr}
+; CHECK-V4-THUMBV7M-NEXT:    smull r0, r1, r0, r1
+; CHECK-V4-THUMBV7M-NEXT:    movs r2, #0
+; CHECK-V4-THUMBV7M-NEXT:    rsbs r0, r0, #0
+; CHECK-V4-THUMBV7M-NEXT:    sbcs.w r0, r2, r1
+; CHECK-V4-THUMBV7M-NEXT:    ite lt
+; CHECK-V4-THUMBV7M-NEXT:    movlt r0, #56
+; CHECK-V4-THUMBV7M-NEXT:    movge r0, #42
+; CHECK-V4-THUMBV7M-NEXT:    bl opaque
+; CHECK-V4-THUMBV7M-NEXT:    pop {r7, pc}
   %in1.64 = sext i32 %in1 to i64
   %in2.64 = sext i32 %in2 to i64
   %mul = mul nsw i64 %in1.64, %in2.64

diff  --git a/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll b/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll
index 2731cef93003a..a314d3e14061c 100644
--- a/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll
+++ b/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll
@@ -1,28 +1,52 @@
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT,ISBDSB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT,ISBDSB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT,SB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT,SB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT-OFF,ISBDSB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT-OFF,ISBDSB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT-OFF,SB -dump-input-context=100
-; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT-OFF,SB -dump-input-context=100
-; RUN: llc -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,NOHARDENARM -dump-input-context=100
-; RUN: llc -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,NOHARDENTHUMB
-; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT,ISBDSB
-; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT,ISBDSB
-; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-nocomdat -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT-OFF,ISBDSB
-; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-nocomdat -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT-OFF,ISBDSB
-; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,SB
-; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,SB
-; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT,ISBDSB
-; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT,ISBDSB
-; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,HARDEN-COMDAT-OFF,ISBDSB
-; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,HARDEN-COMDAT-OFF,ISBDSB
-; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,ARM,HARDEN,SB
-; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=CHECK,THUMB,HARDENTHUMB,HARDEN,SB
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,SB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,SB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,SB -dump-input-context=100
+; RUN: llc -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,SB -dump-input-context=100
+; RUN: llc -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=NOHARDENARM -dump-input-context=100
+; RUN: llc -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=NOHARDENTHUMB
+; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
+; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
+; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-nocomdat -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
+; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-nocomdat -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
+; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
+; RUN: llc -global-isel -global-isel-abort=0 -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
+; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
+; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT,ISBDSB
+; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
+; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=harden-sls-nocomdat -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,HARDEN-COMDAT-OFF,ISBDSB
+; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=armv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
+; RUN: llc -fast-isel -mattr=harden-sls-retbr -mattr=harden-sls-blr -mattr=+sb -verify-machineinstrs -mtriple=thumbv8-linux-gnueabi < %s | FileCheck %s --check-prefixes=HARDEN,SB
 
 ; Function Attrs: norecurse nounwind readnone
 define dso_local i32 @double_return(i32 %a, i32 %b) local_unnamed_addr {
+; NOHARDENARM-LABEL: double_return:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    cmp r0, #1
+; NOHARDENARM-NEXT:    mulge r0, r1, r0
+; NOHARDENARM-NEXT:    bxge lr
+; NOHARDENARM-NEXT:  .LBB0_1: @ %if.else
+; NOHARDENARM-NEXT:    sdiv r1, r0, r1
+; NOHARDENARM-NEXT:    sdiv r1, r0, r1
+; NOHARDENARM-NEXT:    sdiv r0, r0, r1
+; NOHARDENARM-NEXT:    bx lr
+;
+; NOHARDENTHUMB-LABEL: double_return:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    cmp r0, #1
+; NOHARDENTHUMB-NEXT:    blt .LBB0_2
+; NOHARDENTHUMB-NEXT:  @ %bb.1: @ %if.then
+; NOHARDENTHUMB-NEXT:    muls r0, r1, r0
+; NOHARDENTHUMB-NEXT:    bx lr
+; NOHARDENTHUMB-NEXT:  .LBB0_2: @ %if.else
+; NOHARDENTHUMB-NEXT:    sdiv r1, r0, r1
+; NOHARDENTHUMB-NEXT:    sdiv r1, r0, r1
+; NOHARDENTHUMB-NEXT:    sdiv r0, r0, r1
+; NOHARDENTHUMB-NEXT:    bx lr
 entry:
   %cmp = icmp sgt i32 %a, 0
   br i1 %cmp, label %if.then, label %if.else
@@ -32,13 +56,6 @@ if.then:                                          ; preds = %entry
   ; it will not get predicated when sls-hardening is enabled.
   %mul = mul i32 %b, %a
   ret i32 %mul
-; CHECK-LABEL: double_return:
-; HARDEN:          {{bx lr$}}
-; NOHARDENARM:     {{bxge lr$}}
-; NOHARDENTHUMB:   {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
 
 if.else:                                          ; preds = %entry
   %div3 = sdiv i32 %a, %b
@@ -46,79 +63,149 @@ if.else:                                          ; preds = %entry
   %div1 = sdiv i32 %a, %div2
   ret i32 %div1
 
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
-; CHECK-NEXT: .Lfunc_end
 }
 
 @__const.indirect_branch.ptr = private unnamed_addr constant [2 x i8*] [i8* blockaddress(@indirect_branch, %return), i8* blockaddress(@indirect_branch, %l2)], align 8
 
 ; Function Attrs: norecurse nounwind readnone
 define dso_local i32 @indirect_branch(i32 %a, i32 %b, i32 %i) {
-; CHECK-LABEL: indirect_branch:
+; NOHARDENARM-LABEL: indirect_branch:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    movw r0, :lower16:.L__const.indirect_branch.ptr
+; NOHARDENARM-NEXT:    movt r0, :upper16:.L__const.indirect_branch.ptr
+; NOHARDENARM-NEXT:    ldr r0, [r0, r2, lsl #2]
+; NOHARDENARM-NEXT:    bx r0
+; NOHARDENARM-NEXT:  .Ltmp0: @ Block address taken
+; NOHARDENARM-NEXT:  .LBB1_1: @ %return
+; NOHARDENARM-NEXT:    mov r0, #0
+; NOHARDENARM-NEXT:    bx lr
+; NOHARDENARM-NEXT:  .Ltmp1: @ Block address taken
+; NOHARDENARM-NEXT:  .LBB1_2: @ %l2
+; NOHARDENARM-NEXT:    mov r0, #1
+; NOHARDENARM-NEXT:    bx lr
+;
+; NOHARDENTHUMB-LABEL: indirect_branch:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    movw r0, :lower16:.L__const.indirect_branch.ptr
+; NOHARDENTHUMB-NEXT:    movt r0, :upper16:.L__const.indirect_branch.ptr
+; NOHARDENTHUMB-NEXT:    ldr.w r0, [r0, r2, lsl #2]
+; NOHARDENTHUMB-NEXT:    mov pc, r0
+; NOHARDENTHUMB-NEXT:  .Ltmp0: @ Block address taken
+; NOHARDENTHUMB-NEXT:  .LBB1_1: @ %return
+; NOHARDENTHUMB-NEXT:    movs r0, #0
+; NOHARDENTHUMB-NEXT:    bx lr
+; NOHARDENTHUMB-NEXT:  .Ltmp1: @ Block address taken
+; NOHARDENTHUMB-NEXT:  .LBB1_2: @ %l2
+; NOHARDENTHUMB-NEXT:    movs r0, #1
+; NOHARDENTHUMB-NEXT:    bx lr
 entry:
   %idxprom = sext i32 %i to i64
   %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @__const.indirect_branch.ptr, i64 0, i64 %idxprom
   %0 = load i8*, i8** %arrayidx, align 8
   indirectbr i8* %0, [label %return, label %l2]
-; ARM:       bx r0
-; THUMB:     mov pc, r0
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
 
 l2:                                               ; preds = %entry
   br label %return
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
 
 return:                                           ; preds = %entry, %l2
   %retval.0 = phi i32 [ 1, %l2 ], [ 0, %entry ]
   ret i32 %retval.0
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
-; CHECK-NEXT: .Lfunc_end
 }
 
 define i32 @asmgoto() {
+; NOHARDENARM-LABEL: asmgoto:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    @APP
+; NOHARDENARM-NEXT:    b .Ltmp2
+; NOHARDENARM-NEXT:    @NO_APP
+; NOHARDENARM-NEXT:  @ %bb.1: @ %asm.fallthrough
+; NOHARDENARM-NEXT:    mov r0, #0
+; NOHARDENARM-NEXT:    bx lr
+; NOHARDENARM-NEXT:  .Ltmp2: @ Block address taken
+; NOHARDENARM-NEXT:  .LBB2_2: @ %d
+; NOHARDENARM-NEXT:    mov r0, #1
+; NOHARDENARM-NEXT:    bx lr
+;
+; NOHARDENTHUMB-LABEL: asmgoto:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    @APP
+; NOHARDENTHUMB-NEXT:    b .Ltmp2
+; NOHARDENTHUMB-NEXT:    @NO_APP
+; NOHARDENTHUMB-NEXT:  @ %bb.1: @ %asm.fallthrough
+; NOHARDENTHUMB-NEXT:    movs r0, #0
+; NOHARDENTHUMB-NEXT:    bx lr
+; NOHARDENTHUMB-NEXT:  .Ltmp2: @ Block address taken
+; NOHARDENTHUMB-NEXT:  .LBB2_2: @ %d
+; NOHARDENTHUMB-NEXT:    movs r0, #1
+; NOHARDENTHUMB-NEXT:    bx lr
 entry:
-; CHECK-LABEL: asmgoto:
   callbr void asm sideeffect "B $0", "X"(i8* blockaddress(@asmgoto, %d))
             to label %asm.fallthrough [label %d]
      ; The asm goto above produces a direct branch:
-; CHECK:           @APP
-; CHECK-NEXT:      {{^[ \t]+b }}
-; CHECK-NEXT:      @NO_APP
      ; For direct branches, no mitigation is needed.
 ; ISDDSB-NOT: dsb sy
-; SB-NOT:     {{ sb$}}
 
 asm.fallthrough:               ; preds = %entry
   ret i32 0
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
 
 d:                             ; preds = %asm.fallthrough, %entry
   ret i32 1
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
-; CHECK-NEXT: .Lfunc_end
 }
 
 ; Check that indirect branches produced through switch jump tables are also
 ; hardened:
 define dso_local i32 @jumptable(i32 %a, i32 %b) {
-; CHECK-LABEL: jumptable:
+; NOHARDENARM-LABEL: jumptable:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    cmp r1, #4
+; NOHARDENARM-NEXT:    bxhi lr
+; NOHARDENARM-NEXT:  .LBB3_1: @ %entry
+; NOHARDENARM-NEXT:    adr r2, .LJTI3_0
+; NOHARDENARM-NEXT:    ldr pc, [r2, r1, lsl #2]
+; NOHARDENARM-NEXT:  @ %bb.2:
+; NOHARDENARM-NEXT:    .p2align 2
+; NOHARDENARM-NEXT:  .LJTI3_0:
+; NOHARDENARM-NEXT:    .long .LBB3_3
+; NOHARDENARM-NEXT:    .long .LBB3_4
+; NOHARDENARM-NEXT:    .long .LBB3_7
+; NOHARDENARM-NEXT:    .long .LBB3_5
+; NOHARDENARM-NEXT:    .long .LBB3_6
+; NOHARDENARM-NEXT:  .LBB3_3: @ %sw.bb
+; NOHARDENARM-NEXT:    lsl r0, r0, #1
+; NOHARDENARM-NEXT:  .LBB3_4: @ %sw.bb1
+; NOHARDENARM-NEXT:    lsl r0, r0, #1
+; NOHARDENARM-NEXT:  .LBB3_5: @ %sw.bb3
+; NOHARDENARM-NEXT:    lsl r0, r0, #1
+; NOHARDENARM-NEXT:  .LBB3_6: @ %sw.bb5
+; NOHARDENARM-NEXT:    lsl r0, r0, #1
+; NOHARDENARM-NEXT:  .LBB3_7: @ %sw.epilog
+; NOHARDENARM-NEXT:    bx lr
+;
+; NOHARDENTHUMB-LABEL: jumptable:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    cmp r1, #4
+; NOHARDENTHUMB-NEXT:    bhi .LBB3_7
+; NOHARDENTHUMB-NEXT:  @ %bb.1: @ %entry
+; NOHARDENTHUMB-NEXT:  .LCPI3_0:
+; NOHARDENTHUMB-NEXT:    tbb [pc, r1]
+; NOHARDENTHUMB-NEXT:  @ %bb.2:
+; NOHARDENTHUMB-NEXT:  .LJTI3_0:
+; NOHARDENTHUMB-NEXT:    .byte (.LBB3_3-(.LCPI3_0+4))/2
+; NOHARDENTHUMB-NEXT:    .byte (.LBB3_4-(.LCPI3_0+4))/2
+; NOHARDENTHUMB-NEXT:    .byte (.LBB3_7-(.LCPI3_0+4))/2
+; NOHARDENTHUMB-NEXT:    .byte (.LBB3_5-(.LCPI3_0+4))/2
+; NOHARDENTHUMB-NEXT:    .byte (.LBB3_6-(.LCPI3_0+4))/2
+; NOHARDENTHUMB-NEXT:    .p2align 1
+; NOHARDENTHUMB-NEXT:  .LBB3_3: @ %sw.bb
+; NOHARDENTHUMB-NEXT:    lsls r0, r0, #1
+; NOHARDENTHUMB-NEXT:  .LBB3_4: @ %sw.bb1
+; NOHARDENTHUMB-NEXT:    lsls r0, r0, #1
+; NOHARDENTHUMB-NEXT:  .LBB3_5: @ %sw.bb3
+; NOHARDENTHUMB-NEXT:    lsls r0, r0, #1
+; NOHARDENTHUMB-NEXT:  .LBB3_6: @ %sw.bb5
+; NOHARDENTHUMB-NEXT:    lsls r0, r0, #1
+; NOHARDENTHUMB-NEXT:  .LBB3_7: @ %sw.epilog
+; NOHARDENTHUMB-NEXT:    bx lr
 entry:
   switch i32 %b, label %sw.epilog [
     i32 0, label %sw.bb
@@ -126,12 +213,6 @@ entry:
     i32 3, label %sw.bb3
     i32 4, label %sw.bb5
   ]
-; ARM:             ldr pc, [{{r[0-9]}}, {{r[0-9]}}, lsl #2]
-; NOHARDENTHUMB:   tbb [pc, {{r[0-9]}}]
-; HARDENTHUMB:     mov pc, {{r[0-9]}}
-; ISBDSB-NEXT:     dsb sy
-; ISBDSB-NEXT:     isb
-; SB-NEXT:         {{ sb$}}
 
 
 sw.bb:                                            ; preds = %entry
@@ -156,42 +237,77 @@ sw.bb5:                                           ; preds = %entry, %sw.bb3
 sw.epilog:                                        ; preds = %sw.bb5, %entry
   %a.addr.3 = phi i32 [ %a, %entry ], [ %add6, %sw.bb5 ]
   ret i32 %a.addr.3
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
 }
 
 define dso_local i32 @indirect_call(
+; NOHARDENARM-LABEL: indirect_call:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    .save {r4, r5, r11, lr}
+; NOHARDENARM-NEXT:    push {r4, r5, r11, lr}
+; NOHARDENARM-NEXT:    mov r4, r1
+; NOHARDENARM-NEXT:    blx r0
+; NOHARDENARM-NEXT:    mov r5, r0
+; NOHARDENARM-NEXT:    blx r4
+; NOHARDENARM-NEXT:    add r0, r0, r5
+; NOHARDENARM-NEXT:    pop {r4, r5, r11, pc}
+;
+; NOHARDENTHUMB-LABEL: indirect_call:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    .save {r4, r5, r7, lr}
+; NOHARDENTHUMB-NEXT:    push {r4, r5, r7, lr}
+; NOHARDENTHUMB-NEXT:    mov r4, r1
+; NOHARDENTHUMB-NEXT:    blx r0
+; NOHARDENTHUMB-NEXT:    mov r5, r0
+; NOHARDENTHUMB-NEXT:    blx r4
+; NOHARDENTHUMB-NEXT:    add r0, r5
+; NOHARDENTHUMB-NEXT:    pop {r4, r5, r7, pc}
 i32 (...)* nocapture %f1, i32 (...)* nocapture %f2) {
 entry:
-; CHECK-LABEL: indirect_call:
   %callee.knr.cast = bitcast i32 (...)* %f1 to i32 ()*
   %call = tail call i32 %callee.knr.cast()
 ; HARDENARM: bl {{__llvm_slsblr_thunk_arm_r[0-9]+$}}
-; HARDENTHUMB: bl {{__llvm_slsblr_thunk_thumb_r[0-9]+$}}
   %callee.knr.cast1 = bitcast i32 (...)* %f2 to i32 ()*
   %call2 = tail call i32 %callee.knr.cast1()
 ; HARDENARM: bl {{__llvm_slsblr_thunk_arm_r[0-9]+$}}
-; HARDENTHUMB: bl {{__llvm_slsblr_thunk_thumb_r[0-9]+$}}
   %add = add nsw i32 %call2, %call
   ret i32 %add
-; CHECK: .Lfunc_end
 }
 
 ; verify calling through a function pointer.
 @a = dso_local local_unnamed_addr global i32 (...)* null, align 8
 @b = dso_local local_unnamed_addr global i32 0, align 4
 define dso_local void @indirect_call_global() local_unnamed_addr {
-; CHECK-LABEL: indirect_call_global:
+; NOHARDENARM-LABEL: indirect_call_global:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    .save {r11, lr}
+; NOHARDENARM-NEXT:    push {r11, lr}
+; NOHARDENARM-NEXT:    movw r0, :lower16:a
+; NOHARDENARM-NEXT:    movt r0, :upper16:a
+; NOHARDENARM-NEXT:    ldr r0, [r0]
+; NOHARDENARM-NEXT:    blx r0
+; NOHARDENARM-NEXT:    movw r1, :lower16:b
+; NOHARDENARM-NEXT:    movt r1, :upper16:b
+; NOHARDENARM-NEXT:    str r0, [r1]
+; NOHARDENARM-NEXT:    pop {r11, pc}
+;
+; NOHARDENTHUMB-LABEL: indirect_call_global:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    .save {r7, lr}
+; NOHARDENTHUMB-NEXT:    push {r7, lr}
+; NOHARDENTHUMB-NEXT:    movw r0, :lower16:a
+; NOHARDENTHUMB-NEXT:    movt r0, :upper16:a
+; NOHARDENTHUMB-NEXT:    ldr r0, [r0]
+; NOHARDENTHUMB-NEXT:    blx r0
+; NOHARDENTHUMB-NEXT:    movw r1, :lower16:b
+; NOHARDENTHUMB-NEXT:    movt r1, :upper16:b
+; NOHARDENTHUMB-NEXT:    str r0, [r1]
+; NOHARDENTHUMB-NEXT:    pop {r7, pc}
 entry:
   %0 = load i32 ()*, i32 ()** bitcast (i32 (...)** @a to i32 ()**), align 8
   %call = tail call i32 %0()  nounwind
 ; HARDENARM: bl {{__llvm_slsblr_thunk_arm_r[0-9]+$}}
-; HARDENTHUMB: bl {{__llvm_slsblr_thunk_thumb_r[0-9]+$}}
   store i32 %call, i32* @b, align 4
   ret void
-; CHECK: .Lfunc_end
 }
 
 ; Verify that neither r12 nor lr are used as registers in indirect call
@@ -200,47 +316,106 @@ entry:
 ; (b) the hardening transformation isn't correct if lr is the register holding
 ;     the address of the function called.
 define i32 @check_r12(i32 ()** %fp) {
+; NOHARDENARM-LABEL: check_r12:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    .save {r11, lr}
+; NOHARDENARM-NEXT:    push {r11, lr}
+; NOHARDENARM-NEXT:    ldr r12, [r0]
+; NOHARDENARM-NEXT:    @APP
+; NOHARDENARM-NEXT:    add r12, r12, #0
+; NOHARDENARM-NEXT:    @NO_APP
+; NOHARDENARM-NEXT:    blx r12
+; NOHARDENARM-NEXT:    pop {r11, pc}
+;
+; NOHARDENTHUMB-LABEL: check_r12:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    .save {r7, lr}
+; NOHARDENTHUMB-NEXT:    push {r7, lr}
+; NOHARDENTHUMB-NEXT:    ldr.w r12, [r0]
+; NOHARDENTHUMB-NEXT:    @APP
+; NOHARDENTHUMB-NEXT:    add.w r12, r12, #0
+; NOHARDENTHUMB-NEXT:    @NO_APP
+; NOHARDENTHUMB-NEXT:    blx r12
+; NOHARDENTHUMB-NEXT:    pop {r7, pc}
 entry:
-; CHECK-LABEL: check_r12:
   %f = load i32 ()*, i32 ()** %fp, align 4
   ; Force f to be moved into r12
   %r12_f = tail call i32 ()* asm "add $0, $1, #0", "={r12},{r12}"(i32 ()* %f) nounwind
   %call = call i32 %r12_f()
-; NOHARDENARM:     blx r12
-; NOHARDENTHUMB:   blx r12
-; HARDEN-NOT: bl {{__llvm_slsblr_thunk_(arm|thumb)_r12}}
   ret i32 %call
-; CHECK: .Lfunc_end
 }
 
 define i32 @check_lr(i32 ()** %fp) {
+; NOHARDENARM-LABEL: check_lr:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    .save {r11, lr}
+; NOHARDENARM-NEXT:    push {r11, lr}
+; NOHARDENARM-NEXT:    ldr lr, [r0]
+; NOHARDENARM-NEXT:    @APP
+; NOHARDENARM-NEXT:    add lr, lr, #0
+; NOHARDENARM-NEXT:    @NO_APP
+; NOHARDENARM-NEXT:    blx lr
+; NOHARDENARM-NEXT:    pop {r11, pc}
+;
+; NOHARDENTHUMB-LABEL: check_lr:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    .save {r7, lr}
+; NOHARDENTHUMB-NEXT:    push {r7, lr}
+; NOHARDENTHUMB-NEXT:    ldr.w lr, [r0]
+; NOHARDENTHUMB-NEXT:    @APP
+; NOHARDENTHUMB-NEXT:    add.w lr, lr, #0
+; NOHARDENTHUMB-NEXT:    @NO_APP
+; NOHARDENTHUMB-NEXT:    blx lr
+; NOHARDENTHUMB-NEXT:    pop {r7, pc}
 entry:
-; CHECK-LABEL: check_lr:
   %f = load i32 ()*, i32 ()** %fp, align 4
   ; Force f to be moved into lr
   %lr_f = tail call i32 ()* asm "add $0, $1, #0", "={lr},{lr}"(i32 ()* %f) nounwind
   %call = call i32 %lr_f()
-; NOHARDENARM:     blx lr
-; NOHARDENTHUMB:   blx lr
-; HARDEN-NOT: bl {{__llvm_slsblr_thunk_(arm|thumb)_lr}}
   ret i32 %call
-; CHECK: .Lfunc_end
 }
 
 ; Verify that even when sls-harden-blr is enabled, "blx r12" is still an
 ; instruction that is accepted by the inline assembler
 define void @verify_inline_asm_blx_r12(void ()* %g) {
+; ISBDSB-LABEL: verify_inline_asm_blx_r12:
+; ISBDSB:       @ %bb.0: @ %entry
+; ISBDSB-NEXT:    mov r12, r0
+; ISBDSB-NEXT:    @APP
+; ISBDSB-NEXT:    blx r12
+; ISBDSB-NEXT:    @NO_APP
+; ISBDSB-NEXT:    bx lr
+; ISBDSB-NEXT:    dsb sy
+; ISBDSB-NEXT:    isb sy
+;
+; SB-LABEL: verify_inline_asm_blx_r12:
+; SB:       @ %bb.0: @ %entry
+; SB-NEXT:    mov r12, r0
+; SB-NEXT:    @APP
+; SB-NEXT:    blx r12
+; SB-NEXT:    @NO_APP
+; SB-NEXT:    bx lr
+; SB-NEXT:    sb
+;
+; NOHARDENARM-LABEL: verify_inline_asm_blx_r12:
+; NOHARDENARM:       @ %bb.0: @ %entry
+; NOHARDENARM-NEXT:    mov r12, r0
+; NOHARDENARM-NEXT:    @APP
+; NOHARDENARM-NEXT:    blx r12
+; NOHARDENARM-NEXT:    @NO_APP
+; NOHARDENARM-NEXT:    bx lr
+;
+; NOHARDENTHUMB-LABEL: verify_inline_asm_blx_r12:
+; NOHARDENTHUMB:       @ %bb.0: @ %entry
+; NOHARDENTHUMB-NEXT:    mov r12, r0
+; NOHARDENTHUMB-NEXT:    @APP
+; NOHARDENTHUMB-NEXT:    blx r12
+; NOHARDENTHUMB-NEXT:    @NO_APP
+; NOHARDENTHUMB-NEXT:    bx lr
 entry:
-; CHECK-LABEL: verify_inline_asm_blx_r12:
   %0 = bitcast void ()* %g to i8*
   tail call void asm sideeffect "blx $0", "{r12}"(i8* %0) nounwind
-; CHECK: blx r12
   ret void
-; CHECK:       {{bx lr$}}
-; ISBDSB-NEXT: dsb sy
-; ISBDSB-NEXT: isb
-; SB-NEXT:     {{ sb$}}
-; CHECK: .Lfunc_end
 }
 
 ; HARDEN-COMDAT:  .section {{.text.__llvm_slsblr_thunk_(arm|thumb)_r5}}

diff  --git a/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll b/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll
index 937877ea28585..d86b2d1e04057 100644
--- a/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll
+++ b/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll
@@ -1,8 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
 
-; CHECK-NOT: p1 =
-
 define i32 @f0(i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: f0:
+; CHECK:         .cfi_startproc
+; CHECK-NEXT:  // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = cmp.gt(r1,r0)
+; CHECK-NEXT:     if (p0.new) r0 = #0
+; CHECK-NEXT:     if (p0.new) jumpr:nt r31
+; CHECK-NEXT:    }
+; CHECK-NEXT:  .LBB0_1: // %b2
+; CHECK-NEXT:    {
+; CHECK-NEXT:     p0 = cmp.gt(r1,#99)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = mux(p0,##321,#123)
+; CHECK-NEXT:     jumpr r31
+; CHECK-NEXT:    }
 b0:
   %v0 = icmp slt i32 %a0, %a1
   br i1 %v0, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll b/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll
index fef55d7d9ccbb..1d21cb3c99805 100644
--- a/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll
+++ b/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
 
 ; Check that we don't generate .falign directives after function calls at O2.
@@ -7,12 +8,30 @@
 declare i32 @f0()
 
 ; We don't want faligns after the calls to foo.
-; CHECK:     call f0
-; CHECK-NOT: falign
-; CHECK:     call f0
-; CHECK-NOT: falign
-; CHECK:     dealloc_return
+
 define i32 @f1(i32 %a0) #0 {
+; CHECK-LABEL: f1:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     if (!p0.new) r0 = add(r0,#5)
+; CHECK-NEXT:     p0 = cmp.eq(r0,#0)
+; CHECK-NEXT:     if (!p0.new) jumpr:nt r31
+; CHECK-NEXT:    }
+; CHECK-NEXT:  .LBB0_1: // %b1
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f0
+; CHECK-NEXT:     memd(r29+#-16) = r17:16
+; CHECK-NEXT:     allocframe(#8)
+; CHECK-NEXT:    } // 8-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f0
+; CHECK-NEXT:     r16 = r0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = add(r16,r0)
+; CHECK-NEXT:     r17:16 = memd(r29+#0)
+; CHECK-NEXT:     dealloc_return
+; CHECK-NEXT:    } // 8-byte Folded Reload
 b0:
   %v0 = icmp eq i32 %a0, 0
   br i1 %v0, label %b1, label %b2

diff  --git a/llvm/test/CodeGen/Thumb2/tpsoft.ll b/llvm/test/CodeGen/Thumb2/tpsoft.ll
index 443cbdb959758..b0b3f6ee3d06d 100644
--- a/llvm/test/CodeGen/Thumb2/tpsoft.ll
+++ b/llvm/test/CodeGen/Thumb2/tpsoft.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc  %s -mtriple=thumbv7-linux-gnueabi -o - | \
 ; RUN:    FileCheck  -check-prefix=ELFASM %s
 ; RUN: llc  %s -mtriple=thumbebv7-linux-gnueabi -o - | \
@@ -15,6 +16,38 @@
 @b = external global [10 x i8]
 
 define arm_aapcs_vfpcc i32 @main() nounwind {
+; ELFASM-LABEL: main:
+; ELFASM:       @ %bb.0: @ %entry
+; ELFASM-NEXT:    .save {r7, lr}
+; ELFASM-NEXT:    push {r7, lr}
+; ELFASM-NEXT:    ldr r0, .LCPI0_0
+; ELFASM-NEXT:  .LPC0_0:
+; ELFASM-NEXT:    add r0, pc
+; ELFASM-NEXT:    ldr r1, [r0]
+; ELFASM-NEXT:    bl __aeabi_read_tp
+; ELFASM-NEXT:    ldr r0, [r0, r1]
+; ELFASM-NEXT:    cmp r0, #13
+; ELFASM-NEXT:    beq .LBB0_3
+; ELFASM-NEXT:  @ %bb.1: @ %entry
+; ELFASM-NEXT:    cmp r0, #12
+; ELFASM-NEXT:    itt ne
+; ELFASM-NEXT:    movne.w r0, #-1
+; ELFASM-NEXT:    popne {r7, pc}
+; ELFASM-NEXT:  .LBB0_2: @ %bb
+; ELFASM-NEXT:    movw r0, :lower16:a
+; ELFASM-NEXT:    movt r0, :upper16:a
+; ELFASM-NEXT:    pop.w {r7, lr}
+; ELFASM-NEXT:    b foo
+; ELFASM-NEXT:  .LBB0_3: @ %bb1
+; ELFASM-NEXT:    movw r0, :lower16:b
+; ELFASM-NEXT:    movt r0, :upper16:b
+; ELFASM-NEXT:    pop.w {r7, lr}
+; ELFASM-NEXT:    b bar
+; ELFASM-NEXT:    .p2align 2
+; ELFASM-NEXT:  @ %bb.4:
+; ELFASM-NEXT:  .LCPI0_0:
+; ELFASM-NEXT:  .Ltmp0:
+; ELFASM-NEXT:    .long i(GOTTPOFF)-((.LPC0_0+4)-.Ltmp0)
 entry:
   %0 = load i32, i32* @i, align 4
   switch i32 %0, label %bb2 [
@@ -25,7 +58,6 @@ entry:
 bb:                                               ; preds = %entry
   %1 = tail call arm_aapcs_vfpcc  i32 @foo(i8* @a) nounwind
   ret i32 %1
-; ELFASM:       	bl	__aeabi_read_tp
 
 
 ; ELFOBJ:      Sections [

diff  --git a/llvm/test/CodeGen/Thumb2/v8_IT_4.ll b/llvm/test/CodeGen/Thumb2/v8_IT_4.ll
index 5901a8e81cafa..6c8eae7820dda 100644
--- a/llvm/test/CodeGen/Thumb2/v8_IT_4.ll
+++ b/llvm/test/CodeGen/Thumb2/v8_IT_4.ll
@@ -1,7 +1,8 @@
-; RUN: llc < %s -mtriple=thumbv8-eabi -float-abi=hard | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-eabi -float-abi=hard -arm-restrict-it | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8-eabi -float-abi=hard -regalloc=basic | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv7-eabi -float-abi=hard -regalloc=basic -arm-restrict-it | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=thumbv8-eabi -float-abi=hard | FileCheck --check-prefixes=P01 %s
+; RUN: llc < %s -mtriple=thumbv7-eabi -float-abi=hard -arm-restrict-it | FileCheck --check-prefixes=P01 %s
+; RUN: llc < %s -mtriple=thumbv8-eabi -float-abi=hard -regalloc=basic | FileCheck --check-prefixes=P23 %s
+; RUN: llc < %s -mtriple=thumbv7-eabi -float-abi=hard -regalloc=basic -arm-restrict-it | FileCheck --check-prefixes=P23 %s
 
 %"struct.__gnu_cxx::__normal_iterator<char*,std::basic_string<char, std::char_traits<char>, std::allocator<char> > >" = type { i8* }
 %"struct.__gnu_cxx::new_allocator<char>" = type <{ i8 }>
@@ -11,13 +12,67 @@
 
 
 define weak arm_aapcs_vfpcc i32 @_ZNKSs7compareERKSs(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this, %"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) {
-; CHECK-LABEL: _ZNKSs7compareERKSs:
-; CHECK:      cbz	r0,
-; CHECK-NEXT: %bb1
-; CHECK-NEXT: pop.w
-; CHECK-NEXT: %bb
-; CHECK-NEXT: sub{{(.w)?}} r0, r{{[0-9]+}}, r{{[0-9]+}}
-; CHECK-NEXT: pop.w
+; P01-LABEL: _ZNKSs7compareERKSs:
+; P01:       @ %bb.0: @ %entry
+; P01-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; P01-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; P01-NEXT:    mov r6, r1
+; P01-NEXT:    mov r7, r0
+; P01-NEXT:    bl _ZNKSs4sizeEv
+; P01-NEXT:    mov r8, r0
+; P01-NEXT:    mov r0, r6
+; P01-NEXT:    bl _ZNKSs4sizeEv
+; P01-NEXT:    mov r4, r8
+; P01-NEXT:    cmp r0, r8
+; P01-NEXT:    mov r5, r0
+; P01-NEXT:    it lo
+; P01-NEXT:    movlo r4, r0
+; P01-NEXT:    mov r0, r7
+; P01-NEXT:    bl _ZNKSs7_M_dataEv
+; P01-NEXT:    mov r7, r0
+; P01-NEXT:    mov r0, r6
+; P01-NEXT:    bl _ZNKSs4dataEv
+; P01-NEXT:    mov r1, r0
+; P01-NEXT:    mov r0, r7
+; P01-NEXT:    mov r2, r4
+; P01-NEXT:    bl memcmp
+; P01-NEXT:    cbz r0, .LBB0_2
+; P01-NEXT:  @ %bb.1: @ %bb1
+; P01-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; P01-NEXT:  .LBB0_2: @ %bb
+; P01-NEXT:    sub.w r0, r8, r5
+; P01-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+;
+; P23-LABEL: _ZNKSs7compareERKSs:
+; P23:       @ %bb.0: @ %entry
+; P23-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; P23-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; P23-NEXT:    mov r7, r1
+; P23-NEXT:    mov r5, r0
+; P23-NEXT:    bl _ZNKSs4sizeEv
+; P23-NEXT:    mov r8, r0
+; P23-NEXT:    mov r0, r7
+; P23-NEXT:    bl _ZNKSs4sizeEv
+; P23-NEXT:    mov r4, r8
+; P23-NEXT:    cmp r0, r8
+; P23-NEXT:    mov r6, r0
+; P23-NEXT:    it lo
+; P23-NEXT:    movlo r4, r0
+; P23-NEXT:    mov r0, r5
+; P23-NEXT:    bl _ZNKSs7_M_dataEv
+; P23-NEXT:    mov r5, r0
+; P23-NEXT:    mov r0, r7
+; P23-NEXT:    bl _ZNKSs4dataEv
+; P23-NEXT:    mov r1, r0
+; P23-NEXT:    mov r0, r5
+; P23-NEXT:    mov r2, r4
+; P23-NEXT:    bl memcmp
+; P23-NEXT:    cbz r0, .LBB0_2
+; P23-NEXT:  @ %bb.1: @ %bb1
+; P23-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; P23-NEXT:  .LBB0_2: @ %bb
+; P23-NEXT:    sub.w r0, r8, r6
+; P23-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
 entry:
   %0 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
   %1 = tail call arm_aapcs_vfpcc  i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %__str) ; <i32> [#uses=3]

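For reference, tests carrying the "NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py" header are normally refreshed by re-running that script rather than by hand-editing the CHECK lines. A minimal sketch of such an invocation, assuming a local build tree at ./build (the build path and the chosen test files are illustrative, not taken from this commit):

  $ llvm/utils/update_llc_test_checks.py \
        --llc-binary build/bin/llc \
        llvm/test/CodeGen/ARM/smml.ll \
        llvm/test/CodeGen/Thumb2/v8_IT_4.ll

The script re-executes the RUN lines of each listed test and rewrites the per-prefix CHECK/CHECK-NEXT blocks in place, which is what the expanded assertions in the diffs above correspond to.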