[llvm] 21c878e - [AArch64][GlobalISel] Regenerate AArch64/GlobalISel/*.mir test checks. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat May 17 01:00:41 PDT 2025


Author: David Green
Date: 2025-05-17T09:00:34+01:00
New Revision: 21c878e72dddaa1495cb71ea636f69ddcab5e0f4

URL: https://github.com/llvm/llvm-project/commit/21c878e72dddaa1495cb71ea636f69ddcab5e0f4
DIFF: https://github.com/llvm/llvm-project/commit/21c878e72dddaa1495cb71ea636f69ddcab5e0f4.diff

LOG: [AArch64][GlobalISel] Regenerate AArch64/GlobalISel/*.mir test checks. NFC

This served as a test of update_mir_test_checks from #140296, although the test
changes are mostly because the tests have not been regenerated in a long time.
We managed to stop it from removing extra comments; only the empty lines are
now removed.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/artifact-combine-unmerge.mir
    llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-canonicalize-icmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-copy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-fabs.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-fconstant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-flog2.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-fneg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-fptrunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-fsqrt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-inttoptr-ptrtoint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-mul-to-shl.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-int2ptr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-ptrtoint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-trunc-sextload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir
    llvm/test/CodeGen/AArch64/GlobalISel/contract-store.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fold-global-offsets-target-features.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-shr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/labels-are-not-dead.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-bzero.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-cttz-zero-undef.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-divrem.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fcopysign.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fexp2.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fmaximum.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fminimum.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp-arith.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp128-fconstant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp16-fconstant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-hint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-get-dynamic-area-offset.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-inttoptr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-llround.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-fewerElts.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-trunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-lrint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-lround.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-fadd.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-or.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-sbfx.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-copy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-zext-128.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-ubfx.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner.mir
    llvm/test/CodeGen/AArch64/GlobalISel/lifetime-marker-no-dce.mir
    llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
    llvm/test/CodeGen/AArch64/GlobalISel/localizer-propagate-debug-loc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
    llvm/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
    llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
    llvm/test/CodeGen/AArch64/GlobalISel/observer-change-crash.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-shifted-reg-compare.mir
    llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-and-trivial-mask.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-copy-prop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-merge.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-redundant-sextinreg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-rev.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-duplane.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-trn.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-truncstore.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-zip.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extractvec-faddp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-mulpow2.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-constrain-new-regop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-funnel-shifts-to-rotates.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-icmp-redundant-trunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-not-really-equiv-insts.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-sextload-from-sextinreg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-simplify-add.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
    llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-xor-of-and-with-same-reg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-ceil.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract-vector-elt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-fcmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-round.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-trunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-llround.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-lround.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-maxnum.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-minnum.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-nearbyint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-shift-imm-64.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-trunc-s128.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-reductions.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir
    llvm/test/CodeGen/AArch64/GlobalISel/retry-artifact-combine.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-abs.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-bitreverse.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-build-vector.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ctlz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ctpop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-cttz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt-with-extend.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-faddp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fcmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fma.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fmaximum.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fminimum.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fmul-indexed.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-fp16-fconstant.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-frint-nofp16.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-frint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-round.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-trunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ld4.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-logical-imm.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-mul.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-nearbyint.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vector-fcmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-rev.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-rotate.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-shufflevec-undef-mask-elt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-st2.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-trn.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-uzp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-with-no-legality-check.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-zext-as-copy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-zip.mir
    llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
    llvm/test/CodeGen/AArch64/GlobalISel/store-merging-debug.mir
    llvm/test/CodeGen/AArch64/GlobalISel/store-wro-addressing-modes.mir
    llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/xro-addressing-mode-constant.mir

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/artifact-combine-unmerge.mir b/llvm/test/CodeGen/AArch64/GlobalISel/artifact-combine-unmerge.mir
index 92c27f37249a5..46b1b6b439a63 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/artifact-combine-unmerge.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/artifact-combine-unmerge.mir
@@ -9,9 +9,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_none_none
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: $w0 = COPY [[COPY]](s32)
-    ; CHECK: $w1 = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[COPY1]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s64) = G_MERGE_VALUES %0(s32), %1
@@ -25,9 +25,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_gpr_none
     ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1
-    ; CHECK: $w0 = COPY [[COPY]](s32)
-    ; CHECK: $w1 = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[COPY1]](s32)
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:_(s64) = G_MERGE_VALUES %0(s32), %1
@@ -41,11 +41,11 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_none_gpr
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
-    ; CHECK: [[COPY3:%[0-9]+]]:gpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: $w0 = COPY [[COPY2]](s32)
-    ; CHECK: $w1 = COPY [[COPY3]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY2]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[COPY3]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s64) = G_MERGE_VALUES %0(s32), %1
@@ -59,11 +59,11 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fpr_gpr
     ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
-    ; CHECK: [[COPY3:%[0-9]+]]:gpr(s32) = COPY [[COPY1]](s32)
-    ; CHECK: $w0 = COPY [[COPY2]](s32)
-    ; CHECK: $w1 = COPY [[COPY3]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr(s32) = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY2]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[COPY3]](s32)
     %0:fpr(s32) = COPY $w0
     %1:fpr(s32) = COPY $w1
     %2:_(s64) = G_MERGE_VALUES %0(s32), %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir b/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
index b5c8d8bdf136c..ae28641168f4c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/artifact-find-value.mir
@@ -9,21 +9,22 @@ body:             |
   liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
     ; CHECK-LABEL: name: combine_unmerge_from_unmerge_of_concat_tree
     ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
-    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
-    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
+    ; CHECK-NEXT: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
+    ; CHECK-NEXT: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
+    ; CHECK-NEXT: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
   %2:_(p0)  = COPY $x2
@@ -60,21 +61,22 @@ body:             |
   liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
     ; CHECK-LABEL: name: combine_unmerge_from_unmerge_of_concat_tree_high_bits
     ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
-    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
-    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
+    ; CHECK-NEXT: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
+    ; CHECK-NEXT: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
   %2:_(p0)  = COPY $x2
@@ -110,20 +112,21 @@ body:             |
   liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
     ; CHECK-LABEL: name: combine_unmerge_from_insert_into_low
     ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
-    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
-    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
+    ; CHECK-NEXT: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
+    ; CHECK-NEXT: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
+    ; CHECK-NEXT: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
   %2:_(p0)  = COPY $x2
@@ -156,20 +159,21 @@ body:             |
   liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
     ; CHECK-LABEL: name: combine_unmerge_from_insert_into_high
     ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
-    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
-    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
+    ; CHECK-NEXT: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
+    ; CHECK-NEXT: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
+    ; CHECK-NEXT: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
   %2:_(p0)  = COPY $x2
@@ -202,20 +206,21 @@ body:             |
   liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
     ; CHECK-LABEL: name: combine_unmerge_from_insert_look_into_container
     ; CHECK: liveins: $x0, $x1, $x2, $d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
-    ; CHECK: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
-    ; CHECK: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
-    ; CHECK: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY $x2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $d2
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $d3
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $d4
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY $d5
+    ; CHECK-NEXT: %v2s64_val:_(<2 x s64>) = G_BUILD_VECTOR [[COPY5]](s64), [[COPY6]](s64)
+    ; CHECK-NEXT: %v2s64_val2:_(<2 x s64>) = G_BUILD_VECTOR [[COPY6]](s64), [[COPY8]](s64)
+    ; CHECK-NEXT: G_STORE %v2s64_val(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: G_STORE %v2s64_val2(<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
   %0:_(s64)  = COPY $x0
   %1:_(s64)  = COPY $x1
   %2:_(p0)  = COPY $x2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-canonicalize-icmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-canonicalize-icmp.mir
index bf04ac02d086a..3da5844d994dc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-canonicalize-icmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-canonicalize-icmp.mir
@@ -55,7 +55,7 @@ body:             |
 name:            test_icmp_no_canon_bv_neither_const
 body:             |
   bb.1:
-    ; CHECK-LABEL: name: test_icmp_no_canon_bv
+    ; CHECK-LABEL: name: test_icmp_no_canon_bv_neither_const
     ; CHECK: %opaque1:_(s64) = COPY $x0
     ; CHECK-NEXT: %opaque2:_(s64) = COPY $x0
     ; CHECK-NEXT: %const1:_(s64) = G_CONSTANT i64 11

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-copy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-copy.mir
index 990eb36a8aa54..27b629f6e32ee 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-copy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-copy.mir
@@ -9,7 +9,7 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_none_none
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: $x0 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY]](s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY %0(s64)
     $x0 = COPY %1(s64)
@@ -20,7 +20,7 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_gpr_none
     ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: $x0 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY]](s64)
     %0:gpr(s64) = COPY $x0
     %1:_(s64) = COPY %0(s64)
     $x0 = COPY %1(s64)
@@ -31,8 +31,8 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_none_gpr
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
-    ; CHECK: $x0 = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
     %0:_(s64) = COPY $x0
     %1:gpr(s64) = COPY %0(s64)
     $x0 = COPY %1(s64)
@@ -43,8 +43,8 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fpr_gpr
     ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
-    ; CHECK: $x0 = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
     %0:fpr(s64) = COPY $x0
     %1:gpr(s64) = COPY %0(s64)
     $x0 = COPY %1(s64)
@@ -55,8 +55,8 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_gpr64_gpr64_dst_no_llt
     ; CHECK: [[COPY:%[0-9]+]]:gpr64(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]](s64)
-    ; CHECK: $x0 = COPY [[COPY1]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]]
     %0:gpr64(s64) = COPY $x0
     %1:gpr64 = COPY %0(s64)
     $x0 = COPY %1
@@ -67,8 +67,8 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_gpr64_gpr64_src_no_llt
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64(s64) = COPY [[COPY]]
-    ; CHECK: $x0 = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64(s64) = COPY [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
     %0:gpr64 = COPY $x0
     %1:gpr64(s64) = COPY %0
     $x0 = COPY %1(s64)
@@ -79,7 +79,7 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_gpr64_gpr64_both_no_llt
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: $x0 = COPY [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[COPY]]
     %0:gpr64 = COPY $x0
     %1:gpr64 = COPY %0
     $x0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fabs.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fabs.mir
index a543e7cd4c7e4..d2f73675e9439 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fabs.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fabs.mir
@@ -8,9 +8,11 @@ body:             |
   bb.1:
   liveins: $w0
     ; CHECK-LABEL: name: test_combine_fabs_fabs
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[COPY]]
-    ; CHECK: $w0 = COPY [[FABS]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[COPY]]
+    ; CHECK-NEXT: $w0 = COPY [[FABS]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_FABS %0(s32)
     %2:_(s32) = G_FABS %1(s32)
@@ -22,9 +24,11 @@ body:             |
   bb.1:
   liveins: $x0
     ; CHECK-LABEL: name: test_combine_fabs_fabs_vec
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $x0
-    ; CHECK: [[FABS:%[0-9]+]]:_(<2 x s32>) = G_FABS [[COPY]]
-    ; CHECK: $x0 = COPY [[FABS]](<2 x s32>)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: [[FABS:%[0-9]+]]:_(<2 x s32>) = G_FABS [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[FABS]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $x0
     %1:_(<2 x s32>) = G_FABS %0(<2 x s32>)
     %2:_(<2 x s32>) = G_FABS %1(<2 x s32>)
@@ -36,7 +40,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_half_fabs_neg_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4580
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s16) = G_FCONSTANT half 0xHC580
     %1:_(s16) = G_FABS %0
     $h0 = COPY %1(s16)
@@ -47,7 +51,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_half_fabs_pos_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4580
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s16) = G_FCONSTANT half 0xH4580
     %1:_(s16) = G_FABS %0
     $h0 = COPY %1(s16)
@@ -58,8 +62,10 @@ body:             |
   bb.1:
     liveins: $w0
     ; CHECK-LABEL: name: test_combine_float_fabs_neg_constant
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.500000e+00
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.500000e+00
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s32) = G_FCONSTANT float -5.500000e+00
     %1:_(s32) = G_FABS %0
     $w0 = COPY %1(s32)
@@ -70,8 +76,10 @@ body:             |
   bb.1:
     liveins: $w0
     ; CHECK-LABEL: name: test_combine_float_fabs_pos_constant
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.500000e+00
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.500000e+00
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s32) = G_FCONSTANT float -5.500000e+00
     %1:_(s32) = G_FABS %0
     $w0 = COPY %1(s32)
@@ -82,8 +90,10 @@ body:             |
   bb.1:
     liveins: $x0
     ; CHECK-LABEL: name: test_combine_double_fabs_neg_constant
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.200000e+00
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.200000e+00
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = G_FCONSTANT double -4.200000e+00
     %1:_(s64) = G_FABS %0
     $x0 = COPY %1(s64)
@@ -94,8 +104,10 @@ body:             |
   bb.1:
     liveins: $x0
     ; CHECK-LABEL: name: test_combine_double_fabs_pos_constant
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.200000e+00
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.200000e+00
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = G_FCONSTANT double 4.200000e+00
     %1:_(s64) = G_FABS %0
     $x0 = COPY %0(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fconstant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fconstant.mir
index 3ef70df291232..6362ed65d09e3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fconstant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fconstant.mir
@@ -16,12 +16,13 @@ body:             |
     ; fmov, so it's strictly better to use a mov.
     ; CHECK-LABEL: name: fconstant_to_constant_s32
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1028443341
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store (s32))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1028443341
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store (s32))
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %3:_(s32) = G_FCONSTANT float 0x3FA99999A0000000
     %1:_(s64) = G_CONSTANT i64 524
@@ -41,10 +42,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: fconstant_to_constant_s64
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %c:_(s64) = G_CONSTANT i64 0
-    ; CHECK: G_STORE %c(s64), %ptr(p0) :: (store (s64))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %c:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: G_STORE %c(s64), %ptr(p0) :: (store (s64))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:_(p0) = COPY $x0
     %c:_(s64) = G_FCONSTANT double 0.0
     G_STORE %c(s64), %ptr(p0) :: (store (s64))
@@ -63,10 +65,11 @@ body:             |
     ; When we aren't feeding into a store, the combine shouldn't happen.
     ; CHECK-LABEL: name: no_store_means_no_combine
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %v:_(s64) = COPY $x0
-    ; CHECK: %c:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; CHECK: %add:_(s64) = G_FADD %v, %c
-    ; CHECK: RET_ReallyLR implicit %add(s64)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v:_(s64) = COPY $x0
+    ; CHECK-NEXT: %c:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; CHECK-NEXT: %add:_(s64) = G_FADD %v, %c
+    ; CHECK-NEXT: RET_ReallyLR implicit %add(s64)
     %v:_(s64) = COPY $x0
     %c:_(s64) = G_FCONSTANT double 0.0
     %add:_(s64) = G_FADD %v, %c

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-flog2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-flog2.mir
index 9e7e279e9e1a3..dfa131fc396d7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-flog2.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-flog2.mir
@@ -7,7 +7,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_half_flog2_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4000
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s16) = G_FCONSTANT half 4.000000e+00
     %1:_(s16) = G_FLOG2 %0
     $h0 = COPY %1(s16)
@@ -18,7 +18,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_float_flog2_constant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s32) = G_FCONSTANT float 4.000000e+00
     %1:_(s32) = G_FLOG2 %0
     $w0 = COPY %1(s32)
@@ -29,7 +29,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_double_flog2_constant
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = G_FCONSTANT double 4.000000e+00
     %1:_(s64) = G_FLOG2 %0
     $x0 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fneg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fneg.mir
index 1b1077854b4c1..db3ab3d451d4c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fneg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fneg.mir
@@ -6,8 +6,10 @@ body:             |
   bb.1:
   liveins: $w0
     ; CHECK-LABEL: name: test_combine_fneg_fneg
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: $w0 = COPY [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_FNEG %0(s32)
     %2:_(s32) = G_FNEG %1(s32)
@@ -19,8 +21,10 @@ body:             |
   bb.1:
   liveins: $x0
     ; CHECK-LABEL: name: test_combine_fneg_fneg_vec
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $x0
-    ; CHECK: $x0 = COPY [[COPY]](<2 x s32>)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: $x0 = COPY [[COPY]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $x0
     %1:_(<2 x s32>) = G_FNEG %0(<2 x s32>)
     %2:_(<2 x s32>) = G_FNEG %1(<2 x s32>)
@@ -32,7 +36,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_half_fneg_neg_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4580
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s16) = G_FCONSTANT half 0xHC580
     %1:_(s16) = G_FNEG %0
     $h0 = COPY %1(s16)
@@ -43,7 +47,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_half_fneg_pos_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xHC580
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s16) = G_FCONSTANT half 0xH4580
     %1:_(s16) = G_FNEG %0
     $h0 = COPY %1(s16)
@@ -54,7 +58,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_float_fneg_neg_constant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.500000e+00
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s32) = G_FCONSTANT float -5.500000e+00
     %1:_(s32) = G_FNEG %0
     $w0 = COPY %1(s32)
@@ -65,7 +69,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_float_fneg_pos_constant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -5.500000e+00
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s32) = G_FCONSTANT float 5.500000e+00
     %1:_(s32) = G_FNEG %0
     $w0 = COPY %1(s32)
@@ -76,7 +80,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_double_fneg_neg_constant
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.200000e+00
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = G_FCONSTANT double -4.200000e+00
     %1:_(s64) = G_FNEG %0
     $x0 = COPY %1(s64)
@@ -87,7 +91,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_double_fneg_pos_constant
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -4.200000e+00
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = G_FCONSTANT double 4.200000e+00
     %1:_(s64) = G_FNEG %0
     $x0 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fptrunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fptrunc.mir
index 1fd7f6f39caca..ab4c62e313703 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fptrunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fptrunc.mir
@@ -7,7 +7,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_float_to_half_fptrunc_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4580
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s32) = G_FCONSTANT float 5.500000e+00
     %1:_(s16) = G_FPTRUNC %0(s32)
     $h0 = COPY %1(s16)
@@ -18,7 +18,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_double_to_half_fptrunc_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4433
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s64) = G_FCONSTANT double 4.200000e+00
     %1:_(s16) = G_FPTRUNC %0(s64)
     $h0 = COPY %1(s16)
@@ -29,7 +29,7 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: test_combine_double_to_foat_fptrunc_constant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x4010CCCCC0000000
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s64) = G_FCONSTANT double 4.200000e+00
     %1:_(s32) = G_FPTRUNC %0(s64)
     $w0 = COPY %1(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fsqrt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fsqrt.mir
index e114d01793167..4fe05c616eb3c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-fsqrt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-fsqrt.mir
@@ -8,7 +8,7 @@ body:             |
   liveins:
     ; CHECK-LABEL: name: test_combine_half_fsqrt_constant
     ; CHECK: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH4000
-    ; CHECK: $h0 = COPY [[C]](s16)
+    ; CHECK-NEXT: $h0 = COPY [[C]](s16)
     %0:_(s16) = G_FCONSTANT half 4.000000e+00
     %1:_(s16) = G_FSQRT %0
     $h0 = COPY %1
@@ -20,7 +20,7 @@ body:             |
   liveins:
     ; CHECK-LABEL: name: test_combine_float_fsqrt_constant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
-    ; CHECK: $w0 = COPY [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
     %0:_(s32) = G_FCONSTANT float 4.000000e+00
     %1:_(s32) = G_FSQRT %0
     $w0 = COPY %1
@@ -32,7 +32,7 @@ body:             |
   liveins:
     ; CHECK-LABEL: name: test_combine_double_fsqrt_constant
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
-    ; CHECK: $x0 = COPY [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[C]](s64)
     %0:_(s64) = G_FCONSTANT double 4.000000e+00
     %1:_(s64) = G_FSQRT %0
     $x0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
index 63343dd8ad935..da79a3a17ed0d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-icmp-to-lhs-known-bits.mir
@@ -14,13 +14,14 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: apply_ne
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %known_zero_or_one:_(s32) = G_AND %x, %one
-    ; CHECK: %cmp:_(s1) = G_TRUNC %known_zero_or_one(s32)
-    ; CHECK: %ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s32) = G_AND %x, %one
+    ; CHECK-NEXT: %cmp:_(s1) = G_TRUNC %known_zero_or_one(s32)
+    ; CHECK-NEXT: %ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %one:_(s32) = G_CONSTANT i32 1
     %known_zero_or_one:_(s32) = G_AND %x, %one
@@ -41,13 +42,14 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: apply_eq
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %known_zero_or_one:_(s32) = G_AND %x, %one
-    ; CHECK: %cmp:_(s1) = G_TRUNC %known_zero_or_one(s32)
-    ; CHECK: %ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s32) = G_AND %x, %one
+    ; CHECK-NEXT: %cmp:_(s1) = G_TRUNC %known_zero_or_one(s32)
+    ; CHECK-NEXT: %ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %one:_(s32) = G_CONSTANT i32 1
     %known_zero_or_one:_(s32) = G_AND %x, %one
@@ -69,14 +71,15 @@ body:             |
 
     ; CHECK-LABEL: name: dont_apply_wrong_cst_eq
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %known_zero_or_one:_(s32) = G_AND %x, %one
-    ; CHECK: %wrong_cst:_(s32) = G_CONSTANT i32 10
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(eq), %known_zero_or_one(s32), %wrong_cst
-    ; CHECK: %ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s32) = G_AND %x, %one
+    ; CHECK-NEXT: %wrong_cst:_(s32) = G_CONSTANT i32 10
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(eq), %known_zero_or_one(s32), %wrong_cst
+    ; CHECK-NEXT: %ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %one:_(s32) = G_CONSTANT i32 1
     %known_zero_or_one:_(s32) = G_AND %x, %one
@@ -99,14 +102,15 @@ body:             |
 
     ; CHECK-LABEL: name: dont_apply_wrong_cst_ne
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %known_zero_or_one:_(s32) = G_AND %x, %one
-    ; CHECK: %wrong_cst:_(s32) = G_CONSTANT i32 10
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %known_zero_or_one(s32), %wrong_cst
-    ; CHECK: %ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s32) = G_AND %x, %one
+    ; CHECK-NEXT: %wrong_cst:_(s32) = G_CONSTANT i32 10
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(ne), %known_zero_or_one(s32), %wrong_cst
+    ; CHECK-NEXT: %ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %one:_(s32) = G_CONSTANT i32 1
     %known_zero_or_one:_(s32) = G_AND %x, %one
@@ -166,12 +170,13 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: apply_no_zext_or_trunc
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %known_zero_or_one:_(s32) = G_AND %x, %one
-    ; CHECK: %cmp:_(s32) = COPY %known_zero_or_one(s32)
-    ; CHECK: $w0 = COPY %cmp(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s32) = G_AND %x, %one
+    ; CHECK-NEXT: %cmp:_(s32) = COPY %known_zero_or_one(s32)
+    ; CHECK-NEXT: $w0 = COPY %cmp(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %one:_(s32) = G_CONSTANT i32 1
     %known_zero_or_one:_(s32) = G_AND %x, %one
@@ -191,13 +196,14 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: apply_wide_cmp
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s64) = COPY $x0
-    ; CHECK: %one:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %known_zero_or_one:_(s64) = G_AND %x, %one
-    ; CHECK: %cmp:_(s64) = COPY %known_zero_or_one(s64)
-    ; CHECK: %trunc:_(s32) = G_TRUNC %cmp(s64)
-    ; CHECK: $w0 = COPY %trunc(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s64) = COPY $x0
+    ; CHECK-NEXT: %one:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s64) = G_AND %x, %one
+    ; CHECK-NEXT: %cmp:_(s64) = COPY %known_zero_or_one(s64)
+    ; CHECK-NEXT: %trunc:_(s32) = G_TRUNC %cmp(s64)
+    ; CHECK-NEXT: $w0 = COPY %trunc(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s64) = COPY $x0
     %one:_(s64) = G_CONSTANT i64 1
     %known_zero_or_one:_(s64) = G_AND %x, %one
@@ -218,12 +224,13 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: apply_narrow_lhs
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %known_zero_or_one:_(s32) = G_AND %x, %one
-    ; CHECK: %cmp:_(s64) = G_ZEXT %known_zero_or_one(s32)
-    ; CHECK: $x0 = COPY %cmp(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %known_zero_or_one:_(s32) = G_AND %x, %one
+    ; CHECK-NEXT: %cmp:_(s64) = G_ZEXT %known_zero_or_one(s32)
+    ; CHECK-NEXT: $x0 = COPY %cmp(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s32) = COPY $w0
     %one:_(s32) = G_CONSTANT i32 1
     %known_zero_or_one:_(s32) = G_AND %x, %one

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-inttoptr-ptrtoint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-inttoptr-ptrtoint.mir
index d99b38900383b..afcb5f74059a4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-inttoptr-ptrtoint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-inttoptr-ptrtoint.mir
@@ -8,8 +8,10 @@ body:             |
   liveins: $x0, $x1
 
     ; CHECK-LABEL: name: test_combine_inttoptr_same_addressspace
-    ; CHECK: [[COPY:%[0-9]+]]:_(p64) = COPY $x0
-    ; CHECK: $x1 = COPY [[COPY]](p64)
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p64) = COPY $x0
+    ; CHECK-NEXT: $x1 = COPY [[COPY]](p64)
     %0:_(p64) = COPY $x0
     %1:_(s64) = G_PTRTOINT %0
     %2:_(p64) = G_INTTOPTR %1
@@ -22,10 +24,12 @@ body:             |
   liveins: $x0, $x1
 
    ; CHECK-LABEL: name: test_combine_inttoptr_diff_addressspace
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY]](p0)
-    ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p64) = G_INTTOPTR [[PTRTOINT]](s64)
-    ; CHECK: $x1 = COPY [[INTTOPTR]](p64)
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY]](p0)
+    ; CHECK-NEXT: [[INTTOPTR:%[0-9]+]]:_(p64) = G_INTTOPTR [[PTRTOINT]](s64)
+    ; CHECK-NEXT: $x1 = COPY [[INTTOPTR]](p64)
     %0:_(p0) = COPY $x0
     %1:_(s64) = G_PTRTOINT %0
     %2:_(p64) = G_INTTOPTR %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul-to-shl.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul-to-shl.mir
index 71eaf80f53ba4..283c89b5ae474 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul-to-shl.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul-to-shl.mir
@@ -13,11 +13,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: mul_to_shl
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
-    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
-    ; CHECK: $x0 = COPY [[SHL]](s64)
-    ; CHECK: RET_ReallyLR implicit-def $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[SHL]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit-def $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 4
     %2:_(s64) = G_MUL %0, %1(s64)
@@ -36,11 +37,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: mul_to_shl_16
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-    ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
-    ; CHECK: $x0 = COPY [[SHL]](s64)
-    ; CHECK: RET_ReallyLR implicit-def $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[SHL]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit-def $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 16
     %2:_(s64) = G_MUL %0, %1(s64)
@@ -60,12 +62,13 @@ body:             |
     ; Currently not implemented.
     ; CHECK-LABEL: name: mul_to_shl_vector_16
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]]
-    ; CHECK: $q0 = COPY [[MUL]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit-def $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit-def $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 16
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -85,11 +88,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: mul_to_shl_non_pow2
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
-    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[C]]
-    ; CHECK: $x0 = COPY [[MUL]](s64)
-    ; CHECK: RET_ReallyLR implicit-def $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[C]]
+    ; CHECK-NEXT: $x0 = COPY [[MUL]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit-def $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 10
     %2:_(s64) = G_MUL %0, %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-int2ptr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-int2ptr.mir
index 1233a0af42453..492c1ce3f7115 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-int2ptr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptradd-int2ptr.mir
@@ -8,9 +8,11 @@ body:             |
   bb.1:
     liveins: $x0
     ; CHECK-LABEL: name: agc.test_combine_ptradd_constants_intres
-    ; CHECK: [[C:%[0-9]+]]:_(p64) = G_CONSTANT i64 44
-    ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[C]](p64)
-    ; CHECK: $x0 = COPY [[PTRTOINT]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(p64) = G_CONSTANT i64 44
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[C]](p64)
+    ; CHECK-NEXT: $x0 = COPY [[PTRTOINT]](s64)
     %1:_(s64) = G_CONSTANT i64 42
     %2:_(s32) = G_CONSTANT i32 2
     %3:_(p64) = G_INTTOPTR %2
@@ -24,8 +26,10 @@ body:             |
   bb.1:
     liveins: $x0
     ; CHECK-LABEL: name: agc.test_combine_ptradd_constants_ptrres
-    ; CHECK: [[C:%[0-9]+]]:_(p64) = G_CONSTANT i64 44
-    ; CHECK: $x0 = COPY [[C]](p64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(p64) = G_CONSTANT i64 44
+    ; CHECK-NEXT: $x0 = COPY [[C]](p64)
     %1:_(s64) = G_CONSTANT i64 42
     %2:_(s32) = G_CONSTANT i32 2
     %3:_(p64) = G_INTTOPTR %2
@@ -39,11 +43,13 @@ body:             |
     liveins: $x0, $x1
     ; Ensure non-constant G_PTR_ADDs are not folded.
     ; CHECK-LABEL: name: agc.test_not_combine_variable_ptradd
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 42
-    ; CHECK: [[COPY:%[0-9]+]]:_(p64) = COPY $x1
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p64) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[PTR_ADD]](p64)
-    ; CHECK: $x0 = COPY [[PTRTOINT]](s64)
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 42
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p64) = COPY $x1
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p64) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[PTR_ADD]](p64)
+    ; CHECK-NEXT: $x0 = COPY [[PTRTOINT]](s64)
     %1:_(s64) = G_CONSTANT i64 42
     %2:_(p64) = COPY $x1
     %3:_(p64) = G_PTR_ADD %2, %1
@@ -57,7 +63,9 @@ body:             |
   liveins: $x0
 
     ; CHECK-LABEL: name: test_combine_zero_extend
-    ; CHECK: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 4291891236
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 4291891236
     ; CHECK-NEXT: $x0 = COPY [[C]](p0)
     %0:_(s32) = G_CONSTANT i32 -3076096
     %1:_(p0) = G_INTTOPTR %0:_(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptrtoint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptrtoint.mir
index 8164b1bf872fc..056ca6d28c0da 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptrtoint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-ptrtoint.mir
@@ -8,8 +8,10 @@ body:             |
   liveins: $x0, $x1
 
     ; CHECK-LABEL: name: test_combine_ptrtoint
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: $x1 = COPY [[COPY]](s64)
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: $x1 = COPY [[COPY]](s64)
     %0:_(s64) = COPY $x0
     %1:_(p64) = G_INTTOPTR %0
     %2:_(s64) = G_PTRTOINT %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-trunc-sextload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-trunc-sextload.mir
index 58856573f83cb..bbba345f0c178 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-trunc-sextload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-sext-trunc-sextload.mir
@@ -10,10 +10,11 @@ body: |
     liveins: $x0
     ; CHECK-LABEL: name: test_combine_sext_trunc_of_sextload
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SEXTLOAD]](s64)
-    ; CHECK: $w0 = COPY [[TRUNC]](s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SEXTLOAD]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s64) = G_SEXTLOAD %0:_(p0) :: (load (s16))
     %2:_(s32) = G_TRUNC %1:_(s64)
@@ -29,9 +30,10 @@ body: |
     liveins: $x0
     ; CHECK-LABEL: name: test_combine_sext_of_sextload
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
-    ; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
+    ; CHECK-NEXT: $w0 = COPY [[SEXTLOAD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_SEXTLOAD %0:_(p0) :: (load (s16))
     %2:_(s32) = COPY %1:_(s32)
@@ -48,9 +50,10 @@ body: |
     ; Here we're trying to extend from a smaller value than was extended in the load.
     ; CHECK-LABEL: name: test_combine_sext_of_sextload_not_matching
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
-    ; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
+    ; CHECK-NEXT: $w0 = COPY [[SEXTLOAD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_SEXTLOAD %0:_(p0) :: (load (s16))
     %2:_(s32) = COPY %1:_(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir b/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir
index 6571da0dea49f..c05ddf98640f8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/constant-mir-debugify.mir
@@ -13,17 +13,18 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: fconstant_to_constant_s32
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0, debug-location !11
-    ; CHECK: DBG_VALUE [[COPY]](p0), $noreg, !9, !DIExpression(), debug-location !11
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FA99999A0000000, debug-location !DILocation(line: 2, column: 1, scope: !5)
-    ; CHECK: DBG_VALUE [[C]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !5)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524, debug-location !DILocation(line: 3, column: 1, scope: !5)
-    ; CHECK: DBG_VALUE [[C1]](s64), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !5)
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64), debug-location !DILocation(line: 4, column: 1, scope: !5)
-    ; CHECK: DBG_VALUE [[PTR_ADD]](p0), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !5)
-    ; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0), debug-location !DILocation(line: 5, column: 1, scope: !5) :: (store (s32))
-    ; CHECK: DBG_VALUE 0, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !5)
-    ; CHECK: RET_ReallyLR debug-location !DILocation(line: 6, column: 1, scope: !5)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0,  debug-location !11
+    ; CHECK-NEXT: DBG_VALUE [[COPY]](p0), $noreg, !9, !DIExpression(),  debug-location !11
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x3FA99999A0000000,  debug-location !DILocation(line: 2, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE [[C]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 2, column: 1, scope: !5)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524,  debug-location !DILocation(line: 3, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE [[C1]](s64), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 3, column: 1, scope: !5)
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64),  debug-location !DILocation(line: 4, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE [[PTR_ADD]](p0), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 4, column: 1, scope: !5)
+    ; CHECK-NEXT: G_STORE [[C]](s32), [[PTR_ADD]](p0),  debug-location !DILocation(line: 5, column: 1, scope: !5) :: (store (s32))
+    ; CHECK-NEXT: DBG_VALUE 0, $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 5, column: 1, scope: !5)
+    ; CHECK-NEXT: RET_ReallyLR debug-location !DILocation(line: 6, column: 1, scope: !5)
     %0:_(p0) = COPY $x0
     %3:_(s32) = G_FCONSTANT float 0x3FA99999A0000000
     %1:_(s64) = G_CONSTANT i64 524

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/contract-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/contract-store.mir
index 3265d560ef778..1562cd5384961 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/contract-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/contract-store.mir
@@ -9,7 +9,9 @@ body:             |
   bb.0:
     liveins: $x0, $x1
     ; CHECK-LABEL: name: contract_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: STRXui [[COPY1]], [[COPY]], 0 :: (store (s64))
     %0:gpr(p0) = COPY $x0
@@ -25,7 +27,9 @@ body:             |
   bb.0:
     liveins: $x0, $w1
     ; CHECK-LABEL: name: contract_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRWui [[COPY1]], [[COPY]], 0 :: (store (s32))
     %0:gpr(p0) = COPY $x0
@@ -41,7 +45,9 @@ body:             |
   bb.0:
     liveins: $x0, $d1
     ; CHECK-LABEL: name: contract_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: STRDui [[COPY1]], [[COPY]], 0 :: (store (s64))
     %0:gpr(p0) = COPY $x0
@@ -57,7 +63,9 @@ body:             |
   bb.0:
     liveins: $x0, $s1
     ; CHECK-LABEL: name: contract_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: STRSui [[COPY1]], [[COPY]], 0 :: (store (s32))
     %0:gpr(p0) = COPY $x0
@@ -73,7 +81,9 @@ body:             |
   bb.0:
     liveins: $x0, $h1
     ; CHECK-LABEL: name: contract_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $h1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
     ; CHECK-NEXT: STRHui [[COPY1]], [[COPY]], 0 :: (store (s16))
     %0:gpr(p0) = COPY $x0
@@ -89,7 +99,9 @@ body:             |
   bb.0:
     liveins: $x0, $x1
     ; CHECK-LABEL: name: contract_g_unmerge_values_first
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (dereferenceable load (<2 x s64>))
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY [[LDRQui]].dsub
     ; CHECK-NEXT: [[DUPi64_:%[0-9]+]]:fpr64 = DUPi64 [[LDRQui]], 1
@@ -109,7 +121,9 @@ body:             |
   bb.0:
     liveins: $x0, $x1
     ; CHECK-LABEL: name: contract_g_unmerge_values_second
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (dereferenceable load (<2 x s64>))
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY [[LDRQui]].dsub
     ; CHECK-NEXT: [[DUPi64_:%[0-9]+]]:fpr64 = DUPi64 [[LDRQui]], 1
@@ -129,7 +143,9 @@ body:             |
   bb.0:
     liveins: $x0, $s1
     ; CHECK-LABEL: name: contract_s16_truncstore
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
     ; CHECK-NEXT: STRHui [[COPY2]], [[COPY]], 0 :: (store (s16))

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
index cf80828151f49..ba9227b671528 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
@@ -20,7 +20,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 0, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -61,7 +61,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 12, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -102,7 +102,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 10, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -143,7 +143,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 4, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -184,7 +184,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 9, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -225,7 +225,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 4, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   Bcc 12, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
@@ -267,7 +267,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 7, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -308,7 +308,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 6, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -349,7 +349,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 0, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   Bcc 6, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
@@ -391,7 +391,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 8, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -432,7 +432,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 5, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -473,7 +473,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 11, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -514,7 +514,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 13, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}
@@ -555,7 +555,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cmp_lhs:fpr32 = COPY $s0
   ; CHECK-NEXT:   %cmp_rhs:fpr32 = COPY $s1
-  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv, implicit $fpcr
   ; CHECK-NEXT:   Bcc 1, %bb.2, implicit $nzcv
   ; CHECK-NEXT:   B %bb.1
   ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
index 0d83719d40823..cb01154a872bb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
@@ -28,7 +28,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[CSINCWr]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[FMOVS0_]], [[COPY1]], 1, implicit $nzcv
@@ -61,9 +61,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[FMOVS0_]], [[COPY1]], 0, implicit $nzcv
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[FCSELSrrr1:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY1]], [[FMOVS0_]], 0, implicit $nzcv
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: $s1 = COPY [[FCSELSrrr1]]
@@ -125,7 +125,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[FMOVS0_]], [[COPY1]], 0, implicit $nzcv
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
@@ -154,7 +154,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
@@ -187,7 +187,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
@@ -220,7 +220,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY1]], [[FMOVS0_]], 1, implicit $nzcv
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
@@ -249,7 +249,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0
-    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[FCSELDrrr:%[0-9]+]]:fpr64 = FCSELDrrr [[FMOVD0_]], [[COPY1]], 0, implicit $nzcv
     ; CHECK-NEXT: $d0 = COPY [[FCSELDrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
@@ -278,7 +278,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0
-    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
@@ -311,7 +311,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0
-    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
@@ -344,7 +344,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0
-    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[FCSELDrrr:%[0-9]+]]:fpr64 = FCSELDrrr [[COPY1]], [[FMOVD0_]], 1, implicit $nzcv
     ; CHECK-NEXT: $d0 = COPY [[FCSELDrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $d0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-global-offsets-target-features.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-global-offsets-target-features.mir
index 639c51d92d9c1..8197bf9e63220 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-global-offsets-target-features.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-global-offsets-target-features.mir
@@ -38,30 +38,35 @@ body:             |
 
     ; DEFAULT-LABEL: name: test_external_linkage
     ; DEFAULT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_linkage + 1
-    ; DEFAULT: $x0 = COPY [[GV]](p0)
-    ; DEFAULT: RET_ReallyLR implicit $x0
+    ; DEFAULT-NEXT: $x0 = COPY [[GV]](p0)
+    ; DEFAULT-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; LARGE-MACHO-LABEL: name: test_external_linkage
     ; LARGE-MACHO: %global:_(p0) = G_GLOBAL_VALUE @external_linkage
-    ; LARGE-MACHO: %imm:_(s64) = G_CONSTANT i64 1
-    ; LARGE-MACHO: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; LARGE-MACHO: $x0 = COPY %ptr_add(p0)
-    ; LARGE-MACHO: RET_ReallyLR implicit $x0
+    ; LARGE-MACHO-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; LARGE-MACHO-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; LARGE-MACHO-NEXT: $x0 = COPY %ptr_add(p0)
+    ; LARGE-MACHO-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; SMALL-MACHO-LABEL: name: test_external_linkage
     ; SMALL-MACHO: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_linkage + 1
-    ; SMALL-MACHO: $x0 = COPY [[GV]](p0)
-    ; SMALL-MACHO: RET_ReallyLR implicit $x0
+    ; SMALL-MACHO-NEXT: $x0 = COPY [[GV]](p0)
+    ; SMALL-MACHO-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; LARGE-ELF-LABEL: name: test_external_linkage
     ; LARGE-ELF: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_linkage + 1
-    ; LARGE-ELF: $x0 = COPY [[GV]](p0)
-    ; LARGE-ELF: RET_ReallyLR implicit $x0
+    ; LARGE-ELF-NEXT: $x0 = COPY [[GV]](p0)
+    ; LARGE-ELF-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; TINY-LABEL: name: test_external_linkage
     ; TINY: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_linkage + 1
-    ; TINY: $x0 = COPY [[GV]](p0)
-    ; TINY: RET_ReallyLR implicit $x0
+    ; TINY-NEXT: $x0 = COPY [[GV]](p0)
+    ; TINY-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; WINDOWS-LABEL: name: test_external_linkage
     ; WINDOWS: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_linkage + 1
-    ; WINDOWS: $x0 = COPY [[GV]](p0)
-    ; WINDOWS: RET_ReallyLR implicit $x0
+    ; WINDOWS-NEXT: $x0 = COPY [[GV]](p0)
+    ; WINDOWS-NEXT: RET_ReallyLR implicit $x0
     %global:_(p0) = G_GLOBAL_VALUE @external_linkage
     %imm:_(s64) = G_CONSTANT i64 1
     %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
@@ -80,30 +85,35 @@ body:             |
 
     ; DEFAULT-LABEL: name: test_internal_linkage
     ; DEFAULT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @internal_linkage + 1
-    ; DEFAULT: $x0 = COPY [[GV]](p0)
-    ; DEFAULT: RET_ReallyLR implicit $x0
+    ; DEFAULT-NEXT: $x0 = COPY [[GV]](p0)
+    ; DEFAULT-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; LARGE-MACHO-LABEL: name: test_internal_linkage
     ; LARGE-MACHO: %global:_(p0) = G_GLOBAL_VALUE @internal_linkage
-    ; LARGE-MACHO: %imm:_(s64) = G_CONSTANT i64 1
-    ; LARGE-MACHO: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; LARGE-MACHO: $x0 = COPY %ptr_add(p0)
-    ; LARGE-MACHO: RET_ReallyLR implicit $x0
+    ; LARGE-MACHO-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; LARGE-MACHO-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; LARGE-MACHO-NEXT: $x0 = COPY %ptr_add(p0)
+    ; LARGE-MACHO-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; SMALL-MACHO-LABEL: name: test_internal_linkage
     ; SMALL-MACHO: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @internal_linkage + 1
-    ; SMALL-MACHO: $x0 = COPY [[GV]](p0)
-    ; SMALL-MACHO: RET_ReallyLR implicit $x0
+    ; SMALL-MACHO-NEXT: $x0 = COPY [[GV]](p0)
+    ; SMALL-MACHO-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; LARGE-ELF-LABEL: name: test_internal_linkage
     ; LARGE-ELF: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @internal_linkage + 1
-    ; LARGE-ELF: $x0 = COPY [[GV]](p0)
-    ; LARGE-ELF: RET_ReallyLR implicit $x0
+    ; LARGE-ELF-NEXT: $x0 = COPY [[GV]](p0)
+    ; LARGE-ELF-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; TINY-LABEL: name: test_internal_linkage
     ; TINY: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @internal_linkage + 1
-    ; TINY: $x0 = COPY [[GV]](p0)
-    ; TINY: RET_ReallyLR implicit $x0
+    ; TINY-NEXT: $x0 = COPY [[GV]](p0)
+    ; TINY-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; WINDOWS-LABEL: name: test_internal_linkage
     ; WINDOWS: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @internal_linkage + 1
-    ; WINDOWS: $x0 = COPY [[GV]](p0)
-    ; WINDOWS: RET_ReallyLR implicit $x0
+    ; WINDOWS-NEXT: $x0 = COPY [[GV]](p0)
+    ; WINDOWS-NEXT: RET_ReallyLR implicit $x0
     %global:_(p0) = G_GLOBAL_VALUE @internal_linkage
     %imm:_(s64) = G_CONSTANT i64 1
     %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
@@ -120,38 +130,43 @@ body:             |
   bb.0:
     ; DEFAULT-LABEL: name: test_common_linkage
     ; DEFAULT: %global:_(p0) = G_GLOBAL_VALUE @common_linkage
-    ; DEFAULT: %imm:_(s64) = G_CONSTANT i64 1
-    ; DEFAULT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; DEFAULT: $x0 = COPY %ptr_add(p0)
-    ; DEFAULT: RET_ReallyLR implicit $x0
+    ; DEFAULT-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; DEFAULT-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; DEFAULT-NEXT: $x0 = COPY %ptr_add(p0)
+    ; DEFAULT-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; LARGE-MACHO-LABEL: name: test_common_linkage
     ; LARGE-MACHO: %global:_(p0) = G_GLOBAL_VALUE @common_linkage
-    ; LARGE-MACHO: %imm:_(s64) = G_CONSTANT i64 1
-    ; LARGE-MACHO: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; LARGE-MACHO: $x0 = COPY %ptr_add(p0)
-    ; LARGE-MACHO: RET_ReallyLR implicit $x0
+    ; LARGE-MACHO-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; LARGE-MACHO-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; LARGE-MACHO-NEXT: $x0 = COPY %ptr_add(p0)
+    ; LARGE-MACHO-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; SMALL-MACHO-LABEL: name: test_common_linkage
     ; SMALL-MACHO: %global:_(p0) = G_GLOBAL_VALUE @common_linkage
-    ; SMALL-MACHO: %imm:_(s64) = G_CONSTANT i64 1
-    ; SMALL-MACHO: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; SMALL-MACHO: $x0 = COPY %ptr_add(p0)
-    ; SMALL-MACHO: RET_ReallyLR implicit $x0
+    ; SMALL-MACHO-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; SMALL-MACHO-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; SMALL-MACHO-NEXT: $x0 = COPY %ptr_add(p0)
+    ; SMALL-MACHO-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; LARGE-ELF-LABEL: name: test_common_linkage
     ; LARGE-ELF: %global:_(p0) = G_GLOBAL_VALUE @common_linkage
-    ; LARGE-ELF: %imm:_(s64) = G_CONSTANT i64 1
-    ; LARGE-ELF: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; LARGE-ELF: $x0 = COPY %ptr_add(p0)
-    ; LARGE-ELF: RET_ReallyLR implicit $x0
+    ; LARGE-ELF-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; LARGE-ELF-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; LARGE-ELF-NEXT: $x0 = COPY %ptr_add(p0)
+    ; LARGE-ELF-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; TINY-LABEL: name: test_common_linkage
     ; TINY: %global:_(p0) = G_GLOBAL_VALUE @common_linkage
-    ; TINY: %imm:_(s64) = G_CONSTANT i64 1
-    ; TINY: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; TINY: $x0 = COPY %ptr_add(p0)
-    ; TINY: RET_ReallyLR implicit $x0
+    ; TINY-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; TINY-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; TINY-NEXT: $x0 = COPY %ptr_add(p0)
+    ; TINY-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; WINDOWS-LABEL: name: test_common_linkage
     ; WINDOWS: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @common_linkage + 1
-    ; WINDOWS: $x0 = COPY [[GV]](p0)
-    ; WINDOWS: RET_ReallyLR implicit $x0
+    ; WINDOWS-NEXT: $x0 = COPY [[GV]](p0)
+    ; WINDOWS-NEXT: RET_ReallyLR implicit $x0
     %global:_(p0) = G_GLOBAL_VALUE @common_linkage
     %imm:_(s64) = G_CONSTANT i64 1
     %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
@@ -166,42 +181,12 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; DEFAULT-LABEL: name: test_extern_weak_linkage
-    ; DEFAULT: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
-    ; DEFAULT: %imm:_(s64) = G_CONSTANT i64 1
-    ; DEFAULT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; DEFAULT: $x0 = COPY %ptr_add(p0)
-    ; DEFAULT: RET_ReallyLR implicit $x0
-    ; LARGE-MACHO-LABEL: name: test_extern_weak_linkage
-    ; LARGE-MACHO: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
-    ; LARGE-MACHO: %imm:_(s64) = G_CONSTANT i64 1
-    ; LARGE-MACHO: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; LARGE-MACHO: $x0 = COPY %ptr_add(p0)
-    ; LARGE-MACHO: RET_ReallyLR implicit $x0
-    ; SMALL-MACHO-LABEL: name: test_extern_weak_linkage
-    ; SMALL-MACHO: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
-    ; SMALL-MACHO: %imm:_(s64) = G_CONSTANT i64 1
-    ; SMALL-MACHO: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; SMALL-MACHO: $x0 = COPY %ptr_add(p0)
-    ; SMALL-MACHO: RET_ReallyLR implicit $x0
-    ; LARGE-ELF-LABEL: name: test_extern_weak_linkage
-    ; LARGE-ELF: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
-    ; LARGE-ELF: %imm:_(s64) = G_CONSTANT i64 1
-    ; LARGE-ELF: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; LARGE-ELF: $x0 = COPY %ptr_add(p0)
-    ; LARGE-ELF: RET_ReallyLR implicit $x0
-    ; TINY-LABEL: name: test_extern_weak_linkage
-    ; TINY: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
-    ; TINY: %imm:_(s64) = G_CONSTANT i64 1
-    ; TINY: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; TINY: $x0 = COPY %ptr_add(p0)
-    ; TINY: RET_ReallyLR implicit $x0
-    ; WINDOWS-LABEL: name: test_extern_weak_linkage
-    ; WINDOWS: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
-    ; WINDOWS: %imm:_(s64) = G_CONSTANT i64 1
-    ; WINDOWS: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
-    ; WINDOWS: $x0 = COPY %ptr_add(p0)
-    ; WINDOWS: RET_ReallyLR implicit $x0
+    ; CHECK-LABEL: name: test_extern_weak_linkage
+    ; CHECK: %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
+    ; CHECK-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; CHECK-NEXT: $x0 = COPY %ptr_add(p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %global:_(p0) = G_GLOBAL_VALUE @extern_weak_linkage
     %imm:_(s64) = G_CONSTANT i64 1
     %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
@@ -217,7 +202,11 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     ; CHECK-LABEL: name: never_fold_tagged_globals
-    ; CHECK-NOT: %global:_(p0) = G_GLOBAL_VALUE @external_linkage + 1
+    ; CHECK: %global:_(p0) = G_GLOBAL_VALUE @external_linkage
+    ; CHECK-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; CHECK-NEXT: $x0 = COPY %ptr_add(p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %global:_(p0) = G_GLOBAL_VALUE @external_linkage
     %imm:_(s64) = G_CONSTANT i64 1
     %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
@@ -233,7 +222,11 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     ; CHECK-LABEL: name: test_dll_import
-    ; CHECK-NOT: %global:_(p0) = G_GLOBAL_VALUE @dll_import + 1
+    ; CHECK: %global:_(p0) = G_GLOBAL_VALUE @dll_import
+    ; CHECK-NEXT: %imm:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)
+    ; CHECK-NEXT: $x0 = COPY %ptr_add(p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %global:_(p0) = G_GLOBAL_VALUE @dll_import
     %imm:_(s64) = G_CONSTANT i64 1
     %ptr_add:_(p0) = G_PTR_ADD %global, %imm(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
index b396d084446b0..147d0ab392ff2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
@@ -52,7 +52,7 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY1]], implicit-def $nzcv, implicit $fpcr
     ; CHECK-NEXT: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[COPY2]], [[COPY]], 0, implicit $nzcv
     ; CHECK-NEXT: $w0 = COPY [[CSELWr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-shr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-shr.mir
index 3ed50d3c67fd9..ff8bab972214e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-shr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/form-bitfield-extract-from-shr.mir
@@ -10,10 +10,10 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: apply_ashr_shl_to_sbfx
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; CHECK: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C1]](s32), [[C]]
-    ; CHECK: $w0 = COPY [[SBFX]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
+    ; CHECK-NEXT: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C1]](s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[SBFX]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 5
     %2:_(s32) = G_CONSTANT i32 16
@@ -29,9 +29,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: apply_ashr_shl_to_sbfx_lower_bound
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C]](s32), [[C]]
-    ; CHECK: $w0 = COPY [[SBFX]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C]](s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[SBFX]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 30
     %2:_(s32) = G_CONSTANT i32 31
@@ -47,10 +47,10 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: apply_ashr_shl_to_sbfx_upper_bound
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 30
-    ; CHECK: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C1]](s32), [[C]]
-    ; CHECK: $w0 = COPY [[SBFX]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 30
+    ; CHECK-NEXT: [[SBFX:%[0-9]+]]:_(s32) = G_SBFX [[COPY]], [[C1]](s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[SBFX]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 1
     %2:_(s32) = G_CONSTANT i32 31
@@ -66,10 +66,10 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: apply_lshr_shl_to_ubfx
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
-    ; CHECK: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C1]](s32), [[C]]
-    ; CHECK: $w0 = COPY [[UBFX]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 11
+    ; CHECK-NEXT: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C1]](s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[UBFX]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 5
     %2:_(s32) = G_CONSTANT i32 16
@@ -85,9 +85,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: apply_lshr_shl_to_ubfx_lower_bound
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C]](s32), [[C]]
-    ; CHECK: $w0 = COPY [[UBFX]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C]](s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[UBFX]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 30
     %2:_(s32) = G_CONSTANT i32 31
@@ -103,10 +103,10 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: apply_lshr_shl_to_ubfx_upper_bound
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 30
-    ; CHECK: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C1]](s32), [[C]]
-    ; CHECK: $w0 = COPY [[UBFX]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 30
+    ; CHECK-NEXT: [[UBFX:%[0-9]+]]:_(s32) = G_UBFX [[COPY]], [[C1]](s32), [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[UBFX]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 1
     %2:_(s32) = G_CONSTANT i32 31
@@ -122,11 +122,11 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: dont_apply_pos_out_of_bounds
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
-    ; CHECK: $w0 = COPY [[ASHR]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[ASHR]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 5
     %2:_(s32) = G_CONSTANT i32 2
@@ -142,11 +142,11 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: dont_apply_no_constant
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
-    ; CHECK: $w0 = COPY [[ASHR]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[ASHR]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s32) = G_CONSTANT i32 2
@@ -162,12 +162,12 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: dont_apply_more_than_one_use
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
-    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SHL]], [[ASHR]]
-    ; CHECK: $w0 = COPY [[MUL]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SHL]], [[ASHR]]
+    ; CHECK-NEXT: $w0 = COPY [[MUL]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 5
     %2:_(s32) = G_CONSTANT i32 16

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
index 065f581c599af..99bb62b117080 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fp16-copy-gpr.mir
@@ -45,18 +45,19 @@ body:             |
 
     ; CHECK-LABEL: name: fp16_to_gpr
     ; CHECK: liveins: $h0, $h1, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
-    ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY]], %subreg.hsub
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: [[BFMWri:%[0-9]+]]:gpr32 = BFMWri [[DEF]], [[COPY2]], 0, 15
-    ; CHECK: [[SUBREG_TO_REG1:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY1]], %subreg.hsub
-    ; CHECK: [[COPY3:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG1]]
-    ; CHECK: [[BFMWri1:%[0-9]+]]:gpr32 = BFMWri [[BFMWri]], [[COPY3]], 16, 15
-    ; CHECK: [[COPY4:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: STRWui [[BFMWri1]], [[COPY4]], 0 :: (store (s32) into %ir.addr, align 2)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY]], %subreg.hsub
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: [[BFMWri:%[0-9]+]]:gpr32 = BFMWri [[DEF]], [[COPY2]], 0, 15
+    ; CHECK-NEXT: [[SUBREG_TO_REG1:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY1]], %subreg.hsub
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG1]]
+    ; CHECK-NEXT: [[BFMWri1:%[0-9]+]]:gpr32 = BFMWri [[BFMWri]], [[COPY3]], 16, 15
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: STRWui [[BFMWri1]], [[COPY4]], 0 :: (store (s32) into %ir.addr, align 2)
+    ; CHECK-NEXT: RET_ReallyLR
     %1:fpr(s16) = COPY $h0
     %2:fpr(s16) = COPY $h1
     %3:gpr(s32) = G_IMPLICIT_DEF
@@ -87,11 +88,12 @@ body:             |
 
     ; CHECK-LABEL: name: gpr_to_fp16
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
-    ; CHECK: $h0 = COPY [[COPY2]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
+    ; CHECK-NEXT: $h0 = COPY [[COPY2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s16) = G_TRUNC %0(s32)
     %2:fpr(s16) = COPY %1(s16)
@@ -114,11 +116,12 @@ body:             |
 
     ; CHECK-LABEL: name: gpr_to_fp16_physreg
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
-    ; CHECK: $h0 = COPY [[COPY2]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
+    ; CHECK-NEXT: $h0 = COPY [[COPY2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s16) = G_TRUNC %0(s32)
     $h0 = COPY %1(s16)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
index b83985fa32638..f50540ba4c06d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy-forced.mir
@@ -32,51 +32,52 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy_inline
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
-    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
-    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
-    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
-    ; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
-    ; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CHECK: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
-    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-    ; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
-    ; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
-    ; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CHECK: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
-    ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
-    ; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
-    ; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
-    ; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CHECK: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
-    ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
-    ; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
-    ; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
-    ; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CHECK: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
-    ; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
-    ; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
-    ; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
-    ; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CHECK: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
-    ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
-    ; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
-    ; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
-    ; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CHECK: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
+    ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
+    ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
+    ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+    ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
+    ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
+    ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+    ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
+    ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
+    ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+    ; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
+    ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
+    ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
+    ; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
+    ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
+    ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
+    ; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
+    ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 143

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
index 0a22c7d1f94bb..a2e20caf43ff9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
@@ -77,11 +77,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy1
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK-NEXT: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = COPY $x2
@@ -104,31 +105,32 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy2_const
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
-    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
-    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
-    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
-    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
-    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
-    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-    ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
-    ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
-    ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
+    ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
+    ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
+    ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+    ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
+    ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 72
@@ -151,31 +153,32 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy2_const_optsize
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
-    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
-    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
-    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
-    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
-    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
-    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-    ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
-    ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
-    ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
+    ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
+    ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
+    ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+    ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
+    ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 72
@@ -198,11 +201,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy2_const_minsize
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72
-    ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72
+    ; CHECK-NEXT: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 72
@@ -225,51 +229,52 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy3_const_arrays_unaligned
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
-    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
-    ; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
-    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
-    ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
-    ; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
-    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-    ; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
-    ; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[GEP6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
-    ; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CHECK: G_STORE [[LOAD4]](s128), [[GEP7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
-    ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
-    ; CHECK: [[GEP8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
-    ; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[GEP8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
-    ; CHECK: [[GEP9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
-    ; CHECK: G_STORE [[LOAD5]](s128), [[GEP9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
-    ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
-    ; CHECK: [[GEP10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
-    ; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[GEP10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
-    ; CHECK: [[GEP11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
-    ; CHECK: G_STORE [[LOAD6]](s128), [[GEP11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
-    ; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
-    ; CHECK: [[GEP12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
-    ; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[GEP12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
-    ; CHECK: [[GEP13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
-    ; CHECK: G_STORE [[LOAD7]](s128), [[GEP13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
-    ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
-    ; CHECK: [[GEP14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
-    ; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
-    ; CHECK: [[GEP15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
-    ; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
+    ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
+    ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
+    ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+    ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
+    ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
+    ; CHECK-NEXT: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
+    ; CHECK-NEXT: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
+    ; CHECK-NEXT: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
+    ; CHECK-NEXT: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
+    ; CHECK-NEXT: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
+    ; CHECK-NEXT: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
+    ; CHECK-NEXT: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
+    ; CHECK-NEXT: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
+    ; CHECK-NEXT: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
+    ; CHECK-NEXT: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
+    ; CHECK-NEXT: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
+    ; CHECK-NEXT: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 143
@@ -292,31 +297,32 @@ body:             |
 
     ; CHECK-LABEL: name: test_memcpy_addrspace
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p2) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2)
-    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2)
-    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C1]](s64)
-    ; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2)
-    ; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1)
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p2) :: (load (s128) from %ir.1 + 48, align 4, addrspace 2)
-    ; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CHECK: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p1) :: (store (s128) into %ir.0 + 48, align 4, addrspace 1)
-    ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
-    ; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C3]](s64)
-    ; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p2) :: (load (s64) from %ir.1 + 64, align 4, addrspace 2)
-    ; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
-    ; CHECK: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p1) :: (store (s64) into %ir.0 + 64, align 4, addrspace 1)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p1) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p2) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p2) :: (load (s128) from %ir.1, align 4, addrspace 2)
+    ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p1) :: (store (s128) into %ir.0, align 4, addrspace 1)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p2) :: (load (s128) from %ir.1 + 16, align 4, addrspace 2)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p1) :: (store (s128) into %ir.0 + 16, align 4, addrspace 1)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p2) :: (load (s128) from %ir.1 + 32, align 4, addrspace 2)
+    ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p1) :: (store (s128) into %ir.0 + 32, align 4, addrspace 1)
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; CHECK-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+    ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p2) :: (load (s128) from %ir.1 + 48, align 4, addrspace 2)
+    ; CHECK-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p1) :: (store (s128) into %ir.0 + 48, align 4, addrspace 1)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p2) = G_PTR_ADD [[COPY1]], [[C3]](s64)
+    ; CHECK-NEXT: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD6]](p2) :: (load (s64) from %ir.1 + 64, align 4, addrspace 2)
+    ; CHECK-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD4]](s64), [[PTR_ADD7]](p1) :: (store (s64) into %ir.0 + 64, align 4, addrspace 1)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p1) = COPY $x0
     %1:_(p2) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 72

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
index 699594b3d3e9a..8d8f717d257dc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
@@ -40,16 +40,17 @@ body:             |
 
     ; CHECK-LABEL: name: test_small_memcpy
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
-    ; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
-    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
+    ; CHECK-NEXT: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 32
@@ -72,11 +73,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_large_memcpy
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
-    ; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
+    ; CHECK-NEXT: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s64) = G_CONSTANT i64 36

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/labels-are-not-dead.mir b/llvm/test/CodeGen/AArch64/GlobalISel/labels-are-not-dead.mir
index 01f0624553a48..76d7e04fd3a06 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/labels-are-not-dead.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/labels-are-not-dead.mir
@@ -13,14 +13,14 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: no_erase_local_escape
     ; CHECK: LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_0>, %stack.0
-    ; CHECK: LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_1>, %stack.1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
-    ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
-    ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
-    ; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
-    ; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_1>, %stack.1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
+    ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
+    ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
+    ; CHECK-NEXT: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+    ; CHECK-NEXT: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+    ; CHECK-NEXT: RET_ReallyLR
     LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_0>, %stack.0
     LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_1>, %stack.1
     %2:_(s32) = G_CONSTANT i32 42

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
index d5d0a1b122352..18ed5d5f2b204 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
@@ -17,10 +17,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s8) on %ir.addr)
-    ; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s8) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[ATOMICRMW_ADD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s8) = G_CONSTANT i8 1
     %2:_(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s8) on %ir.addr)
@@ -35,10 +37,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s16) on %ir.addr)
-    ; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s16) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[ATOMICRMW_ADD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s16) = G_CONSTANT i16 1
     %2:_(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s16) on %ir.addr)
@@ -53,10 +57,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[ATOMICRMW_ADD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 1
     %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s32) on %ir.addr)
@@ -70,10 +76,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s64) on %ir.addr)
-    ; CHECK: $x0 = COPY [[ATOMICRMW_ADD]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s64) on %ir.addr)
+    ; CHECK-NEXT: $x0 = COPY [[ATOMICRMW_ADD]](s64)
     %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s64) on %ir.addr)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bzero.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bzero.mir
index e7a5a94541ff0..ee86c897b36e2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bzero.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-bzero.mir
@@ -12,14 +12,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: bzero
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %width:_(s64) = COPY $x1
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $x0 = COPY %ptr(p0)
-    ; CHECK: $x1 = COPY %width(s64)
-    ; CHECK: BL &bzero, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %width:_(s64) = COPY $x1
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $x0 = COPY %ptr(p0)
+    ; CHECK-NEXT: $x1 = COPY %width(s64)
+    ; CHECK-NEXT: BL &bzero, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:_(p0) = COPY $x0
     %width:_(s64) = COPY $x1
     G_BZERO %ptr(p0), %width(s64), 0 :: (store (s32))
@@ -34,11 +35,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: bzero_tail_call
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %width:_(s64) = COPY $x1
-    ; CHECK: $x0 = COPY %ptr(p0)
-    ; CHECK: $x1 = COPY %width(s64)
-    ; CHECK: TCRETURNdi &bzero, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %width:_(s64) = COPY $x1
+    ; CHECK-NEXT: $x0 = COPY %ptr(p0)
+    ; CHECK-NEXT: $x1 = COPY %width(s64)
+    ; CHECK-NEXT: TCRETURNdi &bzero, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1
     %ptr:_(p0) = COPY $x0
     %width:_(s64) = COPY $x1
     G_BZERO %ptr(p0), %width(s64), 1 :: (store (s32))

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
index 2ab14afdb8e1e..6e557e719094d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
@@ -17,11 +17,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s8) on %ir.addr)
-    ; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s8) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s8) = G_CONSTANT i8 0
     %2:_(s8) = G_CONSTANT i8 1
@@ -37,11 +39,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s16) on %ir.addr)
-    ; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s16) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s16) = G_CONSTANT i16 0
     %2:_(s16) = G_CONSTANT i16 1
@@ -57,11 +61,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(s32) = G_CONSTANT i32 1
@@ -76,11 +82,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
-    ; CHECK: $x0 = COPY [[ATOMIC_CMPXCHG]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
+    ; CHECK-NEXT: $x0 = COPY [[ATOMIC_CMPXCHG]](s64)
     %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 0
     %2:_(s64) = G_CONSTANT i64 1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
index 116f59ae1d601..d34751e1fb1ed 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
@@ -19,12 +19,14 @@ body: |
 
     ; Here the types don't match.
     ; CHECK-LABEL: name: test_combines_2
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
-    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
-    ; CHECK: $w0 = COPY [[COPY]](s32)
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
-    ; CHECK: $x0 = COPY [[COPY1]](s64)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
     %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0
@@ -43,10 +45,12 @@ body: |
     liveins: $w0
 
     ; CHECK-LABEL: name: test_combines_3
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
-    ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
-    ; CHECK: $w0 = COPY [[ADD1]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
+    ; CHECK-NEXT: $w0 = COPY [[ADD1]](s32)
     %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0
@@ -63,10 +67,12 @@ body: |
     liveins: $x0
 
     ; CHECK-LABEL: name: test_combines_4
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY1]]
-    ; CHECK: $x0 = COPY [[ADD]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY1]]
+    ; CHECK-NEXT: $x0 = COPY [[ADD]](s64)
     %0:_(s64) = COPY $x0
 
     %1:_(s128) = G_MERGE_VALUES %0, %0
@@ -82,10 +88,12 @@ body: |
     liveins: $w0
 
     ; CHECK-LABEL: name: test_combines_5
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
-    ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
-    ; CHECK: $w0 = COPY [[ADD1]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
+    ; CHECK-NEXT: $w0 = COPY [[ADD1]](s32)
     %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0
@@ -105,14 +113,16 @@ body: |
     liveins: $w0
 
     ; CHECK-LABEL: name: test_combines_6
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
-    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64)
-    ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
-    ; CHECK: $w0 = COPY [[ADD1]](s32)
-    ; CHECK: $x0 = COPY [[COPY2]](s64)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
+    ; CHECK-NEXT: $w0 = COPY [[ADD1]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[COPY2]](s64)
     %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cttz-zero-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cttz-zero-undef.mir
index ef6af8fe22dc4..cf51a3776790f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cttz-zero-undef.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cttz-zero-undef.mir
@@ -18,6 +18,7 @@ body:             |
     ; CHECK-NEXT: [[CTLZ:%[0-9]+]]:_(s32) = G_CTLZ [[BITREVERSE]](s32)
     ; CHECK-NEXT: $w0 = COPY [[CTLZ]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; CHECK-CSSC-LABEL: name: s8
     ; CHECK-CSSC: liveins: $w0
     ; CHECK-CSSC-NEXT: {{  $}}
@@ -50,6 +51,7 @@ body:             |
     ; CHECK-NEXT: [[CTLZ:%[0-9]+]]:_(s32) = G_CTLZ [[BITREVERSE]](s32)
     ; CHECK-NEXT: $w0 = COPY [[CTLZ]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; CHECK-CSSC-LABEL: name: s16
     ; CHECK-CSSC: liveins: $w0
     ; CHECK-CSSC-NEXT: {{  $}}
@@ -82,6 +84,7 @@ body:             |
     ; CHECK-NEXT: [[CTLZ:%[0-9]+]]:_(s32) = G_CTLZ [[BITREVERSE]](s32)
     ; CHECK-NEXT: $w0 = COPY [[CTLZ]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; CHECK-CSSC-LABEL: name: s32
     ; CHECK-CSSC: liveins: $w0
     ; CHECK-CSSC-NEXT: {{  $}}
@@ -111,6 +114,7 @@ body:             |
     ; CHECK-NEXT: [[CTLZ:%[0-9]+]]:_(s64) = G_CTLZ [[BITREVERSE]](s64)
     ; CHECK-NEXT: $x0 = COPY [[CTLZ]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; CHECK-CSSC-LABEL: name: s64
     ; CHECK-CSSC: liveins: $x0
     ; CHECK-CSSC-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-divrem.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-divrem.mir
index 72ecb295f857f..9b3a5f1ae3d42 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-divrem.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-divrem.mir
@@ -6,12 +6,12 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_udivrem_64
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]]
-    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UDIV]], [[COPY1]]
-    ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[MUL]]
-    ; CHECK: $x0 = COPY [[UDIV]](s64)
-    ; CHECK: $x1 = COPY [[SUB]](s64)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UDIV]], [[COPY1]]
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[MUL]]
+    ; CHECK-NEXT: $x0 = COPY [[UDIV]](s64)
+    ; CHECK-NEXT: $x1 = COPY [[SUB]](s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %2:_(s64), %3:_(s64) = G_UDIVREM %0, %1
@@ -25,14 +25,14 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_sdivrem_32
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SDIV]], [[TRUNC1]]
-    ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[MUL]]
-    ; CHECK: $w0 = COPY [[SDIV]](s32)
-    ; CHECK: $w1 = COPY [[SUB]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+    ; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SDIV]], [[TRUNC1]]
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[MUL]]
+    ; CHECK-NEXT: $w0 = COPY [[SDIV]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[SUB]](s32)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %2:_(s32) = G_TRUNC %0(s64)
@@ -48,18 +48,18 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_sdivrem_8
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 8
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; CHECK: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
-    ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[SEXT_INREG]], [[SEXT_INREG1]]
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
-    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SDIV]], [[COPY2]]
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
-    ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY3]], [[MUL]]
-    ; CHECK: $w0 = COPY [[SDIV]](s32)
-    ; CHECK: $w1 = COPY [[SUB]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 8
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
+    ; CHECK-NEXT: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[SEXT_INREG]], [[SEXT_INREG1]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SDIV]], [[COPY2]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY3]], [[MUL]]
+    ; CHECK-NEXT: $w0 = COPY [[SDIV]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[SUB]](s32)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %2:_(s8) = G_TRUNC %0(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir
index 68d55e3ffd263..bf10e5c3b8ad6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir
@@ -7,9 +7,11 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: test_extload
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
-    ; CHECK: $w0 = COPY [[LOAD]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
+    ; CHECK-NEXT: $w0 = COPY [[LOAD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_LOAD %0 :: (load (s8))
     $w0 = COPY %1
@@ -21,10 +23,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sext_i32_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s32))
-    ; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s32))
+    ; CHECK-NEXT: $x0 = COPY [[SEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %2:_(s64) = G_SEXTLOAD %0(p0) :: (load (s32))
     $x0 = COPY %2(s64)
@@ -38,10 +42,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sext_i16_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
-    ; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
+    ; CHECK-NEXT: $x0 = COPY [[SEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %2:_(s64) = G_SEXTLOAD %0(p0) :: (load (s16))
     $x0 = COPY %2(s64)
@@ -55,10 +61,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sext_i8_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
-    ; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
+    ; CHECK-NEXT: $x0 = COPY [[SEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %2:_(s64) = G_SEXTLOAD %0(p0) :: (load (s8))
     $x0 = COPY %2(s64)
@@ -72,10 +80,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zext_i32_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s32))
-    ; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s32))
+    ; CHECK-NEXT: $x0 = COPY [[ZEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %2:_(s64) = G_ZEXTLOAD %0(p0) :: (load (s32))
     $x0 = COPY %2(s64)
@@ -89,10 +99,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zext_i16_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
-    ; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16))
+    ; CHECK-NEXT: $x0 = COPY [[ZEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %2:_(s64) = G_ZEXTLOAD %0(p0) :: (load (s16))
     $x0 = COPY %2(s64)
@@ -106,10 +118,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zext_i8_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
-    ; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
+    ; CHECK-NEXT: $x0 = COPY [[ZEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %2:_(s64) = G_ZEXTLOAD %0(p0) :: (load (s8))
     $x0 = COPY %2(s64)
@@ -122,10 +136,12 @@ body:             |
   bb.1:
     liveins: $x0
     ; CHECK-LABEL: name: zext_i8_i88
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; CHECK-NEXT: $x0 = COPY [[ZEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %ptr:_(p0) = COPY $x0
     %load:_(s88) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
     %trunc:_(s64) = G_TRUNC %load
@@ -139,10 +155,12 @@ body:             |
   bb.1:
     liveins: $x0
     ; CHECK-LABEL: name: sext_i8_i88
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD %ptr(p0) :: (load (s8))
-    ; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD %ptr(p0) :: (load (s8))
+    ; CHECK-NEXT: $x0 = COPY [[SEXTLOAD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %ptr:_(p0) = COPY $x0
     %load:_(s88) = G_SEXTLOAD %ptr(p0) :: (load (s8))
     %trunc:_(s64) = G_TRUNC %load

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fcopysign.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fcopysign.mir
index cfdf0900f2f06..131a6563a1bcb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fcopysign.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fcopysign.mir
@@ -22,8 +22,8 @@ body:             |
     ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR]], [[BUILD_VECTOR3]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
-    ; CHECK-NEXT: %6:_(<2 x s32>) = disjoint G_OR [[AND]], [[AND1]]
-    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %6(<2 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = disjoint G_OR [[AND]], [[AND1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](<2 x s32>)
     ; CHECK-NEXT: %fcopysign:_(s32) = COPY [[UV]](s32)
     ; CHECK-NEXT: $s0 = COPY %fcopysign(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
@@ -54,8 +54,8 @@ body:             |
     ; CHECK-NEXT: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[BUILD_VECTOR]], [[BUILD_VECTOR3]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
-    ; CHECK-NEXT: %6:_(<2 x s64>) = disjoint G_OR [[AND]], [[AND1]]
-    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %6(<2 x s64>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = disjoint G_OR [[AND]], [[AND1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[OR]](<2 x s64>)
     ; CHECK-NEXT: %fcopysign:_(s64) = COPY [[UV]](s64)
     ; CHECK-NEXT: $d0 = COPY %fcopysign(s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $d0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fexp2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fexp2.mir
index 4f8999d8f55c7..242bc750ead53 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fexp2.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fexp2.mir
@@ -12,39 +12,40 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: test_v4f16.exp2
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
-    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
-    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT1]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
-    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT2]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
-    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT3]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
-    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT1]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+    ; CHECK-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT2]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
+    ; CHECK-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT3]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
+    ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<4 x s16>) = COPY $d0
     %1:_(<4 x s16>) = G_FEXP2 %0
     $d0 = COPY %1(<4 x s16>)
@@ -60,67 +61,68 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: test_v8f16.exp2
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
-    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
-    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT1]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
-    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT2]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
-    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT3]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
-    ; CHECK: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT4]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY5]](s32)
-    ; CHECK: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT5]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY6]](s32)
-    ; CHECK: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT6]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY7]](s32)
-    ; CHECK: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT7]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY8]](s32)
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16), [[FPTRUNC4]](s16), [[FPTRUNC5]](s16), [[FPTRUNC6]](s16), [[FPTRUNC7]](s16)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT1]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+    ; CHECK-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT2]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
+    ; CHECK-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT3]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
+    ; CHECK-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT4]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY5]](s32)
+    ; CHECK-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT5]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY6]](s32)
+    ; CHECK-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT6]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY7]](s32)
+    ; CHECK-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT7]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY8]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16), [[FPTRUNC4]](s16), [[FPTRUNC5]](s16), [[FPTRUNC6]](s16), [[FPTRUNC7]](s16)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s16>) = COPY $q0
     %1:_(<8 x s16>) = G_FEXP2 %0
     $q0 = COPY %1(<8 x s16>)
@@ -136,21 +138,22 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: test_v2f32.exp2
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV1]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32)
-    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV1]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32)
+    ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = G_FEXP2 %0
     $d0 = COPY %1(<2 x s32>)
@@ -166,31 +169,32 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: test_v4f32.exp2
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV1]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV2]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV3]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV1]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV2]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV3]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = G_FEXP2 %0
     $q0 = COPY %1(<4 x s32>)
@@ -206,21 +210,22 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: test_v2f64.exp2
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $d0 = COPY [[UV]](s64)
-    ; CHECK: BL &exp2, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit-def $d0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $d0 = COPY [[UV1]](s64)
-    ; CHECK: BL &exp2, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit-def $d0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY1]](s64), [[COPY2]](s64)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $d0 = COPY [[UV]](s64)
+    ; CHECK-NEXT: BL &exp2, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit-def $d0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $d0 = COPY [[UV1]](s64)
+    ; CHECK-NEXT: BL &exp2, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit-def $d0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY1]](s64), [[COPY2]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = G_FEXP2 %0
     $q0 = COPY %1(<2 x s64>)
@@ -236,16 +241,17 @@ body:             |
     liveins: $h0
     ; CHECK-LABEL: name: test_exp2_half
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
-    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT]](s32)
-    ; CHECK: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
-    ; CHECK: $h0 = COPY [[FPTRUNC]](s16)
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s16) = COPY $h0
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[COPY]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+    ; CHECK-NEXT: BL &exp2f, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY1]](s32)
+    ; CHECK-NEXT: $h0 = COPY [[FPTRUNC]](s16)
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:_(s16) = COPY $h0
     %1:_(s16) = G_FEXP2 %0
     $h0 = COPY %1(s16)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fmaximum.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fmaximum.mir
index 0162898ae03cf..b2250202f3e80 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fmaximum.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fmaximum.mir
@@ -16,6 +16,7 @@ body:             |
     ; FP16-NEXT: %legalize_me:_(s16) = G_FMAXIMUM %a, %b
     ; FP16-NEXT: $h0 = COPY %legalize_me(s16)
     ; FP16-NEXT: RET_ReallyLR implicit $h0
+    ;
     ; NO-FP16-LABEL: name: s16_legal_with_full_fp16
     ; NO-FP16: liveins: $h0, $h1
     ; NO-FP16-NEXT: {{  $}}
@@ -48,6 +49,7 @@ body:             |
     ; FP16-NEXT: %legalize_me:_(s32) = G_FMAXIMUM %a, %b
     ; FP16-NEXT: $s0 = COPY %legalize_me(s32)
     ; FP16-NEXT: RET_ReallyLR implicit $s0
+    ;
     ; NO-FP16-LABEL: name: s32_legal
     ; NO-FP16: liveins: $s0, $s1
     ; NO-FP16-NEXT: {{  $}}
@@ -77,6 +79,7 @@ body:             |
     ; FP16-NEXT: %legalize_me:_(s64) = G_FMAXIMUM %a, %b
     ; FP16-NEXT: $d0 = COPY %legalize_me(s64)
     ; FP16-NEXT: RET_ReallyLR implicit $d0
+    ;
     ; NO-FP16-LABEL: name: s64_legal
     ; NO-FP16: liveins: $d0, $d1
     ; NO-FP16-NEXT: {{  $}}
@@ -105,6 +108,7 @@ body:             |
     ; FP16-NEXT: %maximum:_(<2 x s32>) = G_FMAXIMUM %a, %b
     ; FP16-NEXT: $d0 = COPY %maximum(<2 x s32>)
     ; FP16-NEXT: RET_ReallyLR implicit $d0
+    ;
     ; NO-FP16-LABEL: name: v2s32
     ; NO-FP16: liveins: $d0, $d1
     ; NO-FP16-NEXT: {{  $}}
@@ -134,6 +138,7 @@ body:             |
     ; FP16-NEXT: %maximum:_(<4 x s32>) = G_FMAXIMUM %a, %b
     ; FP16-NEXT: $q0 = COPY %maximum(<4 x s32>)
     ; FP16-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; NO-FP16-LABEL: name: v4s32
     ; NO-FP16: liveins: $q0, $q1
     ; NO-FP16-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fminimum.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fminimum.mir
index 1c3c8bbcece61..655b3b9f58619 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fminimum.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fminimum.mir
@@ -16,6 +16,7 @@ body:             |
     ; FP16-NEXT: %legalize_me:_(s16) = G_FMINIMUM %a, %b
     ; FP16-NEXT: $h0 = COPY %legalize_me(s16)
     ; FP16-NEXT: RET_ReallyLR implicit $h0
+    ;
     ; NO-FP16-LABEL: name: s16_legal_with_full_fp16
     ; NO-FP16: liveins: $h0, $h1
     ; NO-FP16-NEXT: {{  $}}
@@ -48,6 +49,7 @@ body:             |
     ; FP16-NEXT: %legalize_me:_(s32) = G_FMINIMUM %a, %b
     ; FP16-NEXT: $s0 = COPY %legalize_me(s32)
     ; FP16-NEXT: RET_ReallyLR implicit $s0
+    ;
     ; NO-FP16-LABEL: name: s32_legal
     ; NO-FP16: liveins: $s0, $s1
     ; NO-FP16-NEXT: {{  $}}
@@ -77,6 +79,7 @@ body:             |
     ; FP16-NEXT: %legalize_me:_(s64) = G_FMINIMUM %a, %b
     ; FP16-NEXT: $d0 = COPY %legalize_me(s64)
     ; FP16-NEXT: RET_ReallyLR implicit $d0
+    ;
     ; NO-FP16-LABEL: name: s64_legal
     ; NO-FP16: liveins: $d0, $d1
     ; NO-FP16-NEXT: {{  $}}
@@ -105,6 +108,7 @@ body:             |
     ; FP16-NEXT: %minimum:_(<4 x s32>) = G_FMINIMUM %a, %b
     ; FP16-NEXT: $q0 = COPY %minimum(<4 x s32>)
     ; FP16-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; NO-FP16-LABEL: name: v4s32
     ; NO-FP16: liveins: $q0, $q1
     ; NO-FP16-NEXT: {{  $}}
@@ -139,6 +143,7 @@ body:             |
     ; FP16-NEXT: $q0 = COPY [[FMINIMUM]](<4 x s32>)
     ; FP16-NEXT: $q1 = COPY [[FMINIMUM1]](<4 x s32>)
     ; FP16-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; NO-FP16-LABEL: name: v8s32
     ; NO-FP16: liveins: $q0, $q1, $q2, $q3
     ; NO-FP16-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp-arith.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp-arith.mir
index be397ede0b894..3f4c6e0b1257c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp-arith.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp-arith.mir
@@ -6,9 +6,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fadd_v2s64
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[FADD]](<2 x s64>)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[FADD]](<2 x s64>)
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<2 x s64>) = G_FADD %0, %1
@@ -21,9 +21,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fdiv_v2s32
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[FDIV:%[0-9]+]]:_(<2 x s32>) = G_FDIV [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[FDIV]](<2 x s32>)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[FDIV:%[0-9]+]]:_(<2 x s32>) = G_FDIV [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[FDIV]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_FDIV %0, %1
@@ -36,9 +36,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fsub_v2s32
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[FSUB:%[0-9]+]]:_(<2 x s32>) = G_FSUB [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[FSUB]](<2 x s32>)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[FSUB:%[0-9]+]]:_(<2 x s32>) = G_FSUB [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[FSUB]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_FSUB %0, %1
@@ -51,8 +51,8 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fneg_v2s32
     ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[FNEG:%[0-9]+]]:_(<2 x s32>) = G_FNEG [[COPY]]
-    ; CHECK: $d0 = COPY [[FNEG]](<2 x s32>)
+    ; CHECK-NEXT: [[FNEG:%[0-9]+]]:_(<2 x s32>) = G_FNEG [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[FNEG]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = G_FNEG %0
     $d0 = COPY %1(<2 x s32>)
@@ -64,9 +64,9 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fmul_v4s32
     ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[FMUL]](<4 x s32>)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[FMUL]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_FMUL %0, %1
@@ -79,10 +79,10 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fmul_v4s64
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: [[FMUL:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
-    ; CHECK: [[FMUL1:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
-    ; CHECK: $q0 = COPY [[FMUL]](<2 x s64>)
-    ; CHECK: $q1 = COPY [[FMUL1]](<2 x s64>)
+    ; CHECK-NEXT: [[FMUL:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
+    ; CHECK-NEXT: [[FMUL1:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
+    ; CHECK-NEXT: $q0 = COPY [[FMUL]](<2 x s64>)
+    ; CHECK-NEXT: $q1 = COPY [[FMUL1]](<2 x s64>)
     %0:_(<4 x s64>) = G_IMPLICIT_DEF
     %1:_(<4 x s64>) = G_IMPLICIT_DEF
     %2:_(<4 x s64>) = G_FMUL %0, %1
@@ -97,10 +97,10 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_fmul_v8s32
     ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
-    ; CHECK: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[DEF]], [[DEF]]
-    ; CHECK: [[FMUL1:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[DEF]], [[DEF]]
-    ; CHECK: $q0 = COPY [[FMUL]](<4 x s32>)
-    ; CHECK: $q1 = COPY [[FMUL1]](<4 x s32>)
+    ; CHECK-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[DEF]], [[DEF]]
+    ; CHECK-NEXT: [[FMUL1:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[DEF]], [[DEF]]
+    ; CHECK-NEXT: $q0 = COPY [[FMUL]](<4 x s32>)
+    ; CHECK-NEXT: $q1 = COPY [[FMUL1]](<4 x s32>)
     %0:_(<8 x s32>) = G_IMPLICIT_DEF
     %1:_(<8 x s32>) = G_IMPLICIT_DEF
     %2:_(<8 x s32>) = G_FMUL %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp128-fconstant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp128-fconstant.mir
index a0979c5f5d1e0..8c460f6ec7c7c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp128-fconstant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp128-fconstant.mir
@@ -12,9 +12,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fp128-fconstant
     ; CHECK: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0
-    ; CHECK: $q0 = COPY [[LDRQui]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s128) from constant-pool)
+    ; CHECK-NEXT: $q0 = COPY [[LDRQui]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(s128) = G_FCONSTANT fp128 0xL00000000000000004000000000000000
     $q0 = COPY %0:fpr(s128)
     RET_ReallyLR implicit $q0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp16-fconstant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp16-fconstant.mir
index 44d6b95eb5491..ddf219dc4927e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp16-fconstant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fp16-fconstant.mir
@@ -11,6 +11,7 @@ body: |
     ; NO-FP16: %cst:_(s16) = G_CONSTANT i16 0
     ; NO-FP16-NEXT: $h0 = COPY %cst(s16)
     ; NO-FP16-NEXT: RET_ReallyLR implicit $h0
+    ;
     ; FP16-LABEL: name: fp16
     ; FP16: %cst:_(s16) = G_FCONSTANT half 0xH0000
     ; FP16-NEXT: $h0 = COPY %cst(s16)
@@ -28,6 +29,7 @@ body: |
     ; NO-FP16: %cst:_(s16) = G_CONSTANT i16 16384
     ; NO-FP16-NEXT: $h0 = COPY %cst(s16)
     ; NO-FP16-NEXT: RET_ReallyLR implicit $h0
+    ;
     ; FP16-LABEL: name: fp16_non_zero
     ; FP16: %cst:_(s16) = G_FCONSTANT half 0xH4000
     ; FP16-NEXT: $h0 = COPY %cst(s16)
@@ -46,6 +48,7 @@ body:             |
     ; NO-FP16-NEXT: %ext:_(s32) = G_FPEXT %cst(s16)
     ; NO-FP16-NEXT: $w0 = COPY %ext(s32)
     ; NO-FP16-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; FP16-LABEL: name: nan
     ; FP16: %cst:_(s16) = G_FCONSTANT half 0xH7C01
     ; FP16-NEXT: %ext:_(s32) = G_FPEXT %cst(s16)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
index 89515e1e62d0d..588dfd994df88 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fpext.mir
@@ -14,16 +14,17 @@ body:             |
 
     ; CHECK-LABEL: name: fpext_v4s64_v4s32
     ; CHECK: liveins: $q0, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
-    ; CHECK: [[FPEXT:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV]](<2 x s32>)
-    ; CHECK: [[FPEXT1:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV1]](<2 x s32>)
-    ; CHECK: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>), align 32)
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV]](<2 x s32>)
+    ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV1]](<2 x s32>)
+    ; CHECK-NEXT: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(<4 x s32>) = COPY $q0
     %1:_(p0) = COPY $x0
     %2:_(<4 x s64>) = G_FPEXT %0(<4 x s32>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
index 73af9c4ba105b..082752f9c8945 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
@@ -7,9 +7,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s32_s32
-    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
-    ; CHECK: $w0 = COPY [[FPTOSI]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOSI]](s32)
     %0:_(s32) = G_IMPLICIT_DEF
     %1:_(s32) = G_FPTOSI %0
     $w0 = COPY %1
@@ -21,9 +23,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s32_s32
-    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[DEF]](s32)
-    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[DEF]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOUI]](s32)
     %0:_(s32) = G_IMPLICIT_DEF
     %1:_(s32) = G_FPTOUI %0
     $w0 = COPY %1
@@ -35,9 +39,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_fptosi_s32_s64
-    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
-    ; CHECK: $w0 = COPY [[FPTOSI]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOSI]](s32)
     %0:_(s32) = G_IMPLICIT_DEF
     %1:_(s32) = G_FPTOSI %0
     $w0 = COPY %1
@@ -49,9 +55,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_fptoui_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64)
-    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOUI]](s32)
     %0:_(s64) = COPY $x0
     %1:_(s32) = G_FPTOUI %0
     $w0 = COPY %1
@@ -63,9 +71,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s32)
-    ; CHECK: $x0 = COPY [[FPTOSI]](s64)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[FPTOSI]](s64)
     %0:_(s32) = COPY $w0
     %1:_(s64) = G_FPTOSI %0
     $x0 = COPY %1
@@ -77,9 +87,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s32)
-    ; CHECK: $x0 = COPY [[FPTOUI]](s64)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[FPTOUI]](s64)
     %0:_(s32) = COPY $w0
     %1:_(s64) = G_FPTOUI %0
     $x0 = COPY %1
@@ -91,9 +103,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_fptosi_s64_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s64)
-    ; CHECK: $x0 = COPY [[FPTOSI]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[FPTOSI]](s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_FPTOSI %0
     $x0 = COPY %1
@@ -105,9 +119,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_fptoui_s64_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s64)
-    ; CHECK: $x0 = COPY [[FPTOUI]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[FPTOUI]](s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_FPTOUI %0
     $x0 = COPY %1
@@ -121,10 +137,12 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s1_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[FPTOSI]](s32)
-    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[FPTOSI]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ANYEXT]](s64)
     %0:_(s32) = COPY $w0
     %1:_(s1) = G_FPTOSI %0
     %2:_(s64) = G_ANYEXT %1
@@ -137,9 +155,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s1_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32)
-    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOUI]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s1) = G_FPTOUI %0
     %2:_(s32) = G_ANYEXT %1
@@ -152,9 +172,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_fptosi_s8_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
-    ; CHECK: $w0 = COPY [[FPTOSI]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOSI]](s32)
     %0:_(s64) = COPY $x0
     %1:_(s8) = G_FPTOSI %0
     %2:_(s32) = G_ANYEXT %1
@@ -167,9 +189,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_fptoui_s8_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64)
-    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOUI]](s32)
     %0:_(s64) = COPY $x0
     %1:_(s8) = G_FPTOUI %0
     %2:_(s32) = G_ANYEXT %1
@@ -182,9 +206,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s16_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
-    ; CHECK: $w0 = COPY [[FPTOSI]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOSI]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s16) = G_FPTOSI %0
     %2:_(s32) = G_ANYEXT %1
@@ -197,9 +223,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s16_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32)
-    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[FPTOUI]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s16) = G_FPTOUI %0
     %2:_(s32) = G_ANYEXT %1
@@ -212,9 +240,11 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_fptoui_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:_(<4 x s32>) = G_FPTOUI [[COPY]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[FPTOUI]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(<4 x s32>) = G_FPTOUI [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[FPTOUI]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = G_FPTOUI %0
     $q0 = COPY %1
@@ -226,9 +256,11 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_fptosi_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:_(<4 x s32>) = G_FPTOSI [[COPY]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[FPTOSI]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:_(<4 x s32>) = G_FPTOSI [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[FPTOSI]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = G_FPTOSI %0
     $q0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
index 37fad041311dc..a03fe25acf97c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global-pic.mir
@@ -20,8 +20,8 @@ body: |
 
     ; PIC-LABEL: name: test_global
     ; PIC: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var
-    ; PIC: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
-    ; PIC: $x0 = COPY [[PTRTOINT]](s64)
+    ; PIC-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
+    ; PIC-NEXT: $x0 = COPY [[PTRTOINT]](s64)
     %0(p0) = G_GLOBAL_VALUE @var
     %1:_(s64) = G_PTRTOINT %0
     $x0 = COPY %1
@@ -34,8 +34,8 @@ body: |
   bb.0:
     ; PIC-LABEL: name: test_global_with_offset
     ; PIC: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var + 1
-    ; PIC: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
-    ; PIC: $x0 = COPY [[PTRTOINT]](s64)
+    ; PIC-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
+    ; PIC-NEXT: $x0 = COPY [[PTRTOINT]](s64)
     %0(p0) = G_GLOBAL_VALUE @var + 1
     %1:_(s64) = G_PTRTOINT %0
     $x0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
index 18c861b3d20e7..348545cc8c596 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-global.mir
@@ -20,13 +20,14 @@ body: |
     ; model isn't 'Small'.
     ; CHECK-LABEL: name: test_global
     ; CHECK: [[ADRP:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var
-    ; CHECK: [[ADD_LOW:%[0-9]+]]:_(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var
-    ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[ADD_LOW]](p0)
-    ; CHECK: $x0 = COPY [[PTRTOINT]](s64)
+    ; CHECK-NEXT: [[ADD_LOW:%[0-9]+]]:_(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[ADD_LOW]](p0)
+    ; CHECK-NEXT: $x0 = COPY [[PTRTOINT]](s64)
+    ;
     ; CMLARGE-LABEL: name: test_global
     ; CMLARGE: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var
-    ; CMLARGE: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
-    ; CMLARGE: $x0 = COPY [[PTRTOINT]](s64)
+    ; CMLARGE-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
+    ; CMLARGE-NEXT: $x0 = COPY [[PTRTOINT]](s64)
     %0(p0) = G_GLOBAL_VALUE @var
     %1:_(s64) = G_PTRTOINT %0
     $x0 = COPY %1
@@ -37,16 +38,16 @@ body: |
   bb.0:
     ; When we legalize into ADRP + G_ADD_LOW, both should inherit the offset
     ; from the original G_GLOBAL_VALUE.
-    ;
     ; CHECK-LABEL: name: test_global_with_offset
     ; CHECK: [[ADRP:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var + 1
-    ; CHECK: [[ADD_LOW:%[0-9]+]]:_(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var + 1
-    ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[ADD_LOW]](p0)
-    ; CHECK: $x0 = COPY [[PTRTOINT]](s64)
+    ; CHECK-NEXT: [[ADD_LOW:%[0-9]+]]:_(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var + 1
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[ADD_LOW]](p0)
+    ; CHECK-NEXT: $x0 = COPY [[PTRTOINT]](s64)
+    ;
     ; CMLARGE-LABEL: name: test_global_with_offset
     ; CMLARGE: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var + 1
-    ; CMLARGE: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
-    ; CMLARGE: $x0 = COPY [[PTRTOINT]](s64)
+    ; CMLARGE-NEXT: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
+    ; CMLARGE-NEXT: $x0 = COPY [[PTRTOINT]](s64)
     %0:_(p0) = G_GLOBAL_VALUE @var + 1
     %1:_(s64) = G_PTRTOINT %0
     $x0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-hint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-hint.mir
index 8cf3e70b29355..3a3848aea09c1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-hint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-hint.mir
@@ -10,10 +10,12 @@ body: |
   bb.0:
     liveins: $w0, $w1
     ; CHECK-LABEL: name: assert_zext
-    ; CHECK: %copy:_(s32) = COPY $w1
-    ; CHECK: %hint:_(s32) = G_ASSERT_ZEXT %copy, 16
-    ; CHECK: $w0 = COPY %hint(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s32) = COPY $w1
+    ; CHECK-NEXT: %hint:_(s32) = G_ASSERT_ZEXT %copy, 16
+    ; CHECK-NEXT: $w0 = COPY %hint(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:_(s32) = COPY $w1
     %hint:_(s32) = G_ASSERT_ZEXT %copy, 16
     $w0 = COPY %hint

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir
index 899eb67fc6bab..e1381aa40f487 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir
@@ -17,8 +17,10 @@ body: |
     liveins: $x0
 
     ; CHECK-LABEL: name: test_copy
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: $x0 = COPY [[COPY]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: $x0 = COPY [[COPY]](s64)
     %0(s64) = COPY $x0
     $x0 = COPY %0
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-get-dynamic-area-offset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-get-dynamic-area-offset.mir
index a157554132530..c64876ebf58fc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-get-dynamic-area-offset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-get-dynamic-area-offset.mir
@@ -13,8 +13,8 @@ body:             |
     liveins:
     ; CHECK-LABEL: name: test_64
     ; CHECK: %v:_(s64) = G_CONSTANT i64 0
-    ; CHECK: $x0 = COPY %v(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: $x0 = COPY %v(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %v:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.get.dynamic.area.offset)
     $x0 = COPY %v(s64)
     RET_ReallyLR implicit $x0
@@ -30,8 +30,8 @@ body:             |
     liveins:
     ; CHECK-LABEL: name: test_32
     ; CHECK: %v:_(s32) = G_CONSTANT i32 0
-    ; CHECK: $w0 = COPY %v(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w0 = COPY %v(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %v:_(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.get.dynamic.area.offset)
     $w0 = COPY %v(s32)
     RET_ReallyLR implicit $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inttoptr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inttoptr.mir
index cde5889a946b1..b2fbabf9e6642 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inttoptr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inttoptr.mir
@@ -10,10 +10,11 @@ body:             |
     liveins: $q0, $x0
     ; CHECK-LABEL: name: p0_s64_legal
     ; CHECK: liveins: $q0, $x0
-    ; CHECK: %copy:_(s64) = COPY $x0
-    ; CHECK: %inttoptr:_(p0) = G_INTTOPTR %copy(s64)
-    ; CHECK: $x0 = COPY %inttoptr(p0)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
+    ; CHECK-NEXT: %inttoptr:_(p0) = G_INTTOPTR %copy(s64)
+    ; CHECK-NEXT: $x0 = COPY %inttoptr(p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:_(s64) = COPY $x0
     %inttoptr:_(p0) = G_INTTOPTR %copy(s64)
     $x0 = COPY %inttoptr(p0)
@@ -28,12 +29,13 @@ body:             |
 
     ; CHECK-LABEL: name: v2s64_to_v2p0_legal
     ; CHECK: liveins: $q0, $x0
-    ; CHECK: %copy:_(<2 x s64>) = COPY $q0
-    ; CHECK: %idx:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %inttoptr:_(<2 x p0>) = G_INTTOPTR %copy(<2 x s64>)
-    ; CHECK: %extract:_(p0) = G_EXTRACT_VECTOR_ELT %inttoptr(<2 x p0>), %idx(s64)
-    ; CHECK: $x0 = COPY %extract(p0)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: %idx:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %inttoptr:_(<2 x p0>) = G_INTTOPTR %copy(<2 x s64>)
+    ; CHECK-NEXT: %extract:_(p0) = G_EXTRACT_VECTOR_ELT %inttoptr(<2 x p0>), %idx(s64)
+    ; CHECK-NEXT: $x0 = COPY %extract(p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:_(<2 x s64>) = COPY $q0
     %idx:_(s64) = G_CONSTANT i64 0
     %inttoptr:_(<2 x p0>) = G_INTTOPTR %copy(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-llround.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-llround.mir
index 1b0b0e742e972..be626ff663daa 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-llround.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-llround.mir
@@ -10,11 +10,12 @@ body:             |
     liveins: $s0
     ; CHECK-LABEL: name: s64_s32_legal
     ; CHECK: liveins: $s0
-    ; CHECK: %copy:_(s32) = COPY $s0
-    ; CHECK: %llround:_(s64) = G_LLROUND %copy(s32)
-    ; CHECK: %trunc:_(s32) = G_TRUNC %llround(s64)
-    ; CHECK: $w0 = COPY %trunc(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s32) = COPY $s0
+    ; CHECK-NEXT: %llround:_(s64) = G_LLROUND %copy(s32)
+    ; CHECK-NEXT: %trunc:_(s32) = G_TRUNC %llround(s64)
+    ; CHECK-NEXT: $w0 = COPY %trunc(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:_(s32) = COPY $s0
     %llround:_(s64) = G_LLROUND %copy(s32)
     %trunc:_(s32) = G_TRUNC %llround
@@ -29,10 +30,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: s64_s64_legal
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:_(s64) = COPY $d0
-    ; CHECK: %llround:_(s64) = G_LLROUND %copy(s64)
-    ; CHECK: $x0 = COPY %llround(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s64) = COPY $d0
+    ; CHECK-NEXT: %llround:_(s64) = G_LLROUND %copy(s64)
+    ; CHECK-NEXT: $x0 = COPY %llround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:_(s64) = COPY $d0
     %llround:_(s64) = G_LLROUND %copy(s64)
     $x0 = COPY %llround

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-fewerElts.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-fewerElts.mir
index 69b9483d47c48..f2eeb08520654 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-fewerElts.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-fewerElts.mir
@@ -10,10 +10,11 @@ body:             |
 
     ; CHECK-LABEL: name: load_v4s32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
-    ; CHECK: G_STORE [[LOAD]](<4 x s32>), [[COPY1]](p0) :: (store (<4 x s32>))
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
+    ; CHECK-NEXT: G_STORE [[LOAD]](<4 x s32>), [[COPY1]](p0) :: (store (<4 x s32>))
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>))
@@ -30,10 +31,11 @@ body:             |
 
     ; CHECK-LABEL: name: load_v2s64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
-    ; CHECK: G_STORE [[LOAD]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>))
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
+    ; CHECK-NEXT: G_STORE [[LOAD]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>))
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>))

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
index a8f373f4f6135..d6a2cd8c04b6e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-store-vector-of-ptr.mir
@@ -29,11 +29,12 @@ body:             |
 
     ; CHECK-LABEL: name: store_v2p0
     ; CHECK: liveins: $q0, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x p0>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[COPY]](<2 x p0>)
-    ; CHECK: G_STORE [[BITCAST]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>) into %ir.ptr)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x p0>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[COPY]](<2 x p0>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>) into %ir.ptr)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(<2 x p0>) = COPY $q0
     %1:_(p0) = COPY $x0
     G_STORE %0(<2 x p0>), %1(p0) :: (store (<2 x p0>) into %ir.ptr)
@@ -51,11 +52,12 @@ body:             |
 
     ; CHECK-LABEL: name: load_v2p0
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>) from %ir.ptr)
-    ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
-    ; CHECK: $q0 = COPY [[BITCAST]](<2 x p0>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>) from %ir.ptr)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
+    ; CHECK-NEXT: $q0 = COPY [[BITCAST]](<2 x p0>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(p0) = COPY $x0
     %1:_(<2 x p0>) = G_LOAD %0(p0) :: (load (<2 x p0>) from %ir.ptr)
     $q0 = COPY %1(<2 x p0>)
@@ -75,10 +77,11 @@ body:             |
 
     ; CHECK-LABEL: name: load_v2p1
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[COPY]](p0) :: (load (<2 x p1>) from %ir.ptr)
-    ; CHECK: $q0 = COPY [[LOAD]](<2 x p1>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[COPY]](p0) :: (load (<2 x p1>) from %ir.ptr)
+    ; CHECK-NEXT: $q0 = COPY [[LOAD]](<2 x p1>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(p0) = COPY $x0
     %1:_(<2 x p1>) = G_LOAD %0(p0) :: (load (<2 x p1>) from %ir.ptr)
     $q0 = COPY %1(<2 x p1>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-trunc.mir
index c6e428bf425ed..6d013327761cc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-load-trunc.mir
@@ -11,10 +11,10 @@ body: |
 
     ; CHECK-LABEL: name: test_load_trunc
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s16))
-    ; CHECK: [[ASSERT_ZEXT:%[0-9]+]]:_(s16) = G_ASSERT_ZEXT [[LOAD]], 10
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s16)
-    ; CHECK: RET_ReallyLR implicit [[TRUNC]](s1)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s16))
+    ; CHECK-NEXT: [[ASSERT_ZEXT:%[0-9]+]]:_(s16) = G_ASSERT_ZEXT [[LOAD]], 10
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s16)
+    ; CHECK-NEXT: RET_ReallyLR implicit [[TRUNC]](s1)
     %0:_(p0) = G_FRAME_INDEX %stack.0
     %1:_(s10) = G_LOAD %0(p0) :: (load (s10))
     %2:_(s1) = G_TRUNC %1(s10)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lrint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lrint.mir
index ddc4f07e1a146..203544d85372a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lrint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lrint.mir
@@ -12,11 +12,12 @@ body:             |
 
     ; CHECK-LABEL: name: testmsws
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s32)
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[INTRINSIC_LRINT]](s64)
-    ; CHECK: $w0 = COPY [[TRUNC]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s32)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[INTRINSIC_LRINT]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $s0
     %1:_(s64) = G_INTRINSIC_LRINT %0(s32)
     %2:_(s32) = G_TRUNC %1(s64)
@@ -36,10 +37,11 @@ body:             |
 
     ; CHECK-LABEL: name: testmsxs
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s32)
-    ; CHECK: $x0 = COPY [[INTRINSIC_LRINT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[INTRINSIC_LRINT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s32) = COPY $s0
     %1:_(s64) = G_INTRINSIC_LRINT %0(s32)
     $x0 = COPY %1(s64)
@@ -58,11 +60,12 @@ body:             |
 
     ; CHECK-LABEL: name: testmswd
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s64)
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[INTRINSIC_LRINT]](s64)
-    ; CHECK: $w0 = COPY [[TRUNC]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s64)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[INTRINSIC_LRINT]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $d0
     %1:_(s64) = G_INTRINSIC_LRINT %0(s64)
     %2:_(s32) = G_TRUNC %1(s64)
@@ -82,10 +85,11 @@ body:             |
 
     ; CHECK-LABEL: name: testmsxd
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s64)
-    ; CHECK: $x0 = COPY [[INTRINSIC_LRINT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[INTRINSIC_LRINT:%[0-9]+]]:_(s64) = G_INTRINSIC_LRINT [[COPY]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[INTRINSIC_LRINT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $d0
     %1:_(s64) = G_INTRINSIC_LRINT %0(s64)
     $x0 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lround.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lround.mir
index c61fa6c3ee147..c7baac584fbb7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lround.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-lround.mir
@@ -10,11 +10,12 @@ body:             |
     liveins: $s0
     ; CHECK-LABEL: name: s64_s32_legal
     ; CHECK: liveins: $s0
-    ; CHECK: %copy:_(s32) = COPY $s0
-    ; CHECK: %lround:_(s64) = G_LROUND %copy(s32)
-    ; CHECK: %trunc:_(s32) = G_TRUNC %lround(s64)
-    ; CHECK: $w0 = COPY %trunc(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s32) = COPY $s0
+    ; CHECK-NEXT: %lround:_(s64) = G_LROUND %copy(s32)
+    ; CHECK-NEXT: %trunc:_(s32) = G_TRUNC %lround(s64)
+    ; CHECK-NEXT: $w0 = COPY %trunc(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:_(s32) = COPY $s0
     %lround:_(s64) = G_LROUND %copy(s32)
     %trunc:_(s32) = G_TRUNC %lround
@@ -29,10 +30,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: s64_s64_legal
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:_(s64) = COPY $d0
-    ; CHECK: %lround:_(s64) = G_LROUND %copy(s64)
-    ; CHECK: $x0 = COPY %lround(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s64) = COPY $d0
+    ; CHECK-NEXT: %lround:_(s64) = G_LROUND %copy(s64)
+    ; CHECK-NEXT: $x0 = COPY %lround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:_(s64) = COPY $d0
     %lround:_(s64) = G_LROUND %copy(s64)
     $x0 = COPY %lround

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
index f2f99133c032b..59d2630cb0241 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-with-debug-info.mir
@@ -39,14 +39,16 @@ body:             |
     liveins: $w2, $x0, $x1
 
     ; CHECK-LABEL: name: test_memcpy_tail
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0, debug-location !10
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1, debug-location !DILocation(line: 3, column: 1, scope: !5)
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2, debug-location !DILocation(line: 4, column: 1, scope: !5)
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32), debug-location !DILocation(line: 5, column: 1, scope: !5)
-    ; CHECK: $x0 = COPY [[COPY]](p0), debug-location !DILocation(line: 6, column: 1, scope: !5)
-    ; CHECK: $x1 = COPY [[COPY1]](p0), debug-location !DILocation(line: 6, column: 1, scope: !5)
-    ; CHECK: $x2 = COPY [[ZEXT]](s64), debug-location !DILocation(line: 6, column: 1, scope: !5)
-    ; CHECK: TCRETURNdi &memcpy, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1, implicit $x2, debug-location !DILocation(line: 6, column: 1, scope: !5)
+    ; CHECK: liveins: $w2, $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0,  debug-location !10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1,  debug-location !DILocation(line: 3, column: 1, scope: !5)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2,  debug-location !DILocation(line: 4, column: 1, scope: !5)
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32),  debug-location !DILocation(line: 5, column: 1, scope: !5)
+    ; CHECK-NEXT: $x0 = COPY [[COPY]](p0),  debug-location !DILocation(line: 6, column: 1, scope: !5)
+    ; CHECK-NEXT: $x1 = COPY [[COPY1]](p0),  debug-location !DILocation(line: 6, column: 1, scope: !5)
+    ; CHECK-NEXT: $x2 = COPY [[ZEXT]](s64),  debug-location !DILocation(line: 6, column: 1, scope: !5)
+    ; CHECK-NEXT: TCRETURNdi &memcpy, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1, implicit $x2,  debug-location !DILocation(line: 6, column: 1, scope: !5)
     %0:_(p0) = COPY $x0, debug-location !14
     %1:_(p0) = COPY $x1, debug-location !DILocation(line: 3, column: 1, scope: !11)
     %2:_(s32) = COPY $w2, debug-location !DILocation(line: 4, column: 1, scope: !11)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
index 2ba8e76c777f4..7b3be3468b93a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
@@ -7,7 +7,9 @@ body: |
   bb.0:
     liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_scalar_or_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
@@ -33,7 +35,9 @@ body: |
     ; copying the results of the G_OR ops.
 
     ; CHECK-LABEL: name: test_big_scalar_power_of_2
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
@@ -183,19 +187,19 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s8) = COPY $b0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s8) = COPY $b1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s8) = COPY $b2
-    ; CHECK-NEXT: [[ANYEXT0:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
     ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY1]](s8)
     ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY2]](s8)
-    ; CHECK-NEXT: [[IMPLICIT_DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT0]](s16), [[ANYEXT1]](s16), [[ANYEXT2]](s16), [[IMPLICIT_DEF]](s16)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT]](s16), [[ANYEXT1]](s16), [[ANYEXT2]](s16), [[DEF]](s16)
     ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
     ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY1]](s8)
     ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY2]](s8)
-    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT3]](s16), [[ANYEXT4]](s16), [[ANYEXT5]](s16), [[IMPLICIT_DEF]](s16)
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT3]](s16), [[ANYEXT4]](s16), [[ANYEXT5]](s16), [[DEF]](s16)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
-    ; CHECK-NEXT: [[VAL0:%[0-9]+]]:_(s16), [[VAL1:%[0-9]+]]:_(s16), [[VAL2:%[0-9]+]]:_(s16), [[VAL3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR]](<4 x s16>)
-    ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[VAL0]](s16)
-    ; CHECK-NEXT: $b0 = COPY [[TRUNC3]](s8)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR]](<4 x s16>)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
+    ; CHECK-NEXT: $b0 = COPY [[TRUNC]](s8)
     ; CHECK-NEXT: RET_ReallyLR implicit $b0
     %1:_(s8) = COPY $b0
     %2:_(s8) = COPY $b1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
index 10d86c663664c..2b7dc19d6a9a8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
@@ -6,23 +6,23 @@ body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: test_pow
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s3
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $d0 = COPY [[COPY]](s64)
-    ; CHECK: $d1 = COPY [[COPY1]](s64)
-    ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: $x0 = COPY [[COPY4]](s64)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[COPY2]](s32)
-    ; CHECK: $s1 = COPY [[COPY3]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: $w0 = COPY [[COPY5]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s3
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $d0 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: $d1 = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: $x0 = COPY [[COPY4]](s64)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[COPY2]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[COPY3]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: $w0 = COPY [[COPY5]](s32)
     %0:_(s64) = COPY $d0
     %1:_(s64) = COPY $d1
     %2:_(s32) = COPY $s2
@@ -43,49 +43,50 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.pow
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $d1
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
-    ; CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
-    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT1]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
-    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT2]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT3]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
-    ; CHECK: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; CHECK: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT4]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT5]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
-    ; CHECK: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; CHECK: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT6]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT7]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY5]](s32)
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
-    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $d1
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT1]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+    ; CHECK-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT2]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT3]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
+    ; CHECK-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT4]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT5]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
+    ; CHECK-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT6]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT7]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY5]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16)
+    ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<4 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<4 x s16>) = COPY $d0
     %1:_(<4 x s16>) = COPY $d1
     %2:_(<4 x s16>) = G_FPOW %0, %1
@@ -103,85 +104,86 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.pow
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
-    ; CHECK: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<8 x s16>)
-    ; CHECK: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
-    ; CHECK: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT1]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
-    ; CHECK: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
-    ; CHECK: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT2]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT3]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
-    ; CHECK: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
-    ; CHECK: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT4]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT5]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
-    ; CHECK: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
-    ; CHECK: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT6]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT7]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY5]](s32)
-    ; CHECK: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
-    ; CHECK: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV12]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT8]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT9]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY6]](s32)
-    ; CHECK: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
-    ; CHECK: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV13]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT10]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT11]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY7]](s32)
-    ; CHECK: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
-    ; CHECK: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[UV14]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT12]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT13]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY8]](s32)
-    ; CHECK: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
-    ; CHECK: [[FPEXT15:%[0-9]+]]:_(s32) = G_FPEXT [[UV15]](s16)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[FPEXT14]](s32)
-    ; CHECK: $s1 = COPY [[FPEXT15]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY9]](s32)
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16), [[FPTRUNC4]](s16), [[FPTRUNC5]](s16), [[FPTRUNC6]](s16), [[FPTRUNC7]](s16)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
+    ; CHECK-NEXT: [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<8 x s16>)
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:_(s32) = G_FPEXT [[UV]](s16)
+    ; CHECK-NEXT: [[FPEXT1:%[0-9]+]]:_(s32) = G_FPEXT [[UV8]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT1]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY2]](s32)
+    ; CHECK-NEXT: [[FPEXT2:%[0-9]+]]:_(s32) = G_FPEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[UV9]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT2]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT3]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY3]](s32)
+    ; CHECK-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[UV10]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT4]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT5]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY4]](s32)
+    ; CHECK-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[UV11]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT6]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT7]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC3:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY5]](s32)
+    ; CHECK-NEXT: [[FPEXT8:%[0-9]+]]:_(s32) = G_FPEXT [[UV4]](s16)
+    ; CHECK-NEXT: [[FPEXT9:%[0-9]+]]:_(s32) = G_FPEXT [[UV12]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT8]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT9]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY6]](s32)
+    ; CHECK-NEXT: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[UV5]](s16)
+    ; CHECK-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[UV13]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT10]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT11]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY7]](s32)
+    ; CHECK-NEXT: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[UV6]](s16)
+    ; CHECK-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[UV14]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT12]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT13]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC6:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY8]](s32)
+    ; CHECK-NEXT: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[UV7]](s16)
+    ; CHECK-NEXT: [[FPEXT15:%[0-9]+]]:_(s32) = G_FPEXT [[UV15]](s16)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[FPEXT14]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[FPEXT15]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[COPY9]](s32)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[FPTRUNC]](s16), [[FPTRUNC1]](s16), [[FPTRUNC2]](s16), [[FPTRUNC3]](s16), [[FPTRUNC4]](s16), [[FPTRUNC5]](s16), [[FPTRUNC6]](s16), [[FPTRUNC7]](s16)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s16>) = COPY $q0
     %1:_(<8 x s16>) = COPY $q1
     %2:_(<8 x s16>) = G_FPOW %0, %1
@@ -199,25 +201,26 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.pow
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV]](s32)
-    ; CHECK: $s1 = COPY [[UV2]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV1]](s32)
-    ; CHECK: $s1 = COPY [[UV3]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[UV2]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV1]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[UV3]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
+    ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_FPOW %0, %1
@@ -235,37 +238,38 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f32.pow
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
-    ; CHECK: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV]](s32)
-    ; CHECK: $s1 = COPY [[UV4]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV1]](s32)
-    ; CHECK: $s1 = COPY [[UV5]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV2]](s32)
-    ; CHECK: $s1 = COPY [[UV6]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $s0 = COPY [[UV3]](s32)
-    ; CHECK: $s1 = COPY [[UV7]](s32)
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[UV4]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV1]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[UV5]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV2]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[UV6]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $s0 = COPY [[UV3]](s32)
+    ; CHECK-NEXT: $s1 = COPY [[UV7]](s32)
+    ; CHECK-NEXT: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_FPOW %0, %1
@@ -283,25 +287,26 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.pow
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $d0 = COPY [[UV]](s64)
-    ; CHECK: $d1 = COPY [[UV2]](s64)
-    ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: $d0 = COPY [[UV1]](s64)
-    ; CHECK: $d1 = COPY [[UV3]](s64)
-    ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY2]](s64), [[COPY3]](s64)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $d0 = COPY [[UV]](s64)
+    ; CHECK-NEXT: $d1 = COPY [[UV2]](s64)
+    ; CHECK-NEXT: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: $d0 = COPY [[UV1]](s64)
+    ; CHECK-NEXT: $d1 = COPY [[UV3]](s64)
+    ; CHECK-NEXT: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
+    ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[COPY2]](s64), [[COPY3]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<2 x s64>) = G_FPOW %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-fadd.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-fadd.mir
index fded199eb1f6a..a9a65184bf5c0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-fadd.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-fadd.mir
@@ -10,10 +10,11 @@ body:             |
 
     ; CHECK-LABEL: name: fadd_v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s32) = G_VECREDUCE_FADD [[COPY]](<2 x s32>)
-    ; CHECK: $w0 = COPY [[VECREDUCE_FADD]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[VECREDUCE_FADD:%[0-9]+]]:_(s32) = G_VECREDUCE_FADD [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: $w0 = COPY [[VECREDUCE_FADD]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(s32) = G_VECREDUCE_FADD %0(<2 x s32>)
     $w0 = COPY %1(s32)
@@ -29,10 +30,11 @@ body:             |
 
     ; CHECK-LABEL: name: fadd_v2s64
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[COPY]](<2 x s64>)
-    ; CHECK: $x0 = COPY [[VECREDUCE_FADD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: $x0 = COPY [[VECREDUCE_FADD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(<2 x s64>) = COPY $q0
     %2:_(s64) = G_VECREDUCE_FADD %0(<2 x s64>)
     $x0 = COPY %2(s64)
@@ -49,16 +51,17 @@ body:             |
     ; This is a power-of-2 legalization, so use a tree reduction.
     ; CHECK-LABEL: name: fadd_v8s64
     ; CHECK: liveins: $q0, $q1, $q2, $q3
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
-    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
-    ; CHECK: [[FADD1:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY2]], [[COPY3]]
-    ; CHECK: [[FADD2:%[0-9]+]]:_(<2 x s64>) = G_FADD [[FADD]], [[FADD1]]
-    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[FADD2]](<2 x s64>)
-    ; CHECK: $x0 = COPY [[VECREDUCE_FADD]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[FADD1:%[0-9]+]]:_(<2 x s64>) = G_FADD [[COPY2]], [[COPY3]]
+    ; CHECK-NEXT: [[FADD2:%[0-9]+]]:_(<2 x s64>) = G_FADD [[FADD]], [[FADD1]]
+    ; CHECK-NEXT: [[VECREDUCE_FADD:%[0-9]+]]:_(s64) = G_VECREDUCE_FADD [[FADD2]](<2 x s64>)
+    ; CHECK-NEXT: $x0 = COPY [[VECREDUCE_FADD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<2 x s64>) = COPY $q2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-or.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-or.mir
index 26a43331f505d..dd3dca105167a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-or.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-reduce-or.mir
@@ -13,11 +13,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v1i1
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(s32) = COPY $w0
     %0:_(s1) = G_TRUNC %1(s32)
     %2:_(s1) = G_VECREDUCE_OR %0(s1)
@@ -38,13 +39,14 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v2i1
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<2 x s32>) = COPY $d0
     %0:_(<2 x s1>) = G_TRUNC %1(<2 x s32>)
     %2:_(s1) = G_VECREDUCE_OR %0(<2 x s1>)
@@ -65,19 +67,20 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v4i1
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR2]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR2]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<4 x s16>) = COPY $d0
     %0:_(<4 x s1>) = G_TRUNC %1(<4 x s16>)
     %2:_(s1) = G_VECREDUCE_OR %0(<4 x s1>)
@@ -98,27 +101,28 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v8i1
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
-    ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
-    ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
-    ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
-    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
-    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
-    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR6]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
+    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
+    ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR6]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<8 x s8>) = COPY $d0
     %0:_(<8 x s1>) = G_TRUNC %1(<8 x s8>)
     %2:_(s1) = G_VECREDUCE_OR %0(<8 x s1>)
@@ -142,43 +146,44 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v16i1
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
-    ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
-    ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
-    ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
-    ; CHECK: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
-    ; CHECK: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
-    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ANYEXT8]], [[ANYEXT9]]
-    ; CHECK: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s8)
-    ; CHECK: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s8)
-    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ANYEXT10]], [[ANYEXT11]]
-    ; CHECK: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s8)
-    ; CHECK: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s8)
-    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[ANYEXT12]], [[ANYEXT13]]
-    ; CHECK: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s8)
-    ; CHECK: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s8)
-    ; CHECK: [[OR7:%[0-9]+]]:_(s32) = G_OR [[ANYEXT14]], [[ANYEXT15]]
-    ; CHECK: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
-    ; CHECK: [[OR9:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
-    ; CHECK: [[OR10:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
-    ; CHECK: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR6]], [[OR7]]
-    ; CHECK: [[OR12:%[0-9]+]]:_(s32) = G_OR [[OR8]], [[OR9]]
-    ; CHECK: [[OR13:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[OR11]]
-    ; CHECK: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR12]], [[OR13]]
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR14]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8), [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
+    ; CHECK-NEXT: [[ANYEXT9:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ANYEXT8]], [[ANYEXT9]]
+    ; CHECK-NEXT: [[ANYEXT10:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s8)
+    ; CHECK-NEXT: [[ANYEXT11:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s8)
+    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ANYEXT10]], [[ANYEXT11]]
+    ; CHECK-NEXT: [[ANYEXT12:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s8)
+    ; CHECK-NEXT: [[ANYEXT13:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s8)
+    ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[ANYEXT12]], [[ANYEXT13]]
+    ; CHECK-NEXT: [[ANYEXT14:%[0-9]+]]:_(s32) = G_ANYEXT [[UV14]](s8)
+    ; CHECK-NEXT: [[ANYEXT15:%[0-9]+]]:_(s32) = G_ANYEXT [[UV15]](s8)
+    ; CHECK-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[ANYEXT14]], [[ANYEXT15]]
+    ; CHECK-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
+    ; CHECK-NEXT: [[OR9:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
+    ; CHECK-NEXT: [[OR10:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
+    ; CHECK-NEXT: [[OR11:%[0-9]+]]:_(s32) = G_OR [[OR6]], [[OR7]]
+    ; CHECK-NEXT: [[OR12:%[0-9]+]]:_(s32) = G_OR [[OR8]], [[OR9]]
+    ; CHECK-NEXT: [[OR13:%[0-9]+]]:_(s32) = G_OR [[OR10]], [[OR11]]
+    ; CHECK-NEXT: [[OR14:%[0-9]+]]:_(s32) = G_OR [[OR12]], [[OR13]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[OR14]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<16 x s8>) = COPY $q0
     %0:_(<16 x s1>) = G_TRUNC %1(<16 x s8>)
     %2:_(s1) = G_VECREDUCE_OR %0(<16 x s1>)
@@ -199,11 +204,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v1i8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[BITCAST:%[0-9]+]]:_(s64) = G_BITCAST [[COPY]](<8 x s8>)
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[BITCAST]](s64)
-    ; CHECK: $w0 = COPY [[TRUNC]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s64) = G_BITCAST [[COPY]](<8 x s8>)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[BITCAST]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[TRUNC]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<8 x s8>) = COPY $d0
     %11:_(s64) = G_BITCAST %1(<8 x s8>)
     %0:_(s8) = G_TRUNC %11(s64)
@@ -227,13 +233,14 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v3i8
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[COPY2]]
-    ; CHECK: $w0 = COPY [[OR1]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[COPY2]]
+    ; CHECK-NEXT: $w0 = COPY [[OR1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(s32) = COPY $w0
     %2:_(s32) = COPY $w1
     %3:_(s32) = COPY $w2
@@ -257,17 +264,18 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v4i8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
-    ; CHECK: $w0 = COPY [[OR2]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR2]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<4 x s16>) = COPY $d0
     %0:_(<4 x s8>) = G_TRUNC %1(<4 x s16>)
     %2:_(s8) = G_VECREDUCE_OR %0(<4 x s8>)
@@ -288,25 +296,26 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v8i8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
-    ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
-    ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
-    ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
-    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
-    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
-    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
-    ; CHECK: $w0 = COPY [[OR6]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[COPY]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s8)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
+    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
+    ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
+    ; CHECK-NEXT: $w0 = COPY [[OR6]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<8 x s8>) = COPY $d0
     %1:_(s8) = G_VECREDUCE_OR %0(<8 x s8>)
     %2:_(s32) = G_ANYEXT %1(s8)
@@ -326,27 +335,28 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v16i8
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
-    ; CHECK: [[OR:%[0-9]+]]:_(<8 x s8>) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[OR]](<8 x s8>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
-    ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
-    ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
-    ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
-    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
-    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[OR2]]
-    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR3]], [[OR4]]
-    ; CHECK: [[OR7:%[0-9]+]]:_(s32) = G_OR [[OR5]], [[OR6]]
-    ; CHECK: $w0 = COPY [[OR7]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[COPY]](<16 x s8>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<8 x s8>) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[OR]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[OR2]]
+    ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR3]], [[OR4]]
+    ; CHECK-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[OR5]], [[OR6]]
+    ; CHECK-NEXT: $w0 = COPY [[OR7]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<16 x s8>) = COPY $q0
     %1:_(s8) = G_VECREDUCE_OR %0(<16 x s8>)
     %2:_(s32) = G_ANYEXT %1(s8)
@@ -367,29 +377,30 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v32i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
-    ; CHECK: [[OR:%[0-9]+]]:_(<16 x s8>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[OR]](<16 x s8>)
-    ; CHECK: [[OR1:%[0-9]+]]:_(<8 x s8>) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[OR1]](<8 x s8>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
-    ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
-    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
-    ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
-    ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
-    ; CHECK: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
-    ; CHECK: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
-    ; CHECK: [[OR7:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
-    ; CHECK: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR6]], [[OR7]]
-    ; CHECK: $w0 = COPY [[OR8]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<16 x s8>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<8 x s8>), [[UV1:%[0-9]+]]:_(<8 x s8>) = G_UNMERGE_VALUES [[OR]](<16 x s8>)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<8 x s8>) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[OR1]](<8 x s8>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s8)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s8)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s8)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s8)
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s8)
+    ; CHECK-NEXT: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s8)
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ANYEXT4]], [[ANYEXT5]]
+    ; CHECK-NEXT: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s8)
+    ; CHECK-NEXT: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s8)
+    ; CHECK-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[ANYEXT6]], [[ANYEXT7]]
+    ; CHECK-NEXT: [[OR6:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
+    ; CHECK-NEXT: [[OR7:%[0-9]+]]:_(s32) = G_OR [[OR4]], [[OR5]]
+    ; CHECK-NEXT: [[OR8:%[0-9]+]]:_(s32) = G_OR [[OR6]], [[OR7]]
+    ; CHECK-NEXT: $w0 = COPY [[OR8]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<16 x s8>) = COPY $q0
     %2:_(<16 x s8>) = COPY $q1
     %0:_(<32 x s8>) = G_CONCAT_VECTORS %1(<16 x s8>), %2(<16 x s8>)
@@ -411,17 +422,18 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v4i16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
-    ; CHECK: $w0 = COPY [[OR2]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR]], [[OR1]]
+    ; CHECK-NEXT: $w0 = COPY [[OR2]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<4 x s16>) = COPY $d0
     %1:_(s16) = G_VECREDUCE_OR %0(<4 x s16>)
     %2:_(s32) = G_ANYEXT %1(s16)
@@ -441,19 +453,20 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v8i16
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
-    ; CHECK: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR]](<4 x s16>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[OR2]]
-    ; CHECK: $w0 = COPY [[OR3]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[COPY]](<8 x s16>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s16>) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[OR2]]
+    ; CHECK-NEXT: $w0 = COPY [[OR3]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<8 x s16>) = COPY $q0
     %1:_(s16) = G_VECREDUCE_OR %0(<8 x s16>)
     %2:_(s32) = G_ANYEXT %1(s16)
@@ -474,21 +487,22 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v16i16
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
-    ; CHECK: [[OR:%[0-9]+]]:_(<8 x s16>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[OR]](<8 x s16>)
-    ; CHECK: [[OR1:%[0-9]+]]:_(<4 x s16>) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR1]](<4 x s16>)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
-    ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
-    ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
-    ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
-    ; CHECK: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
-    ; CHECK: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
-    ; CHECK: $w0 = COPY [[OR4]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s16>) = COPY $q1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<8 x s16>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s16>), [[UV1:%[0-9]+]]:_(<4 x s16>) = G_UNMERGE_VALUES [[OR]](<8 x s16>)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<4 x s16>) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR1]](<4 x s16>)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
+    ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ANYEXT]], [[ANYEXT1]]
+    ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV4]](s16)
+    ; CHECK-NEXT: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV5]](s16)
+    ; CHECK-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ANYEXT2]], [[ANYEXT3]]
+    ; CHECK-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[OR2]], [[OR3]]
+    ; CHECK-NEXT: $w0 = COPY [[OR4]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<8 x s16>) = COPY $q0
     %2:_(<8 x s16>) = COPY $q1
     %0:_(<16 x s16>) = G_CONCAT_VECTORS %1(<8 x s16>), %2(<8 x s16>)
@@ -510,12 +524,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v2i32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
-    ; CHECK: $w0 = COPY [[COPY1]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(s32) = G_VECREDUCE_OR %0(<2 x s32>)
     $w0 = COPY %1(s32)
@@ -534,14 +549,15 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v4i32
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
-    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](<2 x s32>)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV2]], [[UV3]]
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[OR1]](s32)
-    ; CHECK: $w0 = COPY [[COPY1]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](<2 x s32>)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV2]], [[UV3]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY [[OR1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_VECREDUCE_OR %0(<4 x s32>)
     $w0 = COPY %1(s32)
@@ -561,16 +577,17 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v8i32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[OR]](<4 x s32>)
-    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s32>) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR1]](<2 x s32>)
-    ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[UV2]], [[UV3]]
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
-    ; CHECK: $w0 = COPY [[COPY2]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[OR]](<4 x s32>)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<2 x s32>) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR1]](<2 x s32>)
+    ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[UV2]], [[UV3]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR2]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY2]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:_(<4 x s32>) = COPY $q0
     %2:_(<4 x s32>) = COPY $q1
     %0:_(<8 x s32>) = G_CONCAT_VECTORS %1(<4 x s32>), %2(<4 x s32>)
@@ -591,12 +608,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v2i64
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[OR]](s64)
-    ; CHECK: $x0 = COPY [[COPY1]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY [[OR]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(s64) = G_VECREDUCE_OR %0(<2 x s64>)
     $x0 = COPY %1(s64)
@@ -616,14 +634,15 @@ body:             |
 
     ; CHECK-LABEL: name: test_redor_v4i64
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[COPY]], [[COPY1]]
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[OR]](<2 x s64>)
-    ; CHECK: [[OR1:%[0-9]+]]:_(s64) = G_OR [[UV]], [[UV1]]
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[OR1]](s64)
-    ; CHECK: $x0 = COPY [[COPY2]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[OR]](<2 x s64>)
+    ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[UV]], [[UV1]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[OR1]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[COPY2]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %1:_(<2 x s64>) = COPY $q0
     %2:_(<2 x s64>) = COPY $q1
     %0:_(<4 x s64>) = G_CONCAT_VECTORS %1(<2 x s64>), %2(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sbfx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sbfx.mir
index 804d493f42a59..58cdbac017216 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sbfx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sbfx.mir
@@ -7,11 +7,13 @@ body: |
   bb.0.entry:
     liveins: $w0
     ; CHECK-LABEL: name: s32
-    ; CHECK: %copy:_(s32) = COPY $w0
-    ; CHECK: %lsb:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %width:_(s32) = G_CONSTANT i32 2
-    ; CHECK: %sbfx:_(s32) = G_SBFX %copy, %lsb(s32), %width
-    ; CHECK: $w0 = COPY %sbfx(s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s32) = COPY $w0
+    ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %width:_(s32) = G_CONSTANT i32 2
+    ; CHECK-NEXT: %sbfx:_(s32) = G_SBFX %copy, %lsb(s32), %width
+    ; CHECK-NEXT: $w0 = COPY %sbfx(s32)
     %copy:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 1
     %width:_(s32) = G_CONSTANT i32 2
@@ -24,11 +26,13 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: s64
-    ; CHECK: %copy:_(s64) = COPY $x0
-    ; CHECK: %lsb:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %width:_(s64) = G_CONSTANT i64 2
-    ; CHECK: %sbfx:_(s64) = G_SBFX %copy, %lsb(s64), %width
-    ; CHECK: $x0 = COPY %sbfx(s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
+    ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %width:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: %sbfx:_(s64) = G_SBFX %copy, %lsb(s64), %width
+    ; CHECK-NEXT: $x0 = COPY %sbfx(s64)
     %copy:_(s64) = COPY $x0
     %lsb:_(s64) = G_CONSTANT i64 1
     %width:_(s64) = G_CONSTANT i64 2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-copy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-copy.mir
index f4dae5b29ae6c..a7d8c946e49b3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-copy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-copy.mir
@@ -6,12 +6,14 @@ body: |
   bb.0.entry:
     liveins: $w0, $w1
     ; CHECK-LABEL: name: test_sext_copy
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: $w0 = COPY [[COPY]](s32)
-    ; CHECK: $w0 = COPY [[COPY]](s32)
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
-    ; CHECK: $x0 = COPY [[SEXT]](s64)
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[SEXT]](s64)
     %0:_(s32) = COPY $w1
     $w0 = COPY %0(s32)
     $w0 = COPY %0(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-zext-128.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-zext-128.mir
index a1a82221b84df..54614c13afb58 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-zext-128.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sext-zext-128.mir
@@ -9,13 +9,14 @@ body:             |
 
     ; CHECK-LABEL: name: narrow_sext_s128
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s64)
-    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[ASHR]](s64)
-    ; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[ASHR]](s64)
+    ; CHECK-NEXT: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(s64) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s128) = G_SEXT %0(s64)
@@ -32,12 +33,13 @@ body:             |
 
     ; CHECK-LABEL: name: narrow_zext_s128
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64)
-    ; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64)
+    ; CHECK-NEXT: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(s64) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s128) = G_ZEXT %0(s64)
@@ -54,14 +56,15 @@ body:             |
 
     ; CHECK-LABEL: name: narrow_zext_s128_from_s32
     ; CHECK: liveins: $w0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MV]](s64), [[C1]](s64)
-    ; CHECK: G_STORE [[MV1]](s128), [[COPY1]](p0) :: (store (s128))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MV]](s64), [[C1]](s64)
+    ; CHECK-NEXT: G_STORE [[MV1]](s128), [[COPY1]](p0) :: (store (s128))
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(s32) = COPY $w0
     %1:_(p0) = COPY $x1
     %2:_(s128) = G_ZEXT %0(s32)
@@ -78,13 +81,14 @@ body:             |
 
     ; CHECK-LABEL: name: narrow_zext_s192
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
-    ; CHECK: G_STORE [[C]](s64), [[COPY1]](p0) :: (store (s64))
-    ; CHECK: G_STORE [[C]](s64), [[COPY1]](p0) :: (store (s64))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
+    ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY1]](p0) :: (store (s64))
+    ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY1]](p0) :: (store (s64))
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(s64) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s192) = G_ZEXT %0(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir
index 630d998a5065c..1229da65e4e44 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir
@@ -6,9 +6,11 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: test_sextload
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
-    ; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
+    ; CHECK-NEXT: $w0 = COPY [[SEXTLOAD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_SEXTLOAD %0 :: (load (s8))
     $w0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ubfx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ubfx.mir
index f37a489f57e2b..51ae0b4867bca 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ubfx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ubfx.mir
@@ -7,11 +7,13 @@ body: |
   bb.0.entry:
     liveins: $w0
     ; CHECK-LABEL: name: s32
-    ; CHECK: %copy:_(s32) = COPY $w0
-    ; CHECK: %lsb:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %width:_(s32) = G_CONSTANT i32 2
-    ; CHECK: %ubfx:_(s32) = G_UBFX %copy, %lsb(s32), %width
-    ; CHECK: $w0 = COPY %ubfx(s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s32) = COPY $w0
+    ; CHECK-NEXT: %lsb:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %width:_(s32) = G_CONSTANT i32 2
+    ; CHECK-NEXT: %ubfx:_(s32) = G_UBFX %copy, %lsb(s32), %width
+    ; CHECK-NEXT: $w0 = COPY %ubfx(s32)
     %copy:_(s32) = COPY $w0
     %lsb:_(s32) = G_CONSTANT i32 1
     %width:_(s32) = G_CONSTANT i32 2
@@ -24,11 +26,13 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: s64
-    ; CHECK: %copy:_(s64) = COPY $x0
-    ; CHECK: %lsb:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %width:_(s64) = G_CONSTANT i64 2
-    ; CHECK: %ubfx:_(s64) = G_UBFX %copy, %lsb(s64), %width
-    ; CHECK: $x0 = COPY %ubfx(s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:_(s64) = COPY $x0
+    ; CHECK-NEXT: %lsb:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %width:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: %ubfx:_(s64) = G_UBFX %copy, %lsb(s64), %width
+    ; CHECK-NEXT: $x0 = COPY %ubfx(s64)
     %copy:_(s64) = COPY $x0
     %lsb:_(s64) = G_CONSTANT i64 1
     %width:_(s64) = G_CONSTANT i64 2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
index de41b63b8aed1..2609eb07b2629 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
@@ -8,7 +8,7 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def
     ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-    ; CHECK: $x0 = COPY [[DEF]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
     %0:_(s128) = G_IMPLICIT_DEF
     %1:_(s64) = G_TRUNC %0(s128)
     $x0 = COPY %1(s64)
@@ -22,8 +22,8 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def_s3
     ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[DEF]], 3
-    ; CHECK: $x0 = COPY [[SEXT_INREG]](s64)
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[DEF]], 3
+    ; CHECK-NEXT: $x0 = COPY [[SEXT_INREG]](s64)
     %0:_(s3) = G_IMPLICIT_DEF
     %1:_(s64) = G_SEXT %0
     $x0 = COPY %1(s64)
@@ -38,9 +38,9 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def_v4s32
     ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
-    ; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[DEF]](<4 x s32>)
-    ; CHECK: $x0 = COPY [[UV]](<2 x s32>)
-    ; CHECK: $x1 = COPY [[UV1]](<2 x s32>)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[DEF]](<4 x s32>)
+    ; CHECK-NEXT: $x0 = COPY [[UV]](<2 x s32>)
+    ; CHECK-NEXT: $x1 = COPY [[UV1]](<2 x s32>)
     %0:_(<4 x s32>) = G_IMPLICIT_DEF
     %1:_(<2 x s32> ), %2:_(<2 x s32>) = G_UNMERGE_VALUES %0
     $x0 = COPY %1
@@ -54,8 +54,8 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def_v4s64
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: $q0 = COPY [[DEF]](<2 x s64>)
-    ; CHECK: $q1 = COPY [[DEF]](<2 x s64>)
+    ; CHECK-NEXT: $q0 = COPY [[DEF]](<2 x s64>)
+    ; CHECK-NEXT: $q1 = COPY [[DEF]](<2 x s64>)
     %0:_(<4 x s64>) = G_IMPLICIT_DEF
     %1:_(<2 x s64> ), %2:_(<2 x s64>) = G_UNMERGE_VALUES %0
     $q0 = COPY %1
@@ -68,9 +68,9 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def_v2s32
     ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
-    ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
-    ; CHECK: $w0 = COPY [[UV]](s32)
-    ; CHECK: $w1 = COPY [[UV1]](s32)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<2 x s32>)
+    ; CHECK-NEXT: $w0 = COPY [[UV]](s32)
+    ; CHECK-NEXT: $w1 = COPY [[UV1]](s32)
     %0:_(<2 x s32>) = G_IMPLICIT_DEF
     %1:_(s32), %2:_(s32) = G_UNMERGE_VALUES %0
     $w0 = COPY %1
@@ -83,7 +83,7 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def_v16s8
     ; CHECK: [[DEF:%[0-9]+]]:_(<16 x s8>) = G_IMPLICIT_DEF
-    ; CHECK: $q0 = COPY [[DEF]](<16 x s8>)
+    ; CHECK-NEXT: $q0 = COPY [[DEF]](<16 x s8>)
     %0:_(<16 x s8>) = G_IMPLICIT_DEF
     $q0 = COPY %0
 ...
@@ -94,7 +94,7 @@ body: |
 
     ; CHECK-LABEL: name: test_implicit_def_v8s16
     ; CHECK: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
-    ; CHECK: $q0 = COPY [[DEF]](<8 x s16>)
+    ; CHECK-NEXT: $q0 = COPY [[DEF]](<8 x s16>)
     %0:_(<8 x s16>) = G_IMPLICIT_DEF
     $q0 = COPY %0
 ...
@@ -105,7 +105,7 @@ body: |
     liveins:
     ; CHECK-LABEL: name: test_implicit_def_s88
     ; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-    ; CHECK: $x0 = COPY [[DEF]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[DEF]](s64)
     %undef:_(s88) = G_IMPLICIT_DEF
     %trunc:_(s64) = G_TRUNC %undef
     $x0 = COPY %trunc(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
index 32abeedf2057f..622803fbeeff3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
@@ -33,20 +33,20 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: test_vaarg
     ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load (p0))
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
-    ; CHECK: G_STORE [[PTR_ADD]](p0), [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store (p0))
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (load (p0))
-    ; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD1]], [[C]](s64), debug-location !DILocation(line: 5, column: 1, scope: {{.*}})
-    ; CHECK: G_STORE [[PTR_ADD1]](p0), [[COPY]](p0), debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (store (p0))
-    ; CHECK: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load (p0))
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
-    ; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD2]], [[C1]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
-    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
-    ; CHECK: [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[PTR_ADD2]], [[C2]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
-    ; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTRMASK]], [[C]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
-    ; CHECK: G_STORE [[PTR_ADD3]](p0), [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store (p0))
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load (p0))
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s64),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
+    ; CHECK-NEXT: G_STORE [[PTR_ADD]](p0), [[COPY]](p0),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store (p0))
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0),  debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (load (p0))
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD1]], [[C]](s64),  debug-location !DILocation(line: 5, column: 1, scope: {{.*}})
+    ; CHECK-NEXT: G_STORE [[PTR_ADD1]](p0), [[COPY]](p0),  debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (store (p0))
+    ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load (p0))
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD2]], [[C1]](s64),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
+    ; CHECK-NEXT: [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[PTR_ADD2]], [[C2]](s64),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
+    ; CHECK-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTRMASK]], [[C]](s64),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
+    ; CHECK-NEXT: G_STORE [[PTR_ADD3]](p0), [[COPY]](p0),  debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store (p0))
     %0:_(p0) = COPY $x0
 
     %1:_(s8) = G_VAARG %0(p0), 1, debug-location !11

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
index 3076eecf0bb46..e6656371af263 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vacopy.mir
@@ -9,32 +9,37 @@ body: |
         liveins: $x0, $x1
 
     ; CHECK-DARWIN-LABEL: name: test_vaarg
-    ; CHECK-DARWIN: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK-DARWIN: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK-DARWIN: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load (s64))
-    ; CHECK-DARWIN: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store (s64))
-    ; CHECK-DARWIN: RET_ReallyLR
+    ; CHECK-DARWIN: liveins: $x0, $x1
+    ; CHECK-DARWIN-NEXT: {{  $}}
+    ; CHECK-DARWIN-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-DARWIN-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-DARWIN-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load (s64))
+    ; CHECK-DARWIN-NEXT: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store (s64))
+    ; CHECK-DARWIN-NEXT: RET_ReallyLR
+    ;
     ; CHECK-LINUX-LABEL: name: test_vaarg
-    ; CHECK-LINUX: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK-LINUX: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK-LINUX: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load (s64))
-    ; CHECK-LINUX: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-    ; CHECK-LINUX: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-    ; CHECK-LINUX: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
-    ; CHECK-LINUX: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK-LINUX: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
-    ; CHECK-LINUX: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from unknown-address + 16)
-    ; CHECK-LINUX: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CHECK-LINUX: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
-    ; CHECK-LINUX: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from unknown-address + 24)
-    ; CHECK-LINUX: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store (s64))
-    ; CHECK-LINUX: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK-LINUX: G_STORE [[LOAD1]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 8)
-    ; CHECK-LINUX: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK-LINUX: G_STORE [[LOAD2]](s64), [[PTR_ADD4]](p0) :: (store (s64) into unknown-address + 16)
-    ; CHECK-LINUX: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
-    ; CHECK-LINUX: G_STORE [[LOAD3]](s64), [[PTR_ADD5]](p0) :: (store (s64) into unknown-address + 24)
-    ; CHECK-LINUX: RET_ReallyLR
+    ; CHECK-LINUX: liveins: $x0, $x1
+    ; CHECK-LINUX-NEXT: {{  $}}
+    ; CHECK-LINUX-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-LINUX-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-LINUX-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load (s64))
+    ; CHECK-LINUX-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; CHECK-LINUX-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+    ; CHECK-LINUX-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 8)
+    ; CHECK-LINUX-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-LINUX-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
+    ; CHECK-LINUX-NEXT: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64) from unknown-address + 16)
+    ; CHECK-LINUX-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; CHECK-LINUX-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
+    ; CHECK-LINUX-NEXT: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD2]](p0) :: (load (s64) from unknown-address + 24)
+    ; CHECK-LINUX-NEXT: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store (s64))
+    ; CHECK-LINUX-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-LINUX-NEXT: G_STORE [[LOAD1]](s64), [[PTR_ADD3]](p0) :: (store (s64) into unknown-address + 8)
+    ; CHECK-LINUX-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-LINUX-NEXT: G_STORE [[LOAD2]](s64), [[PTR_ADD4]](p0) :: (store (s64) into unknown-address + 16)
+    ; CHECK-LINUX-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
+    ; CHECK-LINUX-NEXT: G_STORE [[LOAD3]](s64), [[PTR_ADD5]](p0) :: (store (s64) into unknown-address + 24)
+    ; CHECK-LINUX-NEXT: RET_ReallyLR
         %0:_(p0) = COPY $x0
         %1:_(p0) = COPY $x1
         G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), %0(p0), %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir
index 8626a0b2f87ef..2f620fe3e2378 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-vector-shift.mir
@@ -7,11 +7,13 @@ body:             |
     liveins: $q0, $q1
 
     ; CHECK-LABEL: name: lshr_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[COPY1]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[LSHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_LSHR %0, %1(<4 x s32>)
@@ -26,11 +28,13 @@ body:             |
     liveins: $q0, $q1
 
     ; CHECK-LABEL: name: lshr_v2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<2 x s64>) = G_LSHR [[COPY]], [[COPY1]](<2 x s64>)
-    ; CHECK: $q0 = COPY [[LSHR]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<2 x s64>) = G_LSHR [[COPY]], [[COPY1]](<2 x s64>)
+    ; CHECK-NEXT: $q0 = COPY [[LSHR]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<2 x s64>) = G_LSHR %0, %1(<2 x s64>)
@@ -45,11 +49,13 @@ body:             |
     liveins: $q0, $q1
 
     ; CHECK-LABEL: name: ashr_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[ASHR:%[0-9]+]]:_(<4 x s32>) = G_ASHR [[COPY]], [[COPY1]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[ASHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<4 x s32>) = G_ASHR [[COPY]], [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[ASHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_ASHR %0, %1(<4 x s32>)
@@ -64,11 +70,13 @@ body:             |
     liveins: $q0, $q1
 
     ; CHECK-LABEL: name: ashr_v2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[COPY]], [[COPY1]](<2 x s64>)
-    ; CHECK: $q0 = COPY [[ASHR]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[COPY]], [[COPY1]](<2 x s64>)
+    ; CHECK-NEXT: $q0 = COPY [[ASHR]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<2 x s64>) = G_ASHR %0, %1(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
index 12635621e1c09..9c528623eca23 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
@@ -230,15 +230,15 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s8) = COPY $b0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s8) = COPY $b1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s8) = COPY $b2
-    ; CHECK-NEXT: [[ANYEXT0:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
     ; CHECK-NEXT: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY1]](s8)
     ; CHECK-NEXT: [[ANYEXT2:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY2]](s8)
-    ; CHECK-NEXT: [[IMPLICIT_DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT0]](s16), [[ANYEXT1]](s16), [[ANYEXT2]](s16), [[IMPLICIT_DEF]](s16)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[ANYEXT]](s16), [[ANYEXT1]](s16), [[ANYEXT2]](s16), [[DEF]](s16)
     ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s16>) = G_XOR [[BUILD_VECTOR]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[VAL0:%[0-9]+]]:_(s16), [[VAL1:%[0-9]+]]:_(s16), [[VAL2:%[0-9]+]]:_(s16), [[VAL3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[XOR]](<4 x s16>)
-    ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[VAL0]](s16)
-    ; CHECK-NEXT: $b0 = COPY [[TRUNC3]](s8)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[XOR]](<4 x s16>)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV]](s16)
+    ; CHECK-NEXT: $b0 = COPY [[TRUNC]](s8)
     ; CHECK-NEXT: RET_ReallyLR implicit $b0
     %1:_(s8) = COPY $b0
     %2:_(s8) = COPY $b1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir
index 6930da61bceeb..c039e537e91bc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir
@@ -6,9 +6,11 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: test_zextload
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
-    ; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
+    ; CHECK-NEXT: $w0 = COPY [[ZEXTLOAD]](s32)
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_ZEXTLOAD %0 :: (load (s8))
     $w0 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner.mir
index efc7e7ac6deae..8c4988fa59545 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner.mir
@@ -7,8 +7,10 @@ body:             |
   bb.1:
     liveins: $w0
     ; CHECK-LABEL: name: test_unmerge
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: $w0 = COPY [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: $w0 = COPY [[COPY]](s32)
     %0:_(s32) = COPY $w0
     %1:_(<4 x s32>) = G_BUILD_VECTOR %0(s32), %0(s32), %0(s32), %0(s32)
     %2:_(s32), %3:_(s32), %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<4 x s32>)
@@ -21,10 +23,12 @@ body:             |
   bb.1:
     liveins: $w0
     ; CHECK-LABEL: name: test_legal_const_ext
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C]]
-    ; CHECK: $w0 = COPY [[ADD]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[ADD]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s1) = G_TRUNC %0(s32)
     %2:_(s1) = G_CONSTANT i1 2
@@ -45,12 +49,13 @@ body:             |
 
     ; CHECK-LABEL: name: concat_vectors_unmerge_to_bitcast
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[COPY]](<2 x s64>)
-    ; CHECK: [[BITCAST1:%[0-9]+]]:_(s128) = G_BITCAST [[COPY1]](<2 x s64>)
-    ; CHECK: $q0 = COPY [[BITCAST]](s128)
-    ; CHECK: $q1 = COPY [[BITCAST1]](s128)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s128) = G_BITCAST [[COPY1]](<2 x s64>)
+    ; CHECK-NEXT: $q0 = COPY [[BITCAST]](s128)
+    ; CHECK-NEXT: $q1 = COPY [[BITCAST1]](s128)
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
@@ -72,14 +77,15 @@ body:             |
 
     ; CHECK-LABEL: name: concat_vectors_unmerge_to_unmerge
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
-    ; CHECK: $x0 = COPY [[UV]](s64)
-    ; CHECK: $x1 = COPY [[UV1]](s64)
-    ; CHECK: $x2 = COPY [[UV2]](s64)
-    ; CHECK: $x3 = COPY [[UV3]](s64)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
+    ; CHECK-NEXT: $x0 = COPY [[UV]](s64)
+    ; CHECK-NEXT: $x1 = COPY [[UV1]](s64)
+    ; CHECK-NEXT: $x2 = COPY [[UV2]](s64)
+    ; CHECK-NEXT: $x3 = COPY [[UV3]](s64)
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = COPY $q1
     %2:_(<4 x s64>) = G_CONCAT_VECTORS %0(<2 x s64>), %1(<2 x s64>)
@@ -97,20 +103,21 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: unmerge_merge_combine
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128))
-    ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
-    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV]]
-    ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[UV]]
-    ; CHECK: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV1]]
-    ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[UV]]
-    ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
-    ; CHECK: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
-    ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MUL]](s64), [[ADD1]](s64)
-    ; CHECK: $q0 = COPY [[MV]](s128)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128))
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV]]
+    ; CHECK-NEXT: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[UV]]
+    ; CHECK-NEXT: [[MUL2:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV1]]
+    ; CHECK-NEXT: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[UV]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[UMULH]]
+    ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MUL]](s64), [[ADD1]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[MV]](s128)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(p0) = COPY $x0
     %1:_(s128) = G_ZEXTLOAD %0:_(p0) :: (load (s64))
     %2:_(s128) = G_LOAD %0:_(p0) :: (load (s128))

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/lifetime-marker-no-dce.mir b/llvm/test/CodeGen/AArch64/GlobalISel/lifetime-marker-no-dce.mir
index 16f2d70cd604b..2dd968561e5e8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/lifetime-marker-no-dce.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/lifetime-marker-no-dce.mir
@@ -15,8 +15,8 @@ body:             |
     ;%0:_(p0) = G_FRAME_INDEX %stack.0.slot
     ; CHECK-LABEL: name: test_lifetime_no_dce
     ; CHECK: LIFETIME_START %stack.0
-    ; CHECK: LIFETIME_END %stack.0
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: LIFETIME_END %stack.0
+    ; CHECK-NEXT: RET_ReallyLR
     LIFETIME_START %stack.0
     LIFETIME_END %stack.0
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
index 6a4b1248e7a4c..5bc7c3e6fe7fb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-wro-addressing-modes.mir
@@ -20,11 +20,12 @@ body:             |
 
     ; CHECK-LABEL: name: shl_gep_sext_ldrwrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -54,11 +55,12 @@ body:             |
 
     ; CHECK-LABEL: name: shl_gep_zext_ldrwrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_ZEXT %foo(s32)
@@ -88,11 +90,12 @@ body:             |
 
     ; CHECK-LABEL: name: shl_gep_anyext_ldrwrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_ANYEXT %foo(s32)
@@ -120,11 +123,12 @@ body:             |
     liveins: $w1, $x0
     ; CHECK-LABEL: name: mul_gep_sext_ldrwrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -152,11 +156,12 @@ body:             |
 
     ; CHECK-LABEL: name: mul_gep_zext_ldrwrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_ZEXT %foo(s32)
@@ -184,11 +189,12 @@ body:             |
 
     ; CHECK-LABEL: name: mul_gep_anyext_ldrwrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_ANYEXT %foo(s32)
@@ -216,11 +222,12 @@ body:             |
 
     ; CHECK-LABEL: name: ldrdrow
     ; CHECK: liveins: $w1, $x0, $d0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:fpr64 = LDRDroW %base, %foo, 1, 1 :: (load (<2 x s32>))
-    ; CHECK: $x0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:fpr64 = LDRDroW %base, %foo, 1, 1 :: (load (<2 x s32>))
+    ; CHECK-NEXT: $x0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -248,11 +255,12 @@ body:             |
 
     ; CHECK-LABEL: name: ldrxrow
     ; CHECK: liveins: $w1, $x0, $d0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:gpr64 = LDRXroW %base, %foo, 1, 1 :: (load (s64))
-    ; CHECK: $x0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:gpr64 = LDRXroW %base, %foo, 1, 1 :: (load (s64))
+    ; CHECK-NEXT: $x0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -281,11 +289,12 @@ body:             |
 
     ; CHECK-LABEL: name: ldrbbrow
     ; CHECK: liveins: $x0, $w0, $w1
-    ; CHECK: %val:gpr32 = COPY $w1
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %load:gpr32 = LDRBBroW %base, %val, 1, 0 :: (load (s8))
-    ; CHECK: $w0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %val:gpr32 = COPY $w1
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %load:gpr32 = LDRBBroW %base, %val, 1, 0 :: (load (s8))
+    ; CHECK-NEXT: $w0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %val:gpr(s32) = COPY $w1
     %base:gpr(p0) = COPY $x0
     %ext:gpr(s64) = G_SEXT %val(s32)
@@ -311,11 +320,12 @@ body:             |
 
     ; CHECK-LABEL: name: ldrhrow
     ; CHECK: liveins: $w1, $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %load:fpr16 = LDRHroW %base, %foo, 1, 1 :: (load (s16))
-    ; CHECK: $h0 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %load:fpr16 = LDRHroW %base, %foo, 1, 1 :: (load (s16))
+    ; CHECK-NEXT: $h0 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -343,12 +353,13 @@ body:             |
 
     ; CHECK-LABEL: name: bad_and_mask_1
     ; CHECK: liveins: $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %imp:gpr64 = IMPLICIT_DEF
-    ; CHECK: %and:gpr64common = ANDXri %imp, 4103
-    ; CHECK: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load (s64))
-    ; CHECK: $x1 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %imp:gpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: %and:gpr64common = ANDXri %imp, 4103
+    ; CHECK-NEXT: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load (s64))
+    ; CHECK-NEXT: $x1 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %base:gpr(p0) = COPY $x0
     %imp:gpr(s64) = G_IMPLICIT_DEF
     %bad_mask:gpr(s64) = G_CONSTANT i64 255
@@ -377,12 +388,13 @@ body:             |
 
     ; CHECK-LABEL: name: bad_and_mask_2
     ; CHECK: liveins: $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %imp:gpr64 = IMPLICIT_DEF
-    ; CHECK: %and:gpr64common = ANDXri %imp, 4111
-    ; CHECK: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load (s64))
-    ; CHECK: $x1 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %imp:gpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: %and:gpr64common = ANDXri %imp, 4111
+    ; CHECK-NEXT: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load (s64))
+    ; CHECK-NEXT: $x1 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %base:gpr(p0) = COPY $x0
     %imp:gpr(s64) = G_IMPLICIT_DEF
     %bad_mask:gpr(s64) = G_CONSTANT i64 65535
@@ -410,13 +422,14 @@ body:             |
 
     ; CHECK-LABEL: name: and_uxtw
     ; CHECK: liveins: $x0
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %imp:gpr64 = IMPLICIT_DEF
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %imp.sub_32
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: %load:gpr64 = LDRXroW %base, [[COPY1]], 0, 1 :: (load (s64))
-    ; CHECK: $x1 = COPY %load
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %imp:gpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %imp.sub_32
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: %load:gpr64 = LDRXroW %base, [[COPY1]], 0, 1 :: (load (s64))
+    ; CHECK-NEXT: $x1 = COPY %load
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %base:gpr(p0) = COPY $x0
     %imp:gpr(s64) = G_IMPLICIT_DEF
     %mask:gpr(s64) = G_CONSTANT i64 4294967295
@@ -445,12 +458,13 @@ body:             |
 
     ; CHECK-LABEL: name: zext_shl_LDRWroW
     ; CHECK: liveins: $w0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32common = ANDWri [[COPY]], 7
-    ; CHECK: [[LDRWroW:%[0-9]+]]:gpr32 = LDRWroW [[COPY1]], [[ANDWri]], 0, 1 :: (load (s32))
-    ; CHECK: $w0 = COPY [[LDRWroW]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[ANDWri:%[0-9]+]]:gpr32common = ANDWri [[COPY]], 7
+    ; CHECK-NEXT: [[LDRWroW:%[0-9]+]]:gpr32 = LDRWroW [[COPY1]], [[ANDWri]], 0, 1 :: (load (s32))
+    ; CHECK-NEXT: $w0 = COPY [[LDRWroW]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(p0) = COPY $x1
     %2:gpr(s32) = G_CONSTANT i32 255

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-propagate-debug-loc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-propagate-debug-loc.mir
index c04d0e32ae27b..903bf27cef105 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-propagate-debug-loc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-propagate-debug-loc.mir
@@ -71,18 +71,73 @@ stack:
       stack-id: default, callee-saved-register: '', callee-saved-restored: true,
       debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
 body:             |
-  ; CHECK:   [[ADRP3:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @A, debug-location !10
-  ; CHECK-NEXT:   [[ADD_LOW3:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP3]](p0), target-flags(aarch64-pageoff, aarch64-nc) @A, debug-location !10
-  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[ADD_LOW3]](p0), debug-location !10 :: (dereferenceable load (s32))
-
-  ; CHECK:   [[ADRP4:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @B, debug-location !DILocation(line: 0, scope: !14)
-  ; CHECK-NEXT:   [[ADD_LOW4:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP4]](p0), target-flags(aarch64-pageoff, aarch64-nc) @B, debug-location !DILocation(line: 0, scope: !14)
-  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:gpr(s32) = G_LOAD [[ADD_LOW4]](p0), debug-location !13 :: (dereferenceable load (s32))
-
-  ; CHECK:   [[ADRP5:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @C, debug-location !17
-  ; CHECK-NEXT:   [[ADD_LOW5:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP5]](p0), target-flags(aarch64-pageoff, aarch64-nc) @C, debug-location !17
-  ; CHECK-NEXT:   [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3, debug-location !17
-  ; CHECK-NEXT:   G_STORE [[C5]](s32), [[ADD_LOW5]](p0), debug-location !17 :: (store (s32) into @C)
+  ; CHECK-LABEL: name: foo
+  ; CHECK: bb.0:
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
+  ; CHECK-NEXT:   [[ADRP:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @C,  debug-location !DILocation(line: 0, scope: !18)
+  ; CHECK-NEXT:   [[ADD_LOW:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @C,  debug-location !DILocation(line: 0, scope: !18)
+  ; CHECK-NEXT:   [[ADRP1:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @B,  debug-location !DILocation(line: 0, scope: !14)
+  ; CHECK-NEXT:   [[ADD_LOW1:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP1]](p0), target-flags(aarch64-pageoff, aarch64-nc) @B,  debug-location !DILocation(line: 0, scope: !14)
+  ; CHECK-NEXT:   [[ADRP2:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @A,  debug-location !DILocation(line: 0, scope: !11)
+  ; CHECK-NEXT:   [[ADD_LOW2:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP2]](p0), target-flags(aarch64-pageoff, aarch64-nc) @A,  debug-location !DILocation(line: 0, scope: !11)
+  ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY [[C1]](s32)
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[XOR:%[0-9]+]]:gpr(s32) = G_XOR [[COPY]], [[C2]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[XOR]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.2
+  ; CHECK-NEXT:   G_BR %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.5(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADRP3:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @A,  debug-location !10
+  ; CHECK-NEXT:   [[ADD_LOW3:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP3]](p0), target-flags(aarch64-pageoff, aarch64-nc) @A,  debug-location !10
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[ADD_LOW3]](p0),  debug-location !10 :: (dereferenceable load (s32))
+  ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.0
+  ; CHECK-NEXT:   G_STORE [[LOAD]](s32), [[FRAME_INDEX1]](p0) :: (volatile store (s32) into %ir.1)
+  ; CHECK-NEXT:   G_BR %bb.5
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.4(0x40000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[XOR1:%[0-9]+]]:gpr(s32) = G_XOR [[C3]], [[C4]]
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:gpr(s1) = G_TRUNC [[XOR1]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[TRUNC1]](s1), %bb.4
+  ; CHECK-NEXT:   G_BR %bb.3
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3:
+  ; CHECK-NEXT:   successors: %bb.5(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADRP4:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @B,  debug-location !DILocation(line: 0, scope: !14)
+  ; CHECK-NEXT:   [[ADD_LOW4:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP4]](p0), target-flags(aarch64-pageoff, aarch64-nc) @B,  debug-location !DILocation(line: 0, scope: !14)
+  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:gpr(s32) = G_LOAD [[ADD_LOW4]](p0),  debug-location !13 :: (dereferenceable load (s32))
+  ; CHECK-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %stack.0
+  ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[FRAME_INDEX2]](p0) :: (volatile store (s32) into %ir.1)
+  ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:gpr(s32) = G_LOAD [[ADD_LOW4]](p0),  debug-location !16 :: (dereferenceable load (s32))
+  ; CHECK-NEXT:   G_STORE [[LOAD2]](s32), [[FRAME_INDEX2]](p0) :: (volatile store (s32) into %ir.1)
+  ; CHECK-NEXT:   G_BR %bb.5
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.4:
+  ; CHECK-NEXT:   successors: %bb.5(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADRP5:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @C,  debug-location !17
+  ; CHECK-NEXT:   [[ADD_LOW5:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP5]](p0), target-flags(aarch64-pageoff, aarch64-nc) @C,  debug-location !17
+  ; CHECK-NEXT:   [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3,  debug-location !17
+  ; CHECK-NEXT:   G_STORE [[C5]](s32), [[ADD_LOW5]](p0),  debug-location !17 :: (store (s32) into @C)
+  ; CHECK-NEXT:   G_BR %bb.5
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.5:
+  ; CHECK-NEXT:   [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   $w0 = COPY [[C6]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+
+
   bb.1:
     successors: %bb.2(0x40000000), %bb.3(0x40000000)
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir b/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
index c894e87760e12..a110956ade319 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/localizer.mir
@@ -69,7 +69,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: local_use
     ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
     %0:gpr(s32) = G_CONSTANT i32 1
     %1:gpr(s32) = G_ADD %0, %0
 ...
@@ -81,12 +81,14 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: non_local_1use
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-  ; CHECK: bb.1:
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[ADD]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[ADD]]
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -108,12 +110,14 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: non_local_2uses
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-  ; CHECK: bb.1:
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[C1]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[C1]]
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -136,15 +140,19 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_phi_use
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:gpr(s32) = PHI [[C1]](s32), %bb.1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr(s32) = PHI [[C1]](s32), %bb.1
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]]
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -171,16 +179,20 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_phi_use_followed_by_use
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:gpr(s32) = PHI [[C1]](s32), %bb.1
-  ; CHECK:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[C2]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr(s32) = PHI [[C1]](s32), %bb.1
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[C2]]
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -207,16 +219,20 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_phi_use_followed_by_use_fi
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[FRAME_INDEX:%[0-9]+]]:gpr(s32) = G_FRAME_INDEX 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[FRAME_INDEX]], [[FRAME_INDEX]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[FRAME_INDEX1:%[0-9]+]]:gpr(s32) = G_FRAME_INDEX 1
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:gpr(s32) = PHI [[FRAME_INDEX1]](s32), %bb.1
-  ; CHECK:   [[FRAME_INDEX2:%[0-9]+]]:gpr(s32) = G_FRAME_INDEX 1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[FRAME_INDEX2]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:gpr(s32) = G_FRAME_INDEX 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[FRAME_INDEX]], [[FRAME_INDEX]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:gpr(s32) = G_FRAME_INDEX 1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr(s32) = PHI [[FRAME_INDEX1]](s32), %bb.1
+  ; CHECK-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:gpr(s32) = G_FRAME_INDEX 1
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[FRAME_INDEX2]]
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -243,16 +259,20 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: float_non_local_phi_use_followed_by_use_fi
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK:   [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[C]], [[C]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[C1:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:fpr(s32) = PHI [[C1]](s32), %bb.1
-  ; CHECK:   [[C2:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK:   [[FADD1:%[0-9]+]]:fpr(s32) = G_FADD [[PHI]], [[C2]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT:   [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[C]], [[C]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:fpr(s32) = PHI [[C1]](s32), %bb.1
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT:   [[FADD1:%[0-9]+]]:fpr(s32) = G_FADD [[PHI]], [[C2]]
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -281,15 +301,18 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_phi
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK:   [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[C]], [[C]]
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[PHI:%[0-9]+]]:fpr(s32) = PHI [[FADD]](s32), %bb.0, %4(s32), %bb.1
-  ; CHECK:   [[FADD1:%[0-9]+]]:fpr(s32) = G_FADD [[PHI]], [[FADD]]
-  ; CHECK:   [[C1:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK:   G_BR %bb.1
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT:   [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[C]], [[C]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:fpr(s32) = PHI [[FADD]](s32), %bb.0, %4(s32), %bb.1
+  ; CHECK-NEXT:   [[FADD1:%[0-9]+]]:fpr(s32) = G_FADD [[PHI]], [[FADD]]
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT:   G_BR %bb.1
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -317,16 +340,19 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_label
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $s0
-  ; CHECK:   [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-  ; CHECK:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   EH_LABEL 1
-  ; CHECK:   [[C1:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
-  ; CHECK:   [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[COPY]], [[C1]]
-  ; CHECK:   G_BR %bb.1
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $s0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   EH_LABEL 1
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
+  ; CHECK-NEXT:   [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[COPY]], [[C1]]
+  ; CHECK-NEXT:   G_BR %bb.1
 
   ; Existing registers should be left untouched
   ; The newly created reg should be on the same regbank/regclass as its origin.
@@ -353,34 +379,38 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: intrablock_with_globalvalue
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
-  ; CHECK:   [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
-  ; CHECK:   [[GV1:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
-  ; CHECK:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
-  ; CHECK:   [[GV2:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
-  ; CHECK:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (load (s32) from @var1)
-  ; CHECK:   [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
-  ; CHECK:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
-  ; CHECK:   G_BR %bb.2
-  ; CHECK: bb.1.if.then:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[GV3:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
-  ; CHECK:   [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
-  ; CHECK:   G_STORE [[C4]](s32), [[GV3]](p0) :: (store (s32) into @var2)
-  ; CHECK:   [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
-  ; CHECK:   [[GV4:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
-  ; CHECK:   G_STORE [[C5]](s32), [[GV4]](p0) :: (store (s32) into @var1)
-  ; CHECK:   [[GV5:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
-  ; CHECK:   G_STORE [[C4]](s32), [[GV5]](p0) :: (store (s32) into @var3)
-  ; CHECK:   G_STORE [[C5]](s32), [[GV4]](p0) :: (store (s32) into @var1)
-  ; CHECK: bb.2.if.end:
-  ; CHECK:   [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
-  ; CHECK:   $w0 = COPY [[C6]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
+  ; CHECK-NEXT:   [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
+  ; CHECK-NEXT:   [[GV1:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[GV2:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (load (s32) from @var1)
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   G_BR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.then:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[GV3:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
+  ; CHECK-NEXT:   [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
+  ; CHECK-NEXT:   G_STORE [[C4]](s32), [[GV3]](p0) :: (store (s32) into @var2)
+  ; CHECK-NEXT:   [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
+  ; CHECK-NEXT:   [[GV4:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
+  ; CHECK-NEXT:   G_STORE [[C5]](s32), [[GV4]](p0) :: (store (s32) into @var1)
+  ; CHECK-NEXT:   [[GV5:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
+  ; CHECK-NEXT:   G_STORE [[C4]](s32), [[GV5]](p0) :: (store (s32) into @var3)
+  ; CHECK-NEXT:   G_STORE [[C5]](s32), [[GV4]](p0) :: (store (s32) into @var1)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.end:
+  ; CHECK-NEXT:   [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   $w0 = COPY [[C6]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
 
   ; Some of these instructions are dead. We're checking that the other instructions are
   ; sunk immediately before their first user in the if.then block or as close as possible.
@@ -417,40 +447,44 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: adrp_add
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   [[ADRP:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var1
-  ; CHECK:   %addlow1:gpr(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var1
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
-  ; CHECK:   [[ADRP1:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var2
-  ; CHECK:   %addlow2:gpr(p0) = G_ADD_LOW [[ADRP1]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var2
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
-  ; CHECK:   [[ADRP2:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
-  ; CHECK:   %addlow3:gpr(p0) = G_ADD_LOW [[ADRP2]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
-  ; CHECK:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
-  ; CHECK:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[ADRP]](p0) :: (load (s32) from @var1)
-  ; CHECK:   [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
-  ; CHECK:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
-  ; CHECK:   G_BR %bb.2
-  ; CHECK: bb.1.if.then:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[ADRP3:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var2
-  ; CHECK:   [[ADD_LOW:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP3]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var2
-  ; CHECK:   [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
-  ; CHECK:   G_STORE [[C4]](s32), [[ADD_LOW]](p0) :: (store (s32) into @var2)
-  ; CHECK:   [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
-  ; CHECK:   [[ADRP4:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var1
-  ; CHECK:   [[ADD_LOW1:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP4]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var1
-  ; CHECK:   G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store (s32) into @var1)
-  ; CHECK:   [[ADRP5:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
-  ; CHECK:   [[ADD_LOW2:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP5]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
-  ; CHECK:   G_STORE [[C4]](s32), [[ADD_LOW2]](p0) :: (store (s32) into @var3)
-  ; CHECK:   G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store (s32) into @var1)
-  ; CHECK: bb.2.if.end:
-  ; CHECK:   [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
-  ; CHECK:   $w0 = COPY [[C6]](s32)
-  ; CHECK:   RET_ReallyLR implicit $w0
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADRP:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var1
+  ; CHECK-NEXT:   %addlow1:gpr(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
+  ; CHECK-NEXT:   [[ADRP1:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var2
+  ; CHECK-NEXT:   %addlow2:gpr(p0) = G_ADD_LOW [[ADRP1]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var2
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
+  ; CHECK-NEXT:   [[ADRP2:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
+  ; CHECK-NEXT:   %addlow3:gpr(p0) = G_ADD_LOW [[ADRP2]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[ADRP]](p0) :: (load (s32) from @var1)
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   G_BR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.then:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADRP3:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var2
+  ; CHECK-NEXT:   [[ADD_LOW:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP3]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var2
+  ; CHECK-NEXT:   [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
+  ; CHECK-NEXT:   G_STORE [[C4]](s32), [[ADD_LOW]](p0) :: (store (s32) into @var2)
+  ; CHECK-NEXT:   [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
+  ; CHECK-NEXT:   [[ADRP4:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var1
+  ; CHECK-NEXT:   [[ADD_LOW1:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP4]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var1
+  ; CHECK-NEXT:   G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store (s32) into @var1)
+  ; CHECK-NEXT:   [[ADRP5:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
+  ; CHECK-NEXT:   [[ADD_LOW2:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP5]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
+  ; CHECK-NEXT:   G_STORE [[C4]](s32), [[ADD_LOW2]](p0) :: (store (s32) into @var3)
+  ; CHECK-NEXT:   G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store (s32) into @var1)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.end:
+  ; CHECK-NEXT:   [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   $w0 = COPY [[C6]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
 
   ; Some of these instructions are dead.
   bb.1.entry:
@@ -490,31 +524,34 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: test_inttoptr
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: $w0, $x1
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
-  ; CHECK:   [[INTTOPTR:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C]](s64)
-  ; CHECK:   [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 128
-  ; CHECK:   [[INTTOPTR1:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C1]](s64)
-  ; CHECK:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
-  ; CHECK:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C2]]
-  ; CHECK:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
-  ; CHECK:   G_BR %bb.2
-  ; CHECK: bb.1:
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
-  ; CHECK:   G_STORE [[ADD]](s32), [[COPY1]](p0) :: (store (s32)
-  ; CHECK:   [[C3:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 128
-  ; CHECK:   [[INTTOPTR2:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C3]](s64)
-  ; CHECK:   $x0 = COPY [[INTTOPTR2]](p0)
-  ; CHECK:   RET_ReallyLR implicit $x0
-  ; CHECK: bb.2:
-  ; CHECK:   [[C4:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
-  ; CHECK:   [[INTTOPTR3:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C4]](s64)
-  ; CHECK:   $x0 = COPY [[INTTOPTR3]](p0)
-  ; CHECK:   RET_ReallyLR implicit $x0
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0, $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[INTTOPTR:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C]](s64)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 128
+  ; CHECK-NEXT:   [[INTTOPTR1:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C1]](s64)
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C2]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   G_BR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
+  ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[COPY1]](p0) :: (store (s32))
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 128
+  ; CHECK-NEXT:   [[INTTOPTR2:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C3]](s64)
+  ; CHECK-NEXT:   $x0 = COPY [[INTTOPTR2]](p0)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[C4:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[INTTOPTR3:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C4]](s64)
+  ; CHECK-NEXT:   $x0 = COPY [[INTTOPTR3]](p0)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
   bb.1:
     liveins: $w0, $x1
 
@@ -550,13 +587,13 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: many_local_use_intra_block
     ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-    ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-    ; CHECK: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-    ; CHECK: [[ADD2:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-    ; CHECK: [[ADD3:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-    ; CHECK: [[ADD4:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-    ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
-    ; CHECK: [[ADD5:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[C1]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
+    ; CHECK-NEXT: [[ADD5:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[C1]]
     %0:gpr(s32) = G_CONSTANT i32 1
     %1:gpr(s32) = G_CONSTANT i32 2
     %2:gpr(s32) = G_ADD %0, %0
@@ -575,19 +612,23 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_phi_single_use
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-  ; CHECK:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[ADD]](s32), [[C]]
-  ; CHECK:   %cond:gpr(s1) = G_TRUNC %cmp(s32)
-  ; CHECK:   G_BRCOND %cond(s1), %bb.1
-  ; CHECK:   G_BR %bb.2
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[C2]](s32), %bb.1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]]
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+  ; CHECK-NEXT:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[ADD]](s32), [[C]]
+  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cmp(s32)
+  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[C1]](s32), %bb.1, [[C]](s32), %bb.0
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]]
 
   ; Localize the 1 into bb.1, since the number of uses is under the threshold.
 
@@ -616,18 +657,21 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: non_local_phi_three_uses
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
-  ; CHECK:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
-  ; CHECK:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[ADD]](s32), [[C]]
-  ; CHECK:   %cond:gpr(s1) = G_TRUNC %cmp(s32)
-  ; CHECK:   G_BRCOND %cond(s1), %bb.1
-  ; CHECK:   G_BR %bb.2
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[C]](s32), %bb.1
-  ; CHECK:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]]
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]]
+  ; CHECK-NEXT:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[ADD]](s32), [[C]]
+  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cmp(s32)
+  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[C]](s32), %bb.1, [[C]](s32), %bb.0, [[C]](s32), %bb.0, [[C]](s32), %bb.0
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]]
 
   ; Don't localize the 1 into bb.1, above the thresold of uses in the phi.
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/no-regclass.mir b/llvm/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
index 2c546ac27d3b4..dfce266f65fd6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
@@ -22,8 +22,9 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: unused_reg
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
-    ; CHECK: $w0 = COPY [[COPY]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK-NEXT: $w0 = COPY [[COPY]]
     %0:gpr(s32) = COPY $w0
     %1:gpr(s64) = G_MERGE_VALUES %0(s32), %0(s32)
     %2:gpr(s32), %3:gpr(s32) = G_UNMERGE_VALUES %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir b/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
index 7d03da69ab942..5bc845fb35037 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/non-pow-2-extload-combine.mir
@@ -22,11 +22,12 @@ body:             |
 
     ; CHECK-LABEL: name: ld_zext_i24
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[COPY]](p0) :: (load (s24) from %ir.ptr, align 1)
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
-    ; CHECK: $w0 = COPY [[ZEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[COPY]](p0) :: (load (s24) from %ir.ptr, align 1)
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
+    ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(p0) = COPY $x1
     %2:_(s24) = G_LOAD %0(p0) :: (load (s24) from %ir.ptr, align 1)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/observer-change-crash.mir b/llvm/test/CodeGen/AArch64/GlobalISel/observer-change-crash.mir
index 8d3add417cc1e..5ea12871f6efe 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/observer-change-crash.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/observer-change-crash.mir
@@ -23,7 +23,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: test
     ; CHECK: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
-    ; CHECK: $x0 = COPY [[DEF]](p0)
+    ; CHECK-NEXT: $x0 = COPY [[DEF]](p0)
     %0:_(p0) = G_IMPLICIT_DEF
     %1:_(p0) = COPY %0(p0)
     %2:_(p0) = COPY %1(p0)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
index c9749b84d8a5a..577bee3a9832b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
@@ -11,14 +11,17 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: fold_and_rhs
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   %copy:gpr64all = COPY $x0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-  ; CHECK:   TBNZW [[COPY1]], 3, %bb.1
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+  ; CHECK-NEXT:   TBNZW [[COPY1]], 3, %bb.1
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     successors: %bb.0, %bb.1
     liveins: $x0
@@ -46,14 +49,17 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: fold_and_lhs
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   %copy:gpr64all = COPY $x0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-  ; CHECK:   TBNZW [[COPY1]], 3, %bb.1
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+  ; CHECK-NEXT:   TBNZW [[COPY1]], 3, %bb.1
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     successors: %bb.0, %bb.1
     liveins: $x0
@@ -80,15 +86,18 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: dont_fold_and
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   %copy:gpr64 = COPY $x0
-  ; CHECK:   %fold_me:gpr64sp = ANDXri %copy, 4098
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32all = COPY %fold_me.sub_32
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-  ; CHECK:   TBNZW [[COPY1]], 3, %bb.1
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %copy:gpr64 = COPY $x0
+  ; CHECK-NEXT:   %fold_me:gpr64sp = ANDXri %copy, 4098
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %fold_me.sub_32
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+  ; CHECK-NEXT:   TBNZW [[COPY1]], 3, %bb.1
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     successors: %bb.0, %bb.1
     liveins: $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
index 2939c4de15666..aca58b6d140d4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
@@ -42,13 +42,14 @@ body:             |
 
     ; CHECK-LABEL: name: cmn_s32_rhs
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -72,13 +73,14 @@ body:             |
 
     ; CHECK-LABEL: name: cmn_s32_lhs
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -102,14 +104,15 @@ body:             |
 
     ; CHECK-LABEL: name: no_cmn_s32_rhs
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY1]], implicit-def dead $nzcv
-    ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY1]], implicit-def dead $nzcv
+    ; CHECK-NEXT: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -133,14 +136,15 @@ body:             |
 
     ; CHECK-LABEL: name: no_cmn_s32_lhs
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY]], implicit-def dead $nzcv
-    ; CHECK: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY]], implicit-def dead $nzcv
+    ; CHECK-NEXT: [[SUBSWrr1:%[0-9]+]]:gpr32 = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY2]], $wzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -164,13 +168,14 @@ body:             |
 
     ; CHECK-LABEL: name: cmn_s64_rhs
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv
-    ; CHECK: $x0 = COPY [[CSINCXr]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $x0 = COPY [[CSINCXr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(s64) = G_CONSTANT i64 0
@@ -194,13 +199,14 @@ body:             |
 
     ; CHECK-LABEL: name: cmn_s64_lhs
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv
-    ; CHECK: $x0 = COPY [[CSINCXr]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $x0 = COPY [[CSINCXr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(s64) = G_CONSTANT i64 0
@@ -224,14 +230,15 @@ body:             |
 
     ; CHECK-LABEL: name: no_cmn_s64_rhs
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY1]], implicit-def dead $nzcv
-    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv
-    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv
-    ; CHECK: $x0 = COPY [[CSINCXr]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY1]], implicit-def dead $nzcv
+    ; CHECK-NEXT: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $x0 = COPY [[CSINCXr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(s64) = G_CONSTANT i64 0
@@ -255,14 +262,15 @@ body:             |
 
     ; CHECK-LABEL: name: no_cmn_s64_lhs
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY]], implicit-def dead $nzcv
-    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv
-    ; CHECK: $x0 = COPY [[CSINCXr]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY]], implicit-def dead $nzcv
+    ; CHECK-NEXT: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY2]], $xzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $x0 = COPY [[CSINCXr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(s64) = G_CONSTANT i64 0
@@ -285,12 +293,13 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: tst_s32
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY1]], [[COPY]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY1]], [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -314,12 +323,13 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: tst_s64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: [[ANDSXrr:%[0-9]+]]:gpr64 = ANDSXrr [[COPY1]], [[COPY]], implicit-def $nzcv
-    ; CHECK: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY1]], $xzr, 1, implicit $nzcv
-    ; CHECK: $x0 = COPY [[CSINCXr]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[ANDSXrr:%[0-9]+]]:gpr64 = ANDSXrr [[COPY1]], [[COPY]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCXr:%[0-9]+]]:gpr64 = CSINCXr [[COPY1]], $xzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $x0 = COPY [[CSINCXr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(s64) = G_CONSTANT i64 0
@@ -343,13 +353,14 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: no_tst_unsigned_compare
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]]
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 0, 0, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 0, 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -373,13 +384,14 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: no_tst_nonzero
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]]
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 42, 0, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[ANDWrr]], 42, 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr [[COPY1]], $wzr, 9, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -403,11 +415,12 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: imm_tst
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 1, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 1, implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -435,12 +448,13 @@ body:             |
     liveins: $w0, $w1
     ; CHECK-LABEL: name: no_imm_tst_not_logical_imm
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -1
-    ; CHECK: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -1
+    ; CHECK-NEXT: [[ANDSWrr:%[0-9]+]]:gpr32 = ANDSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
@@ -467,12 +481,13 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: test_physreg_copy
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     ; When we find the defs of the LHS and RHS of the compare, we walk over
@@ -493,15 +508,15 @@ body:             |
   bb.0:
     liveins: $x0, $x1
     ; We should fold the G_SHL into the ANDS to get ANDSXrs.
-    ;
     ; CHECK-LABEL: name: tst_fold_shift_s64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %copy:gpr64 = COPY $x1
-    ; CHECK: %zero:gpr64 = COPY $xzr
-    ; CHECK: [[ANDSXrs:%[0-9]+]]:gpr64 = ANDSXrs %zero, %copy, 16, implicit-def $nzcv
-    ; CHECK: %select:gpr64 = CSINCXr %zero, $xzr, 1, implicit $nzcv
-    ; CHECK: $x0 = COPY %select
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr64 = COPY $x1
+    ; CHECK-NEXT: %zero:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[ANDSXrs:%[0-9]+]]:gpr64 = ANDSXrs %zero, %copy, 16, implicit-def $nzcv
+    ; CHECK-NEXT: %select:gpr64 = CSINCXr %zero, $xzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $x0 = COPY %select
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:gpr(s64) = COPY $x1
     %zero:gpr(s64) = G_CONSTANT i64 0
     %one:gpr(s64) = G_CONSTANT i64 1
@@ -524,15 +539,15 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; We should fold the G_SHL into the ANDS to get ANDSWrs.
-    ;
     ; CHECK-LABEL: name: tst_fold_shift_s32
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy:gpr32 = COPY $w1
-    ; CHECK: %zero:gpr32 = COPY $wzr
-    ; CHECK: [[ANDSWrs:%[0-9]+]]:gpr32 = ANDSWrs %zero, %copy, 16, implicit-def $nzcv
-    ; CHECK: %select:gpr32 = CSINCWr %zero, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY %select
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr32 = COPY $w1
+    ; CHECK-NEXT: %zero:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[ANDSWrs:%[0-9]+]]:gpr32 = ANDSWrs %zero, %copy, 16, implicit-def $nzcv
+    ; CHECK-NEXT: %select:gpr32 = CSINCWr %zero, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %select
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w1
     %zero:gpr(s32) = G_CONSTANT i32 0
     %one:gpr(s32) = G_CONSTANT i32 1
@@ -557,11 +572,12 @@ body:             |
 
     ; CHECK-LABEL: name: cmn_s32_neg_imm
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %reg0:gpr32sp = COPY $w0
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg0, 1, 0, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %reg0:gpr32sp = COPY $w0
+    ; CHECK-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg0, 1, 0, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
     %negative_one:gpr(s32) = G_CONSTANT i32 -1
     %zero:gpr(s32) = G_CONSTANT i32 0
@@ -584,12 +600,13 @@ body:             |
 
     ; CHECK-LABEL: name: cmn_arith_extended_shl
     ; CHECK: liveins: $w0, $x0, $x1
-    ; CHECK: %reg0:gpr64sp = COPY $x0
-    ; CHECK: %reg1:gpr32 = COPY $w0
-    ; CHECK: [[ADDSXrx:%[0-9]+]]:gpr64 = ADDSXrx %reg0, %reg1, 50, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %reg1:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[ADDSXrx:%[0-9]+]]:gpr64 = ADDSXrx %reg0, %reg1, 50, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s64) = COPY $x0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %sub:gpr(s64) = G_SUB %zero, %reg0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
index bcda5c98b8cdf..f50f3bb78e6b8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
@@ -10,15 +10,17 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: fold_trunc
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   liveins: $x0
-  ; CHECK:   %copy:gpr64all = COPY $x0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-  ; CHECK:   TBNZW [[COPY1]], 3, %bb.1
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %copy:gpr64all = COPY $x0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32all = COPY %copy.sub_32
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+  ; CHECK-NEXT:   TBNZW [[COPY1]], 3, %bb.1
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     successors: %bb.0, %bb.1
     liveins: $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
index c5a7e231b0737..351d6b39d56e5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-overlapping-and.mir
@@ -7,10 +7,10 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: bitmask_overlap1
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 -128
     %3:_(s32) = G_CONSTANT i32 255
@@ -26,10 +26,10 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: bitmask_overlap2
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 255
     %3:_(s32) = G_CONSTANT i32 -128
@@ -45,10 +45,10 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: bitmask_overlap3
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 255
     %3:_(s32) = G_CONSTANT i32 -128
@@ -64,10 +64,10 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: bitmask_overlap4
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 255
     %3:_(s32) = G_CONSTANT i32 -128
@@ -83,8 +83,8 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: bitmask_no_overlap
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: $w0 = COPY [[C]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w0 = COPY [[C]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 1
     %3:_(s32) = G_CONSTANT i32 2
@@ -100,14 +100,14 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: bitmask_overlap_extrause
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
-    ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
-    ; CHECK: G_STORE [[AND]](s32), [[COPY1]](p0) :: (store (s32))
-    ; CHECK: $w0 = COPY [[AND1]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
+    ; CHECK-NEXT: G_STORE [[AND]](s32), [[COPY1]](p0) :: (store (s32))
+    ; CHECK-NEXT: $w0 = COPY [[AND1]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(p0) = COPY $x1
     %2:_(s32) = G_CONSTANT i32 255

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-shifted-reg-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-shifted-reg-compare.mir
index 7fafdd0c4f210..28ecffb38fc8e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-shifted-reg-compare.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-shifted-reg-compare.mir
@@ -21,12 +21,13 @@ body:             |
 
     ; CHECK-LABEL: name: eq_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -47,12 +48,13 @@ body:             |
 
     ; CHECK-LABEL: name: eq_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -73,12 +75,13 @@ body:             |
 
     ; CHECK-LABEL: name: eq_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -99,12 +102,13 @@ body:             |
 
     ; CHECK-LABEL: name: ne_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -125,12 +129,13 @@ body:             |
 
     ; CHECK-LABEL: name: ne_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -151,12 +156,13 @@ body:             |
 
     ; CHECK-LABEL: name: ne_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -177,12 +183,13 @@ body:             |
 
     ; CHECK-LABEL: name: ult_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -202,12 +209,13 @@ body:             |
 
     ; CHECK-LABEL: name: ult_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -227,12 +235,13 @@ body:             |
 
     ; CHECK-LABEL: name: ult_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -252,12 +261,13 @@ body:             |
 
     ; CHECK-LABEL: name: ugt_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -277,12 +287,13 @@ body:             |
 
     ; CHECK-LABEL: name: ugt_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -302,12 +313,13 @@ body:             |
 
     ; CHECK-LABEL: name: ugt_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -329,12 +341,13 @@ body:             |
 
     ; CHECK-LABEL: name: uge_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -354,12 +367,13 @@ body:             |
 
     ; CHECK-LABEL: name: uge_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -379,12 +393,13 @@ body:             |
 
     ; CHECK-LABEL: name: uge_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -406,12 +421,13 @@ body:             |
 
     ; CHECK-LABEL: name: ule_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -431,12 +447,13 @@ body:             |
 
     ; CHECK-LABEL: name: ule_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -456,12 +473,13 @@ body:             |
 
     ; CHECK-LABEL: name: ule_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -482,12 +500,13 @@ body:             |
 
     ; CHECK-LABEL: name: slt_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -507,12 +526,13 @@ body:             |
 
     ; CHECK-LABEL: name: slt_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -532,12 +552,13 @@ body:             |
 
     ; CHECK-LABEL: name: slt_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -557,12 +578,13 @@ body:             |
 
     ; CHECK-LABEL: name: sgt_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -582,12 +604,13 @@ body:             |
 
     ; CHECK-LABEL: name: sgt_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -607,12 +630,13 @@ body:             |
 
     ; CHECK-LABEL: name: sgt_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -632,12 +656,13 @@ body:             |
 
     ; CHECK-LABEL: name: sge_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -657,12 +682,13 @@ body:             |
 
     ; CHECK-LABEL: name: sge_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -682,12 +708,13 @@ body:             |
 
     ; CHECK-LABEL: name: sge_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -709,12 +736,13 @@ body:             |
 
     ; CHECK-LABEL: name: sle_shl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 3, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -734,12 +762,13 @@ body:             |
 
     ; CHECK-LABEL: name: sle_ashr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 131, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3
@@ -759,12 +788,13 @@ body:             |
 
     ; CHECK-LABEL: name: sle_lshr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy0:gpr32 = COPY $w0
-    ; CHECK: %copy1:gpr32 = COPY $w1
-    ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
-    ; CHECK: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: $w0 = COPY %cmp
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy0:gpr32 = COPY $w0
+    ; CHECK-NEXT: %copy1:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs %copy1, %copy0, 67, implicit-def $nzcv
+    ; CHECK-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; CHECK-NEXT: $w0 = COPY %cmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy0:gpr(s32) = COPY $w0
     %copy1:gpr(s32) = COPY $w1
     %three:gpr(s32) = G_CONSTANT i32 3

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir b/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir
index 22f89a76459cc..7968c7be5d308 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/phi-mir-debugify.mir
@@ -34,43 +34,49 @@ liveins:
 body:             |
   ; CHECK-LABEL: name: legalize_phi
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0, debug-location !11
-  ; CHECK:   DBG_VALUE [[COPY]](s32), $noreg, !9, !DIExpression(), debug-location !11
-  ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0, debug-location !DILocation(line: 2, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[C]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !5)
-  ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1, debug-location !DILocation(line: 3, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[C1]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !5)
-  ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2, debug-location !DILocation(line: 4, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[C2]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !5)
-  ; CHECK:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]], debug-location !DILocation(line: 5, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[ICMP]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !5)
-  ; CHECK:   G_BRCOND [[ICMP]](s1), %bb.1, debug-location !DILocation(line: 6, column: 1, scope: !5)
-  ; CHECK:   G_BR %bb.2, debug-location !DILocation(line: 7, column: 1, scope: !5)
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]], debug-location !DILocation(line: 8, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[ADD]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 8, column: 1, scope: !5)
-  ; CHECK:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ADD]](s32), debug-location !DILocation(line: 9, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[TRUNC]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 9, column: 1, scope: !5)
-  ; CHECK:   G_BR %bb.3, debug-location !DILocation(line: 10, column: 1, scope: !5)
-  ; CHECK: bb.2:
-  ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]], debug-location !DILocation(line: 11, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[ADD1]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 11, column: 1, scope: !5)
-  ; CHECK:   [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ADD1]](s32), debug-location !DILocation(line: 12, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[TRUNC1]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 12, column: 1, scope: !5)
-  ; CHECK: bb.3:
-  ; CHECK:   [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2, debug-location !DILocation(line: 13, column: 1, scope: !5)
-  ; CHECK:   [[PHI1:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2, debug-location !DILocation(line: 14, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[PHI]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 13, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[PHI1]](s1), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 14, column: 1, scope: !5)
-  ; CHECK:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[PHI]](s1), debug-location !DILocation(line: 15, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE [[ZEXT]](s32), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 15, column: 1, scope: !5)
-  ; CHECK:   $w0 = COPY [[ZEXT]](s32), debug-location !DILocation(line: 16, column: 1, scope: !5)
-  ; CHECK:   DBG_VALUE $w0, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 16, column: 1, scope: !5)
-  ; CHECK:   RET_ReallyLR implicit $w0, debug-location !DILocation(line: 17, column: 1, scope: !5)
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0,  debug-location !11
+  ; CHECK-NEXT:   DBG_VALUE [[COPY]](s32), $noreg, !9, !DIExpression(),  debug-location !11
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0,  debug-location !DILocation(line: 2, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[C]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 2, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1,  debug-location !DILocation(line: 3, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[C1]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 3, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2,  debug-location !DILocation(line: 4, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[C2]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 4, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]],  debug-location !DILocation(line: 5, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[ICMP]](s1), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 5, column: 1, scope: !5)
+  ; CHECK-NEXT:   G_BRCOND [[ICMP]](s1), %bb.1,  debug-location !DILocation(line: 6, column: 1, scope: !5)
+  ; CHECK-NEXT:   G_BR %bb.2,  debug-location !DILocation(line: 7, column: 1, scope: !5)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]],  debug-location !DILocation(line: 8, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[ADD]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 8, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ADD]](s32),  debug-location !DILocation(line: 9, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[TRUNC]](s1), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 9, column: 1, scope: !5)
+  ; CHECK-NEXT:   G_BR %bb.3,  debug-location !DILocation(line: 10, column: 1, scope: !5)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]],  debug-location !DILocation(line: 11, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[ADD1]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 11, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ADD1]](s32),  debug-location !DILocation(line: 12, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[TRUNC1]](s1), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 12, column: 1, scope: !5)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2,  debug-location !DILocation(line: 13, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s1) = G_PHI [[TRUNC]](s1), %bb.1, [[TRUNC1]](s1), %bb.2,  debug-location !DILocation(line: 14, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[PHI]](s1), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 13, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[PHI1]](s1), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 14, column: 1, scope: !5)
+  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[PHI]](s1),  debug-location !DILocation(line: 15, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE [[ZEXT]](s32), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 15, column: 1, scope: !5)
+  ; CHECK-NEXT:   $w0 = COPY [[ZEXT]](s32),  debug-location !DILocation(line: 16, column: 1, scope: !5)
+  ; CHECK-NEXT:   DBG_VALUE $w0, $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 16, column: 1, scope: !5)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0,  debug-location !DILocation(line: 17, column: 1, scope: !5)
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
index a022a287b4633..4ed9322802693 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combine-ptr-add-chain.mir
@@ -13,11 +13,12 @@ body:             |
 
     ; CHECK-LABEL: name: ptradd_chain
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: $x0 = COPY [[PTR_ADD]](p0)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[PTR_ADD]](p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 4
     %2:_(s64) = G_CONSTANT i64 12

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-and-trivial-mask.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-and-trivial-mask.mir
index 93e5b4c4ba064..1f7d5e7ed434e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-and-trivial-mask.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-and-trivial-mask.mir
@@ -12,20 +12,18 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; G_ICMP produces a single bit. The mask is 1.
-    ;
     ; cmp = 000...0?
     ; mask = 000...01
     ; cmp & mask = 000...0?
-    ;
     ; Remove the G_AND.
-    ;
     ; CHECK-LABEL: name: remove_and_with_one_bit
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
-    ; CHECK: $w0 = COPY %cmp(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
+    ; CHECK-NEXT: $w0 = COPY %cmp(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
@@ -43,12 +41,14 @@ body:             |
   bb.0:
     liveins: $w0, $w1, $w2
     ; -1 is all ones. Therefore z & -1 = z. Remove the G_AND.
-    ;
     ; CHECK-LABEL: name: remove_and_all_ones_mask
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: $w0 = COPY %z(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: $w0 = COPY %z(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2
@@ -68,15 +68,16 @@ body:             |
     ; %z is a s32, so it can be at most the all-ones value on 32 bits.
     ; In decimal this is 4294967295. Any zero-extension of %z is at most this
     ; value.
-    ;
     ; Therefore, zext(z) & 4294967295 == z. Remove the G_AND.
-    ;
     ; CHECK-LABEL: name: remove_and_all_ones_zext
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: %ext:_(s64) = G_ZEXT %z(s32)
-    ; CHECK: $x0 = COPY %ext(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: %ext:_(s64) = G_ZEXT %z(s32)
+    ; CHECK-NEXT: $x0 = COPY %ext(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2
@@ -95,13 +96,15 @@ body:             |
   bb.0:
     liveins: $w0, $w1, $w2
     ; This is the same as the zext case.
-    ;
     ; CHECK-LABEL: name: remove_and_all_ones_anyext
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: %ext:_(s64) = G_ZEXT %z(s32)
-    ; CHECK: $x0 = COPY %ext(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: %ext:_(s64) = G_ZEXT %z(s32)
+    ; CHECK-NEXT: $x0 = COPY %ext(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2
@@ -121,15 +124,17 @@ body:             |
     liveins: $w0, $w1, $w2
     ; We don't know if the sign bit is set on %z. So, the value in %ext may have
     ; higher bits set than 4294967295.
-    ;
     ; CHECK-LABEL: name: dont_remove_all_ones_sext
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: %ext:_(s64) = G_SEXT %z(s32)
-    ; CHECK: %mask:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK: %and:_(s64) = G_AND %ext, %mask
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: %ext:_(s64) = G_SEXT %z(s32)
+    ; CHECK-NEXT: %mask:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: %and:_(s64) = G_AND %ext, %mask
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2
@@ -148,17 +153,18 @@ body:             |
   bb.0:
     liveins: $w0, $w1, $w2
     ; We know the sign bit is not set on %z. Therefore,
-    ;
     ; z = ext = 42 = 000...0101010
     ; mask = 0000...0111111
-    ;
     ; So z & mask == z
     ; CHECK-LABEL: name: remove_and_positive_constant_sext
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %z:_(s32) = G_CONSTANT i32 42
-    ; CHECK: %ext:_(s64) = G_SEXT %z(s32)
-    ; CHECK: $x0 = COPY %ext(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = G_CONSTANT i32 42
+    ; CHECK-NEXT: %ext:_(s64) = G_SEXT %z(s32)
+    ; CHECK-NEXT: $x0 = COPY %ext(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = G_CONSTANT i32 42
@@ -177,16 +183,16 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; 6 is not a mask, so we should still have the G_AND.
-    ;
     ; CHECK-LABEL: name: not_a_mask
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
-    ; CHECK: %mask:_(s32) = G_CONSTANT i32 6
-    ; CHECK: %and:_(s32) = G_AND %cmp, %mask
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
+    ; CHECK-NEXT: %mask:_(s32) = G_CONSTANT i32 6
+    ; CHECK-NEXT: %and:_(s32) = G_AND %cmp, %mask
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %cmp:_(s32) = G_ICMP intpred(eq), %x(s32), %y
@@ -205,14 +211,16 @@ body:             |
     liveins: $w0, $w1, $w2
     ; We don't know what's in $w2, so we can't remove the G_AND without a mask
     ; that fills every bit in the type.
-    ;
     ; CHECK-LABEL: name: unknown_val
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: %one:_(s32) = G_CONSTANT i32 32
-    ; CHECK: %and:_(s32) = G_AND %z, %one
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: %one:_(s32) = G_CONSTANT i32 32
+    ; CHECK-NEXT: %and:_(s32) = G_AND %z, %one
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2
@@ -233,10 +241,11 @@ body:             |
 
     ; CHECK-LABEL: name: remove_and_assert_zext
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %assert_zext:_(s32) = G_ASSERT_ZEXT %x, 8
-    ; CHECK: $w0 = COPY %assert_zext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %assert_zext:_(s32) = G_ASSERT_ZEXT %x, 8
+    ; CHECK-NEXT: $w0 = COPY %assert_zext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %assert_zext:_(s32) = G_ASSERT_ZEXT %x(s32), 8
     %mask:_(s32) = G_CONSTANT i32 255
@@ -253,14 +262,15 @@ body:             |
     liveins: $w0
     ; The mask here is for 8 bits, not 16.
 
-    ; CHECK-LABEL: name: dont_remove_and_assert_zext
+    ; CHECK-LABEL: name: dont_remove_and_assert_zext_wrong_mask
     ; CHECK: liveins: $w0
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %assert_zext:_(s32) = G_ASSERT_ZEXT %x, 16
-    ; CHECK: %mask:_(s32) = G_CONSTANT i32 255
-    ; CHECK: %and:_(s32) = G_AND %assert_zext, %mask
-    ; CHECK: $w0 = COPY %and(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %assert_zext:_(s32) = G_ASSERT_ZEXT %x, 16
+    ; CHECK-NEXT: %mask:_(s32) = G_CONSTANT i32 255
+    ; CHECK-NEXT: %and:_(s32) = G_AND %assert_zext, %mask
+    ; CHECK-NEXT: $w0 = COPY %and(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %assert_zext:_(s32) = G_ASSERT_ZEXT %x(s32), 16
     %mask:_(s32) = G_CONSTANT i32 255

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-copy-prop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-copy-prop.mir
index e62f00624a60a..d867fc4341b4e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-copy-prop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-copy-prop.mir
@@ -12,12 +12,13 @@ body:             |
 
     ; CHECK-LABEL: name: postlegalize_copy_prop
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
-    ; CHECK: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[ADD]]
-    ; CHECK: $x0 = COPY [[ADD1]](s64)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[ADD]]
+    ; CHECK-NEXT: $x0 = COPY [[ADD1]](s64)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
     %2:_(s64) = G_ADD %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir
index 017d87c11aadb..0d36f5c8d8c03 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-identity.mir
@@ -12,9 +12,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: shift_of_zero
-    ; CHECK: %a:_(s64) = COPY $x0
-    ; CHECK: $x0 = COPY %a(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: $x0 = COPY %a(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %a:_(s64) = COPY $x0
     %b:_(s64) = G_CONSTANT i64 0
     %res:_(s64) = G_LSHR %a, %b

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-merge.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-merge.mir
index 86dd0a5ebfd66..9e7c573cd8a5f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-merge.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-merge.mir
@@ -12,9 +12,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: merge_unmerge
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: $x0 = COPY [[COPY]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: $x0 = COPY [[COPY]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %0
     %merge:_(s64) = G_MERGE_VALUES %a, %b
@@ -33,11 +35,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: merge_unmerge_mismatched_order
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: %b:_(s32), %a:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
-    ; CHECK: %merge:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)
-    ; CHECK: $x0 = COPY %merge(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s32), %a:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; CHECK-NEXT: %merge:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)
+    ; CHECK-NEXT: $x0 = COPY %merge(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %b:_(s32), %a:_(s32) = G_UNMERGE_VALUES %0
     %merge:_(s64) = G_MERGE_VALUES %a, %b
@@ -56,11 +60,13 @@ body:             |
     liveins: $q0
 
     ; CHECK-LABEL: name: merge_unmerge_mismatched_num_defs
-    ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
-    ; CHECK: %a:_(s32), %b:_(s32), %c:_(s32), %d:_(s32) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; CHECK: %merge:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)
-    ; CHECK: $x0 = COPY %merge(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
+    ; CHECK-NEXT: %a:_(s32), %b:_(s32), %c:_(s32), %d:_(s32) = G_UNMERGE_VALUES [[COPY]](s128)
+    ; CHECK-NEXT: %merge:_(s64) = G_MERGE_VALUES %a(s32), %b(s32)
+    ; CHECK-NEXT: $x0 = COPY %merge(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s128) = COPY $q0
     %a:_(s32), %b:_(s32), %c:_(s32), %d:_(s32) = G_UNMERGE_VALUES %0
     %merge:_(s64) = G_MERGE_VALUES %a, %b
@@ -79,10 +85,12 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: merge_to_zext
-    ; CHECK: %v:_(s32) = COPY $w0
-    ; CHECK: %merge:_(s64) = G_ZEXT %v(s32)
-    ; CHECK: $x0 = COPY %merge(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v:_(s32) = COPY $w0
+    ; CHECK-NEXT: %merge:_(s64) = G_ZEXT %v(s32)
+    ; CHECK-NEXT: $x0 = COPY %merge(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %v:_(s32) = COPY $w0
     %zero:_(s32) = G_CONSTANT i32 0
     %merge:_(s64) = G_MERGE_VALUES %v, %zero

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-redundant-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-redundant-sextinreg.mir
index 8e9642ab3e3b2..5da796c637c97 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-redundant-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-combiner-redundant-sextinreg.mir
@@ -12,9 +12,10 @@ body: |
 
     ; CHECK-LABEL: name: sextload
     ; CHECK: liveins: $x0
-    ; CHECK: %x0:_(p0) = COPY $x0
-    ; CHECK: %sextload:_(s32) = G_SEXTLOAD %x0(p0) :: (load (s16))
-    ; CHECK: $w0 = COPY %sextload(s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x0:_(p0) = COPY $x0
+    ; CHECK-NEXT: %sextload:_(s32) = G_SEXTLOAD %x0(p0) :: (load (s16))
+    ; CHECK-NEXT: $w0 = COPY %sextload(s32)
     %x0:_(p0) = COPY $x0
     %sextload:_(s32) = G_SEXTLOAD %x0:_(p0) :: (load (s16))
     %sext_inreg:_(s32) = G_SEXT_INREG %sextload:_(s32), 24
@@ -32,9 +33,10 @@ body: |
 
     ; CHECK-LABEL: name: assert_sext
     ; CHECK: liveins: $w0
-    ; CHECK: %w0:_(s32) = COPY $w0
-    ; CHECK: %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 24
-    ; CHECK: $w0 = COPY %assert_sext(s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %w0:_(s32) = COPY $w0
+    ; CHECK-NEXT: %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 24
+    ; CHECK-NEXT: $w0 = COPY %assert_sext(s32)
     %w0:_(s32) = COPY $w0
     %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 24
     %sext_inreg:_(s32) = G_SEXT_INREG %assert_sext:_(s32), 24
@@ -52,10 +54,11 @@ body: |
 
     ; CHECK-LABEL: name: assert_sext_greater_width
     ; CHECK: liveins: $w0
-    ; CHECK: %w0:_(s32) = COPY $w0
-    ; CHECK: %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 24
-    ; CHECK: %sext_inreg:_(s32) = G_SEXT_INREG %assert_sext, 12
-    ; CHECK: $w0 = COPY %sext_inreg(s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %w0:_(s32) = COPY $w0
+    ; CHECK-NEXT: %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 24
+    ; CHECK-NEXT: %sext_inreg:_(s32) = G_SEXT_INREG %assert_sext, 12
+    ; CHECK-NEXT: $w0 = COPY %sext_inreg(s32)
     %w0:_(s32) = COPY $w0
     %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 24
     %sext_inreg:_(s32) = G_SEXT_INREG %assert_sext:_(s32), 12
@@ -73,9 +76,10 @@ body: |
 
     ; CHECK-LABEL: name: assert_sext_smaller_width
     ; CHECK: liveins: $w0
-    ; CHECK: %w0:_(s32) = COPY $w0
-    ; CHECK: %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 8
-    ; CHECK: $w0 = COPY %assert_sext(s32)
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %w0:_(s32) = COPY $w0
+    ; CHECK-NEXT: %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 8
+    ; CHECK-NEXT: $w0 = COPY %assert_sext(s32)
     %w0:_(s32) = COPY $w0
     %assert_sext:_(s32) = G_ASSERT_SEXT %w0, 8
     %sext_inreg:_(s32) = G_SEXT_INREG %assert_sext:_(s32), 12

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
index 523eb0c4bc0e5..edc33e340c9b6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
@@ -14,7 +14,6 @@ body:             |
     liveins: $w0
 
     ; x slt c => x sle c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -26,6 +25,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: slt_to_sle_s32
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -50,7 +50,6 @@ body:             |
     liveins: $x0
 
     ; x slt c => x sle c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -62,6 +61,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: slt_to_sle_s64
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -86,7 +86,6 @@ body:             |
     liveins: $w0
 
     ; x sge c => x sgt c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -98,6 +97,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s32), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: sge_to_sgt_s32
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -122,7 +122,6 @@ body:             |
     liveins: $x0
 
     ; x sge c => x sgt c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -134,6 +133,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: sge_to_sgt_s64
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -158,7 +158,6 @@ body:             |
     liveins: $w0
 
     ; x ult c => x ule c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -170,6 +169,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ule), %reg(s32), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: ult_to_ule_s32
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -194,7 +194,6 @@ body:             |
     liveins: $x0
 
     ; x ult c => x ule c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -206,6 +205,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ule), %reg(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: ult_to_ule_s64
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -230,7 +230,6 @@ body:             |
     liveins: $w0
 
     ; x uge c => x ugt c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -242,6 +241,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ugt), %reg(s32), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: uge_to_ugt_s32
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -266,7 +266,6 @@ body:             |
     liveins: $x0
 
     ; x uge c => x ugt c - 1
-    ;
     ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -278,6 +277,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ugt), %reg(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: uge_to_ugt_s64
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -302,10 +302,8 @@ body:             |
     liveins: $w0
 
     ; x sle c => x slt c + 1
-    ;
     ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
     ; left shift.
-    ;
     ; (We can't use 4095 here, because that's a legal arithmetic immediate.)
 
     ; LOWER-LABEL: name: sle_to_slt_s32
@@ -316,6 +314,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: sle_to_slt_s32
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -340,7 +339,6 @@ body:             |
     liveins: $x0
 
     ; x sle c => x slt c + 1
-    ;
     ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -352,6 +350,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: sle_to_slt_s64
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -376,7 +375,6 @@ body:             |
     liveins: $w0
 
     ; x sgt c => s sge c + 1
-    ;
     ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -388,6 +386,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %reg(s32), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: sgt_to_sge_s32
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -412,7 +411,6 @@ body:             |
     liveins: $x0
 
     ; x sgt c => s sge c + 1
-    ;
     ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
     ; left shift.
 
@@ -424,6 +422,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %reg(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: sgt_to_sge_s64
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -448,10 +447,8 @@ body:             |
     liveins: $w0
 
     ; The cmp should not change.
-    ;
     ; If we subtract 1 from the constant, it will wrap around, and so it's not
     ; true that
-    ;
     ; x slt c => x sle c - 1
     ; x sge c => x sgt c - 1
 
@@ -463,6 +460,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: no_opt_int32_min
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -488,10 +486,8 @@ body:             |
     liveins: $x0
 
     ; The cmp should not change.
-    ;
     ; If we subtract 1 from the constant, it will wrap around, and so it's not
     ; true that
-    ;
     ; x slt c => x sle c - 1
     ; x sge c => x sgt c - 1
 
@@ -503,6 +499,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: no_opt_int64_min
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -528,10 +525,8 @@ body:             |
     liveins: $w0
 
     ; The cmp should not change.
-    ;
     ; If we add 1 to the constant, it will wrap around, and so it's not true
     ; that
-    ;
     ; x slt c => x sle c - 1
     ; x sge c => x sgt c - 1
 
@@ -543,6 +538,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: no_opt_int32_max
     ; SELECT: liveins: $w0
     ; SELECT-NEXT: {{  $}}
@@ -568,10 +564,8 @@ body:             |
     liveins: $x0
 
     ; The cmp should not change.
-    ;
     ; If we add 1 to the constant, it will wrap around, and so it's not true
     ; that
-    ;
     ; x slt c => x sle c - 1
     ; x sge c => x sgt c - 1
 
@@ -584,6 +578,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: no_opt_int64_max
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -609,10 +604,8 @@ body:             |
     liveins: $x0
 
     ; The cmp should not change during the lowering pass.
-    ;
     ; This is an unsigned comparison, so when the constant is 0, the following
     ; does not hold:
-    ;
     ; x slt c => x sle c - 1
     ; x sge c => x sgt c - 1
 
@@ -624,6 +617,7 @@ body:             |
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: no_opt_zero
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}
@@ -660,6 +654,7 @@ body:             |
     ; LOWER-NEXT: %select:_(s32) = G_SELECT %cmp(s32), %reg0, %reg1
     ; LOWER-NEXT: $w0 = COPY %select(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: cmp_and_select
     ; SELECT: liveins: $w0, $w1
     ; SELECT-NEXT: {{  $}}
@@ -698,6 +693,7 @@ body:             |
     ; LOWER-NEXT: %cmp:gpr(s32) = G_ICMP intpred(sge), %and(s64), [[C]]
     ; LOWER-NEXT: $w0 = COPY %cmp(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; SELECT-LABEL: name: andsxri
     ; SELECT: liveins: $x0
     ; SELECT-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
index 3a8357db311ce..14d44d85e06f3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-ext.mir
@@ -152,7 +152,6 @@ body:             |
     liveins: $d0, $d1
     ; Undef shuffle indices should not prevent matching G_EXT.
     ; We should get a constant 3 here.
-    ;
     ; CHECK-LABEL: name: undef_elts_should_match_1
     ; CHECK: liveins: $d0, $d1
     ; CHECK-NEXT: {{  $}}
@@ -178,7 +177,6 @@ body:             |
     liveins: $d0, $d1
     ; Undef shuffle indices should not prevent matching G_EXT.
     ; We should get a constant 6 here.
-    ;
     ; CHECK-LABEL: name: undef_elts_should_match_2
     ; CHECK: liveins: $d0, $d1
     ; CHECK-NEXT: {{  $}}
@@ -253,7 +251,6 @@ body:             |
   bb.0:
     liveins: $q0, $q1
     ; We expect at least one defined element in the shuffle mask.
-    ;
     ; CHECK-LABEL: name: all_undef
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-rev.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-rev.mir
index 19dd99ec33f16..c5a6030155494 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-rev.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-rev.mir
@@ -14,10 +14,11 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: rev64_mask_1_0
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
-    ; CHECK: $d0 = COPY [[REV64_]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[REV64_]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 0)
@@ -35,10 +36,11 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: rev64_mask_1_undef
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
-    ; CHECK: $d0 = COPY [[REV64_]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[REV64_]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, undef)
@@ -56,28 +58,23 @@ body:             |
     liveins: $d0, $d1
 
     ; Verify that we don't produce a G_REV64 when
-    ;
     ; M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)
     ; In this example, BlockElts = 2
-    ;
     ; At i = 1
     ; M[i] = 3
     ; i % BlockElts = i % 2 = 1
-    ;
     ; So
-    ;
     ; 3 != (1 - 1) + (2 - 1 - 1)
     ; 3 != 0
-    ;
     ; And so we should not produce a G_REV64.
-    ;
     ; CHECK-LABEL: name: no_rev64_mask_1
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[ZIP2_:%[0-9]+]]:_(<2 x s32>) = G_ZIP2 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[ZIP2_]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[ZIP2_:%[0-9]+]]:_(<2 x s32>) = G_ZIP2 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[ZIP2_]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 3)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
index 567d5f2deb8c5..80eda3db6e04a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuf-to-ins.mir
@@ -14,20 +14,20 @@ body:             |
     liveins: $d0, $d1
 
     ; 2 elts -> need 1 match.
-    ;
     ; Matched M[0] = 0 -> G_INSERT_VECTOR_ELT should use %left.
     ; DstLane (G_INSERT_VECTOR_ELT) : 1, because M[1] != 1.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[DstLane] = 0
 
     ; CHECK-LABEL: name: v2s32_match_left_0
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(0, 0)
@@ -44,20 +44,20 @@ body:             |
     liveins: $d0, $d1
 
     ; 2 elts -> need 1 match.
-    ;
     ; Matched M[1] = 1 -> G_INSERT_VECTOR_ELT should use %left.
     ; DstLane (G_INSERT_VECTOR_ELT) : 0, because M[0] != 0.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[0] = 1
 
     ; CHECK-LABEL: name: v2s32_match_left_1
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(1, 1)
@@ -74,21 +74,21 @@ body:             |
     liveins: $d0, $d1
 
     ; 2 elts -> need 1 match.
-    ;
     ; Matched M[0] = 1 -> G_INSERT_VECTOR_ELT should use %left.
     ; DstLane (G_INSERT_VECTOR_ELT) : 1, because M[1] != 1.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[1] = 3 - 2 = 1
 
     ; CHECK-LABEL: name: v2s32_match_left_3
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(0, 3)
@@ -106,21 +106,21 @@ body:             |
     liveins: $d0, $d1
 
     ; 2 elts -> need 1 match.
-    ;
     ; Matched M[1] = 1 + 2 -> G_INSERT_VECTOR_ELT should use %right.
     ; DstLane (G_INSERT_VECTOR_ELT) : 0, because M[0] != 2.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[0] = 1
 
     ; CHECK-LABEL: name: v2s32_match_right_3
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %right, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %right, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(1, 3)
@@ -137,21 +137,21 @@ body:             |
     liveins: $d0, $d1
 
     ; 2 elts -> need 1 match.
-    ;
     ; Matched M[0] = 0 + 2 -> G_INSERT_VECTOR_ELT should use %right.
     ; DstLane (G_INSERT_VECTOR_ELT) : 1, because M[1] != 3.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[1] = 0
 
     ; CHECK-LABEL: name: v2s32_match_right_2
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %right, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %right, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(2, 0)
@@ -168,16 +168,16 @@ body:             |
     liveins: $d0, $d1
 
     ; 2 elts -> need 1 match.
-    ;
     ; Matched M[0] = 0 + 2, M[1] = 1 + 2 -> too many matches.
 
     ; CHECK-LABEL: name: dont_combine_too_many_matches_right
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(2, 3)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(2, 3)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(2, 3)
@@ -195,21 +195,21 @@ body:             |
 
     ; Matched the correct amount on the left and right.
     ; Use left as a tiebreaker.
-    ;
     ; Matched M[1] = 1 -> G_INSERT_VECTOR_ELT should use %left.
     ; DstLane (G_INSERT_VECTOR_ELT) : 0, because M[0] != 0.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[0] = 2 - 2 = 0
 
     ; CHECK-LABEL: name: tiebreaker
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(2, 1)
@@ -226,21 +226,21 @@ body:             |
     liveins: $d0, $d1
 
     ; Undef counts as a match for left and right.
-    ;
     ; Matched M[1] = -1 -> G_INSERT_VECTOR_ELT should use %left.
     ; DstLane (G_INSERT_VECTOR_ELT) : 0, because M[0] != 0.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[0] = 2 - 2 = 0
 
     ; CHECK-LABEL: name: tiebreaker_undef
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(2, -1)
@@ -257,21 +257,21 @@ body:             |
     liveins: $d0, $d1
 
     ; Undef counts as a match for left and right.
-    ;
     ; Matched M[1] = -1 -> G_INSERT_VECTOR_ELT should use %left.
     ; DstLane (G_INSERT_VECTOR_ELT) : 0, because M[0] != 0.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[0] = 3 - 2 = 1
 
     ; CHECK-LABEL: name: match_left_undef
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %left:_(<2 x s32>) = COPY $d0
-    ; CHECK: %right:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $d0 = COPY %shuf(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: %right:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %right(<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %shuf:_(<2 x s32>) = G_INSERT_VECTOR_ELT %left, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $d0 = COPY %shuf(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %left:_(<2 x s32>) = COPY $d0
     %right:_(<2 x s32>) = COPY $d1
     %shuf:_(<2 x s32>) = G_SHUFFLE_VECTOR %left(<2 x s32>), %right, shufflemask(3, -1)
@@ -288,20 +288,20 @@ body:             |
     liveins: $q0, $q1
 
     ; Matched M[0] = 0 + 4, undef, undef => 3 matches on the right.
-    ;
     ; DstLane (G_INSERT_VECTOR_ELT) : 3, because M[3] != 7.
     ; SrcLane (G_EXTRACT_VECTOR_ELT) : M[3] = 2
 
     ; CHECK-LABEL: name: match_right_undef
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %left:_(<4 x s32>) = COPY $q0
-    ; CHECK: %right:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<4 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
-    ; CHECK: %shuf:_(<4 x s32>) = G_INSERT_VECTOR_ELT %right, [[EVEC]](s32), [[C1]](s64)
-    ; CHECK: $q0 = COPY %shuf(<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %left:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: %right:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT %left(<4 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
+    ; CHECK-NEXT: %shuf:_(<4 x s32>) = G_INSERT_VECTOR_ELT %right, [[EVEC]](s32), [[C1]](s64)
+    ; CHECK-NEXT: $q0 = COPY %shuf(<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %left:_(<4 x s32>) = COPY $q0
     %right:_(<4 x s32>) = COPY $q1
     %shuf:_(<4 x s32>) = G_SHUFFLE_VECTOR %left(<4 x s32>), %right, shufflemask(4, -1, -1, 2)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-duplane.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-duplane.mir
index 3785210685147..c0cfbb8efdb47 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-duplane.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-duplane.mir
@@ -13,17 +13,20 @@ body:             |
 
     ; CHECK-LABEL: name: duplane64
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[DUPLANE64_:%[0-9]+]]:_(<2 x s64>) = G_DUPLANE64 [[COPY]], [[C]](s64)
-    ; CHECK: $q0 = COPY [[DUPLANE64_]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[DUPLANE64_:%[0-9]+]]:_(<2 x s64>) = G_DUPLANE64 [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[DUPLANE64_]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; SELECTED-LABEL: name: duplane64
     ; SELECTED: liveins: $q0
-    ; SELECTED: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; SELECTED: [[DUPv2i64lane:%[0-9]+]]:fpr128 = DUPv2i64lane [[COPY]], 0
-    ; SELECTED: $q0 = COPY [[DUPv2i64lane]]
-    ; SELECTED: RET_ReallyLR implicit $q0
+    ; SELECTED-NEXT: {{  $}}
+    ; SELECTED-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; SELECTED-NEXT: [[DUPv2i64lane:%[0-9]+]]:fpr128 = DUPv2i64lane [[COPY]], 0
+    ; SELECTED-NEXT: $q0 = COPY [[DUPv2i64lane]]
+    ; SELECTED-NEXT: RET_ReallyLR implicit $q0
     %1:_(<2 x s64>) = COPY $q0
     %2:_(<2 x s64>) = G_IMPLICIT_DEF
     %4:_(<2 x s64>) = G_SHUFFLE_VECTOR %1(<2 x s64>), %2, shufflemask(0, 0)
@@ -42,17 +45,20 @@ body:             |
 
     ; CHECK-LABEL: name: duplane32
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[DUPLANE32_:%[0-9]+]]:_(<4 x s32>) = G_DUPLANE32 [[COPY]], [[C]](s64)
-    ; CHECK: $q0 = COPY [[DUPLANE32_]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[DUPLANE32_:%[0-9]+]]:_(<4 x s32>) = G_DUPLANE32 [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[DUPLANE32_]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; SELECTED-LABEL: name: duplane32
     ; SELECTED: liveins: $q0
-    ; SELECTED: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; SELECTED: [[DUPv4i32lane:%[0-9]+]]:fpr128 = DUPv4i32lane [[COPY]], 0
-    ; SELECTED: $q0 = COPY [[DUPv4i32lane]]
-    ; SELECTED: RET_ReallyLR implicit $q0
+    ; SELECTED-NEXT: {{  $}}
+    ; SELECTED-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; SELECTED-NEXT: [[DUPv4i32lane:%[0-9]+]]:fpr128 = DUPv4i32lane [[COPY]], 0
+    ; SELECTED-NEXT: $q0 = COPY [[DUPv4i32lane]]
+    ; SELECTED-NEXT: RET_ReallyLR implicit $q0
     %1:_(<4 x s32>) = COPY $q0
     %2:_(<4 x s32>) = G_IMPLICIT_DEF
     %4:_(<4 x s32>) = G_SHUFFLE_VECTOR %1(<4 x s32>), %2, shufflemask(0, 0, 0, 0)
@@ -71,17 +77,20 @@ body:             |
 
     ; CHECK-LABEL: name: duplane16
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[DUPLANE16_:%[0-9]+]]:_(<8 x s16>) = G_DUPLANE16 [[COPY]], [[C]](s64)
-    ; CHECK: $q0 = COPY [[DUPLANE16_]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[DUPLANE16_:%[0-9]+]]:_(<8 x s16>) = G_DUPLANE16 [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[DUPLANE16_]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; SELECTED-LABEL: name: duplane16
     ; SELECTED: liveins: $q0
-    ; SELECTED: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; SELECTED: [[DUPv8i16lane:%[0-9]+]]:fpr128 = DUPv8i16lane [[COPY]], 0
-    ; SELECTED: $q0 = COPY [[DUPv8i16lane]]
-    ; SELECTED: RET_ReallyLR implicit $q0
+    ; SELECTED-NEXT: {{  $}}
+    ; SELECTED-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; SELECTED-NEXT: [[DUPv8i16lane:%[0-9]+]]:fpr128 = DUPv8i16lane [[COPY]], 0
+    ; SELECTED-NEXT: $q0 = COPY [[DUPv8i16lane]]
+    ; SELECTED-NEXT: RET_ReallyLR implicit $q0
     %1:_(<8 x s16>) = COPY $q0
     %2:_(<8 x s16>) = G_IMPLICIT_DEF
     %4:_(<8 x s16>) = G_SHUFFLE_VECTOR %1(<8 x s16>), %2, shufflemask(0, 0, 0, 0, 0, 0, 0, 0)
@@ -100,17 +109,20 @@ body:             |
 
     ; CHECK-LABEL: name: duplane8
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[DUPLANE8_:%[0-9]+]]:_(<16 x s8>) = G_DUPLANE8 [[COPY]], [[C]](s64)
-    ; CHECK: $q0 = COPY [[DUPLANE8_]](<16 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[DUPLANE8_:%[0-9]+]]:_(<16 x s8>) = G_DUPLANE8 [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $q0 = COPY [[DUPLANE8_]](<16 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    ;
     ; SELECTED-LABEL: name: duplane8
     ; SELECTED: liveins: $q0
-    ; SELECTED: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; SELECTED: [[DUPv16i8lane:%[0-9]+]]:fpr128 = DUPv16i8lane [[COPY]], 0
-    ; SELECTED: $q0 = COPY [[DUPv16i8lane]]
-    ; SELECTED: RET_ReallyLR implicit $q0
+    ; SELECTED-NEXT: {{  $}}
+    ; SELECTED-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; SELECTED-NEXT: [[DUPv16i8lane:%[0-9]+]]:fpr128 = DUPv16i8lane [[COPY]], 0
+    ; SELECTED-NEXT: $q0 = COPY [[DUPv16i8lane]]
+    ; SELECTED-NEXT: RET_ReallyLR implicit $q0
     %1:_(<16 x s8>) = COPY $q0
     %2:_(<16 x s8>) = G_IMPLICIT_DEF
     %4:_(<16 x s8>) = G_SHUFFLE_VECTOR %1(<16 x s8>), %2, shufflemask(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
@@ -135,21 +147,25 @@ body:             |
 
     ; CHECK-LABEL: name: v2s32_duplane32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
-    ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[DEF]](<2 x s32>)
-    ; CHECK: [[DUPLANE32_:%[0-9]+]]:_(<2 x s32>) = G_DUPLANE32 [[CONCAT_VECTORS]], [[C]](s64)
-    ; CHECK: $d0 = COPY [[DUPLANE32_]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY1]](<2 x s32>), [[DEF]](<2 x s32>)
+    ; CHECK-NEXT: [[DUPLANE32_:%[0-9]+]]:_(<2 x s32>) = G_DUPLANE32 [[CONCAT_VECTORS]], [[C]](s64)
+    ; CHECK-NEXT: $d0 = COPY [[DUPLANE32_]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
+    ;
     ; SELECTED-LABEL: name: v2s32_duplane32
     ; SELECTED: liveins: $d0, $d1
-    ; SELECTED: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; SELECTED: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; SELECTED: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; SELECTED: [[DUPv2i32lane:%[0-9]+]]:fpr64 = DUPv2i32lane [[INSERT_SUBREG]], 0
-    ; SELECTED: $d0 = COPY [[DUPv2i32lane]]
-    ; SELECTED: RET_ReallyLR implicit $d0
+    ; SELECTED-NEXT: {{  $}}
+    ; SELECTED-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; SELECTED-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; SELECTED-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; SELECTED-NEXT: [[DUPv2i32lane:%[0-9]+]]:fpr64 = DUPv2i32lane [[INSERT_SUBREG]], 0
+    ; SELECTED-NEXT: $d0 = COPY [[DUPv2i32lane]]
+    ; SELECTED-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
index 7c7689bcb80b5..f11396ee6c330 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-shuffle-splat.mir
@@ -218,9 +218,7 @@ body:             |
     liveins: $x0
     ; If all the elements are undefined, we consider it a splat. In this case,
     ; we can choose 0 as our index.
-    ;
     ; We should get a G_DUP here.
-    ;
     ; CHECK-LABEL: name: all_undef
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -246,9 +244,7 @@ body:             |
   bb.1.entry:
     liveins: $s0
     ; Make sure we can skip past undef values.
-    ;
     ; We should get a G_DUP here.
-    ;
     ; CHECK-LABEL: name: one_undef
     ; CHECK: liveins: $s0
     ; CHECK-NEXT: {{  $}}
@@ -350,7 +346,6 @@ body:             |
     liveins: $w0, $w1, $w2, $w3
     ; The G_SHUFFLE_VECTOR is fed by a G_BUILD_VECTOR, and the 0th input
     ; operand is not a constant. We should get a G_DUP.
-    ;
     ; CHECK-LABEL: name: build_vector
     ; CHECK: liveins: $w0, $w1, $w2, $w3
     ; CHECK-NEXT: {{  $}}
@@ -393,8 +388,7 @@ body:             |
     liveins: $w0, $w1, $w2, $w3, $w4
     ; The G_SHUFFLE_VECTOR is fed by a G_BUILD_VECTOR, and the 0th input
     ; operand is not a constant. We should get a G_DUP.
-    ;
-    ; CHECK-LABEL: name: build_vector
+    ; CHECK-LABEL: name: build_vector_rhs
     ; CHECK: liveins: $w0, $w1, $w2, $w3, $w4
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %lane_0:_(s32) = COPY $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
index b8020bd50a2b0..09e5a15252f86 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-swap-compare-operands.mir
@@ -305,7 +305,6 @@ body:             |
     liveins: $x0, $x1
     ; 7 isn't an extend mask for G_AND, so there's no folding opportunities
     ; here.
-    ;
     ; LOWER-LABEL: name: dont_swap_and_lhs_wrong_mask
     ; LOWER: liveins: $x0, $x1
     ; LOWER-NEXT: {{  $}}
@@ -643,7 +642,6 @@ body:             |
 
     ; Don't swap when the RHS's subtract offers a better constant folding
     ; opportunity than the LHS's subtract.
-    ;
     ; In this case, the RHS has a supported extend, plus a shift with a constant
     ; <= 4.
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-trn.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-trn.mir
index 659aa0dc7c9fc..cfafc68cc0073 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-trn.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-trn.mir
@@ -16,11 +16,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: trn1_v8s8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
-    ; CHECK: [[TRN1_:%[0-9]+]]:_(<8 x s8>) = G_TRN1 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN1_]](<8 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
+    ; CHECK-NEXT: [[TRN1_:%[0-9]+]]:_(<8 x s8>) = G_TRN1 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN1_]](<8 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s8>) = COPY $d0
     %1:_(<8 x s8>) = COPY $d1
     %2:_(<8 x s8>) = G_SHUFFLE_VECTOR %0(<8 x s8>), %1, shufflemask(0, 8, 2, 10, 4, 12, 6, 14)
@@ -38,11 +39,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: trn2_v8s8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
-    ; CHECK: [[TRN2_:%[0-9]+]]:_(<8 x s8>) = G_TRN2 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN2_]](<8 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
+    ; CHECK-NEXT: [[TRN2_:%[0-9]+]]:_(<8 x s8>) = G_TRN2 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN2_]](<8 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s8>) = COPY $d0
     %1:_(<8 x s8>) = COPY $d1
     %2:_(<8 x s8>) = G_SHUFFLE_VECTOR %0(<8 x s8>), %1, shufflemask(1, 9, 3, 11, 5, 13, 7, 15)
@@ -60,11 +62,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: trn1_v16s8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
-    ; CHECK: [[TRN1_:%[0-9]+]]:_(<16 x s8>) = G_TRN1 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN1_]](<16 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
+    ; CHECK-NEXT: [[TRN1_:%[0-9]+]]:_(<16 x s8>) = G_TRN1 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN1_]](<16 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<16 x s8>) = COPY $q0
     %1:_(<16 x s8>) = COPY $q1
     %2:_(<16 x s8>) = G_SHUFFLE_VECTOR %0(<16 x s8>), %1, shufflemask(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30)
@@ -82,11 +85,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: trn2_v16s8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
-    ; CHECK: [[TRN2_:%[0-9]+]]:_(<16 x s8>) = G_TRN2 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN2_]](<16 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<16 x s8>) = COPY $q1
+    ; CHECK-NEXT: [[TRN2_:%[0-9]+]]:_(<16 x s8>) = G_TRN2 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN2_]](<16 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<16 x s8>) = COPY $q0
     %1:_(<16 x s8>) = COPY $q1
     %2:_(<16 x s8>) = G_SHUFFLE_VECTOR %0(<16 x s8>), %1, shufflemask(1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31)
@@ -104,11 +108,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: trn1_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[TRN1_:%[0-9]+]]:_(<4 x s32>) = G_TRN1 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN1_]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[TRN1_:%[0-9]+]]:_(<4 x s32>) = G_TRN1 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN1_]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(0, 4, 2, 6)
@@ -126,11 +131,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: trn2_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[TRN2_:%[0-9]+]]:_(<4 x s32>) = G_TRN2 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN2_]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[TRN2_:%[0-9]+]]:_(<4 x s32>) = G_TRN2 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN2_]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(1, 5, 3, 7)
@@ -147,14 +153,14 @@ body:             |
   bb.1.entry:
     liveins: $d0, $d1
     ; 2 x s32 TRN is redundant with ZIP. Make sure we prioritize ZIP.
-    ;
     ; CHECK-LABEL: name: redundant_with_zip1
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[ZIP1_:%[0-9]+]]:_(<2 x s32>) = G_ZIP1 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[ZIP1_]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[ZIP1_:%[0-9]+]]:_(<2 x s32>) = G_ZIP1 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[ZIP1_]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(0, 2)
@@ -171,14 +177,14 @@ body:             |
   bb.1.entry:
     liveins: $d0, $d1
     ; 2 x s32 TRN is redundant with ZIP. Make sure we prioritize ZIP.
-    ;
     ; CHECK-LABEL: name: redundant_with_zip2
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
-    ; CHECK: [[ZIP2_:%[0-9]+]]:_(<2 x s32>) = G_ZIP2 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[ZIP2_]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+    ; CHECK-NEXT: [[ZIP2_:%[0-9]+]]:_(<2 x s32>) = G_ZIP2 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[ZIP2_]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = COPY $d1
     %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 3)
@@ -195,14 +201,14 @@ body:             |
   bb.1.entry:
     liveins: $d0, $d1
     ; Undef shuffle indices should not prevent matching to G_TRN1.
-    ;
     ; CHECK-LABEL: name: trn1_undef
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
-    ; CHECK: [[TRN1_:%[0-9]+]]:_(<8 x s8>) = G_TRN1 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN1_]](<8 x s8>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
+    ; CHECK-NEXT: [[TRN1_:%[0-9]+]]:_(<8 x s8>) = G_TRN1 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN1_]](<8 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<8 x s8>) = COPY $d0
     %1:_(<8 x s8>) = COPY $d1
     %2:_(<8 x s8>) = G_SHUFFLE_VECTOR %0(<8 x s8>), %1, shufflemask(0, 8, -1, -1, 4, 12, 6, 14)
@@ -219,14 +225,14 @@ body:             |
   bb.1.entry:
     liveins: $d0, $d1
     ; Undef shuffle indices should not prevent matching to G_TRN2.
-    ;
     ; CHECK-LABEL: name: trn2_undef
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
-    ; CHECK: [[TRN2_:%[0-9]+]]:_(<8 x s8>) = G_TRN2 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN2_]](<8 x s8>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d1
+    ; CHECK-NEXT: [[TRN2_:%[0-9]+]]:_(<8 x s8>) = G_TRN2 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN2_]](<8 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<8 x s8>) = COPY $d0
     %1:_(<8 x s8>) = COPY $d1
     %2:_(<8 x s8>) = G_SHUFFLE_VECTOR %0(<8 x s8>), %1, shufflemask(1, -1, 3, 11, 5, 13, -1, -1)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-truncstore.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-truncstore.mir
index e96fae4dde5b0..28b55a69d3c9b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-truncstore.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-truncstore.mir
@@ -8,9 +8,11 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: truncstore_s8
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %val:_(s32) = COPY $w1
-    ; CHECK: G_STORE %val(s32), %ptr(p0) :: (store (s8))
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %val:_(s32) = COPY $w1
+    ; CHECK-NEXT: G_STORE %val(s32), %ptr(p0) :: (store (s8))
     %ptr:_(p0) = COPY $x0
     %val:_(s32) = COPY $w1
     %trunc:_(s8) = G_TRUNC %val
@@ -23,10 +25,12 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: truncstore_vector
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %val:_(<4 x s32>) = COPY $q0
-    ; CHECK: %trunc:_(<4 x s8>) = G_TRUNC %val(<4 x s32>)
-    ; CHECK: G_STORE %trunc(<4 x s8>), %ptr(p0) :: (store (<4 x s8>))
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %val:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: %trunc:_(<4 x s8>) = G_TRUNC %val(<4 x s32>)
+    ; CHECK-NEXT: G_STORE %trunc(<4 x s8>), %ptr(p0) :: (store (<4 x s8>))
     %ptr:_(p0) = COPY $x0
     %val:_(<4 x s32>) = COPY $q0
     %trunc:_(<4 x s8>) = G_TRUNC %val
@@ -39,10 +43,12 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: truncstore_too_large
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %val:_(s128) = COPY $q0
-    ; CHECK: %trunc:_(s32) = G_TRUNC %val(s128)
-    ; CHECK: G_STORE %trunc(s32), %ptr(p0) :: (store (s32))
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %val:_(s128) = COPY $q0
+    ; CHECK-NEXT: %trunc:_(s32) = G_TRUNC %val(s128)
+    ; CHECK-NEXT: G_STORE %trunc(s32), %ptr(p0) :: (store (s32))
     %ptr:_(p0) = COPY $x0
     %val:_(s128) = COPY $q0
     %trunc:_(s32) = G_TRUNC %val

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
index a38086ef235c6..b3fb5a405135e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-vashr-vlshr.mir
@@ -13,11 +13,12 @@ body:             |
 
     ; CHECK-LABEL: name: ashr_v4s32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-    ; CHECK: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], [[C]](s32)
-    ; CHECK: $q0 = COPY [[VASHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[VASHR:%[0-9]+]]:_(<4 x s32>) = G_VASHR [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: $q0 = COPY [[VASHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 5
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -36,11 +37,12 @@ body:             |
 
     ; CHECK-LABEL: name: lshr_v4s32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-    ; CHECK: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], [[C]](s32)
-    ; CHECK: $q0 = COPY [[VLSHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<4 x s32>) = G_VLSHR [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: $q0 = COPY [[VLSHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 5
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -59,11 +61,12 @@ body:             |
 
     ; CHECK-LABEL: name: lshr_v8s16
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-    ; CHECK: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], [[C]](s32)
-    ; CHECK: $q0 = COPY [[VLSHR]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: [[VLSHR:%[0-9]+]]:_(<8 x s16>) = G_VLSHR [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: $q0 = COPY [[VLSHR]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s16>) = COPY $q0
     %1:_(s16) = G_CONSTANT i16 5
     %2:_(<8 x s16>) = G_BUILD_VECTOR %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16)
@@ -82,12 +85,13 @@ body:             |
 
     ; CHECK-LABEL: name: imm_too_large
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
-    ; CHECK: [[DUP:%[0-9]+]]:_(<4 x s32>) = G_DUP [[C]](s32)
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[DUP]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[DUP:%[0-9]+]]:_(<4 x s32>) = G_DUP [[C]](s32)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[DUP]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[LSHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 40
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -106,12 +110,13 @@ body:             |
 
     ; CHECK-LABEL: name: imm_zero
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[LSHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -130,13 +135,14 @@ body:             |
 
     ; CHECK-LABEL: name: imm_not_splat
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[LSHR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C1]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[BUILD_VECTOR]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[LSHR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 4
     %4:_(s32) = G_CONSTANT i32 6

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-zip.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-zip.mir
index bcf088287f46a..2fc90e4bf05a1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-zip.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-zip.mir
@@ -161,7 +161,6 @@ body:             |
 
     ; This will fail because it expects 3 to be the second element of the
     ; shuffle vector mask.
-    ;
     ; CHECK-LABEL: name: zip2_no_combine_idx_mismatch
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
@@ -189,7 +188,6 @@ body:             |
 
     ; This will fail because it expects 2 to be the second element of the
     ; shuffle vector mask.
-    ;
     ; CHECK-LABEL: name: zip1_no_combine_idx_mismatch
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}
@@ -214,7 +212,6 @@ body:             |
     liveins: $q0, $q1
 
     ; zip1/zip2 must have 0 or 1 as the first element in the shuffle mask.
-    ;
     ; CHECK-LABEL: name: no_combine_first_elt_of_mask_must_be_zero_or_one
     ; CHECK: liveins: $q0, $q1
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extractvec-faddp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extractvec-faddp.mir
index 790634563068a..d6288d10b2b79 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extractvec-faddp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-extractvec-faddp.mir
@@ -13,14 +13,15 @@ body:             |
 
     ; CHECK-LABEL: name: f64_faddp
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC1:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C1]](s64)
-    ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[EVEC]], [[EVEC1]]
-    ; CHECK: $d0 = COPY [[FADD]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C1]](s64)
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[EVEC]], [[EVEC1]]
+    ; CHECK-NEXT: $d0 = COPY [[FADD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s64>) = COPY $q0
     %2:_(<2 x s64>) = G_IMPLICIT_DEF
     %5:_(s64) = G_CONSTANT i64 0
@@ -44,14 +45,15 @@ body:             |
 
     ; CHECK-LABEL: name: f64_faddp_commuted
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC1:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C1]](s64)
-    ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[EVEC]], [[EVEC1]]
-    ; CHECK: $d0 = COPY [[FADD]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C1]](s64)
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[EVEC]], [[EVEC1]]
+    ; CHECK-NEXT: $d0 = COPY [[FADD]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s64>) = COPY $q0
     %2:_(<2 x s64>) = G_IMPLICIT_DEF
     %5:_(s64) = G_CONSTANT i64 0
@@ -75,14 +77,15 @@ body:             |
 
     ; CHECK-LABEL: name: f32_faddp
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C1]](s64)
-    ; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[EVEC]], [[EVEC1]]
-    ; CHECK: $s0 = COPY [[FADD]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C1]](s64)
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[EVEC]], [[EVEC1]]
+    ; CHECK-NEXT: $s0 = COPY [[FADD]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(<2 x s32>) = COPY $d0
     %2:_(<2 x s32>) = G_IMPLICIT_DEF
     %5:_(s64) = G_CONSTANT i64 0
@@ -106,14 +109,15 @@ body:             |
 
     ; CHECK-LABEL: name: f32_faddp_commuted
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C1]](s64)
-    ; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[EVEC]], [[EVEC1]]
-    ; CHECK: $s0 = COPY [[FADD]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C1]](s64)
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[EVEC]], [[EVEC1]]
+    ; CHECK-NEXT: $s0 = COPY [[FADD]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(<2 x s32>) = COPY $d0
     %2:_(<2 x s32>) = G_IMPLICIT_DEF
     %5:_(s64) = G_CONSTANT i64 0
@@ -137,14 +141,15 @@ body:             |
 
     ; CHECK-LABEL: name: wrong_extract_idx
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<2 x s64>), [[DEF]], shufflemask(1, undef)
-    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[SHUF]], [[COPY]]
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[FADD]](<2 x s64>), [[C]](s64)
-    ; CHECK: $d0 = COPY [[EVEC]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<2 x s64>), [[DEF]], shufflemask(1, undef)
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[SHUF]], [[COPY]]
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[FADD]](<2 x s64>), [[C]](s64)
+    ; CHECK-NEXT: $d0 = COPY [[EVEC]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s64>) = COPY $q0
     %2:_(<2 x s64>) = G_IMPLICIT_DEF
     %5:_(s64) = G_CONSTANT i64 1
@@ -168,14 +173,15 @@ body:             |
 
     ; CHECK-LABEL: name: wrong_shuffle_mask
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
-    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<2 x s64>), [[DEF]], shufflemask(0, undef)
-    ; CHECK: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[SHUF]], [[COPY]]
-    ; CHECK: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[FADD]](<2 x s64>), [[C]](s64)
-    ; CHECK: $d0 = COPY [[EVEC]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY]](<2 x s64>), [[DEF]], shufflemask(0, undef)
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(<2 x s64>) = G_FADD [[SHUF]], [[COPY]]
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:_(s64) = G_EXTRACT_VECTOR_ELT [[FADD]](<2 x s64>), [[C]](s64)
+    ; CHECK-NEXT: $d0 = COPY [[EVEC]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s64>) = COPY $q0
     %2:_(<2 x s64>) = G_IMPLICIT_DEF
     %5:_(s64) = G_CONSTANT i64 0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
index 0c624d7cc224d..8e0121f922849 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
@@ -10,15 +10,15 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; or (sext X), (sext Y) --> sext (or X, Y)
-    ;
     ; CHECK-LABEL: name: or_combine_sext
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
-    ; CHECK: %logic_op:_(s64) = G_SEXT [[OR]](s32)
-    ; CHECK: $x0 = COPY %logic_op(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+    ; CHECK-NEXT: %logic_op:_(s64) = G_SEXT [[OR]](s32)
+    ; CHECK-NEXT: $x0 = COPY %logic_op(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %hand1:_(s64) = G_SEXT %x(s32)
@@ -37,18 +37,18 @@ body:             |
     liveins: $w0, $w1
     ; Post-legalization, we should not perform the optimization here, since
     ; it would create an illegal G_OR.
-    ;
     ; CHECK-LABEL: name: illegal_ty
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x_wide:_(s32) = COPY $w0
-    ; CHECK: %y_wide:_(s32) = COPY $w1
-    ; CHECK: %x:_(s1) = G_TRUNC %x_wide(s32)
-    ; CHECK: %y:_(s1) = G_TRUNC %y_wide(s32)
-    ; CHECK: %hand1:_(s64) = G_SEXT %x(s1)
-    ; CHECK: %hand2:_(s64) = G_SEXT %y(s1)
-    ; CHECK: %logic_op:_(s64) = G_OR %hand1, %hand2
-    ; CHECK: $x0 = COPY %logic_op(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x_wide:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y_wide:_(s32) = COPY $w1
+    ; CHECK-NEXT: %x:_(s1) = G_TRUNC %x_wide(s32)
+    ; CHECK-NEXT: %y:_(s1) = G_TRUNC %y_wide(s32)
+    ; CHECK-NEXT: %hand1:_(s64) = G_SEXT %x(s1)
+    ; CHECK-NEXT: %hand2:_(s64) = G_SEXT %y(s1)
+    ; CHECK-NEXT: %logic_op:_(s64) = G_OR %hand1, %hand2
+    ; CHECK-NEXT: $x0 = COPY %logic_op(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x_wide:_(s32) = COPY $w0
     %y_wide:_(s32) = COPY $w1
     %x:_(s1) = G_TRUNC %x_wide
@@ -68,8 +68,14 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; Post-legalization, we must not crash here.
-    ;
     ; CHECK-LABEL: name: dont_combine_physreg
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %logic_op:_(s32) = G_OR %x, %y
+    ; CHECK-NEXT: $w0 = COPY %logic_op(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %logic_op:_(s32) = G_OR %x, %y

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-mulpow2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-mulpow2.mir
index d8ba3a692adde..7cba6b78c5b4e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-mulpow2.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-mulpow2.mir
@@ -14,13 +14,15 @@ body:             |
 
     ; CHECK-LABEL: name: dont_combine_ptr_add
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %reg0:_(s64) = COPY $x0
-    ; CHECK: %ptr:_(p0) = COPY $x2
-    ; CHECK: %cst:_(s64) = G_CONSTANT i64 6
-    ; CHECK: %mul:_(s64) = nsw G_MUL %reg0, %cst
-    ; CHECK: %ptr_add:_(p0) = G_PTR_ADD %ptr, %mul(s64)
-    ; CHECK: $x0 = COPY %ptr_add(p0)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %reg0:_(s64) = COPY $x0
+    ; CHECK-NEXT: %reg1:_(s64) = COPY $x1
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x2
+    ; CHECK-NEXT: %cst:_(s64) = G_CONSTANT i64 6
+    ; CHECK-NEXT: %mul:_(s64) = nsw G_MUL %reg0, %cst
+    ; CHECK-NEXT: %ptr_add:_(p0) = G_PTR_ADD %ptr, %mul(s64)
+    ; CHECK-NEXT: $x0 = COPY %ptr_add(p0)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %reg0:_(s64) = COPY $x0
     %reg1:_(s64) = COPY $x1
     %ptr:_(p0) = COPY $x2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
index 98a9cb7624edc..2bb4f00bcde72 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-rotate.mir
@@ -16,11 +16,12 @@ body:             |
 
     ; CHECK-LABEL: name: rotl
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[C]](s64)
-    ; CHECK: $w0 = COPY [[ROTL]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[ROTL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %5:_(s64) = G_CONSTANT i64 -16
     %2:_(s32) = G_ROTL %0, %5(s64)
@@ -41,11 +42,12 @@ body:             |
 
     ; CHECK-LABEL: name: rotr
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; CHECK: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[COPY]], [[C]](s64)
-    ; CHECK: $w0 = COPY [[ROTR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK-NEXT: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[ROTR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %5:_(s64) = G_CONSTANT i64 -16
     %2:_(s32) = G_ROTR %0, %5(s64)
@@ -66,11 +68,12 @@ body:             |
 
     ; CHECK-LABEL: name: rotl_bitwidth_cst
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[C]](s64)
-    ; CHECK: $w0 = COPY [[ROTL]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[ROTL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %5:_(s64) = G_CONSTANT i64 32
     %2:_(s32) = G_ROTL %0, %5(s64)
@@ -91,11 +94,12 @@ body:             |
 
     ; CHECK-LABEL: name: rotl_bitwidth_minus_one_cst
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
-    ; CHECK: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[C]](s64)
-    ; CHECK: $w0 = COPY [[ROTL]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+    ; CHECK-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[ROTL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %5:_(s64) = G_CONSTANT i64 31
     %2:_(s32) = G_ROTL %0, %5(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-constrain-new-regop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-constrain-new-regop.mir
index a6bdcf26f3337..e53380c18beb1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-constrain-new-regop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postselectopt-constrain-new-regop.mir
@@ -17,36 +17,40 @@ frameInfo:
 body:             |
   ; CHECK-LABEL: name: pluto
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   liveins: $w1, $x0, $x2
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32sp = COPY $w1
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr64sp = COPY $x2
-  ; CHECK:   [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
-  ; CHECK:   [[COPY3:%[0-9]+]]:fpr32 = COPY [[DEF]]
-  ; CHECK:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 872415232
-  ; CHECK:   [[COPY4:%[0-9]+]]:fpr32 = COPY [[MOVi32imm]]
-  ; CHECK:   FCMPSrr [[COPY3]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
-  ; CHECK:   [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-  ; CHECK:   [[SUBWri:%[0-9]+]]:gpr32common = SUBWri [[COPY1]], 1, 0
-  ; CHECK:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[SUBWri]], %subreg.sub_32
-  ; CHECK:   [[COPY5:%[0-9]+]]:fpr32 = COPY [[DEF]]
-  ; CHECK:   FCMPSrr [[COPY5]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
-  ; CHECK:   [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-  ; CHECK:   [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[CSINCWr]], [[CSINCWr1]]
-  ; CHECK:   TBNZW [[EORWrr]], 0, %bb.2
-  ; CHECK:   B %bb.1
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 60, 59
-  ; CHECK:   [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[UBFMXri]], 0, 0 :: (load (s32))
-  ; CHECK:   [[COPY6:%[0-9]+]]:fpr32 = COPY [[DEF]]
-  ; CHECK:   [[FMULSrr:%[0-9]+]]:fpr32 = FMULSrr [[COPY6]], [[LDRSroX]], implicit $fpcr
-  ; CHECK:   [[COPY7:%[0-9]+]]:fpr32 = COPY [[DEF]]
-  ; CHECK:   [[FADDSrr:%[0-9]+]]:fpr32 = FADDSrr [[FMULSrr]], [[COPY7]], implicit $fpcr
-  ; CHECK:   STRSui [[FADDSrr]], [[COPY2]], 0 :: (store (s32))
-  ; CHECK: bb.2:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $w1, $x0, $x2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32sp = COPY $w1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr64sp = COPY $x2
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:fpr32 = COPY [[DEF]]
+  ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 872415232
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:fpr32 = COPY [[MOVi32imm]]
+  ; CHECK-NEXT:   FCMPSrr [[COPY3]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
+  ; CHECK-NEXT:   [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+  ; CHECK-NEXT:   [[SUBWri:%[0-9]+]]:gpr32common = SUBWri [[COPY1]], 1, 0
+  ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[SUBWri]], %subreg.sub_32
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY [[DEF]]
+  ; CHECK-NEXT:   FCMPSrr [[COPY5]], [[COPY4]], implicit-def $nzcv, implicit $fpcr
+  ; CHECK-NEXT:   [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+  ; CHECK-NEXT:   [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[CSINCWr]], [[CSINCWr1]]
+  ; CHECK-NEXT:   TBNZW [[EORWrr]], 0, %bb.2
+  ; CHECK-NEXT:   B %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 60, 59
+  ; CHECK-NEXT:   [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[UBFMXri]], 0, 0 :: (load (s32))
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY [[DEF]]
+  ; CHECK-NEXT:   [[FMULSrr:%[0-9]+]]:fpr32 = FMULSrr [[COPY6]], [[LDRSroX]], implicit $fpcr
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:fpr32 = COPY [[DEF]]
+  ; CHECK-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = FADDSrr [[FMULSrr]], [[COPY7]], implicit $fpcr
+  ; CHECK-NEXT:   STRSui [[FADDSrr]], [[COPY2]], 0 :: (store (s32))
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.1:
     liveins: $w1, $x0, $x2
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
index 8adf5b2d26bfa..199da9bee6aae 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-icmp-to-true-false-known-bits.mir
@@ -44,10 +44,11 @@ body:             |
 
     ; CHECK-LABEL: name: eq_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %cmp:_(s1) = G_ICMP intpred(eq), %cst(s32), %cst
@@ -65,10 +66,11 @@ body:             |
 
     ; CHECK-LABEL: name: ne_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst_1:_(s32) = G_CONSTANT i32 1
     %cst_2:_(s32) = G_CONSTANT i32 2
@@ -87,10 +89,11 @@ body:             |
 
     ; CHECK-LABEL: name: sge_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 2
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -109,10 +112,11 @@ body:             |
 
     ; CHECK-LABEL: name: sgt_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 3
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -131,10 +135,11 @@ body:             |
 
     ; CHECK-LABEL: name: sle_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -154,10 +159,11 @@ body:             |
 
     ; CHECK-LABEL: name: slt_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 -1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -176,10 +182,11 @@ body:             |
 
     ; CHECK-LABEL: name: uge_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 2
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -198,10 +205,11 @@ body:             |
 
     ; CHECK-LABEL: name: ugt_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 -1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -220,10 +228,11 @@ body:             |
 
     ; CHECK-LABEL: name: ule_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -242,10 +251,11 @@ body:             |
 
     ; CHECK-LABEL: name: ult_true
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 true
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 0
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -264,10 +274,11 @@ body:             |
 
     ; CHECK-LABEL: name: eq_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 0
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -286,10 +297,11 @@ body:             |
 
     ; CHECK-LABEL: name: ne_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst_1:_(s32) = G_CONSTANT i32 1
     %cst_2:_(s32) = G_CONSTANT i32 1
@@ -308,10 +320,11 @@ body:             |
 
     ; CHECK-LABEL: name: sge_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 -1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -330,10 +343,11 @@ body:             |
 
     ; CHECK-LABEL: name: sgt_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -352,10 +366,11 @@ body:             |
 
     ; CHECK-LABEL: name: sle_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 3
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -375,10 +390,11 @@ body:             |
 
     ; CHECK-LABEL: name: slt_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 2
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -397,10 +413,11 @@ body:             |
 
     ; CHECK-LABEL: name: uge_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 0
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -419,10 +436,11 @@ body:             |
 
     ; CHECK-LABEL: name: ugt_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -441,10 +459,11 @@ body:             |
 
     ; CHECK-LABEL: name: ule_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 -1
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -463,10 +482,11 @@ body:             |
 
     ; CHECK-LABEL: name: ult_false
     ; CHECK: liveins: $x0
-    ; CHECK: %cmp:_(s1) = G_CONSTANT i1 false
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %cmp:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 2
     %load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
@@ -485,13 +505,14 @@ body:             |
 
     ; CHECK-LABEL: name: eq_unknown
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %cst:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32),
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(eq), %load_between_1_2(s32), %cst
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32),
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(eq), %load_between_1_2(s32), %cst
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !1)
@@ -510,13 +531,14 @@ body:             |
 
     ; CHECK-LABEL: name: ne_unknown
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:_(p0) = COPY $x0
-    ; CHECK: %cst:_(s32) = G_CONSTANT i32 1
-    ; CHECK: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32),
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %load_between_1_2(s32), %cst
-    ; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
-    ; CHECK: $w0 = COPY %cmp_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:_(p0) = COPY $x0
+    ; CHECK-NEXT: %cst:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32),
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(ne), %load_between_1_2(s32), %cst
+    ; CHECK-NEXT: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY %cmp_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %cst:_(s32) = G_CONSTANT i32 1
     %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !1)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
index a190a12dcd780..3dd681c2d21fb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern-align.mir
@@ -14,22 +14,25 @@ body:             |
     liveins: $x0, $x1
     ; NOT_STRICT-LABEL: name: misaligned
     ; NOT_STRICT: liveins: $x0, $x1
-    ; NOT_STRICT: %ptr:_(p0) = COPY $x1
-    ; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; NOT_STRICT: $w1 = COPY %full_load(s32)
-    ; NOT_STRICT: RET_ReallyLR implicit $w1
+    ; NOT_STRICT-NEXT: {{  $}}
+    ; NOT_STRICT-NEXT: %ptr:_(p0) = COPY $x1
+    ; NOT_STRICT-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; NOT_STRICT-NEXT: $w1 = COPY %full_load(s32)
+    ; NOT_STRICT-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; STRICT-LABEL: name: misaligned
     ; STRICT: liveins: $x0, $x1
-    ; STRICT: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; STRICT: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; STRICT: %ptr:_(p0) = COPY $x1
-    ; STRICT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; STRICT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; STRICT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; STRICT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; STRICT: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; STRICT: $w1 = COPY %full_load(s32)
-    ; STRICT: RET_ReallyLR implicit $w1
+    ; STRICT-NEXT: {{  $}}
+    ; STRICT-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; STRICT-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; STRICT-NEXT: %ptr:_(p0) = COPY $x1
+    ; STRICT-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; STRICT-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; STRICT-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; STRICT-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; STRICT-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; STRICT-NEXT: $w1 = COPY %full_load(s32)
+    ; STRICT-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -54,16 +57,19 @@ body:             |
 
     ; NOT_STRICT-LABEL: name: aligned
     ; NOT_STRICT: liveins: $x0, $x1
-    ; NOT_STRICT: %ptr:_(p0) = COPY $x1
-    ; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
-    ; NOT_STRICT: $w1 = COPY %full_load(s32)
-    ; NOT_STRICT: RET_ReallyLR implicit $w1
+    ; NOT_STRICT-NEXT: {{  $}}
+    ; NOT_STRICT-NEXT: %ptr:_(p0) = COPY $x1
+    ; NOT_STRICT-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    ; NOT_STRICT-NEXT: $w1 = COPY %full_load(s32)
+    ; NOT_STRICT-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; STRICT-LABEL: name: aligned
     ; STRICT: liveins: $x0, $x1
-    ; STRICT: %ptr:_(p0) = COPY $x1
-    ; STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
-    ; STRICT: $w1 = COPY %full_load(s32)
-    ; STRICT: RET_ReallyLR implicit $w1
+    ; STRICT-NEXT: {{  $}}
+    ; STRICT-NEXT: %ptr:_(p0) = COPY $x1
+    ; STRICT-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    ; STRICT-NEXT: $w1 = COPY %full_load(s32)
+    ; STRICT-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
index c30fab32fccbf..131dcbf9906b4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-load-or-pattern.mir
@@ -21,23 +21,25 @@ body:             |
 
     ; s8* x = ...
     ; s32 y = (x[0] | (x[1] << 8)) | ((x[2] << 16) | (x[3] << 24))
-    ;
     ; -> Little endian: Load from x[0]
     ; -> Big endian: Load from x[0] + BSWAP
 
     ; LITTLE-LABEL: name: s8_loads_to_s32_little_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: s8_loads_to_s32_little_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
-    ; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -62,7 +64,6 @@ body:             |
     %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
 
     ; Note the shape of the tree:
-    ;
     ; byte byte byte  byte
     ;  \   /      \  /
     ;    OR        OR
@@ -87,23 +88,25 @@ body:             |
 
     ; s8* x = ...
     ; s32 y = (x[0] << 24 | (x[1] << 16)) | ((x[2] << 8) | x[3]))
-    ;
     ; -> Little endian: Load from x[0] + BSWAP
     ; -> Big endian: Load from x[0]
 
     ; LITTLE-LABEL: name: s8_loads_to_s32_big_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
-    ; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: s8_loads_to_s32_big_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -142,26 +145,27 @@ body:             |
     liveins: $x0, $x1
 
     ; Slightly different OR tree.
-    ;
     ; s8* x = ...
     ; s32 y = (((x[0] | (x[1] << 8)) | (x[2] << 16)) | (x[3] << 24))
-    ;
     ; -> Little endian: Load from x[0]
     ; -> Big endian: Load from x[0] + BSWAP
 
     ; LITTLE-LABEL: name: different_or_pattern
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: different_or_pattern
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
-    ; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -186,7 +190,6 @@ body:             |
     %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
 
     ; Note the shape of the tree:
-    ;
     ; byte   byte
     ;  \    /
     ;   OR_1   byte
@@ -212,23 +215,25 @@ body:             |
 
     ; s16* x = ...
     ; s32 y = x[0] | (x[1] << 16)
-    ;
     ; -> Little endian: Load from x[0]
     ; -> Big endian: Load from x[0] + BSWAP
 
     ; LITTLE-LABEL: name: s16_loads_to_s32_little_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: s16_loads_to_s32_little_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; BIG-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -253,23 +258,25 @@ body:             |
 
     ; s16 *x = ...
     ; s32 y = x[1] | (x[0] << 16)
-    ;
     ; -> Little endian: Load from x[0] + BSWAP
     ; -> Big endian: Load from x[0]
 
     ; LITTLE-LABEL: name: s16_loads_to_s32_big_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: s16_loads_to_s32_big_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -294,23 +301,25 @@ body:             |
 
     ; s16 *x = ...
     ; s32 y = (x[0] | (x[1] << 16)) | ((x[2] << 32) | (x[3] << 48))
-    ;
     ; -> Little endian: Load from x[0]
     ; -> Big endian: Load from x[0] + BSWAP
 
     ; LITTLE-LABEL: name: s16_loads_to_s64_little_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
-    ; LITTLE: $x1 = COPY %full_load(s64)
-    ; LITTLE: RET_ReallyLR implicit $x1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
+    ; LITTLE-NEXT: $x1 = COPY %full_load(s64)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $x1
+    ;
     ; BIG-LABEL: name: s16_loads_to_s64_little_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
-    ; BIG: %full_load:_(s64) = G_BSWAP [[LOAD]]
-    ; BIG: $x1 = COPY %full_load(s64)
-    ; BIG: RET_ReallyLR implicit $x1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
+    ; BIG-NEXT: %full_load:_(s64) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $x1 = COPY %full_load(s64)
+    ; BIG-NEXT: RET_ReallyLR implicit $x1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -351,23 +360,25 @@ body:             |
 
     ; s16 *x = ...
     ; s64 y = (x[3] | (x[2] << 16)) | ((x[1] << 32) | (x[0] << 48))
-    ;
     ; -> Little endian: Load from x[0] + BSWAP
     ; -> Big endian: Load from x[0]
 
     ; LITTLE-LABEL: name: s16_loads_to_s64_big_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
-    ; LITTLE: %full_load:_(s64) = G_BSWAP [[LOAD]]
-    ; LITTLE: $x1 = COPY %full_load(s64)
-    ; LITTLE: RET_ReallyLR implicit $x1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
+    ; LITTLE-NEXT: %full_load:_(s64) = G_BSWAP [[LOAD]]
+    ; LITTLE-NEXT: $x1 = COPY %full_load(s64)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $x1
+    ;
     ; BIG-LABEL: name: s16_loads_to_s64_big_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
-    ; BIG: $x1 = COPY %full_load(s64)
-    ; BIG: RET_ReallyLR implicit $x1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
+    ; BIG-NEXT: $x1 = COPY %full_load(s64)
+    ; BIG-NEXT: RET_ReallyLR implicit $x1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -408,27 +419,29 @@ body:             |
 
     ; s8* x = ...
     ; s32 y = (x[1] | (x[2] << 8)) | ((x[3] << 16) | (x[4] << 24))
-    ;
     ; -> Little endian: Load from x[1]
     ; -> Big endian: Load from x[1] + BSWAP
 
     ; LITTLE-LABEL: name: nonzero_start_idx_positive_little_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %ptr:_(p0) = COPY $x0
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x0
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: nonzero_start_idx_positive_little_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %ptr:_(p0) = COPY $x0
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
-    ; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x0
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -470,27 +483,29 @@ body:             |
 
     ; s8* x = ...
     ; s32 y = (x[4] | (x[3] << 8)) | ((x[2] << 16) | (x[1] << 24))
-    ;
     ; -> Little endian: Load from x[1] + BSWAP
     ; -> Big endian: Load from x[1]
 
     ; LITTLE-LABEL: name: nonzero_start_idx_positive_big_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %ptr:_(p0) = COPY $x0
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
-    ; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x0
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: nonzero_start_idx_positive_big_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %ptr:_(p0) = COPY $x0
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x0
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -532,27 +547,29 @@ body:             |
 
     ; s8* x = ...
     ; s32 y = (x[-3] | (x[-2] << 8)) | ((x[-1] << 16) | (x[0] << 24))
-    ;
     ; -> Little endian: Load from x[-3]
     ; -> Big endian: Load from x[-3] + BSWAP
 
     ; LITTLE-LABEL: name: nonzero_start_idx_negative_little_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
-    ; LITTLE: %ptr:_(p0) = COPY $x0
-    ; LITTLE: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
-    ; LITTLE: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x0
+    ; LITTLE-NEXT: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: nonzero_start_idx_negative_little_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
-    ; BIG: %ptr:_(p0) = COPY $x0
-    ; BIG: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
-    ; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x0
+    ; BIG-NEXT: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_neg_1:_(s64) = G_CONSTANT i64 -1
     %cst_neg_2:_(s64) = G_CONSTANT i64 -2
     %cst_neg_3:_(s64) = G_CONSTANT i64 -3
@@ -592,27 +609,29 @@ body:             |
 
     ; s8* x = ...
     ; s32 y = (x[0] | (x[-1] << 8)) | ((x[-2] << 16) | (x[-3] << 24))
-    ;
     ; -> Little endian: Load from x[-3] + BSWAP
     ; -> Big endian: Load from x[-3]
 
     ; LITTLE-LABEL: name: nonzero_start_idx_negative_big_endian_pat
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
-    ; LITTLE: %ptr:_(p0) = COPY $x0
-    ; LITTLE: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
-    ; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
-    ; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x0
+    ; LITTLE-NEXT: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
+    ; LITTLE-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: nonzero_start_idx_negative_big_endian_pat
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
-    ; BIG: %ptr:_(p0) = COPY $x0
-    ; BIG: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
-    ; BIG: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_neg_3:_(s64) = G_CONSTANT i64 -3
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x0
+    ; BIG-NEXT: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s64)
+    ; BIG-NEXT: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_neg_1:_(s64) = G_CONSTANT i64 -1
     %cst_neg_2:_(s64) = G_CONSTANT i64 -2
     %cst_neg_3:_(s64) = G_CONSTANT i64 -3
@@ -655,28 +674,31 @@ body:             |
 
     ; LITTLE-LABEL: name: dont_combine_volatile
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_volatile
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -703,28 +725,31 @@ body:             |
 
     ; LITTLE-LABEL: name: dont_wrong_memop_size
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_wrong_memop_size
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -748,34 +773,36 @@ body:             |
     liveins: $x0, $x1
 
     ; This is not equivalent to a 32-bit load with/without a BSWAP:
-    ;
     ; s16 *x = ...
     ; s32 y = x[0] | (x[1] << 24)
 
     ; LITTLE-LABEL: name: dont_combine_wrong_offset
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_24:_(s32) = G_CONSTANT i32 24
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_24:_(s32) = G_CONSTANT i32 24
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_wrong_offset
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_24:_(s32) = G_CONSTANT i32 24
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_24:_(s32) = G_CONSTANT i32 24
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_24:_(s32) = G_CONSTANT i32 24
 
@@ -799,34 +826,36 @@ body:             |
     liveins: $x0, $x1
 
     ; This does not correspond to a 32-bit load with/without a BSWAP:
-    ;
     ; s16 *x = ...
     ; s32 y = x[0] | (x[1] << 8)
 
     ; LITTLE-LABEL: name: dont_combine_wrong_offset_2
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_wrong_offset_2
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_8:_(s32) = G_CONSTANT i32 8
 
@@ -850,46 +879,48 @@ body:             |
     liveins: $x0, $x1
 
     ; This is missing x[2], so we shouldn't combine:
-    ;
     ; s16 *x = ...
     ; s64 y = (x[0] | (x[1] << 16)) | (x[3] << 48)
 
     ; LITTLE-LABEL: name: dont_combine_missing_load
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_3:_(s64) = G_CONSTANT i64 3
-    ; LITTLE: %cst_16:_(s64) = G_CONSTANT i64 16
-    ; LITTLE: %cst_48:_(s64) = G_CONSTANT i64 48
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
-    ; LITTLE: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; LITTLE: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
-    ; LITTLE: %byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
-    ; LITTLE: %byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
-    ; LITTLE: %or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
-    ; LITTLE: %full_load:_(s64) = G_OR %or1, %byte6_byte7
-    ; LITTLE: $x1 = COPY %full_load(s64)
-    ; LITTLE: RET_ReallyLR implicit $x1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_3:_(s64) = G_CONSTANT i64 3
+    ; LITTLE-NEXT: %cst_16:_(s64) = G_CONSTANT i64 16
+    ; LITTLE-NEXT: %cst_48:_(s64) = G_CONSTANT i64 48
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
+    ; LITTLE-NEXT: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
+    ; LITTLE-NEXT: %byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
+    ; LITTLE-NEXT: %byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
+    ; LITTLE-NEXT: %or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
+    ; LITTLE-NEXT: %full_load:_(s64) = G_OR %or1, %byte6_byte7
+    ; LITTLE-NEXT: $x1 = COPY %full_load(s64)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $x1
+    ;
     ; BIG-LABEL: name: dont_combine_missing_load
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_3:_(s64) = G_CONSTANT i64 3
-    ; BIG: %cst_16:_(s64) = G_CONSTANT i64 16
-    ; BIG: %cst_48:_(s64) = G_CONSTANT i64 48
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
-    ; BIG: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; BIG: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
-    ; BIG: %byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
-    ; BIG: %byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
-    ; BIG: %or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
-    ; BIG: %full_load:_(s64) = G_OR %or1, %byte6_byte7
-    ; BIG: $x1 = COPY %full_load(s64)
-    ; BIG: RET_ReallyLR implicit $x1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_3:_(s64) = G_CONSTANT i64 3
+    ; BIG-NEXT: %cst_16:_(s64) = G_CONSTANT i64 16
+    ; BIG-NEXT: %cst_48:_(s64) = G_CONSTANT i64 48
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
+    ; BIG-NEXT: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; BIG-NEXT: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
+    ; BIG-NEXT: %byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
+    ; BIG-NEXT: %byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
+    ; BIG-NEXT: %or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
+    ; BIG-NEXT: %full_load:_(s64) = G_OR %or1, %byte6_byte7
+    ; BIG-NEXT: $x1 = COPY %full_load(s64)
+    ; BIG-NEXT: RET_ReallyLR implicit $x1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_3:_(s64) = G_CONSTANT i64 3
 
@@ -926,28 +957,31 @@ body:             |
 
     ; LITTLE-LABEL: name: dont_combine_
diff erent_addr_spaces
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_
diff erent_addr_spaces
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -971,56 +1005,58 @@ body:             |
     liveins: $x0, $x1
 
     ; If two of the G_PTR_ADDs have the same index, then don't combine.
-    ;
     ; sN *x = ...
     ; sM y = (x[i] << A) | (x[i] << B) ...
 
     ; LITTLE-LABEL: name: dont_combine_duplicate_idx
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %reused_idx:_(s64) = G_CONSTANT i64 2
-    ; LITTLE: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %cst_24:_(s32) = G_CONSTANT i32 24
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
-    ; LITTLE: %also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
-    ; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
-    ; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
-    ; LITTLE: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
-    ; LITTLE: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
-    ; LITTLE: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
-    ; LITTLE: %or1:_(s32) = G_OR %byte0, %byte1
-    ; LITTLE: %or2:_(s32) = G_OR %byte2, %byte3
-    ; LITTLE: %full_load:_(s32) = G_OR %or1, %or2
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %reused_idx:_(s64) = G_CONSTANT i64 2
+    ; LITTLE-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %cst_24:_(s32) = G_CONSTANT i32 24
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
+    ; LITTLE-NEXT: %also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
+    ; LITTLE-NEXT: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
+    ; LITTLE-NEXT: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
+    ; LITTLE-NEXT: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
+    ; LITTLE-NEXT: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
+    ; LITTLE-NEXT: %or1:_(s32) = G_OR %byte0, %byte1
+    ; LITTLE-NEXT: %or2:_(s32) = G_OR %byte2, %byte3
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %or1, %or2
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_duplicate_idx
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %reused_idx:_(s64) = G_CONSTANT i64 2
-    ; BIG: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %cst_24:_(s32) = G_CONSTANT i32 24
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
-    ; BIG: %also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
-    ; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; BIG: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
-    ; BIG: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
-    ; BIG: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
-    ; BIG: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
-    ; BIG: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
-    ; BIG: %or1:_(s32) = G_OR %byte0, %byte1
-    ; BIG: %or2:_(s32) = G_OR %byte2, %byte3
-    ; BIG: %full_load:_(s32) = G_OR %or1, %or2
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %reused_idx:_(s64) = G_CONSTANT i64 2
+    ; BIG-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %cst_24:_(s32) = G_CONSTANT i32 24
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
+    ; BIG-NEXT: %also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s64)
+    ; BIG-NEXT: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; BIG-NEXT: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
+    ; BIG-NEXT: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
+    ; BIG-NEXT: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
+    ; BIG-NEXT: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
+    ; BIG-NEXT: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
+    ; BIG-NEXT: %or1:_(s32) = G_OR %byte0, %byte1
+    ; BIG-NEXT: %or2:_(s32) = G_OR %byte2, %byte3
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %or1, %or2
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %reused_idx:_(s64) = G_CONSTANT i64 2
 
@@ -1058,56 +1094,58 @@ body:             |
     liveins: $x0, $x1
 
     ; If two of the G_SHLs have the same constant, then we should not combine.
-    ;
     ; sN *x = ...
     ; sM y = (x[i] << A) | (x[i+1] << A) ...
 
     ; LITTLE-LABEL: name: dont_combine_duplicate_offset
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_2:_(s64) = G_CONSTANT i64 2
-    ; LITTLE: %cst_3:_(s64) = G_CONSTANT i64 3
-    ; LITTLE: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; LITTLE: %duplicate_shl_cst:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
-    ; LITTLE: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
-    ; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
-    ; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
-    ; LITTLE: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
-    ; LITTLE: %duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
-    ; LITTLE: %duplicate_shl_2:_(s32) = nuw G_SHL %elt3, %duplicate_shl_cst(s32)
-    ; LITTLE: %or1:_(s32) = G_OR %byte0, %byte1
-    ; LITTLE: %or2:_(s32) = G_OR %duplicate_shl_1, %duplicate_shl_2
-    ; LITTLE: %full_load:_(s32) = G_OR %or1, %or2
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_2:_(s64) = G_CONSTANT i64 2
+    ; LITTLE-NEXT: %cst_3:_(s64) = G_CONSTANT i64 3
+    ; LITTLE-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; LITTLE-NEXT: %duplicate_shl_cst:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
+    ; LITTLE-NEXT: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
+    ; LITTLE-NEXT: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
+    ; LITTLE-NEXT: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
+    ; LITTLE-NEXT: %duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
+    ; LITTLE-NEXT: %duplicate_shl_2:_(s32) = nuw G_SHL %elt3, %duplicate_shl_cst(s32)
+    ; LITTLE-NEXT: %or1:_(s32) = G_OR %byte0, %byte1
+    ; LITTLE-NEXT: %or2:_(s32) = G_OR %duplicate_shl_1, %duplicate_shl_2
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %or1, %or2
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_duplicate_offset
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_2:_(s64) = G_CONSTANT i64 2
-    ; BIG: %cst_3:_(s64) = G_CONSTANT i64 3
-    ; BIG: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; BIG: %duplicate_shl_cst:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
-    ; BIG: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
-    ; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; BIG: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
-    ; BIG: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
-    ; BIG: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
-    ; BIG: %duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
-    ; BIG: %duplicate_shl_2:_(s32) = nuw G_SHL %elt3, %duplicate_shl_cst(s32)
-    ; BIG: %or1:_(s32) = G_OR %byte0, %byte1
-    ; BIG: %or2:_(s32) = G_OR %duplicate_shl_1, %duplicate_shl_2
-    ; BIG: %full_load:_(s32) = G_OR %or1, %or2
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_2:_(s64) = G_CONSTANT i64 2
+    ; BIG-NEXT: %cst_3:_(s64) = G_CONSTANT i64 3
+    ; BIG-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; BIG-NEXT: %duplicate_shl_cst:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
+    ; BIG-NEXT: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
+    ; BIG-NEXT: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; BIG-NEXT: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
+    ; BIG-NEXT: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
+    ; BIG-NEXT: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
+    ; BIG-NEXT: %duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
+    ; BIG-NEXT: %duplicate_shl_2:_(s32) = nuw G_SHL %elt3, %duplicate_shl_cst(s32)
+    ; BIG-NEXT: %or1:_(s32) = G_OR %byte0, %byte1
+    ; BIG-NEXT: %or2:_(s32) = G_OR %duplicate_shl_1, %duplicate_shl_2
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %or1, %or2
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -1147,58 +1185,60 @@ body:             |
 
     ; In this case, the lowest index load (e.g. x[0]) does not end up at byte
     ; offset 0. We shouldn't combine.
-    ;
     ; s8 *x = ...
     ; s32 y = (x[0] << 8) | (x[1]) | (x[2] << 16) ...
 
     ; LITTLE-LABEL: name: dont_combine_lowest_index_not_zero_offset
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_2:_(s64) = G_CONSTANT i64 2
-    ; LITTLE: %cst_3:_(s64) = G_CONSTANT i64 3
-    ; LITTLE: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %cst_24:_(s32) = G_CONSTANT i32 24
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
-    ; LITTLE: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
-    ; LITTLE: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
-    ; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
-    ; LITTLE: %byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
-    ; LITTLE: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
-    ; LITTLE: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
-    ; LITTLE: %or1:_(s32) = G_OR %byte0, %byte1
-    ; LITTLE: %or2:_(s32) = G_OR %byte2, %byte3
-    ; LITTLE: %full_load:_(s32) = G_OR %or1, %or2
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_2:_(s64) = G_CONSTANT i64 2
+    ; LITTLE-NEXT: %cst_3:_(s64) = G_CONSTANT i64 3
+    ; LITTLE-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %cst_24:_(s32) = G_CONSTANT i32 24
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
+    ; LITTLE-NEXT: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
+    ; LITTLE-NEXT: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; LITTLE-NEXT: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
+    ; LITTLE-NEXT: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
+    ; LITTLE-NEXT: %byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
+    ; LITTLE-NEXT: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
+    ; LITTLE-NEXT: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
+    ; LITTLE-NEXT: %or1:_(s32) = G_OR %byte0, %byte1
+    ; LITTLE-NEXT: %or2:_(s32) = G_OR %byte2, %byte3
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %or1, %or2
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_lowest_index_not_zero_offset
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_2:_(s64) = G_CONSTANT i64 2
-    ; BIG: %cst_3:_(s64) = G_CONSTANT i64 3
-    ; BIG: %cst_8:_(s32) = G_CONSTANT i32 8
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %cst_24:_(s32) = G_CONSTANT i32 24
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
-    ; BIG: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
-    ; BIG: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
-    ; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
-    ; BIG: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
-    ; BIG: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
-    ; BIG: %byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
-    ; BIG: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
-    ; BIG: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
-    ; BIG: %or1:_(s32) = G_OR %byte0, %byte1
-    ; BIG: %or2:_(s32) = G_OR %byte2, %byte3
-    ; BIG: %full_load:_(s32) = G_OR %or1, %or2
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_2:_(s64) = G_CONSTANT i64 2
+    ; BIG-NEXT: %cst_3:_(s64) = G_CONSTANT i64 3
+    ; BIG-NEXT: %cst_8:_(s32) = G_CONSTANT i32 8
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %cst_24:_(s32) = G_CONSTANT i32 24
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
+    ; BIG-NEXT: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
+    ; BIG-NEXT: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
+    ; BIG-NEXT: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
+    ; BIG-NEXT: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
+    ; BIG-NEXT: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
+    ; BIG-NEXT: %byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
+    ; BIG-NEXT: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
+    ; BIG-NEXT: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
+    ; BIG-NEXT: %or1:_(s32) = G_OR %byte0, %byte1
+    ; BIG-NEXT: %or2:_(s32) = G_OR %byte2, %byte3
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %or1, %or2
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_2:_(s64) = G_CONSTANT i64 2
     %cst_3:_(s64) = G_CONSTANT i64 3
@@ -1243,30 +1283,33 @@ body:             |
 
     ; LITTLE-LABEL: name: dont_combine_more_than_one_use_load
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: %extra_use:_(s32) = G_AND %full_load, %low_half
-    ; LITTLE: $w1 = COPY %extra_use(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: %extra_use:_(s32) = G_AND %full_load, %low_half
+    ; LITTLE-NEXT: $w1 = COPY %extra_use(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_more_than_one_use_load
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: %extra_use:_(s32) = G_AND %full_load, %low_half
-    ; BIG: $w1 = COPY %extra_use(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: %extra_use:_(s32) = G_AND %full_load, %low_half
+    ; BIG-NEXT: $w1 = COPY %extra_use(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -1295,30 +1338,33 @@ body:             |
 
     ; LITTLE-LABEL: name: dont_combine_more_than_one_use_shl
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: %extra_use:_(s32) = G_AND %full_load, %high_half
-    ; LITTLE: $w1 = COPY %extra_use(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: %extra_use:_(s32) = G_AND %full_load, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %extra_use(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_more_than_one_use_shl
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: %extra_use:_(s32) = G_AND %full_load, %high_half
-    ; BIG: $w1 = COPY %extra_use(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: %extra_use:_(s32) = G_AND %full_load, %high_half
+    ; BIG-NEXT: $w1 = COPY %extra_use(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -1345,34 +1391,37 @@ body:             |
 
     ; LITTLE-LABEL: name: dont_combine_store_between_same_mbb
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; LITTLE: %other_ptr:_(p0) = COPY $x1
-    ; LITTLE: %some_val:_(s32) = G_CONSTANT i32 12
-    ; LITTLE: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
-    ; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; LITTLE-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; LITTLE-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; LITTLE-NEXT: %other_ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %some_val:_(s32) = G_CONSTANT i32 12
+    ; LITTLE-NEXT: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
+    ; LITTLE-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; LITTLE-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; LITTLE-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: dont_combine_store_between_same_mbb
     ; BIG: liveins: $x0, $x1
-    ; BIG: %cst_1:_(s64) = G_CONSTANT i64 1
-    ; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-    ; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-    ; BIG: %other_ptr:_(p0) = COPY $x1
-    ; BIG: %some_val:_(s32) = G_CONSTANT i32 12
-    ; BIG: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
-    ; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-    ; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-    ; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %cst_1:_(s64) = G_CONSTANT i64 1
+    ; BIG-NEXT: %cst_16:_(s32) = G_CONSTANT i32 16
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+    ; BIG-NEXT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+    ; BIG-NEXT: %other_ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: %some_val:_(s32) = G_CONSTANT i32 12
+    ; BIG-NEXT: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
+    ; BIG-NEXT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+    ; BIG-NEXT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+    ; BIG-NEXT: %full_load:_(s32) = G_OR %low_half, %high_half
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
 
@@ -1399,48 +1448,59 @@ tracksRegLiveness: true
 body:             |
  ; LITTLE-LABEL: name: dont_combine_store_between_different_mbb
   ; LITTLE: bb.0:
-  ; LITTLE:   successors: %bb.1(0x80000000)
-  ; LITTLE:   liveins: $x0, $x1
-  ; LITTLE:   %cst_1:_(s64) = G_CONSTANT i64 1
-  ; LITTLE:   %cst_16:_(s32) = G_CONSTANT i32 16
-  ; LITTLE:   %ptr:_(p0) = COPY $x1
-  ; LITTLE:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-  ; LITTLE:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-  ; LITTLE: bb.1:
-  ; LITTLE:   successors: %bb.2(0x80000000)
-  ; LITTLE:   liveins: $x0, $x1
-  ; LITTLE:   %other_ptr:_(p0) = COPY $x1
-  ; LITTLE:   %some_val:_(s32) = G_CONSTANT i32 12
-  ; LITTLE:   G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
-  ; LITTLE: bb.2:
-  ; LITTLE:   liveins: $x0, $x1
-  ; LITTLE:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-  ; LITTLE:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-  ; LITTLE:   %full_load:_(s32) = G_OR %low_half, %high_half
-  ; LITTLE:   $w1 = COPY %full_load(s32)
-  ; LITTLE:   RET_ReallyLR implicit $w1
+  ; LITTLE-NEXT:   successors: %bb.1(0x80000000)
+  ; LITTLE-NEXT:   liveins: $x0, $x1
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT:   %cst_1:_(s64) = G_CONSTANT i64 1
+  ; LITTLE-NEXT:   %cst_16:_(s32) = G_CONSTANT i32 16
+  ; LITTLE-NEXT:   %ptr:_(p0) = COPY $x1
+  ; LITTLE-NEXT:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+  ; LITTLE-NEXT:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT: bb.1:
+  ; LITTLE-NEXT:   successors: %bb.2(0x80000000)
+  ; LITTLE-NEXT:   liveins: $x0, $x1
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT:   %other_ptr:_(p0) = COPY $x1
+  ; LITTLE-NEXT:   %some_val:_(s32) = G_CONSTANT i32 12
+  ; LITTLE-NEXT:   G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT: bb.2:
+  ; LITTLE-NEXT:   liveins: $x0, $x1
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+  ; LITTLE-NEXT:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+  ; LITTLE-NEXT:   %full_load:_(s32) = G_OR %low_half, %high_half
+  ; LITTLE-NEXT:   $w1 = COPY %full_load(s32)
+  ; LITTLE-NEXT:   RET_ReallyLR implicit $w1
+  ;
  ; BIG-LABEL: name: dont_combine_store_between_different_mbb
   ; BIG: bb.0:
-  ; BIG:   successors: %bb.1(0x80000000)
-  ; BIG:   liveins: $x0, $x1
-  ; BIG:   %cst_1:_(s64) = G_CONSTANT i64 1
-  ; BIG:   %cst_16:_(s32) = G_CONSTANT i32 16
-  ; BIG:   %ptr:_(p0) = COPY $x1
-  ; BIG:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-  ; BIG:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-  ; BIG: bb.1:
-  ; BIG:   successors: %bb.2(0x80000000)
-  ; BIG:   liveins: $x0, $x1
-  ; BIG:   %other_ptr:_(p0) = COPY $x1
-  ; BIG:   %some_val:_(s32) = G_CONSTANT i32 12
-  ; BIG:   G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
-  ; BIG: bb.2:
-  ; BIG:   liveins: $x0, $x1
-  ; BIG:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-  ; BIG:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-  ; BIG:   %full_load:_(s32) = G_OR %low_half, %high_half
-  ; BIG:   $w1 = COPY %full_load(s32)
-  ; BIG:   RET_ReallyLR implicit $w1
+  ; BIG-NEXT:   successors: %bb.1(0x80000000)
+  ; BIG-NEXT:   liveins: $x0, $x1
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT:   %cst_1:_(s64) = G_CONSTANT i64 1
+  ; BIG-NEXT:   %cst_16:_(s32) = G_CONSTANT i32 16
+  ; BIG-NEXT:   %ptr:_(p0) = COPY $x1
+  ; BIG-NEXT:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+  ; BIG-NEXT:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT: bb.1:
+  ; BIG-NEXT:   successors: %bb.2(0x80000000)
+  ; BIG-NEXT:   liveins: $x0, $x1
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT:   %other_ptr:_(p0) = COPY $x1
+  ; BIG-NEXT:   %some_val:_(s32) = G_CONSTANT i32 12
+  ; BIG-NEXT:   G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT: bb.2:
+  ; BIG-NEXT:   liveins: $x0, $x1
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+  ; BIG-NEXT:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+  ; BIG-NEXT:   %full_load:_(s32) = G_OR %low_half, %high_half
+  ; BIG-NEXT:   $w1 = COPY %full_load(s32)
+  ; BIG-NEXT:   RET_ReallyLR implicit $w1
  ; There is a store between the two loads, hidden away in a different MBB.
   ; We should not combine here.
 
@@ -1481,36 +1541,43 @@ tracksRegLiveness: true
 body:             |
  ; LITTLE-LABEL: name: different_mbb
   ; LITTLE: bb.0:
-  ; LITTLE:   successors: %bb.1(0x80000000)
-  ; LITTLE:   liveins: $x0, $x1
-  ; LITTLE:   %cst_1:_(s64) = G_CONSTANT i64 1
-  ; LITTLE:   %cst_16:_(s32) = G_CONSTANT i32 16
-  ; LITTLE:   %ptr:_(p0) = COPY $x1
-  ; LITTLE:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-  ; LITTLE:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-  ; LITTLE: bb.1:
-  ; LITTLE:   liveins: $x0, $x1
-  ; LITTLE:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-  ; LITTLE:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-  ; LITTLE:   %full_load:_(s32) = G_OR %low_half, %high_half
-  ; LITTLE:   $w1 = COPY %full_load(s32)
-  ; LITTLE:   RET_ReallyLR implicit $w1
+  ; LITTLE-NEXT:   successors: %bb.1(0x80000000)
+  ; LITTLE-NEXT:   liveins: $x0, $x1
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT:   %cst_1:_(s64) = G_CONSTANT i64 1
+  ; LITTLE-NEXT:   %cst_16:_(s32) = G_CONSTANT i32 16
+  ; LITTLE-NEXT:   %ptr:_(p0) = COPY $x1
+  ; LITTLE-NEXT:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+  ; LITTLE-NEXT:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT: bb.1:
+  ; LITTLE-NEXT:   liveins: $x0, $x1
+  ; LITTLE-NEXT: {{  $}}
+  ; LITTLE-NEXT:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+  ; LITTLE-NEXT:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+  ; LITTLE-NEXT:   %full_load:_(s32) = G_OR %low_half, %high_half
+  ; LITTLE-NEXT:   $w1 = COPY %full_load(s32)
+  ; LITTLE-NEXT:   RET_ReallyLR implicit $w1
+  ;
  ; BIG-LABEL: name: different_mbb
   ; BIG: bb.0:
-  ; BIG:   successors: %bb.1(0x80000000)
-  ; BIG:   liveins: $x0, $x1
-  ; BIG:   %cst_1:_(s64) = G_CONSTANT i64 1
-  ; BIG:   %cst_16:_(s32) = G_CONSTANT i32 16
-  ; BIG:   %ptr:_(p0) = COPY $x1
-  ; BIG:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
-  ; BIG:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
-  ; BIG: bb.1:
-  ; BIG:   liveins: $x0, $x1
-  ; BIG:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
-  ; BIG:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
-  ; BIG:   %full_load:_(s32) = G_OR %low_half, %high_half
-  ; BIG:   $w1 = COPY %full_load(s32)
-  ; BIG:   RET_ReallyLR implicit $w1
+  ; BIG-NEXT:   successors: %bb.1(0x80000000)
+  ; BIG-NEXT:   liveins: $x0, $x1
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT:   %cst_1:_(s64) = G_CONSTANT i64 1
+  ; BIG-NEXT:   %cst_16:_(s32) = G_CONSTANT i32 16
+  ; BIG-NEXT:   %ptr:_(p0) = COPY $x1
+  ; BIG-NEXT:   %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
+  ; BIG-NEXT:   %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT: bb.1:
+  ; BIG-NEXT:   liveins: $x0, $x1
+  ; BIG-NEXT: {{  $}}
+  ; BIG-NEXT:   %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
+  ; BIG-NEXT:   %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
+  ; BIG-NEXT:   %full_load:_(s32) = G_OR %low_half, %high_half
+  ; BIG-NEXT:   $w1 = COPY %full_load(s32)
+  ; BIG-NEXT:   RET_ReallyLR implicit $w1
   ; It should be possible to combine here, but it's not supported right now.
 
 
@@ -1545,17 +1612,20 @@ body:             |
 
     ; LITTLE-LABEL: name: load_first
     ; LITTLE: liveins: $x0, $x1
-    ; LITTLE: %ptr:_(p0) = COPY $x1
-    ; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; LITTLE: $w1 = COPY %full_load(s32)
-    ; LITTLE: RET_ReallyLR implicit $w1
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: %ptr:_(p0) = COPY $x1
+    ; LITTLE-NEXT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; LITTLE-NEXT: $w1 = COPY %full_load(s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w1
+    ;
     ; BIG-LABEL: name: load_first
     ; BIG: liveins: $x0, $x1
-    ; BIG: %ptr:_(p0) = COPY $x1
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
-    ; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: $w1 = COPY %full_load(s32)
-    ; BIG: RET_ReallyLR implicit $w1
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: %ptr:_(p0) = COPY $x1
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
+    ; BIG-NEXT: %full_load:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: $w1 = COPY %full_load(s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w1
     %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
     %cst_1:_(s64) = G_CONSTANT i64 1
     %cst_16:_(s32) = G_CONSTANT i32 16
@@ -1589,23 +1659,26 @@ body:             |
   liveins: $x0, $x1
     ; LITTLE-LABEL: name: store_between_loads_and_or
     ; LITTLE: liveins: $x0, $x1, $x0, $x1
-    ; LITTLE: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; LITTLE: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; LITTLE: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
-    ; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
-    ; LITTLE: G_STORE [[C]](s8), [[COPY1]](p0) :: (store (s8))
-    ; LITTLE: $w0 = COPY [[LOAD]](s32)
-    ; LITTLE: RET_ReallyLR implicit $w0
+    ; LITTLE-NEXT: {{  $}}
+    ; LITTLE-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; LITTLE-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; LITTLE-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+    ; LITTLE-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; LITTLE-NEXT: G_STORE [[C]](s8), [[COPY1]](p0) :: (store (s8))
+    ; LITTLE-NEXT: $w0 = COPY [[LOAD]](s32)
+    ; LITTLE-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; BIG-LABEL: name: store_between_loads_and_or
     ; BIG: liveins: $x0, $x1, $x0, $x1
-    ; BIG: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; BIG: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
-    ; BIG: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
-    ; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
-    ; BIG: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[LOAD]]
-    ; BIG: G_STORE [[C]](s8), [[COPY1]](p0) :: (store (s8))
-    ; BIG: $w0 = COPY [[BSWAP]](s32)
-    ; BIG: RET_ReallyLR implicit $w0
+    ; BIG-NEXT: {{  $}}
+    ; BIG-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; BIG-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; BIG-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
+    ; BIG-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32), align 1)
+    ; BIG-NEXT: [[BSWAP:%[0-9]+]]:_(s32) = G_BSWAP [[LOAD]]
+    ; BIG-NEXT: G_STORE [[C]](s8), [[COPY1]](p0) :: (store (s8))
+    ; BIG-NEXT: $w0 = COPY [[BSWAP]](s32)
+    ; BIG-NEXT: RET_ReallyLR implicit $w0
   %0:_(p0) = COPY $x0
   %1:_(p0) = COPY $x1
   %12:_(s8) = G_CONSTANT i8 1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
index fb19cda303d36..8f19649339823 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizer-combiner-narrow-binop-feeding-add.mir
@@ -10,14 +10,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: add_64_mask_32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ADD]](s32)
-    ; CHECK: $x0 = COPY [[ZEXT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ADD]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ZEXT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -34,14 +35,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: sub_64_mask_32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SUB]](s32)
-    ; CHECK: $x0 = COPY [[ZEXT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[SUB]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ZEXT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -58,14 +60,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: mul_64_mask_32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[MUL]](s32)
-    ; CHECK: $x0 = COPY [[ZEXT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[MUL]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ZEXT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -82,14 +85,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: and_64_mask_32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[AND]](s32)
-    ; CHECK: $x0 = COPY [[ZEXT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[AND]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ZEXT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -106,13 +110,14 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: or_64_mask_32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %mask_32:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK: %binop:_(s64) = G_SUB %binop_lhs, %binop_rhs
-    ; CHECK: %and:_(s64) = G_OR %binop, %mask_32
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: %mask_32:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: %binop:_(s64) = G_SUB %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: %and:_(s64) = G_OR %binop, %mask_32
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -129,14 +134,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: xor_64_mask_32
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[XOR]](s32)
-    ; CHECK: $x0 = COPY [[ZEXT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[XOR]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ZEXT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -153,14 +159,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: walk_thru_copy
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
-    ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
-    ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ADD]](s32)
-    ; CHECK: $x0 = COPY [[ZEXT]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC %binop_lhs(s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC %binop_rhs(s64)
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[ADD]](s32)
+    ; CHECK-NEXT: $x0 = COPY [[ZEXT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295
@@ -178,13 +185,14 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: dont_combine_zext_not_free_add_64_mask_16
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %mask_16:_(s64) = G_CONSTANT i64 65535
-    ; CHECK: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
-    ; CHECK: %and:_(s64) = G_AND %binop, %mask_16
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: %mask_16:_(s64) = G_CONSTANT i64 65535
+    ; CHECK-NEXT: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: %and:_(s64) = G_AND %binop, %mask_16
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_16:_(s64) = G_CONSTANT i64 65535
@@ -201,13 +209,14 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: dont_combine_zext_not_free_add_64_mask_8
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %mask_8:_(s64) = G_CONSTANT i64 255
-    ; CHECK: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
-    ; CHECK: %and:_(s64) = G_AND %binop, %mask_8
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: %mask_8:_(s64) = G_CONSTANT i64 255
+    ; CHECK-NEXT: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: %and:_(s64) = G_AND %binop, %mask_8
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_8:_(s64) = G_CONSTANT i64 255
@@ -224,13 +233,14 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: dont_combine_not_a_mask
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %not_a_mask:_(s64) = G_CONSTANT i64 26
-    ; CHECK: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
-    ; CHECK: %and:_(s64) = G_AND %binop, %not_a_mask
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: %not_a_mask:_(s64) = G_CONSTANT i64 26
+    ; CHECK-NEXT: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: %and:_(s64) = G_AND %binop, %not_a_mask
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %not_a_mask:_(s64) = G_CONSTANT i64 26
@@ -247,14 +257,15 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: dont_combine_more_than_one_use
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %not_a_mask:_(s64) = G_CONSTANT i64 26
-    ; CHECK: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
-    ; CHECK: %and:_(s64) = G_AND %binop, %not_a_mask
-    ; CHECK: %or:_(s64) = G_OR %and, %binop
-    ; CHECK: $x0 = COPY %or(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: %not_a_mask:_(s64) = G_CONSTANT i64 26
+    ; CHECK-NEXT: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: %and:_(s64) = G_AND %binop, %not_a_mask
+    ; CHECK-NEXT: %or:_(s64) = G_OR %and, %binop
+    ; CHECK-NEXT: $x0 = COPY %or(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %not_a_mask:_(s64) = G_CONSTANT i64 26
@@ -272,14 +283,15 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: dont_combine_vector
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %binop_lhs:_(<2 x s64>) = COPY $q0
-    ; CHECK: %binop_rhs:_(<2 x s64>) = COPY $q1
-    ; CHECK: %mask_elt:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK: %mask:_(<2 x s64>) = G_BUILD_VECTOR %mask_elt(s64), %mask_elt(s64)
-    ; CHECK: %binop:_(<2 x s64>) = G_ADD %binop_lhs, %binop_rhs
-    ; CHECK: %and:_(<2 x s64>) = G_AND %binop, %mask
-    ; CHECK: $q0 = COPY %and(<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: %binop_rhs:_(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: %mask_elt:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: %mask:_(<2 x s64>) = G_BUILD_VECTOR %mask_elt(s64), %mask_elt(s64)
+    ; CHECK-NEXT: %binop:_(<2 x s64>) = G_ADD %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: %and:_(<2 x s64>) = G_AND %binop, %mask
+    ; CHECK-NEXT: $q0 = COPY %and(<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %binop_lhs:_(<2 x s64>) = COPY $q0
     %binop_rhs:_(<2 x s64>) = COPY $q1
     %mask_elt:_(s64) = G_CONSTANT i64 4294967295
@@ -297,11 +309,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: dont_combine_add_64_mask_64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %binop_lhs:_(s64) = COPY $x0
-    ; CHECK: %binop_rhs:_(s64) = COPY $x1
-    ; CHECK: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
-    ; CHECK: $x0 = COPY %binop(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %binop_lhs:_(s64) = COPY $x0
+    ; CHECK-NEXT: %binop_rhs:_(s64) = COPY $x1
+    ; CHECK-NEXT: %binop:_(s64) = G_ADD %binop_lhs, %binop_rhs
+    ; CHECK-NEXT: $x0 = COPY %binop(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %binop_lhs:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_64:_(s64) = G_CONSTANT i64 18446744073709551615
@@ -318,11 +331,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: dont_combine_copy_from_physreg
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %copy_from_physreg:_(s64) = COPY $x0
-    ; CHECK: %mask_32:_(s64) = G_CONSTANT i64 4294967295
-    ; CHECK: %and:_(s64) = G_AND %copy_from_physreg, %mask_32
-    ; CHECK: $x0 = COPY %and(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy_from_physreg:_(s64) = COPY $x0
+    ; CHECK-NEXT: %mask_32:_(s64) = G_CONSTANT i64 4294967295
+    ; CHECK-NEXT: %and:_(s64) = G_AND %copy_from_physreg, %mask_32
+    ; CHECK-NEXT: $x0 = COPY %and(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy_from_physreg:_(s64) = COPY $x0
     %binop_rhs:_(s64) = COPY $x1
     %mask_32:_(s64) = G_CONSTANT i64 4294967295

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
index c606cbf004437..072bdc38330b7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads-s1.mir
@@ -23,11 +23,12 @@ body:             |
 
     ; CHECK-LABEL: name: test
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load (s1) from %ir.ptr)
-    ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
-    ; CHECK: $w0 = COPY [[ZEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load (s1) from %ir.ptr)
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ZEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(s1) = G_LOAD %0(p0) :: (load (s1) from %ir.ptr)
     %2:_(s8) = G_ZEXT %1(s1)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-funnel-shifts-to-rotates.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-funnel-shifts-to-rotates.mir
index 842df6531f3ca..0ccd3515d2b6e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-funnel-shifts-to-rotates.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-funnel-shifts-to-rotates.mir
@@ -17,11 +17,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rotr
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[COPY]], [[COPY1]](s32)
-    ; CHECK: $w0 = COPY [[ROTR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[ROTR:%[0-9]+]]:_(s32) = G_ROTR [[COPY]], [[COPY1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[ROTR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s32) = G_FSHR %0, %0, %1(s32)
@@ -45,11 +46,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rotl
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[COPY1]](s32)
-    ; CHECK: $w0 = COPY [[ROTL]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[ROTL:%[0-9]+]]:_(s32) = G_ROTL [[COPY]], [[COPY1]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[ROTL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s32) = G_FSHL %0, %0, %1(s32)
@@ -73,11 +75,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_vector_rotr
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[ROTR:%[0-9]+]]:_(<4 x s32>) = G_ROTR [[COPY]], [[COPY1]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[ROTR]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[ROTR:%[0-9]+]]:_(<4 x s32>) = G_ROTR [[COPY]], [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[ROTR]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_FSHR %0, %0, %1(<4 x s32>)
@@ -101,11 +104,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_vector_rotl
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
-    ; CHECK: [[ROTL:%[0-9]+]]:_(<4 x s32>) = G_ROTL [[COPY]], [[COPY1]](<4 x s32>)
-    ; CHECK: $q0 = COPY [[ROTL]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK-NEXT: [[ROTL:%[0-9]+]]:_(<4 x s32>) = G_ROTL [[COPY]], [[COPY1]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[ROTL]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = COPY $q1
     %2:_(<4 x s32>) = G_FSHL %0, %0, %1(<4 x s32>)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-icmp-redundant-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-icmp-redundant-trunc.mir
index 67a30ba2093c9..6a944c667cc82 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-icmp-redundant-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-icmp-redundant-trunc.mir
@@ -11,13 +11,14 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_trunc_sextload
     ; CHECK: liveins: $x0
-    ; CHECK: %v:_(p0) = COPY $x0
-    ; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %load(s64), [[C]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v:_(p0) = COPY $x0
+    ; CHECK-NEXT: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(ne), %load(s64), [[C]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %v:_(p0) = COPY $x0
     %load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
     %trunc:_(s32) = G_TRUNC %load(s64)
@@ -36,13 +37,14 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_trunc_sextload_eq
     ; CHECK: liveins: $x0
-    ; CHECK: %v:_(p0) = COPY $x0
-    ; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(eq), %load(s64), [[C]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v:_(p0) = COPY $x0
+    ; CHECK-NEXT: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(eq), %load(s64), [[C]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %v:_(p0) = COPY $x0
     %load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
     %trunc:_(s32) = G_TRUNC %load(s64)
@@ -61,14 +63,15 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_trunc_sextload_wrongpred
     ; CHECK: liveins: $x0
-    ; CHECK: %v:_(p0) = COPY $x0
-    ; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
-    ; CHECK: %trunc:_(s32) = G_TRUNC %load(s64)
-    ; CHECK: %zero:_(s32) = G_CONSTANT i32 0
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(slt), %trunc(s32), %zero
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v:_(p0) = COPY $x0
+    ; CHECK-NEXT: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
+    ; CHECK-NEXT: %trunc:_(s32) = G_TRUNC %load(s64)
+    ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(slt), %trunc(s32), %zero
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %v:_(p0) = COPY $x0
     %load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
     %trunc:_(s32) = G_TRUNC %load(s64)
@@ -87,14 +90,15 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_trunc_sextload_extend_mismatch
     ; CHECK: liveins: $x0
-    ; CHECK: %v:_(p0) = COPY $x0
-    ; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
-    ; CHECK: %trunc:_(s16) = G_TRUNC %load(s64)
-    ; CHECK: %zero:_(s16) = G_CONSTANT i16 0
-    ; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %trunc(s16), %zero
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v:_(p0) = COPY $x0
+    ; CHECK-NEXT: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
+    ; CHECK-NEXT: %trunc:_(s16) = G_TRUNC %load(s64)
+    ; CHECK-NEXT: %zero:_(s16) = G_CONSTANT i16 0
+    ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(ne), %trunc(s16), %zero
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %v:_(p0) = COPY $x0
     %load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
     %trunc:_(s16) = G_TRUNC %load(s64)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
index 6fc5dc461cf27..ca9afa7700fa8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-invert-cmp.mir
@@ -14,12 +14,13 @@ body:             |
 
     ; CHECK-LABEL: name: icmp
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1
@@ -38,12 +39,13 @@ body:             |
 
     ; CHECK-LABEL: name: fcmp
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ule), [[COPY]](s64), [[C]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FCMP]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1
@@ -62,14 +64,15 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_not_xor_with_1
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
-    ; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C1]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 0
@@ -90,13 +93,14 @@ body:             |
     ; for our aarch64's zero-or-one boolean contents.
     ; CHECK-LABEL: name: icmp_not_xor_with_wrong_bool_contents
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP]], [[C1]]
-    ; CHECK: $w0 = COPY [[XOR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ICMP]], [[C1]]
+    ; CHECK-NEXT: $w0 = COPY [[XOR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s32) = G_CONSTANT i32 7
@@ -114,17 +118,18 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_multiple_use
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
-    ; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C1]]
-    ; CHECK: %other_use:_(s1) = G_AND [[ICMP]], [[C1]]
-    ; CHECK: %other_use_ext:_(s32) = G_ANYEXT %other_use(s1)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: $w1 = COPY %other_use_ext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C1]]
+    ; CHECK-NEXT: %other_use:_(s1) = G_AND [[ICMP]], [[C1]]
+    ; CHECK-NEXT: %other_use_ext:_(s32) = G_ANYEXT %other_use(s1)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: $w1 = COPY %other_use_ext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1
@@ -146,13 +151,14 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_vector
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-    ; CHECK: %splat_op2:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK: [[ICMP:%[0-9]+]]:_(<4 x s1>) = G_ICMP intpred(sle), [[COPY]](<4 x s32>), %splat_op2
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[ICMP]](<4 x s1>)
-    ; CHECK: $q0 = COPY [[ANYEXT]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK-NEXT: %splat_op2:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<4 x s1>) = G_ICMP intpred(sle), [[COPY]](<4 x s32>), %splat_op2
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<4 x s32>) = G_ANYEXT [[ICMP]](<4 x s1>)
+    ; CHECK-NEXT: $q0 = COPY [[ANYEXT]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 5
     %splat_op2:_(<4 x s32>) = G_BUILD_VECTOR %1, %1, %1, %1
@@ -173,14 +179,15 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_and_icmp
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
-    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
-    ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1
@@ -201,14 +208,15 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_or_icmp
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
-    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
-    ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP]], [[ICMP1]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP]], [[ICMP1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1
@@ -229,16 +237,17 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_and_icmp_or_icmp
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
-    ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
-    ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
-    ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[C]]
-    ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[OR]], [[ICMP2]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ule), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP1]]
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s1) = G_AND [[OR]], [[ICMP2]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1
@@ -261,16 +270,17 @@ body:             |
 
     ; CHECK-LABEL: name: icmp_and_trunc
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
-    ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
-    ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
-    ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP]], [[TRUNC]]
-    ; CHECK: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[AND]], [[C1]]
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s64), [[C]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP]], [[TRUNC]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[AND]], [[C1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[XOR]](s1)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s1) = G_CONSTANT i1 1

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-not-really-equiv-insts.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-not-really-equiv-insts.mir
index 71eae18d4144e..b2d2f99777e4f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-not-really-equiv-insts.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-not-really-equiv-insts.mir
@@ -20,7 +20,6 @@ body:             |
     ; %load1 || %load2 == %load1 is not necessarily true, even though they
     ; both load from the same address. Whatever is in that address may be
     ; changed by another instruction which appears between them.
-    ;
     ; Check that we don't remove the G_OR.
 
     ; CHECK-LABEL: name: not_necessarily_equiv_loads

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-select.mir
index 17e680249ba01..94c93ba1f083e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-select.mir
@@ -18,9 +18,10 @@ body:             |
     ; Optimize (cond ? %a : %a) -> %a
     ; CHECK-LABEL: name: self
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %a:_(s32) = COPY $w0
-    ; CHECK: $w0 = COPY %a(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: $w0 = COPY %a(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %a:_(s32) = COPY $w0
     %cond_wide:gpr(s32) = COPY $w1
     %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
@@ -36,15 +37,14 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; Optimize (cond ? %a : %b) -> %a
-    ;
     ; This shows that we are looking through copies correctly and deduce that
     ; %b is a copy from %a.
-    ;
     ; CHECK-LABEL: name: self_with_copy
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %a:_(s32) = COPY $w0
-    ; CHECK: $w0 = COPY %a(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: $w0 = COPY %a(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %a:_(s32) = COPY $w0
     %b:_(s32) = COPY %a
     %cond_wide:gpr(s32) = COPY $w1
@@ -60,20 +60,19 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $w0, $w1
-    ;
     ; $w0 is overwritten by a copy from $w1, so the copies for %a and %b are
     ; not the same.
-    ;
     ; CHECK-LABEL: name: self_not_equivalent_overwrite_w0
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %a:_(s32) = COPY $w0
-    ; CHECK: $w0 = COPY $w1
-    ; CHECK: %b:_(s32) = COPY $w0
-    ; CHECK: %cond_wide:gpr(s32) = COPY $w1
-    ; CHECK: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
-    ; CHECK: %select:_(s32) = G_SELECT %cond(s1), %a, %b
-    ; CHECK: $w0 = COPY %select(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: $w0 = COPY $w1
+    ; CHECK-NEXT: %b:_(s32) = COPY $w0
+    ; CHECK-NEXT: %cond_wide:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
+    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %a, %b
+    ; CHECK-NEXT: $w0 = COPY %select(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %a:_(s32) = COPY $w0
     $w0 = COPY $w1
     %b:_(s32) = COPY $w0
@@ -90,20 +89,19 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $w0, $w1
-    ;
     ; $w0 is overwritten by a call which defines it implicitly, so the copies
     ; are not the same.
-    ;
     ; CHECK-LABEL: name: self_not_equivalent_overwrite_w0_implicit
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %a:_(s32) = COPY $w0
-    ; CHECK: BL @foo, implicit-def $w0
-    ; CHECK: %b:_(s32) = COPY $w0
-    ; CHECK: %cond_wide:gpr(s32) = COPY $w1
-    ; CHECK: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
-    ; CHECK: %select:_(s32) = G_SELECT %cond(s1), %a, %b
-    ; CHECK: $w0 = COPY %select(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: BL @foo, implicit-def $w0
+    ; CHECK-NEXT: %b:_(s32) = COPY $w0
+    ; CHECK-NEXT: %cond_wide:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
+    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %a, %b
+    ; CHECK-NEXT: $w0 = COPY %select(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %a:_(s32) = COPY $w0
     BL @foo, implicit-def $w0
     %b:_(s32) = COPY $w0
@@ -121,16 +119,16 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; In this case, the copies are not equivalent, so there is no optimization.
-    ;
    ; CHECK-LABEL: name: self_not_equivalent_different_copies
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %a:_(s32) = COPY $w0
-    ; CHECK: %b:_(s32) = COPY $w1
-    ; CHECK: %cond_wide:gpr(s32) = COPY $w1
-    ; CHECK: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
-    ; CHECK: %select:_(s32) = G_SELECT %cond(s1), %a, %b
-    ; CHECK: $w0 = COPY %select(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: %b:_(s32) = COPY $w1
+    ; CHECK-NEXT: %cond_wide:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
+    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %a, %b
+    ; CHECK-NEXT: $w0 = COPY %select(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %a:_(s32) = COPY $w0
     %b:_(s32) = COPY $w1
     %c:_(s32) = COPY %b
@@ -149,13 +147,13 @@ body:             |
     liveins: $w0, $w1
     ; We should walk through G_ASSERT_ZEXT as if it's a copy, and remove the
     ; G_SELECT.
-    ;
     ; CHECK-LABEL: name: self_with_assert_zext
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %a:_(s32) = COPY $w0
-    ; CHECK: %a_assert_zext:_(s32) = G_ASSERT_ZEXT %a, 16
-    ; CHECK: $w0 = COPY %a_assert_zext(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s32) = COPY $w0
+    ; CHECK-NEXT: %a_assert_zext:_(s32) = G_ASSERT_ZEXT %a, 16
+    ; CHECK-NEXT: $w0 = COPY %a_assert_zext(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %a:_(s32) = COPY $w0
     %a_assert_zext:_(s32) = G_ASSERT_ZEXT %a, 16
     %b:_(s32) = COPY %a_assert_zext

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-sextload-from-sextinreg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-sextload-from-sextinreg.mir
index b9a4a5fb580ee..e827341e909d9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-sextload-from-sextinreg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-sextload-from-sextinreg.mir
@@ -12,11 +12,12 @@ body:             |
 
     ; CHECK-LABEL: name: sextload_from_inreg
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXTLOAD]](s16)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXTLOAD]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16))
     %2:_(s16) = G_SEXT_INREG %1, 8
@@ -38,12 +39,13 @@ body:             |
     ; could cause mem dependence violations.
     ; CHECK-LABEL: name: sextload_from_inreg_across_store
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
-    ; CHECK: G_STORE [[COPY]](p0), [[COPY]](p0) :: (store (p0))
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXTLOAD]](s16)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
+    ; CHECK-NEXT: G_STORE [[COPY]](p0), [[COPY]](p0) :: (store (p0))
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXTLOAD]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(s16) = G_LOAD %0(p0) :: (load (s16))
     G_STORE %0(p0), %0(p0) :: (store (p0))
@@ -65,11 +67,12 @@ body:             |
 
     ; CHECK-LABEL: name: non_pow_2_inreg
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
-    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 24
-    ; CHECK: $w0 = COPY [[SEXT_INREG]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 24
+    ; CHECK-NEXT: $w0 = COPY [[SEXT_INREG]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(s32) = G_LOAD %0(p0) :: (load (s32))
     %2:_(s32) = G_SEXT_INREG %1, 24
@@ -89,12 +92,13 @@ body:             |
 
     ; CHECK-LABEL: name: atomic
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load acquire (s16))
-    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[LOAD]], 8
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load acquire (s16))
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[LOAD]], 8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(s16) = G_LOAD %0(p0) :: (load acquire (s16))
     %2:_(s16) = G_SEXT_INREG %1, 8
@@ -115,12 +119,13 @@ body:             |
 
     ; CHECK-LABEL: name: volatile
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (volatile load (s16))
-    ; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[LOAD]], 8
-    ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
-    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (volatile load (s16))
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[LOAD]], 8
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
+    ; CHECK-NEXT: $w0 = COPY [[ANYEXT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(p0) = COPY $x0
     %1:_(s16) = G_LOAD %0(p0) :: (volatile load (s16))
     %2:_(s16) = G_SEXT_INREG %1, 8

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-simplify-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-simplify-add.mir
index c8d3b9f87da0f..83a4db1843a09 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-simplify-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-simplify-add.mir
@@ -32,11 +32,12 @@ body:             |
     ; Fold (A + (0-B)) -> A - B
     ; CHECK-LABEL: name: pat2
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %copy1:_(s64) = COPY $x0
-    ; CHECK: %copy2:_(s64) = COPY $x1
-    ; CHECK: %add:_(s64) = G_SUB %copy1, %copy2
-    ; CHECK: $x0 = COPY %add(s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy1:_(s64) = COPY $x0
+    ; CHECK-NEXT: %copy2:_(s64) = COPY $x1
+    ; CHECK-NEXT: %add:_(s64) = G_SUB %copy1, %copy2
+    ; CHECK-NEXT: $x0 = COPY %add(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy1:_(s64) = COPY $x0
     %copy2:_(s64) = COPY $x1
     %zero:_(s64) = G_CONSTANT i64 0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
index de95527ceb1a2..d36b382672e60 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
@@ -27,7 +27,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x + 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_add
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -48,7 +47,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x * 0) -> 0
-    ;
     ; CHECK-LABEL: name: mul_0
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -70,7 +68,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x * 0) -> 0
-    ;
     ; CHECK-LABEL: name: mul_0_cant_replace
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -94,7 +91,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (0 / x) -> 0
-    ;
     ; CHECK-LABEL: name: sdiv_0
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -115,7 +111,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (0 / x) -> 0
-    ;
     ; CHECK-LABEL: name: udiv_0
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -136,7 +131,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (0 % x) -> 0
-    ;
     ; CHECK-LABEL: name: srem_0
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -157,7 +151,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (0 % x) -> 0
-    ;
     ; CHECK-LABEL: name: urem_0
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -179,7 +172,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x || 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_or
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -200,7 +192,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x | 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_xor
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -221,7 +212,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x << 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_shl
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -242,7 +232,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x ashr 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_ashr
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -263,7 +252,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Fold (x lshr 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_lshr
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -284,7 +272,6 @@ body:             |
   bb.1.entry:
     liveins: $w0
     ; Not an identity, no folding.
-    ;
     ; CHECK-LABEL: name: dont_fold_sub
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
@@ -326,7 +313,6 @@ body:             |
   bb.1.entry:
     liveins: $x0
     ; Fold (x + 0) -> x
-    ;
     ; CHECK-LABEL: name: right_ident_ptr_add
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
index 7db4526ea0702..3a10f47aa3068 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-undef.mir
@@ -210,9 +210,7 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $d0
-    ; Show that we don't do the combine when one of the vectors is not a
-    ; G_IMPLICIT_DEF.
-    ;
+    ; Show that we don't do the combine when one of the vectors is not a G_IMPLICIT_DEF.
     ; CHECK-LABEL: name: shufflevector_not_all_ops_undef
     ; CHECK: liveins: $d0
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-xor-of-and-with-same-reg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-xor-of-and-with-same-reg.mir
index e8254c03ab636..f0cd970602d55 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-xor-of-and-with-same-reg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-xor-of-and-with-same-reg.mir
@@ -12,13 +12,14 @@ body:             |
     ; (xor (and x, y), y) -> (and (not x), y)
     ; CHECK-LABEL: name: fold_scalar
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
-    ; CHECK: %xor:_(s32) = G_AND [[XOR]], %y
-    ; CHECK: $w0 = COPY %xor(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
+    ; CHECK-NEXT: %xor:_(s32) = G_AND [[XOR]], %y
+    ; CHECK-NEXT: $w0 = COPY %xor(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %and:_(s32) = G_AND %x, %y
@@ -36,14 +37,15 @@ body:             |
     ; Vector edition
     ; CHECK-LABEL: name: fold_vector
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %x:_(<2 x s32>) = COPY $x0
-    ; CHECK: %y:_(<2 x s32>) = COPY $x1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
-    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR %x, [[BUILD_VECTOR]]
-    ; CHECK: %xor:_(<2 x s32>) = G_AND [[XOR]], %y
-    ; CHECK: $x0 = COPY %xor(<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: %y:_(<2 x s32>) = COPY $x1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR %x, [[BUILD_VECTOR]]
+    ; CHECK-NEXT: %xor:_(<2 x s32>) = G_AND [[XOR]], %y
+    ; CHECK-NEXT: $x0 = COPY %xor(<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(<2 x s32>) = COPY $x0
     %y:_(<2 x s32>) = COPY $x1
     %and:_(<2 x s32>) = G_AND %x, %y
@@ -61,13 +63,14 @@ body:             |
     ; (xor (and y, x), y) -> (and (not x), y)
     ; CHECK-LABEL: name: fold_commuted_and
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
-    ; CHECK: %xor:_(s32) = G_AND [[XOR]], %y
-    ; CHECK: $w0 = COPY %xor(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
+    ; CHECK-NEXT: %xor:_(s32) = G_AND [[XOR]], %y
+    ; CHECK-NEXT: $w0 = COPY %xor(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %and:_(s32) = G_AND %y, %x
@@ -85,13 +88,14 @@ body:             |
     ; (xor y, (and x, y)) -> (and (not x), y)
     ; CHECK-LABEL: name: fold_commuted_xor
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
-    ; CHECK: %xor:_(s32) = G_AND [[XOR]], %y
-    ; CHECK: $w0 = COPY %xor(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
+    ; CHECK-NEXT: %xor:_(s32) = G_AND [[XOR]], %y
+    ; CHECK-NEXT: $w0 = COPY %xor(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %and:_(s32) = G_AND %x, %y
@@ -109,13 +113,14 @@ body:             |
     ; (xor y, (and x, y)) -> (and (not x), y)
     ; CHECK-LABEL: name: fold_commuted_xor_and
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
-    ; CHECK: %xor:_(s32) = G_AND [[XOR]], %y
-    ; CHECK: $w0 = COPY %xor(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, [[C]]
+    ; CHECK-NEXT: %xor:_(s32) = G_AND [[XOR]], %y
+    ; CHECK-NEXT: $w0 = COPY %xor(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %and:_(s32) = G_AND %y, %x
@@ -133,13 +138,14 @@ body:             |
     ; The G_AND does not share any registers with the G_XOR
    ; CHECK-LABEL: name: dont_fold_different_regs
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: %and:_(s32) = G_AND %x, %z
-    ; CHECK: %xor:_(s32) = G_XOR %and, %y
-    ; CHECK: $w0 = COPY %xor(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: %and:_(s32) = G_AND %x, %z
+    ; CHECK-NEXT: %xor:_(s32) = G_XOR %and, %y
+    ; CHECK-NEXT: $w0 = COPY %xor(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2
@@ -156,17 +162,17 @@ body:             |
   bb.0:
     liveins: $w0, $w1, $w2
     ; Don't fold when the G_AND is used outside the G_XOR.
-    ;
     ; CHECK-LABEL: name: dont_fold_more_than_one_use
     ; CHECK: liveins: $w0, $w1, $w2
-    ; CHECK: %x:_(s32) = COPY $w0
-    ; CHECK: %y:_(s32) = COPY $w1
-    ; CHECK: %z:_(s32) = COPY $w2
-    ; CHECK: %and:_(s32) = G_AND %x, %z
-    ; CHECK: %xor:_(s32) = G_XOR %and, %y
-    ; CHECK: %add:_(s32) = G_ADD %and, %xor
-    ; CHECK: $w0 = COPY %add(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:_(s32) = COPY $w0
+    ; CHECK-NEXT: %y:_(s32) = COPY $w1
+    ; CHECK-NEXT: %z:_(s32) = COPY $w2
+    ; CHECK-NEXT: %and:_(s32) = G_AND %x, %z
+    ; CHECK-NEXT: %xor:_(s32) = G_XOR %and, %y
+    ; CHECK-NEXT: %add:_(s32) = G_ADD %and, %xor
+    ; CHECK-NEXT: $w0 = COPY %add(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
     %y:_(s32) = COPY $w1
     %z:_(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
index 61a613551b075..34dbad5a94977 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
@@ -31,7 +31,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr32 = PHI [[CSELWr]], %bb.1, %8, %bb.2
-  ; CHECK-NEXT:   [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
+  ; CHECK-NEXT:   [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]], implicit $fpcr
   ; CHECK-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[FCVTHSr]], %subreg.hsub
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
   ; CHECK-NEXT:   STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
@@ -92,7 +92,7 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:fpr16 = PHI %7, %bb.2, [[COPY2]], %bb.1
-  ; CHECK-NEXT:   [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
+  ; CHECK-NEXT:   [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]], implicit $fpcr
   ; CHECK-NEXT:   STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `ptr undef`)
   ; CHECK-NEXT:   B %bb.2
   bb.0:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-ceil.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-ceil.mir
index e538aab695c1c..55ae4db241d00 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-ceil.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-ceil.mir
@@ -12,11 +12,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: load_gets_fpr
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:gpr(p0) = COPY $x0
-    ; CHECK: %load:fpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
-    ; CHECK: %fceil:fpr(s32) = G_FCEIL %load
-    ; CHECK: $s0 = COPY %fceil(s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: %load:fpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    ; CHECK-NEXT: %fceil:fpr(s32) = G_FCEIL %load
+    ; CHECK-NEXT: $s0 = COPY %fceil(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %ptr:_(p0) = COPY $x0
     %load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
     %fceil:_(s32) = G_FCEIL %load

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract-vector-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract-vector-elt.mir
index 3f1515955d3af..35bc36d472b1a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract-vector-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract-vector-elt.mir
@@ -35,11 +35,12 @@ body:             |
 
     ; CHECK-LABEL: name: v4s32_gpr
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
-    ; CHECK: [[EVEC:%[0-9]+]]:fpr(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C]](s64)
-    ; CHECK: $s0 = COPY [[EVEC]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:fpr(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C]](s64)
+    ; CHECK-NEXT: $s0 = COPY [[EVEC]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(<4 x s32>) = COPY $q0
     %2:_(s64) = G_CONSTANT i64 0
     %1:_(s32) = G_EXTRACT_VECTOR_ELT %0(<4 x s32>), %2(s64)
@@ -62,11 +63,12 @@ body:             |
 
     ; CHECK-LABEL: name: v2s64_fpr
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 2
-    ; CHECK: [[EVEC:%[0-9]+]]:fpr(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C]](s64)
-    ; CHECK: $d0 = COPY [[EVEC]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:fpr(s64) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s64>), [[C]](s64)
+    ; CHECK-NEXT: $d0 = COPY [[EVEC]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s64>) = COPY $q0
     %2:_(s64) = G_CONSTANT i64 2
     %1:_(s64) = G_EXTRACT_VECTOR_ELT %0(<2 x s64>), %2(s64)
@@ -89,11 +91,12 @@ body:             |
 
     ; CHECK-LABEL: name: v4s16_fpr
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s16>) = COPY $d0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 1
-    ; CHECK: [[EVEC:%[0-9]+]]:fpr(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s16>), [[C]](s64)
-    ; CHECK: $h0 = COPY [[EVEC]](s16)
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[EVEC:%[0-9]+]]:fpr(s16) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s16>), [[C]](s64)
+    ; CHECK-NEXT: $h0 = COPY [[EVEC]](s16)
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:_(<4 x s16>) = COPY $d0
     %2:_(s64) = G_CONSTANT i64 1
     %1:_(s16) = G_EXTRACT_VECTOR_ELT %0(<4 x s16>), %2(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract.mir
index cf3614e5d7482..c23f900c6b1f8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-extract.mir
@@ -11,10 +11,11 @@ body:             |
 
     ; CHECK-LABEL: name: extract_s64_s128
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s128) = COPY $q0
-    ; CHECK: [[EXTRACT:%[0-9]+]]:fpr(s64) = G_EXTRACT [[COPY]](s128), 0
-    ; CHECK: $d2 = COPY [[EXTRACT]](s64)
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s128) = COPY $q0
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:fpr(s64) = G_EXTRACT [[COPY]](s128), 0
+    ; CHECK-NEXT: $d2 = COPY [[EXTRACT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:_(s128) = COPY $q0
     %1:_(s64) = G_EXTRACT %0(s128), 0
     $d2 = COPY %1(s64)
@@ -33,9 +34,10 @@ body:             |
 
     ; CHECK-LABEL: name: extract_s64_s128_gpr
     ; CHECK: liveins: $x0_x1, $x1
-    ; CHECK: [[CASPX:%[0-9]+]]:xseqpairsclass(s128) = CASPX $x0_x1, $x0_x1, $x0
-    ; CHECK: [[EXTRACT:%[0-9]+]]:gpr(s64) = G_EXTRACT [[CASPX]](s128), 0
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[CASPX:%[0-9]+]]:xseqpairsclass(s128) = CASPX $x0_x1, $x0_x1, $x0
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:gpr(s64) = G_EXTRACT [[CASPX]](s128), 0
+    ; CHECK-NEXT: RET_ReallyLR
     %0:xseqpairsclass = CASPX $x0_x1, $x0_x1, $x0
     %1:_(s64) = G_EXTRACT %0:xseqpairsclass(s128), 0
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fcmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fcmp.mir
index 51c04f6cd3932..72be9d4a29982 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fcmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fcmp.mir
@@ -13,11 +13,12 @@ body:             |
 
     ; CHECK-LABEL: name: vector
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %x:fpr(<2 x s64>) = COPY $q0
-    ; CHECK: %y:fpr(<2 x s64>) = COPY $q1
-    ; CHECK: %fcmp:fpr(<2 x s64>) = G_FCMP floatpred(olt), %x(<2 x s64>), %y
-    ; CHECK: $q0 = COPY %fcmp(<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:fpr(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: %y:fpr(<2 x s64>) = COPY $q1
+    ; CHECK-NEXT: %fcmp:fpr(<2 x s64>) = G_FCMP floatpred(olt), %x(<2 x s64>), %y
+    ; CHECK-NEXT: $q0 = COPY %fcmp(<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %x:_(<2 x s64>) = COPY $q0
     %y:_(<2 x s64>) = COPY $q1
     %fcmp:_(<2 x s64>) = G_FCMP floatpred(olt), %x:_(<2 x s64>), %y:_

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
index 6aed0ca8577f3..b29144dff8833 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fma.mir
@@ -15,12 +15,13 @@ body:             |
 
     ; CHECK-LABEL: name: fma_f32
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s32) = COPY $s2
-    ; CHECK: [[FMA:%[0-9]+]]:fpr(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: $s0 = COPY [[FMA]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s32) = COPY $s2
+    ; CHECK-NEXT: [[FMA:%[0-9]+]]:fpr(s32) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK-NEXT: $s0 = COPY [[FMA]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(s32) = COPY $s0
     %1:_(s32) = COPY $s1
     %2:_(s32) = COPY $s2
@@ -41,12 +42,13 @@ body:             |
 
     ; CHECK-LABEL: name: fma_f64
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s64) = COPY $d2
-    ; CHECK: [[FMA:%[0-9]+]]:fpr(s64) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: $d0 = COPY [[FMA]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s64) = COPY $d2
+    ; CHECK-NEXT: [[FMA:%[0-9]+]]:fpr(s64) = nnan ninf nsz arcp contract afn reassoc G_FMA [[COPY]], [[COPY1]], [[COPY2]]
+    ; CHECK-NEXT: $d0 = COPY [[FMA]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(s64) = COPY $d0
     %1:_(s64) = COPY $d1
     %2:_(s64) = COPY $d2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
index 4cdefa80b309b..b2528840a39cf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
@@ -154,7 +154,7 @@ name:            load_used_by_phi_gpr_copy_gpr
 legalized:       true
 tracksRegLiveness: true
 body:             |
-  ; CHECK-LABEL: name: load_used_by_phi_gpr
+  ; CHECK-LABEL: name: load_used_by_phi_gpr_copy_gpr
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT:   liveins: $x0, $s0, $s1, $w0, $w1
@@ -198,7 +198,7 @@ name:            load_used_by_phi_gpr_copy_fpr
 legalized:       true
 tracksRegLiveness: true
 body:             |
-  ; CHECK-LABEL: name: load_used_by_phi_gpr
+  ; CHECK-LABEL: name: load_used_by_phi_gpr_copy_fpr
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT:   liveins: $x0, $s0, $s1, $w0, $w1
@@ -265,7 +265,6 @@ body:             |
   ; CHECK-NEXT:   $w0 = COPY %phi(s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   ; The G_SELECT and G_PHI should end up with the same register bank.
-  ;
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $s0, $s1, $w0, $w1
@@ -312,7 +311,6 @@ body:             |
   ; CHECK-NEXT:   $s0 = COPY %phi(s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $s0
   ; The G_SELECT and G_PHI should end up with the same register bank.
-  ;
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $s0, $s1, $w0, $w1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-round.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-round.mir
index 57e1a4faeca7d..cff64405ababe 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-round.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-round.mir
@@ -18,10 +18,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f16.round
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s16) = COPY $h0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(s16) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $h0 = COPY [[INTRINSIC_ROUND]](s16)
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s16) = COPY $h0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(s16) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $h0 = COPY [[INTRINSIC_ROUND]](s16)
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:_(s16) = COPY $h0
     %1:_(s16) = G_INTRINSIC_ROUND %0
     $h0 = COPY %1(s16)
@@ -42,10 +43,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.round
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(s32) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $s0 = COPY [[INTRINSIC_ROUND]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(s32) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $s0 = COPY [[INTRINSIC_ROUND]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(s32) = COPY $s0
     %1:_(s32) = G_INTRINSIC_ROUND %0
     $s0 = COPY %1(s32)
@@ -66,10 +68,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.round
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(s64) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $d0 = COPY [[INTRINSIC_ROUND]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(s64) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[INTRINSIC_ROUND]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(s64) = COPY $d0
     %1:_(s64) = G_INTRINSIC_ROUND %0
     $d0 = COPY %1(s64)
@@ -90,10 +93,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.round
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<8 x s16>) = COPY $q0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<8 x s16>) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $q0 = COPY [[INTRINSIC_ROUND]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<8 x s16>) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[INTRINSIC_ROUND]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s16>) = COPY $q0
     %1:_(<8 x s16>) = G_INTRINSIC_ROUND %0
     $q0 = COPY %1(<8 x s16>)
@@ -114,10 +118,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.round
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s16>) = COPY $d0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<4 x s16>) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $d0 = COPY [[INTRINSIC_ROUND]](<4 x s16>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<4 x s16>) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[INTRINSIC_ROUND]](<4 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<4 x s16>) = COPY $d0
     %1:_(<4 x s16>) = G_INTRINSIC_ROUND %0
     $d0 = COPY %1(<4 x s16>)
@@ -138,10 +143,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.round
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<2 x s32>) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $d0 = COPY [[INTRINSIC_ROUND]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<2 x s32>) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[INTRINSIC_ROUND]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = G_INTRINSIC_ROUND %0
     $d0 = COPY %1(<2 x s32>)
@@ -162,10 +168,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f32.round
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<4 x s32>) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $q0 = COPY [[INTRINSIC_ROUND]](<4 x s32>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<4 x s32>) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[INTRINSIC_ROUND]](<4 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(<4 x s32>) = G_INTRINSIC_ROUND %0
     $q0 = COPY %1(<4 x s32>)
@@ -186,10 +193,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.round
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
-    ; CHECK: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<2 x s64>) = G_INTRINSIC_ROUND [[COPY]]
-    ; CHECK: $q0 = COPY [[INTRINSIC_ROUND]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[INTRINSIC_ROUND:%[0-9]+]]:fpr(<2 x s64>) = G_INTRINSIC_ROUND [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[INTRINSIC_ROUND]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = G_INTRINSIC_ROUND %0
     $q0 = COPY %1(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-trunc.mir
index c89cedd2af730..55afc4cd24070 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic-trunc.mir
@@ -16,10 +16,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.intrinsic_trunc
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[INTRINSIC_TRUNC:%[0-9]+]]:fpr(s32) = G_INTRINSIC_TRUNC [[COPY]]
-    ; CHECK: $s0 = COPY [[INTRINSIC_TRUNC]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:fpr(s32) = G_INTRINSIC_TRUNC [[COPY]]
+    ; CHECK-NEXT: $s0 = COPY [[INTRINSIC_TRUNC]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(s32) = COPY $s0
     %1:_(s32) = G_INTRINSIC_TRUNC %0
     $s0 = COPY %1(s32)
@@ -40,10 +41,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.intrinsic_trunc
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
-    ; CHECK: [[INTRINSIC_TRUNC:%[0-9]+]]:fpr(s64) = G_INTRINSIC_TRUNC [[COPY]]
-    ; CHECK: $d0 = COPY [[INTRINSIC_TRUNC]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:fpr(s64) = G_INTRINSIC_TRUNC [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[INTRINSIC_TRUNC]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(s64) = COPY $d0
     %1:_(s64) = G_INTRINSIC_TRUNC %0
     $d0 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic.mir
index 82cfb351baf4f..104575a325aaf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-intrinsic.mir
@@ -17,10 +17,11 @@ body:             |
 
     ; CHECK-LABEL: name: uaddlv_fpr
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr(<16 x s8>) = COPY $q0
-    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
-    ; CHECK: $w0 = COPY %intrin(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr(<16 x s8>) = COPY $q0
+    ; CHECK-NEXT: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
+    ; CHECK-NEXT: $w0 = COPY %intrin(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:_(<16 x s8>) = COPY $q0
     %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
     $w0 = COPY %intrin(s32)
@@ -37,11 +38,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: uaddlv_fpr_load
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:gpr(p0) = COPY $x0
-    ; CHECK: %load:fpr(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
-    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
-    ; CHECK: $w0 = COPY %intrin(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: %load:fpr(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
+    ; CHECK-NEXT: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
+    ; CHECK-NEXT: $w0 = COPY %intrin(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %ptr:_(p0) = COPY $x0
     %load:_(<2 x s32>) = G_LOAD %ptr :: (load (<2 x s32>))
     %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %load(<2 x s32>)
@@ -59,11 +61,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: uaddlv_fpr_store
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: %copy:gpr(<2 x s32>) = COPY $x0
-    ; CHECK: %ptr:gpr(p0) = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY %copy(<2 x s32>)
-    ; CHECK: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[COPY]](<2 x s32>)
-    ; CHECK: G_STORE %intrin(s32), %ptr(p0) :: (store (s32))
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: %ptr:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY %copy(<2 x s32>)
+    ; CHECK-NEXT: %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: G_STORE %intrin(s32), %ptr(p0) :: (store (s32))
     %copy:_(<2 x s32>) = COPY $x0
     %ptr:_(p0) = COPY $x0
     %intrin:_(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<2 x s32>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-llround.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-llround.mir
index af9f74914f557..420c7cfb07b74 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-llround.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-llround.mir
@@ -12,10 +12,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: no_cross_bank_copies_needed
     ; CHECK: liveins: $d0
-    ; CHECK: %fpr:fpr(s64) = COPY $d0
-    ; CHECK: %llround:gpr(s64) = G_LLROUND %fpr(s64)
-    ; CHECK: $d0 = COPY %llround(s64)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %fpr:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: %llround:gpr(s64) = G_LLROUND %fpr(s64)
+    ; CHECK-NEXT: $d0 = COPY %llround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %fpr:_(s64) = COPY $d0
     %llround:_(s64) = G_LLROUND %fpr
     $d0 = COPY %llround:_(s64)
@@ -31,11 +32,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: source_needs_copy
     ; CHECK: liveins: $x0
-    ; CHECK: %gpr:gpr(s64) = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
-    ; CHECK: %llround:gpr(s64) = G_LLROUND [[COPY]](s64)
-    ; CHECK: $d0 = COPY %llround(s64)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %gpr:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
+    ; CHECK-NEXT: %llround:gpr(s64) = G_LLROUND [[COPY]](s64)
+    ; CHECK-NEXT: $d0 = COPY %llround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %gpr:_(s64) = COPY $x0
     %llround:_(s64) = G_LLROUND %gpr
     $d0 = COPY %llround:_(s64)
@@ -51,11 +53,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: load_gets_fpr
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:gpr(p0) = COPY $x0
-    ; CHECK: %load:fpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
-    ; CHECK: %llround:gpr(s64) = G_LLROUND %load(s32)
-    ; CHECK: $d0 = COPY %llround(s64)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: %load:fpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    ; CHECK-NEXT: %llround:gpr(s64) = G_LLROUND %load(s32)
+    ; CHECK-NEXT: $d0 = COPY %llround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %ptr:_(p0) = COPY $x0
     %load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
     %llround:_(s64) = G_LLROUND %load

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-lround.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-lround.mir
index 77865c256c737..775c6ca773c68 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-lround.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-lround.mir
@@ -12,10 +12,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: no_cross_bank_copies_needed
     ; CHECK: liveins: $d0
-    ; CHECK: %fpr:fpr(s64) = COPY $d0
-    ; CHECK: %lround:gpr(s64) = G_LROUND %fpr(s64)
-    ; CHECK: $d0 = COPY %lround(s64)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %fpr:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: %lround:gpr(s64) = G_LROUND %fpr(s64)
+    ; CHECK-NEXT: $d0 = COPY %lround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %fpr:_(s64) = COPY $d0
     %lround:_(s64) = G_LROUND %fpr
     $d0 = COPY %lround:_(s64)
@@ -31,11 +32,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: source_needs_copy
     ; CHECK: liveins: $x0
-    ; CHECK: %gpr:gpr(s64) = COPY $x0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
-    ; CHECK: %lround:gpr(s64) = G_LROUND [[COPY]](s64)
-    ; CHECK: $d0 = COPY %lround(s64)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %gpr:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
+    ; CHECK-NEXT: %lround:gpr(s64) = G_LROUND [[COPY]](s64)
+    ; CHECK-NEXT: $d0 = COPY %lround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %gpr:_(s64) = COPY $x0
     %lround:_(s64) = G_LROUND %gpr
     $d0 = COPY %lround:_(s64)
@@ -51,11 +53,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: load_gets_fpr
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:gpr(p0) = COPY $x0
-    ; CHECK: %load:fpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
-    ; CHECK: %lround:gpr(s64) = G_LROUND %load(s32)
-    ; CHECK: $d0 = COPY %lround(s64)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: %load:fpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
+    ; CHECK-NEXT: %lround:gpr(s64) = G_LROUND %load(s32)
+    ; CHECK-NEXT: $d0 = COPY %lround(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %ptr:_(p0) = COPY $x0
     %load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
     %lround:_(s64) = G_LROUND %load

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-maxnum.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-maxnum.mir
index 7a83a527957d0..b718ea300cca9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-maxnum.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-maxnum.mir
@@ -10,11 +10,13 @@ body:             |
     liveins: $s0, $s1
 
     ; CHECK-LABEL: name: fp_inputs
-    ; CHECK: %lhs:fpr(s32) = COPY $s0
-    ; CHECK: %rhs:fpr(s32) = COPY $s1
-    ; CHECK: %maxnum:fpr(s32) = G_FMAXNUM %lhs, %rhs
-    ; CHECK: $s0 = COPY %maxnum(s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %lhs:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: %rhs:fpr(s32) = COPY $s1
+    ; CHECK-NEXT: %maxnum:fpr(s32) = G_FMAXNUM %lhs, %rhs
+    ; CHECK-NEXT: $s0 = COPY %maxnum(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %lhs:_(s32) = COPY $s0
     %rhs:_(s32) = COPY $s1
     %maxnum:_(s32) = G_FMAXNUM %lhs, %rhs
@@ -30,13 +32,15 @@ body:             |
     liveins: $w0, $w1
 
     ; CHECK-LABEL: name: gp_inputs
-    ; CHECK: %lhs:gpr(s32) = COPY $w0
-    ; CHECK: %rhs:gpr(s32) = COPY $w1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %lhs(s32)
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s32) = COPY %rhs(s32)
-    ; CHECK: %maxnum:fpr(s32) = G_FMAXNUM [[COPY]], [[COPY1]]
-    ; CHECK: $s0 = COPY %maxnum(s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %lhs:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: %rhs:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY %lhs(s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s32) = COPY %rhs(s32)
+    ; CHECK-NEXT: %maxnum:fpr(s32) = G_FMAXNUM [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $s0 = COPY %maxnum(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %maxnum:_(s32) = G_FMAXNUM %lhs, %rhs
@@ -52,11 +56,13 @@ body:             |
     liveins: $s0, $s1
 
     ; CHECK-LABEL: name: gp_use
-    ; CHECK: %lhs:fpr(s32) = COPY $s0
-    ; CHECK: %rhs:fpr(s32) = COPY $s1
-    ; CHECK: %maxnum:fpr(s32) = G_FMAXNUM %lhs, %rhs
-    ; CHECK: $w0 = COPY %maxnum(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %lhs:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: %rhs:fpr(s32) = COPY $s1
+    ; CHECK-NEXT: %maxnum:fpr(s32) = G_FMAXNUM %lhs, %rhs
+    ; CHECK-NEXT: $w0 = COPY %maxnum(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %lhs:_(s32) = COPY $s0
     %rhs:_(s32) = COPY $s1
     %maxnum:_(s32) = G_FMAXNUM %lhs, %rhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-minnum.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-minnum.mir
index a090434873348..6be73170fd938 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-minnum.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-minnum.mir
@@ -10,11 +10,13 @@ body:             |
     liveins: $s0, $s1
 
     ; CHECK-LABEL: name: fp_inputs
-    ; CHECK: %lhs:fpr(s32) = COPY $s0
-    ; CHECK: %rhs:fpr(s32) = COPY $s1
-    ; CHECK: %minnum:fpr(s32) = G_FMINNUM %lhs, %rhs
-    ; CHECK: $s0 = COPY %minnum(s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %lhs:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: %rhs:fpr(s32) = COPY $s1
+    ; CHECK-NEXT: %minnum:fpr(s32) = G_FMINNUM %lhs, %rhs
+    ; CHECK-NEXT: $s0 = COPY %minnum(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %lhs:_(s32) = COPY $s0
     %rhs:_(s32) = COPY $s1
     %minnum:_(s32) = G_FMINNUM %lhs, %rhs
@@ -30,13 +32,15 @@ body:             |
     liveins: $w0, $w1
 
     ; CHECK-LABEL: name: gp_inputs
-    ; CHECK: %lhs:gpr(s32) = COPY $w0
-    ; CHECK: %rhs:gpr(s32) = COPY $w1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %lhs(s32)
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s32) = COPY %rhs(s32)
-    ; CHECK: %minnum:fpr(s32) = G_FMINNUM [[COPY]], [[COPY1]]
-    ; CHECK: $s0 = COPY %minnum(s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %lhs:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: %rhs:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY %lhs(s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s32) = COPY %rhs(s32)
+    ; CHECK-NEXT: %minnum:fpr(s32) = G_FMINNUM [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $s0 = COPY %minnum(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %minnum:_(s32) = G_FMINNUM %lhs, %rhs
@@ -52,11 +56,13 @@ body:             |
     liveins: $s0, $s1
 
     ; CHECK-LABEL: name: gp_use
-    ; CHECK: %lhs:fpr(s32) = COPY $s0
-    ; CHECK: %rhs:fpr(s32) = COPY $s1
-    ; CHECK: %minnum:fpr(s32) = G_FMINNUM %lhs, %rhs
-    ; CHECK: $w0 = COPY %minnum(s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK: liveins: $s0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %lhs:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: %rhs:fpr(s32) = COPY $s1
+    ; CHECK-NEXT: %minnum:fpr(s32) = G_FMINNUM %lhs, %rhs
+    ; CHECK-NEXT: $w0 = COPY %minnum(s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %lhs:_(s32) = COPY $s0
     %rhs:_(s32) = COPY $s1
     %minnum:_(s32) = G_FMINNUM %lhs, %rhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-nearbyint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-nearbyint.mir
index e4b510c145dd7..1d27e770e4f88 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-nearbyint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-nearbyint.mir
@@ -14,10 +14,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.nearbyint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s16>) = COPY $d0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(<4 x s16>) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $d0 = COPY [[FNEARBYINT]](<4 x s16>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s16>) = COPY $d0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(<4 x s16>) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[FNEARBYINT]](<4 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<4 x s16>) = COPY $d0
     %1:_(<4 x s16>) = G_FNEARBYINT %0
     $d0 = COPY %1(<4 x s16>)
@@ -36,10 +37,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.nearbyint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<8 x s16>) = COPY $q0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(<8 x s16>) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $q0 = COPY [[FNEARBYINT]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(<8 x s16>) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[FNEARBYINT]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<8 x s16>) = COPY $q0
     %1:_(<8 x s16>) = G_FNEARBYINT %0
     $q0 = COPY %1(<8 x s16>)
@@ -58,10 +60,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.nearbyint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(<2 x s32>) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $d0 = COPY [[FNEARBYINT]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(<2 x s32>) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[FNEARBYINT]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(<2 x s32>) = G_FNEARBYINT %0
     $d0 = COPY %1(<2 x s32>)
@@ -80,10 +83,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.nearbyint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(<2 x s64>) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $q0 = COPY [[FNEARBYINT]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(<2 x s64>) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[FNEARBYINT]](<2 x s64>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(<2 x s64>) = G_FNEARBYINT %0
     $q0 = COPY %1(<2 x s64>)
@@ -102,10 +106,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.nearbyint
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(s32) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $s0 = COPY [[FNEARBYINT]](s32)
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(s32) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $s0 = COPY [[FNEARBYINT]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:_(s32) = COPY $s0
     %1:_(s32) = G_FNEARBYINT %0
     $s0 = COPY %1(s32)
@@ -124,10 +129,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.nearbyint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(s64) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $d0 = COPY [[FNEARBYINT]](s64)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(s64) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[FNEARBYINT]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(s64) = COPY $d0
     %1:_(s64) = G_FNEARBYINT %0
     $d0 = COPY %1(s64)
@@ -146,10 +152,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f16.nearbyint
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s16) = COPY $h0
-    ; CHECK: [[FNEARBYINT:%[0-9]+]]:fpr(s16) = G_FNEARBYINT [[COPY]]
-    ; CHECK: $h0 = COPY [[FNEARBYINT]](s16)
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s16) = COPY $h0
+    ; CHECK-NEXT: [[FNEARBYINT:%[0-9]+]]:fpr(s16) = G_FNEARBYINT [[COPY]]
+    ; CHECK-NEXT: $h0 = COPY [[FNEARBYINT]](s16)
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:_(s16) = COPY $h0
     %1:_(s16) = G_FNEARBYINT %0
     $h0 = COPY %1(s16)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-shift-imm-64.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-shift-imm-64.mir
index ae9ed3df96162..7ad76fd7f5b7b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-shift-imm-64.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-shift-imm-64.mir
@@ -10,11 +10,12 @@ body:             |
 
     ; CHECK-LABEL: name: shl_cimm_32
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 8
-    ; CHECK: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[C]](s32)
-    ; CHECK: $w0 = COPY [[SHL]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 8
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: $w0 = COPY [[SHL]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %1:_(s32) = G_CONSTANT i32 8
     %2:_(s32) = G_SHL %0, %1(s32)
@@ -32,11 +33,12 @@ body:             |
 
     ; CHECK-LABEL: name: shl_cimm_64
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
-    ; CHECK: [[SHL:%[0-9]+]]:gpr(s64) = G_SHL [[COPY]], [[C]](s64)
-    ; CHECK: $x0 = COPY [[SHL]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:gpr(s64) = G_SHL [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[SHL]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 8
     %2:_(s64) = G_SHL %0, %1(s64)
@@ -54,11 +56,12 @@ body:             |
 
     ; CHECK-LABEL: name: lshr_cimm_32
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
-    ; CHECK: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[C]](s64)
-    ; CHECK: $w0 = COPY [[LSHR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[LSHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %3:_(s64) = G_CONSTANT i64 8
     %2:_(s32) = G_LSHR %0, %3(s64)
@@ -76,11 +79,12 @@ body:             |
 
     ; CHECK-LABEL: name: lshr_cimm_64
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
-    ; CHECK: [[LSHR:%[0-9]+]]:gpr(s64) = G_LSHR [[COPY]], [[C]](s64)
-    ; CHECK: $x0 = COPY [[LSHR]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gpr(s64) = G_LSHR [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[LSHR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 8
     %2:_(s64) = G_LSHR %0, %1(s64)
@@ -98,11 +102,12 @@ body:             |
 
     ; CHECK-LABEL: name: ashr_cimm_32
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
-    ; CHECK: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[C]](s64)
-    ; CHECK: $w0 = COPY [[ASHR]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $w0 = COPY [[ASHR]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
     %3:_(s64) = G_CONSTANT i64 8
     %2:_(s32) = G_ASHR %0, %3(s64)
@@ -120,11 +125,12 @@ body:             |
 
     ; CHECK-LABEL: name: ashr_cimm_64
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
-    ; CHECK: [[ASHR:%[0-9]+]]:gpr(s64) = G_ASHR [[COPY]], [[C]](s64)
-    ; CHECK: $x0 = COPY [[ASHR]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:gpr(s64) = G_ASHR [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: $x0 = COPY [[ASHR]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 8
     %2:_(s64) = G_ASHR %0, %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-trunc-s128.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-trunc-s128.mir
index 1492808e741f6..a3cdd781077bd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-trunc-s128.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-trunc-s128.mir
@@ -11,10 +11,11 @@ body:             |
 
     ; CHECK-LABEL: name: trunc_s64_s128
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s128) = COPY $q0
-    ; CHECK: [[TRUNC:%[0-9]+]]:fpr(s64) = G_TRUNC [[COPY]](s128)
-    ; CHECK: $d2 = COPY [[TRUNC]](s64)
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s128) = COPY $q0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:fpr(s64) = G_TRUNC [[COPY]](s128)
+    ; CHECK-NEXT: $d2 = COPY [[TRUNC]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:_(s128) = COPY $q0
     %1:_(s64) = G_TRUNC %0(s128)
     $d2 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
index eade999c3d524..015949ed8de95 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-build-vector.mir
@@ -12,19 +12,20 @@ body:             |
     ; Check that s16 operands are assigned fpr as we don't have 16 bit gpr regs.
     ; CHECK-LABEL: name: build_vec_f16
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY3:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY4:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY5:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY6:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY7:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[COPY8:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<8 x s16>) = G_BUILD_VECTOR [[COPY1]](s16), [[COPY2]](s16), [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[COPY6]](s16), [[COPY7]](s16), [[COPY8]](s16)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:fpr(s16) = COPY [[TRUNC]](s16)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:fpr(<8 x s16>) = G_BUILD_VECTOR [[COPY1]](s16), [[COPY2]](s16), [[COPY3]](s16), [[COPY4]](s16), [[COPY5]](s16), [[COPY6]](s16), [[COPY7]](s16), [[COPY8]](s16)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(s32) = COPY $w0
     %1:_(s16) = G_TRUNC %0(s32)
     %2:_(<8 x s16>) = G_BUILD_VECTOR %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16), %1(s16)
@@ -44,17 +45,17 @@ body:             |
     ; and this form is more amenable to selection by patterns (without x-bank copies).
     ; CHECK-LABEL: name: g_constant_operands_on_gpr
     ; CHECK: [[C:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 4
-    ; CHECK: [[C1:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 10
-    ; CHECK: [[C2:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 3
-    ; CHECK: [[C3:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 11
-    ; CHECK: [[C4:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 15
-    ; CHECK: [[C5:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 44
-    ; CHECK: [[C6:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 22
-    ; CHECK: [[C7:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 19
-    ; CHECK: [[C8:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 55
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C]](s8), [[C1]](s8), [[C5]](s8), [[C6]](s8), [[C4]](s8), [[C]](s8), [[C7]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C8]](s8)
-    ; CHECK: $q0 = COPY [[BUILD_VECTOR]](<16 x s8>)
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 10
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 3
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 11
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 15
+    ; CHECK-NEXT: [[C5:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 44
+    ; CHECK-NEXT: [[C6:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 22
+    ; CHECK-NEXT: [[C7:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 19
+    ; CHECK-NEXT: [[C8:%[0-9]+]]:gpr(s8) = G_CONSTANT i8 55
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:fpr(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C1]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C]](s8), [[C1]](s8), [[C5]](s8), [[C6]](s8), [[C4]](s8), [[C]](s8), [[C7]](s8), [[C2]](s8), [[C3]](s8), [[C4]](s8), [[C8]](s8)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<16 x s8>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %1:_(s8) = G_CONSTANT i8 4
     %2:_(s8) = G_CONSTANT i8 10
     %3:_(s8) = G_CONSTANT i8 3
@@ -87,16 +88,17 @@ body:             |
 
     ; CHECK-LABEL: name: fed_by_fp_load
     ; CHECK: liveins: $s0, $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
-    ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 328
-    ; CHECK: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
-    ; CHECK: [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 344
-    ; CHECK: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
-    ; CHECK: [[LOAD:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32))
-    ; CHECK: [[LOAD1:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32))
-    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:fpr(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
-    ; CHECK: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 328
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 344
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32))
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:fpr(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32))
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:fpr(<2 x s32>) = G_BUILD_VECTOR [[LOAD]](s32), [[LOAD1]](s32)
+    ; CHECK-NEXT: $d0 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:_(p0) = COPY $x0
     %4:_(s64) = G_CONSTANT i64 328
     %5:_(p0) = G_PTR_ADD %0, %4(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
index 8d79c4412de67..8178cb0a1297b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
@@ -89,8 +89,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_add_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
 ...
@@ -105,8 +107,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_add_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[ADD:%[0-9]+]]:fpr(<4 x s32>) = G_ADD [[COPY]], [[COPY]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:fpr(<4 x s32>) = G_ADD [[COPY]], [[COPY]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_ADD %0, %0
 ...
@@ -121,8 +125,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_sub_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[SUB:%[0-9]+]]:gpr(s32) = G_SUB [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:gpr(s32) = G_SUB [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_SUB %0, %0
 ...
@@ -137,8 +143,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_sub_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[SUB:%[0-9]+]]:fpr(<4 x s32>) = G_SUB [[COPY]], [[COPY]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:fpr(<4 x s32>) = G_SUB [[COPY]], [[COPY]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_SUB %0, %0
 ...
@@ -153,8 +161,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_mul_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[MUL:%[0-9]+]]:gpr(s32) = G_MUL [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr(s32) = G_MUL [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_MUL %0, %0
 ...
@@ -169,8 +179,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_mul_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[MUL:%[0-9]+]]:fpr(<4 x s32>) = G_MUL [[COPY]], [[COPY]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[MUL:%[0-9]+]]:fpr(<4 x s32>) = G_MUL [[COPY]], [[COPY]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_MUL %0, %0
 ...
@@ -185,8 +197,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_and_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_AND %0, %0
 ...
@@ -201,8 +215,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_and_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[AND:%[0-9]+]]:fpr(<4 x s32>) = G_AND [[COPY]], [[COPY]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:fpr(<4 x s32>) = G_AND [[COPY]], [[COPY]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_AND %0, %0
 ...
@@ -217,8 +233,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_or_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[OR:%[0-9]+]]:gpr(s32) = G_OR [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:gpr(s32) = G_OR [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_OR %0, %0
 ...
@@ -233,8 +251,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_or_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[OR:%[0-9]+]]:fpr(<4 x s32>) = G_OR [[COPY]], [[COPY]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:fpr(<4 x s32>) = G_OR [[COPY]], [[COPY]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_OR %0, %0
 ...
@@ -249,8 +269,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_xor_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[XOR:%[0-9]+]]:gpr(s32) = G_XOR [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:gpr(s32) = G_XOR [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_XOR %0, %0
 ...
@@ -265,8 +287,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_xor_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[XOR:%[0-9]+]]:fpr(<4 x s32>) = G_XOR [[COPY]], [[COPY]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:fpr(<4 x s32>) = G_XOR [[COPY]], [[COPY]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_XOR %0, %0
 ...
@@ -281,8 +305,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_shl_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s32) = G_SHL %0, %0
 ...
@@ -297,8 +323,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_shl_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[SHL:%[0-9]+]]:fpr(<4 x s32>) = G_SHL [[COPY]], [[COPY]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:fpr(<4 x s32>) = G_SHL [[COPY]], [[COPY]](<4 x s32>)
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_SHL %0, %0
 ...
@@ -313,8 +341,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_lshr_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s32) = G_LSHR %0, %0
 ...
@@ -329,8 +359,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_ashr_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s32) = G_ASHR %0, %0
 ...
@@ -345,8 +377,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_sdiv_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[SDIV:%[0-9]+]]:gpr(s32) = G_SDIV [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[SDIV:%[0-9]+]]:gpr(s32) = G_SDIV [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_SDIV %0, %0
 ...
@@ -361,8 +395,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_udiv_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[UDIV:%[0-9]+]]:gpr(s32) = G_UDIV [[COPY]], [[COPY]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[UDIV:%[0-9]+]]:gpr(s32) = G_UDIV [[COPY]], [[COPY]]
     %0(s32) = COPY $w0
     %1(s32) = G_UDIV %0, %0
 ...
@@ -377,8 +413,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_anyext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[ANYEXT:%[0-9]+]]:gpr(s64) = G_ANYEXT [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:gpr(s64) = G_ANYEXT [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s64) = G_ANYEXT %0
 ...
@@ -393,8 +431,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_sext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[SEXT:%[0-9]+]]:gpr(s64) = G_SEXT [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[SEXT:%[0-9]+]]:gpr(s64) = G_SEXT [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s64) = G_SEXT %0
 ...
@@ -409,8 +449,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_zext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[ZEXT:%[0-9]+]]:gpr(s64) = G_ZEXT [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:gpr(s64) = G_ZEXT [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s64) = G_ZEXT %0
 ...
@@ -425,8 +467,10 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_trunc_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s32) = G_TRUNC [[COPY]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s32) = G_TRUNC [[COPY]](s64)
     %0(s64) = COPY $x0
     %1(s32) = G_TRUNC %0
 ...
@@ -466,9 +510,11 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_icmp_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY]]
-    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
     %0(s32) = COPY $w0
     %1(s32) = G_ICMP intpred(ne), %0, %0
     %2(s1) = G_TRUNC %1(s32)
@@ -485,9 +531,11 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_icmp_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
-    ; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](p0), [[COPY]]
-    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](p0), [[COPY]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
     %0(p0) = COPY $x0
     %1(s32) = G_ICMP intpred(ne), %0, %0
     %2(s1) = G_TRUNC %1(s32)
@@ -517,8 +565,10 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_ptrtoint_s64_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
-    ; CHECK: [[PTRTOINT:%[0-9]+]]:gpr(s64) = G_PTRTOINT [[COPY]](p0)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: [[PTRTOINT:%[0-9]+]]:gpr(s64) = G_PTRTOINT [[COPY]](p0)
     %0(p0) = COPY $x0
     %1(s64) = G_PTRTOINT %0
 ...
@@ -533,8 +583,10 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_inttoptr_p0_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: [[INTTOPTR:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[COPY]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[INTTOPTR:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[COPY]](s64)
     %0(s64) = COPY $x0
     %1(p0) = G_INTTOPTR %0
 ...
@@ -549,8 +601,10 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_load_s32_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
-    ; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
     %0(p0) = COPY $x0
     %1(s32) = G_LOAD %0 :: (load (s32))
 ...
@@ -565,9 +619,11 @@ body: |
   bb.0:
     liveins: $x0, $w1
     ; CHECK-LABEL: name: test_store_s32_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1
-    ; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store (s32))
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1
+    ; CHECK-NEXT: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store (s32))
     %0(p0) = COPY $x0
     %1(s32) = COPY $w1
     G_STORE %1, %0 :: (store (s32))
@@ -583,8 +639,10 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fadd_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[COPY]], [[COPY]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[COPY]], [[COPY]]
     %0(s32) = COPY $s0
     %1(s32) = G_FADD %0, %0
 ...
@@ -599,8 +657,10 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fsub_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FSUB:%[0-9]+]]:fpr(s32) = G_FSUB [[COPY]], [[COPY]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FSUB:%[0-9]+]]:fpr(s32) = G_FSUB [[COPY]], [[COPY]]
     %0(s32) = COPY $s0
     %1(s32) = G_FSUB %0, %0
 ...
@@ -615,8 +675,10 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fmul_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FMUL:%[0-9]+]]:fpr(s32) = G_FMUL [[COPY]], [[COPY]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FMUL:%[0-9]+]]:fpr(s32) = G_FMUL [[COPY]], [[COPY]]
     %0(s32) = COPY $s0
     %1(s32) = G_FMUL %0, %0
 ...
@@ -631,8 +693,10 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fdiv_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FDIV:%[0-9]+]]:fpr(s32) = G_FDIV [[COPY]], [[COPY]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FDIV:%[0-9]+]]:fpr(s32) = G_FDIV [[COPY]], [[COPY]]
     %0(s32) = COPY $s0
     %1(s32) = G_FDIV %0, %0
 ...
@@ -647,8 +711,10 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fpext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FPEXT:%[0-9]+]]:fpr(s64) = G_FPEXT [[COPY]](s32)
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPEXT:%[0-9]+]]:fpr(s64) = G_FPEXT [[COPY]](s32)
     %0(s32) = COPY $s0
     %1(s64) = G_FPEXT %0
 ...
@@ -663,8 +729,10 @@ body: |
   bb.0:
     liveins: $d0
     ; CHECK-LABEL: name: test_fptrunc_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
-    ; CHECK: [[FPTRUNC:%[0-9]+]]:fpr(s32) = G_FPTRUNC [[COPY]](s64)
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: [[FPTRUNC:%[0-9]+]]:fpr(s32) = G_FPTRUNC [[COPY]](s64)
     %0(s64) = COPY $d0
     %1(s32) = G_FPTRUNC %0
 ...
@@ -688,8 +756,10 @@ body: |
   bb.0:
   liveins: $s0
     ; CHECK-LABEL: name: test_fneg_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FNEG:%[0-9]+]]:fpr(s32) = G_FNEG [[COPY]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FNEG:%[0-9]+]]:fpr(s32) = G_FNEG [[COPY]]
     %0:_(s32) = COPY $s0
     %1:_(s32) = G_FNEG %0(s32)
 ...
@@ -705,9 +775,11 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fcmp_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FCMP:%[0-9]+]]:gpr(s32) = G_FCMP floatpred(olt), [[COPY]](s32), [[COPY]]
-    ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[FCMP]](s32)
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FCMP:%[0-9]+]]:gpr(s32) = G_FCMP floatpred(olt), [[COPY]](s32), [[COPY]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[FCMP]](s32)
     %0(s32) = COPY $s0
     %1(s32) = G_FCMP floatpred(olt), %0, %0
     %2(s1) = G_TRUNC %1(s32)
@@ -723,8 +795,10 @@ body: |
   bb.0:
     liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK: [[SITOFP:%[0-9]+]]:fpr(s64) = G_SITOFP [[COPY]](s32)
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+    ; CHECK-NEXT: [[SITOFP:%[0-9]+]]:fpr(s64) = G_SITOFP [[COPY]](s32)
     %0(s32) = COPY $w0
     %1(s64) = G_SITOFP %0
 ...
@@ -739,8 +813,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_sitofp_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[SITOFP:%[0-9]+]]:fpr(<4 x s32>) = G_SITOFP [[COPY]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[SITOFP:%[0-9]+]]:fpr(<4 x s32>) = G_SITOFP [[COPY]](<4 x s32>)
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_SITOFP %0
 ...
@@ -755,8 +831,10 @@ body: |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_uitofp_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
-    ; CHECK: [[UITOFP:%[0-9]+]]:fpr(s32) = G_UITOFP [[COPY]](s64)
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+    ; CHECK-NEXT: [[UITOFP:%[0-9]+]]:fpr(s32) = G_UITOFP [[COPY]](s64)
     %0(s64) = COPY $x0
     %1(s32) = G_UITOFP %0
 ...
@@ -771,8 +849,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_uitofp_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[UITOFP:%[0-9]+]]:fpr(<4 x s32>) = G_UITOFP [[COPY]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[UITOFP:%[0-9]+]]:fpr(<4 x s32>) = G_UITOFP [[COPY]](<4 x s32>)
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_UITOFP %0
 ...
@@ -787,8 +867,10 @@ body: |
   bb.0:
     liveins: $s0
     ; CHECK-LABEL: name: test_fptosi_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:gpr(s64) = G_FPTOSI [[COPY]](s32)
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:gpr(s64) = G_FPTOSI [[COPY]](s32)
     %0(s32) = COPY $s0
     %1(s64) = G_FPTOSI %0
 ...
@@ -803,8 +885,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_fptosi_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[FPTOSI:%[0-9]+]]:fpr(<4 x s32>) = G_FPTOSI [[COPY]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:fpr(<4 x s32>) = G_FPTOSI [[COPY]](<4 x s32>)
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_FPTOSI %0
 ...
@@ -819,8 +903,10 @@ body: |
   bb.0:
     liveins: $d0
     ; CHECK-LABEL: name: test_fptoui_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:gpr(s32) = G_FPTOUI [[COPY]](s64)
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:gpr(s32) = G_FPTOUI [[COPY]](s64)
     %0(s64) = COPY $d0
     %1(s32) = G_FPTOUI %0
 ...
@@ -835,8 +921,10 @@ body: |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: test_fptoui_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[FPTOUI:%[0-9]+]]:fpr(<4 x s32>) = G_FPTOUI [[COPY]](<4 x s32>)
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:fpr(<4 x s32>) = G_FPTOUI [[COPY]](<4 x s32>)
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_FPTOUI %0
 ...
@@ -855,19 +943,22 @@ registers:
 body:             |
   ; CHECK-LABEL: name: test_gphi_ptr
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: $w2, $x0, $x1
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr(s32) = COPY $w2
-  ; CHECK:   G_BRCOND [[COPY2]](s32), %bb.1
-  ; CHECK:   G_BR %bb.2
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK: bb.2:
-  ; CHECK:   [[PHI:%[0-9]+]]:gpr(p0) = G_PHI [[COPY]](p0), %bb.0, [[COPY1]](p0), %bb.1
-  ; CHECK:   $x0 = COPY [[PHI]](p0)
-  ; CHECK:   RET_ReallyLR implicit $x0
+  ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT:   liveins: $w2, $x0, $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr(s32) = COPY $w2
+  ; CHECK-NEXT:   G_BRCOND [[COPY2]](s32), %bb.1
+  ; CHECK-NEXT:   G_BR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr(p0) = G_PHI [[COPY]](p0), %bb.0, [[COPY1]](p0), %bb.1
+  ; CHECK-NEXT:   $x0 = COPY [[PHI]](p0)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
   bb.0:
     successors: %bb.1, %bb.2
     liveins: $w2, $x0, $x1
@@ -898,9 +989,10 @@ body: |
     liveins: $x16, $x17
     ; CHECK-LABEL: name: test_restricted_tail_call
     ; CHECK: liveins: $x16, $x17
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x16
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $x17
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x16
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $x17
+    ; CHECK-NEXT: RET_ReallyLR
     %0:_(s64) = COPY $x16
     %1:_(s64) = COPY $x17
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-reductions.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-reductions.mir
index 1c043da54215f..d06da3af549a2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-reductions.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-reductions.mir
@@ -11,10 +11,11 @@ body:             |
 
     ; CHECK-LABEL: name: fadd_v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
-    ; CHECK: [[VECREDUCE_FADD:%[0-9]+]]:fpr(s32) = G_VECREDUCE_FADD [[COPY]](<2 x s32>)
-    ; CHECK: $w0 = COPY [[VECREDUCE_FADD]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s32>) = COPY $d0
+    ; CHECK-NEXT: [[VECREDUCE_FADD:%[0-9]+]]:fpr(s32) = G_VECREDUCE_FADD [[COPY]](<2 x s32>)
+    ; CHECK-NEXT: $w0 = COPY [[VECREDUCE_FADD]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<2 x s32>) = COPY $d0
     %1:_(s32) = G_VECREDUCE_FADD %0(<2 x s32>)
     $w0 = COPY %1(s32)
@@ -31,10 +32,11 @@ body:             |
 
     ; CHECK-LABEL: name: add_v4s32
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:fpr(s32) = G_VECREDUCE_ADD [[COPY]](<4 x s32>)
-    ; CHECK: $w0 = COPY [[VECREDUCE_ADD]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[VECREDUCE_ADD:%[0-9]+]]:fpr(s32) = G_VECREDUCE_ADD [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: $w0 = COPY [[VECREDUCE_ADD]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_VECREDUCE_ADD %0(<4 x s32>)
     $w0 = COPY %1(s32)
@@ -51,10 +53,11 @@ body:             |
 
     ; CHECK-LABEL: name: fmaximum_v4s32
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
-    ; CHECK: [[VECREDUCE_FMAXIMUM:%[0-9]+]]:fpr(s32) = G_VECREDUCE_FMAXIMUM [[COPY]](<4 x s32>)
-    ; CHECK: $w0 = COPY [[VECREDUCE_FMAXIMUM]](s32)
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
+    ; CHECK-NEXT: [[VECREDUCE_FMAXIMUM:%[0-9]+]]:fpr(s32) = G_VECREDUCE_FMAXIMUM [[COPY]](<4 x s32>)
+    ; CHECK-NEXT: $w0 = COPY [[VECREDUCE_FMAXIMUM]](s32)
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_VECREDUCE_FMAXIMUM %0(<4 x s32>)
     $w0 = COPY %1(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir
index 749a2ff3ca34d..b9c7fdb7fc00c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir
@@ -14,10 +14,11 @@ body:             |
     ; Ensure that the dest regs have FPR since we're unmerging from a vector
     ; CHECK-LABEL: name: unmerge
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:fpr(s64), [[UV1:%[0-9]+]]:fpr(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
-    ; CHECK: $x0 = COPY [[UV]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:fpr(s64), [[UV1:%[0-9]+]]:fpr(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
+    ; CHECK-NEXT: $x0 = COPY [[UV]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(<2 x s64>) = COPY $q0
     %1:_(s64), %2:_(s64) = G_UNMERGE_VALUES %0(<2 x s64>)
     $x0 = COPY %1(s64)
@@ -38,10 +39,11 @@ body:             |
     ; s128 should be treated as an FPR/vector because it can't live on GPR bank.
     ; CHECK-LABEL: name: unmerge_s128
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s128) = COPY $q0
-    ; CHECK: [[UV:%[0-9]+]]:fpr(s64), [[UV1:%[0-9]+]]:fpr(s64) = G_UNMERGE_VALUES [[COPY]](s128)
-    ; CHECK: $x0 = COPY [[UV]](s64)
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s128) = COPY $q0
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:fpr(s64), [[UV1:%[0-9]+]]:fpr(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+    ; CHECK-NEXT: $x0 = COPY [[UV]](s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s128) = COPY $q0
     %1:_(s64), %2:_(s64) = G_UNMERGE_VALUES %0(s128)
     $x0 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/retry-artifact-combine.mir b/llvm/test/CodeGen/AArch64/GlobalISel/retry-artifact-combine.mir
index 242b09546522b..8b01414daa702 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/retry-artifact-combine.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/retry-artifact-combine.mir
@@ -6,11 +6,11 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: combine_anyext_s2
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ogt), [[COPY]](s32), [[COPY1]]
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[FCMP]], [[C]]
-    ; CHECK: $w0 = COPY [[AND]](s32)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK-NEXT: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(ogt), [[COPY]](s32), [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FCMP]], [[C]]
+    ; CHECK-NEXT: $w0 = COPY [[AND]](s32)
     %0:_(s32) = COPY $w0
     %1:_(s32) = COPY $w1
     %2:_(s1) = G_FCMP floatpred(ogt), %0(s32), %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-abs.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-abs.mir
index 3f0a7adaa8310..d8cdcdb3e2f62 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-abs.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-abs.mir
@@ -13,10 +13,11 @@ body:             |
 
     ; CHECK-LABEL: name: v4s16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ABSv4i16_:%[0-9]+]]:fpr64 = ABSv4i16 [[COPY]]
-    ; CHECK: $d0 = COPY [[ABSv4i16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ABSv4i16_:%[0-9]+]]:fpr64 = ABSv4i16 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[ABSv4i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = G_ABS %0
     $d0 = COPY %1(<4 x s16>)
@@ -34,10 +35,11 @@ body:             |
 
     ; CHECK-LABEL: name: v8s16
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ABSv8i16_:%[0-9]+]]:fpr128 = ABSv8i16 [[COPY]]
-    ; CHECK: $q0 = COPY [[ABSv8i16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ABSv8i16_:%[0-9]+]]:fpr128 = ABSv8i16 [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[ABSv8i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = G_ABS %0
     $q0 = COPY %1(<8 x s16>)
@@ -55,10 +57,11 @@ body:             |
 
     ; CHECK-LABEL: name: v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ABSv2i32_:%[0-9]+]]:fpr64 = ABSv2i32 [[COPY]]
-    ; CHECK: $d0 = COPY [[ABSv2i32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ABSv2i32_:%[0-9]+]]:fpr64 = ABSv2i32 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[ABSv2i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = G_ABS %0
     $d0 = COPY %1(<2 x s32>)
@@ -76,10 +79,11 @@ body:             |
 
     ; CHECK-LABEL: name: v4s32
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ABSv4i32_:%[0-9]+]]:fpr128 = ABSv4i32 [[COPY]]
-    ; CHECK: $q0 = COPY [[ABSv4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ABSv4i32_:%[0-9]+]]:fpr128 = ABSv4i32 [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[ABSv4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = G_ABS %0
     $q0 = COPY %1(<4 x s32>)
@@ -97,10 +101,11 @@ body:             |
 
     ; CHECK-LABEL: name: v4s8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ABSv8i8_:%[0-9]+]]:fpr64 = ABSv8i8 [[COPY]]
-    ; CHECK: $d0 = COPY [[ABSv8i8_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ABSv8i8_:%[0-9]+]]:fpr64 = ABSv8i8 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[ABSv8i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s8>) = G_ABS %0
     $d0 = COPY %1(<8 x s8>)
@@ -118,10 +123,11 @@ body:             |
 
     ; CHECK-LABEL: name: v16s8
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ABSv16i8_:%[0-9]+]]:fpr128 = ABSv16i8 [[COPY]]
-    ; CHECK: $q0 = COPY [[ABSv16i8_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ABSv16i8_:%[0-9]+]]:fpr128 = ABSv16i8 [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[ABSv16i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<16 x s8>) = COPY $q0
     %1:fpr(<16 x s8>) = G_ABS %0
     $q0 = COPY %1(<16 x s8>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
index 677ab6a20e1ec..a08e3179837ef 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-add-low.mir
@@ -20,9 +20,10 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: select_add_low_without_offset
     ; CHECK: liveins: $x0
-    ; CHECK: %add_low:gpr64common = MOVaddr target-flags(aarch64-page) @x, target-flags(aarch64-pageoff, aarch64-nc) @x
-    ; CHECK: $x0 = COPY %add_low
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %add_low:gpr64common = MOVaddr target-flags(aarch64-page) @x, target-flags(aarch64-pageoff, aarch64-nc) @x
+    ; CHECK-NEXT: $x0 = COPY %add_low
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:gpr(p0) = COPY $x0
     %adrp:gpr64(p0) = ADRP target-flags(aarch64-page) @x
     %add_low:gpr(p0) = G_ADD_LOW %adrp(p0), target-flags(aarch64-pageoff, aarch64-nc) @x
@@ -40,9 +41,10 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: select_add_low_with_offset
     ; CHECK: liveins: $x0
-    ; CHECK: %add_low:gpr64common = MOVaddr target-flags(aarch64-page) @x + 1, target-flags(aarch64-pageoff, aarch64-nc) @x + 1
-    ; CHECK: $x0 = COPY %add_low
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %add_low:gpr64common = MOVaddr target-flags(aarch64-page) @x + 1, target-flags(aarch64-pageoff, aarch64-nc) @x + 1
+    ; CHECK-NEXT: $x0 = COPY %add_low
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:gpr(p0) = COPY $x0
     %adrp:gpr64(p0) = ADRP target-flags(aarch64-page) @x + 1
     %add_low:gpr(p0) = G_ADD_LOW %adrp(p0), target-flags(aarch64-pageoff, aarch64-nc) @x + 1
@@ -60,10 +62,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: select_add_low_without_adrp
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %add_low:gpr64sp = ADDXri %ptr, target-flags(aarch64-pageoff, aarch64-nc) @x, 0
-    ; CHECK: $x0 = COPY %add_low
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %add_low:gpr64sp = ADDXri %ptr, target-flags(aarch64-pageoff, aarch64-nc) @x, 0
+    ; CHECK-NEXT: $x0 = COPY %add_low
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %ptr:gpr(p0) = COPY $x0
     %add_low:gpr(p0) = G_ADD_LOW %ptr(p0), target-flags(aarch64-pageoff, aarch64-nc) @x
     $x0 = COPY %add_low

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
index 950aab38400fe..5efa45dd78075 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
@@ -28,11 +28,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_xchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: [[SWPX:%[0-9]+]]:gpr64 = SWPX [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic (s64) on %ir.addr)
-    ; CHECK: $x0 = COPY [[SWPX]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: [[SWPX:%[0-9]+]]:gpr64 = SWPX [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic (s64) on %ir.addr)
+    ; CHECK-NEXT: $x0 = COPY [[SWPX]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic (s64) on %ir.addr)
@@ -48,11 +50,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_add_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: [[LDADDX:%[0-9]+]]:gpr64 = LDADDX [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic (s64) on %ir.addr)
-    ; CHECK: $x0 = COPY [[LDADDX]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: [[LDADDX:%[0-9]+]]:gpr64 = LDADDX [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic (s64) on %ir.addr)
+    ; CHECK-NEXT: $x0 = COPY [[LDADDX]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s64) on %ir.addr)
@@ -68,10 +72,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_add_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDADDALW:%[0-9]+]]:gpr32 = LDADDALW [[MOVi32imm]], [[COPY]] :: (load store seq_cst (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDADDALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDADDALW:%[0-9]+]]:gpr32 = LDADDALW [[MOVi32imm]], [[COPY]] :: (load store seq_cst (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDADDALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst (s32) on %ir.addr)
@@ -88,10 +94,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_sub_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDADDALW:%[0-9]+]]:gpr32 = LDADDALW [[MOVi32imm]], [[COPY]] :: (load store seq_cst (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDADDALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDADDALW:%[0-9]+]]:gpr32 = LDADDALW [[MOVi32imm]], [[COPY]] :: (load store seq_cst (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDADDALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst (s32) on %ir.addr)
@@ -108,11 +116,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_and_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[MOVi32imm]]
-    ; CHECK: [[LDCLRAW:%[0-9]+]]:gpr32 = LDCLRAW [[ORNWrr]], [[COPY]] :: (load store acquire (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDCLRAW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[MOVi32imm]]
+    ; CHECK-NEXT: [[LDCLRAW:%[0-9]+]]:gpr32 = LDCLRAW [[ORNWrr]], [[COPY]] :: (load store acquire (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDCLRAW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire (s32) on %ir.addr)
@@ -129,10 +139,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_or_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDSETLW:%[0-9]+]]:gpr32 = LDSETLW [[MOVi32imm]], [[COPY]] :: (load store release (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDSETLW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDSETLW:%[0-9]+]]:gpr32 = LDSETLW [[MOVi32imm]], [[COPY]] :: (load store release (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDSETLW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release (s32) on %ir.addr)
@@ -149,10 +161,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_xor_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDEORALW:%[0-9]+]]:gpr32 = LDEORALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDEORALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDEORALW:%[0-9]+]]:gpr32 = LDEORALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDEORALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel (s32) on %ir.addr)
@@ -169,10 +183,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_min_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDSMINALW:%[0-9]+]]:gpr32 = LDSMINALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDSMINALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDSMINALW:%[0-9]+]]:gpr32 = LDSMINALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDSMINALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel (s32) on %ir.addr)
@@ -189,10 +205,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_max_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDSMAXALW:%[0-9]+]]:gpr32 = LDSMAXALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDSMAXALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDSMAXALW:%[0-9]+]]:gpr32 = LDSMAXALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDSMAXALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel (s32) on %ir.addr)
@@ -209,10 +227,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_umin_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDUMINALW:%[0-9]+]]:gpr32 = LDUMINALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDUMINALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDUMINALW:%[0-9]+]]:gpr32 = LDUMINALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDUMINALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel (s32) on %ir.addr)
@@ -229,10 +249,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_umax_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[LDUMAXALW:%[0-9]+]]:gpr32 = LDUMAXALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[LDUMAXALW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[LDUMAXALW:%[0-9]+]]:gpr32 = LDUMAXALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDUMAXALW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel (s32) on %ir.addr)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
index d9ee37e312b99..e615b4d8efbb4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
@@ -29,10 +29,12 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: bitcast_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
-    ; CHECK: $w0 = COPY [[COPY2]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY2]]
     %0(s32) = COPY $w0
     %1(<2 x s16>) = G_BITCAST %0
     %2(s32) = G_BITCAST %1
@@ -53,10 +55,12 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: bitcast_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY [[COPY1]]
-    ; CHECK: $s0 = COPY [[COPY2]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY [[COPY1]]
+    ; CHECK-NEXT: $s0 = COPY [[COPY2]]
     %0(s32) = COPY $s0
     %1(<2 x s16>) = G_BITCAST %0
     %2(s32) = G_BITCAST %1
@@ -77,9 +81,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: bitcast_s32_gpr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK: $s0 = COPY [[COPY1]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: $s0 = COPY [[COPY1]]
     %0(s32) = COPY $w0
     %1(<2 x s16>) = G_BITCAST %0
     $s0 = COPY %1
@@ -99,9 +105,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: bitcast_s32_fpr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
-    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %0(<2 x s16>) = COPY $s0
     %1(s32) = G_BITCAST %0
     $w0 = COPY %1(s32)
@@ -122,10 +130,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: bitcast_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
-    ; CHECK: $x0 = COPY [[COPY2]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+    ; CHECK-NEXT: $x0 = COPY [[COPY2]]
     %0(s64) = COPY $x0
     %1(<2 x s32>) = G_BITCAST %0
     %2(s64) = G_BITCAST %1
@@ -146,8 +156,10 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: $d0 = COPY [[COPY]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: $d0 = COPY [[COPY]]
     %0(s64) = COPY $d0
     %1(<2 x s32>) = G_BITCAST %0
     $d0 = COPY %1
@@ -166,9 +178,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: bitcast_s64_gpr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]]
-    ; CHECK: $d0 = COPY [[COPY1]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[COPY1]]
     %0(s64) = COPY $x0
     %1(<2 x s32>) = G_BITCAST %0
     $d0 = COPY %1
@@ -188,9 +202,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_fpr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64all = COPY [[COPY]]
-    ; CHECK: $x0 = COPY [[COPY1]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64all = COPY [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[COPY1]]
     %0(s64) = COPY $d0
     %1(<2 x s32>) = G_BITCAST %0
     $x0 = COPY %1
@@ -210,8 +226,10 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_v2f32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: $x0 = COPY [[COPY]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: $x0 = COPY [[COPY]]
     %0(s64) = COPY $d0
     %1(<2 x s32>) = G_BITCAST %0
     $x0 = COPY %1(<2 x s32>)
@@ -231,8 +249,10 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_v8i8_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: $x0 = COPY [[COPY]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: $x0 = COPY [[COPY]]
     %0(s64) = COPY $d0
     %1(<8 x s8>) = G_BITCAST %0
     $x0 = COPY %1(<8 x s8>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-bitreverse.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-bitreverse.mir
index efff1160b951e..6327ede4fc40b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-bitreverse.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-bitreverse.mir
@@ -13,10 +13,11 @@ body:             |
 
     ; CHECK-LABEL: name: s32
     ; CHECK: liveins: $w0
-    ; CHECK: %copy:gpr32 = COPY $w0
-    ; CHECK: %bitreverse:gpr32 = RBITWr %copy
-    ; CHECK: $w0 = COPY %bitreverse
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr32 = COPY $w0
+    ; CHECK-NEXT: %bitreverse:gpr32 = RBITWr %copy
+    ; CHECK-NEXT: $w0 = COPY %bitreverse
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %bitreverse:gpr(s32) = G_BITREVERSE %copy
     $w0 = COPY %bitreverse(s32)
@@ -34,10 +35,11 @@ body:             |
 
     ; CHECK-LABEL: name: s64
     ; CHECK: liveins: $x0
-    ; CHECK: %copy:gpr64 = COPY $x0
-    ; CHECK: %bitreverse:gpr64 = RBITXr %copy
-    ; CHECK: $x0 = COPY %bitreverse
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr64 = COPY $x0
+    ; CHECK-NEXT: %bitreverse:gpr64 = RBITXr %copy
+    ; CHECK-NEXT: $x0 = COPY %bitreverse
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:gpr(s64) = COPY $x0
     %bitreverse:gpr(s64) = G_BITREVERSE %copy
     $x0 = COPY %bitreverse(s64)
@@ -55,10 +57,11 @@ body:             |
 
     ; CHECK-LABEL: name: v8s8_legal
     ; CHECK: liveins: $x0
-    ; CHECK: %vec:fpr64 = IMPLICIT_DEF
-    ; CHECK: %bitreverse:fpr64 = RBITv8i8 %vec
-    ; CHECK: $x0 = COPY %bitreverse
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %vec:fpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: %bitreverse:fpr64 = RBITv8i8 %vec
+    ; CHECK-NEXT: $x0 = COPY %bitreverse
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %vec:fpr(<8 x s8>) = G_IMPLICIT_DEF
     %bitreverse:fpr(<8 x s8>) = G_BITREVERSE %vec
     $x0 = COPY %bitreverse(<8 x s8>)
@@ -76,10 +79,11 @@ body:             |
 
     ; CHECK-LABEL: name: v16s8_legal
     ; CHECK: liveins: $q0
-    ; CHECK: %vec:fpr128 = IMPLICIT_DEF
-    ; CHECK: %bitreverse:fpr128 = RBITv16i8 %vec
-    ; CHECK: $q0 = COPY %bitreverse
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %vec:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: %bitreverse:fpr128 = RBITv16i8 %vec
+    ; CHECK-NEXT: $q0 = COPY %bitreverse
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %vec:fpr(<16 x s8>) = G_IMPLICIT_DEF
     %bitreverse:fpr(<16 x s8>) = G_BITREVERSE %vec
     $q0 = COPY %bitreverse(<16 x s8>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
index 4cf3c5769dda7..5ad64b53769b4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
@@ -62,7 +62,6 @@ body:             |
   ; CHECK-NEXT:   RET_ReallyLR
   ; TB(N)ZX has no encoding if the bit being tested is < 32, so we should get
   ; TBZW here.
-  ;
   bb.1:
     successors: %bb.2, %bb.3
     liveins: $x0
@@ -172,7 +171,6 @@ body:             |
   ; We won't ever fold this, because
   ; bit = 0
   ; bit - constant < 0, which isn't valid for tbz/tbnz.
-  ;
   bb.1:
     successors: %bb.2, %bb.3
     liveins: $w0
@@ -207,9 +205,7 @@ body:             |
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   RET_ReallyLR
   ; We can fold ashr, because we can have
-  ;
   ; (tbz (ashr x, c), 0) where 0 + c > # bits in x.
-  ;
   bb.1:
     successors: %bb.2, %bb.3
     liveins: $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-build-vector.mir
index 71a2bd2ddcc6e..d3d413c34ea72 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-build-vector.mir
@@ -15,23 +15,24 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32
     ; CHECK: liveins: $s0, $s1, $s2, $s3
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY3:%[0-9]+]]:fpr32 = COPY $s3
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
-    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.ssub
-    ; CHECK: [[INSvi32lane:%[0-9]+]]:fpr128 = INSvi32lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
-    ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY2]], %subreg.ssub
-    ; CHECK: [[INSvi32lane1:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane]], 2, [[INSERT_SUBREG2]], 0
-    ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY3]], %subreg.ssub
-    ; CHECK: [[INSvi32lane2:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane1]], 3, [[INSERT_SUBREG3]], 0
-    ; CHECK: $q0 = COPY [[INSvi32lane2]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr32 = COPY $s3
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.ssub
+    ; CHECK-NEXT: [[INSvi32lane:%[0-9]+]]:fpr128 = INSvi32lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
+    ; CHECK-NEXT: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY2]], %subreg.ssub
+    ; CHECK-NEXT: [[INSvi32lane1:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane]], 2, [[INSERT_SUBREG2]], 0
+    ; CHECK-NEXT: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY3]], %subreg.ssub
+    ; CHECK-NEXT: [[INSvi32lane2:%[0-9]+]]:fpr128 = INSvi32lane [[INSvi32lane1]], 3, [[INSERT_SUBREG3]], 0
+    ; CHECK-NEXT: $q0 = COPY [[INSvi32lane2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = COPY $s2
@@ -56,15 +57,16 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64
     ; CHECK: liveins: $d0, $d1, $d2, $d3
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.dsub
-    ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
-    ; CHECK: $q0 = COPY [[INSvi64lane]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY1]], %subreg.dsub
+    ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
+    ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = COPY $d1
     %4:fpr(<2 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64)
@@ -87,17 +89,18 @@ body:             |
 
     ; CHECK-LABEL: name: test_i32
     ; CHECK: liveins: $w0, $w1, $w2, $w3
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
-    ; CHECK: [[COPY3:%[0-9]+]]:gpr32 = COPY $w3
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
-    ; CHECK: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[COPY1]]
-    ; CHECK: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSvi32gpr]], 2, [[COPY2]]
-    ; CHECK: [[INSvi32gpr2:%[0-9]+]]:fpr128 = INSvi32gpr [[INSvi32gpr1]], 3, [[COPY3]]
-    ; CHECK: $q0 = COPY [[INSvi32gpr2]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY $w3
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.ssub
+    ; CHECK-NEXT: [[INSvi32gpr:%[0-9]+]]:fpr128 = INSvi32gpr [[INSERT_SUBREG]], 1, [[COPY1]]
+    ; CHECK-NEXT: [[INSvi32gpr1:%[0-9]+]]:fpr128 = INSvi32gpr [[INSvi32gpr]], 2, [[COPY2]]
+    ; CHECK-NEXT: [[INSvi32gpr2:%[0-9]+]]:fpr128 = INSvi32gpr [[INSvi32gpr1]], 3, [[COPY3]]
+    ; CHECK-NEXT: $q0 = COPY [[INSvi32gpr2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = COPY $w2
@@ -122,13 +125,14 @@ body:             |
 
     ; CHECK-LABEL: name: test_i64
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[INSERT_SUBREG]], 1, [[COPY1]]
-    ; CHECK: $q0 = COPY [[INSvi64gpr]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[INSERT_SUBREG]], 1, [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[INSvi64gpr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %4:fpr(<2 x s64>) = G_BUILD_VECTOR %0(s64), %1(s64)
@@ -151,13 +155,14 @@ body:             |
 
     ; CHECK-LABEL: name: test_p0
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[INSERT_SUBREG]], 1, [[COPY1]]
-    ; CHECK: $q0 = COPY [[INSvi64gpr]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[INSvi64gpr:%[0-9]+]]:fpr128 = INSvi64gpr [[INSERT_SUBREG]], 1, [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[INSvi64gpr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:gpr(p0) = COPY $x0
     %1:gpr(p0) = COPY $x1
     %4:fpr(<2 x p0>) = G_BUILD_VECTOR %0(p0), %1(p0)
@@ -181,9 +186,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4s32_zero
     ; CHECK: liveins: $x0
-    ; CHECK: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
-    ; CHECK: $q0 = COPY [[MOVIv2d_ns]]
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+    ; CHECK-NEXT: $q0 = COPY [[MOVIv2d_ns]]
+    ; CHECK-NEXT: RET_ReallyLR
     %0:gpr(p0) = COPY $x0
     %2:gpr(s32) = G_CONSTANT i32 0
     %3:fpr(s32) = COPY %2(s32)
@@ -210,10 +216,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8s8_zero
     ; CHECK: liveins: $x0
-    ; CHECK: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
-    ; CHECK: $d0 = COPY [[COPY]]
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[MOVIv2d_ns:%[0-9]+]]:fpr128 = MOVIv2d_ns 0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVIv2d_ns]].dsub
+    ; CHECK-NEXT: $d0 = COPY [[COPY]]
+    ; CHECK-NEXT: RET_ReallyLR
     %0:gpr(p0) = COPY $x0
     %2:gpr(s8) = G_CONSTANT i8 0
     %3:fpr(s8) = COPY %2(s8)
@@ -241,10 +248,11 @@ body:             |
 
     ; CHECK-LABEL: name: undef_elts_to_subreg_to_reg
     ; CHECK: liveins: $s0
-    ; CHECK: %val:fpr32 = COPY $s0
-    ; CHECK: %bv:fpr128 = SUBREG_TO_REG 0, %val, %subreg.ssub
-    ; CHECK: $q0 = COPY %bv
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %val:fpr32 = COPY $s0
+    ; CHECK-NEXT: %bv:fpr128 = SUBREG_TO_REG 0, %val, %subreg.ssub
+    ; CHECK-NEXT: $q0 = COPY %bv
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %val:fpr(s32) = COPY $s0
     %undef:fpr(s32) = G_IMPLICIT_DEF
     %bv:fpr(<4 x s32>) = G_BUILD_VECTOR %val(s32), %undef(s32), %undef(s32), %undef(s32)
@@ -265,11 +273,12 @@ body:             |
 
    ; CHECK-LABEL: name: undef_elts_different_regbanks
     ; CHECK: liveins: $w0
-    ; CHECK: %val:gpr32all = COPY $w0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: %bv:fpr128 = INSERT_SUBREG [[DEF]], %val, %subreg.ssub
-    ; CHECK: $q0 = COPY %bv
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %val:gpr32all = COPY $w0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: %bv:fpr128 = INSERT_SUBREG [[DEF]], %val, %subreg.ssub
+    ; CHECK-NEXT: $q0 = COPY %bv
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %val:gpr(s32) = COPY $w0
     %undef:gpr(s32) = G_IMPLICIT_DEF
     %bv:fpr(<4 x s32>) = G_BUILD_VECTOR %val(s32), %undef(s32), %undef(s32), %undef(s32)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
index 5202cec121ef3..be5939c346223 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
@@ -18,11 +18,13 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[CASW:%[0-9]+]]:gpr32 = CASW [[COPY1]], [[MOVi32imm]], [[COPY]] :: (load store monotonic (s32) on %ir.addr)
-    ; CHECK: $w0 = COPY [[CASW]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[CASW:%[0-9]+]]:gpr32 = CASW [[COPY1]], [[MOVi32imm]], [[COPY]] :: (load store monotonic (s32) on %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[CASW]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_CONSTANT i32 1
@@ -40,12 +42,14 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: [[CASX:%[0-9]+]]:gpr64 = CASX [[COPY1]], [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic (s64) on %ir.addr)
-    ; CHECK: $x0 = COPY [[CASX]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: [[CASX:%[0-9]+]]:gpr64 = CASX [[COPY1]], [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic (s64) on %ir.addr)
+    ; CHECK-NEXT: $x0 = COPY [[CASX]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s64) = G_CONSTANT i64 1

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
index 439bd2066b930..53d8b82509481 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-concat-vectors.mir
@@ -18,15 +18,16 @@ body:             |
   liveins: $d0, $d1
     ; CHECK-LABEL: name: legal_v4s32_v2s32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
-    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
-    ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
-    ; CHECK: $q0 = COPY [[INSvi64lane]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
+    ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<4 x s32>) = G_CONCAT_VECTORS %0(<2 x s32>), %1(<2 x s32>)
@@ -51,15 +52,16 @@ body:             |
   liveins: $d0, $d1
     ; CHECK-LABEL: name: legal_v8s16_v4s16
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
-    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
-    ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
-    ; CHECK: $q0 = COPY [[INSvi64lane]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY1]], %subreg.dsub
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
+    ; CHECK-NEXT: $q0 = COPY [[INSvi64lane]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = COPY $d1
     %2:fpr(<8 x s16>) = G_CONCAT_VECTORS %0(<4 x s16>), %1(<4 x s16>)
@@ -78,11 +80,12 @@ body:             |
 
     ; CHECK-LABEL: name: select_v16s8_v8s8_undef
     ; CHECK: liveins: $q0
-    ; CHECK: %a:fpr64 = IMPLICIT_DEF
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: %concat:fpr128 = INSERT_SUBREG [[DEF]], %a, %subreg.dsub
-    ; CHECK: $q0 = COPY %concat
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:fpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: %concat:fpr128 = INSERT_SUBREG [[DEF]], %a, %subreg.dsub
+    ; CHECK-NEXT: $q0 = COPY %concat
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %a:fpr(<8 x s8>) = G_IMPLICIT_DEF
     %b:fpr(<8 x s8>) = G_IMPLICIT_DEF
     %concat:fpr(<16 x s8>) = G_CONCAT_VECTORS %a(<8 x s8>), %b(<8 x s8>)
@@ -100,15 +103,16 @@ body:             |
     liveins: $q0, $d1
     ; CHECK-LABEL: name: select_v16s8_v8s8_not_undef
     ; CHECK: liveins: $q0, $d1
-    ; CHECK: %a:fpr64 = COPY $d0
-    ; CHECK: %b:fpr64 = COPY $d1
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %b, %subreg.dsub
-    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], %a, %subreg.dsub
-    ; CHECK: %concat:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
-    ; CHECK: $q0 = COPY %concat
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:fpr64 = COPY $d0
+    ; CHECK-NEXT: %b:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %b, %subreg.dsub
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], %a, %subreg.dsub
+    ; CHECK-NEXT: %concat:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG]], 0
+    ; CHECK-NEXT: $q0 = COPY %concat
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %a:fpr(<8 x s8>) = COPY $d0
     %b:fpr(<8 x s8>) = COPY $d1
     %concat:fpr(<16 x s8>) = G_CONCAT_VECTORS %a(<8 x s8>), %b(<8 x s8>)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
index 674299ab9d47e..417ac91113f2c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-constant.mir
@@ -29,8 +29,8 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s8
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY [[MOVi32imm]]
-    ; CHECK: $w0 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY [[MOVi32imm]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY]]
     %0:gpr(s8) = G_CONSTANT i8 42
     %1:gpr(s32) = G_ANYEXT %0(s8)
     $w0 = COPY %1(s32)
@@ -44,8 +44,8 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s16
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY [[MOVi32imm]]
-    ; CHECK: $w0 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY [[MOVi32imm]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY]]
     %0:gpr(s16) = G_CONSTANT i16 42
     %1:gpr(s32) = G_ANYEXT %0(s16)
     $w0 = COPY %1(s32)
@@ -62,7 +62,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s32
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42
-    ; CHECK: $w0 = COPY [[MOVi32imm]]
+    ; CHECK-NEXT: $w0 = COPY [[MOVi32imm]]
     %0(s32) = G_CONSTANT i32 42
     $w0 = COPY %0(s32)
 ...
@@ -78,7 +78,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s64
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1234567890123
-    ; CHECK: $x0 = COPY [[MOVi64imm]]
+    ; CHECK-NEXT: $x0 = COPY [[MOVi64imm]]
     %0(s64) = G_CONSTANT i64 1234567890123
     $x0 = COPY %0(s64)
 ...
@@ -94,7 +94,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s32_zero
     ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $wzr
-    ; CHECK: $w0 = COPY [[COPY]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY]]
     %0(s32) = G_CONSTANT i32 0
     $w0 = COPY %0(s32)
 ...
@@ -110,7 +110,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s64_zero
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: $x0 = COPY [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[COPY]]
     %0(s64) = G_CONSTANT i64 0
     $x0 = COPY %0(s64)
 ...
@@ -123,7 +123,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_p0_0
     ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $xzr
-    ; CHECK: $x0 = COPY [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[COPY]]
     %0:gpr(p0) = G_CONSTANT i64 0
     $x0 = COPY %0
 ...
@@ -139,7 +139,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fconst_s32
     ; CHECK: [[FMOVSi:%[0-9]+]]:fpr32 = FMOVSi 12
-    ; CHECK: $s0 = COPY [[FMOVSi]]
+    ; CHECK-NEXT: $s0 = COPY [[FMOVSi]]
     %0(s32) = G_FCONSTANT float 3.5
     $s0 = COPY %0(s32)
 ...
@@ -155,7 +155,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fconst_s64
     ; CHECK: [[FMOVDi:%[0-9]+]]:fpr64 = FMOVDi 112
-    ; CHECK: $d0 = COPY [[FMOVDi]]
+    ; CHECK-NEXT: $d0 = COPY [[FMOVDi]]
     %0(s64) = G_FCONSTANT double 1.0
     $d0 = COPY %0(s64)
 ...
@@ -171,7 +171,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fconst_s32_0
     ; CHECK: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK: $s0 = COPY [[FMOVS0_]]
+    ; CHECK-NEXT: $s0 = COPY [[FMOVS0_]]
     %0(s32) = G_FCONSTANT float 0.0
     $s0 = COPY %0(s32)
 ...
@@ -187,7 +187,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fconst_s64_0
     ; CHECK: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0
-    ; CHECK: $x0 = COPY [[FMOVD0_]]
+    ; CHECK-NEXT: $x0 = COPY [[FMOVD0_]]
     %0(s64) = G_FCONSTANT double 0.0
     $x0 = COPY %0(s64)
 ...
@@ -199,8 +199,8 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: optnone_i64
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 42
-    ; CHECK: $x0 = COPY [[MOVi64imm]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: $x0 = COPY [[MOVi64imm]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = G_CONSTANT i64 42
     $x0 = COPY %0(s64)
     RET_ReallyLR implicit $x0
@@ -213,9 +213,9 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: opt_i64
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = G_CONSTANT i64 42
     $x0 = COPY %0(s64)
     RET_ReallyLR implicit $x0

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ctlz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ctlz.mir
index 7b0112f9e7f2b..97c080049ae01 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ctlz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ctlz.mir
@@ -34,10 +34,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4s16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[CLZv4i16_:%[0-9]+]]:fpr64 = CLZv4i16 [[COPY]]
-    ; CHECK: $d0 = COPY [[CLZv4i16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[CLZv4i16_:%[0-9]+]]:fpr64 = CLZv4i16 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[CLZv4i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = G_CTLZ %0(<4 x s16>)
     $d0 = COPY %1(<4 x s16>)
@@ -56,10 +57,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[CLZv2i32_:%[0-9]+]]:fpr64 = CLZv2i32 [[COPY]]
-    ; CHECK: $d0 = COPY [[CLZv2i32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[CLZv2i32_:%[0-9]+]]:fpr64 = CLZv2i32 [[COPY]]
+    ; CHECK-NEXT: $d0 = COPY [[CLZv2i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = G_CTLZ %0(<2 x s32>)
     $d0 = COPY %1(<2 x s32>)
@@ -78,11 +80,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_s64
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
-    ; CHECK: [[CLZXr:%[0-9]+]]:gpr64 = CLZXr [[COPY1]]
-    ; CHECK: $d0 = COPY [[CLZXr]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
+    ; CHECK-NEXT: [[CLZXr:%[0-9]+]]:gpr64 = CLZXr [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[CLZXr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(s64) = COPY $d0
     %2:gpr(s64) = COPY %0(s64)
     %1:gpr(s64) = G_CTLZ %2(s64)
@@ -101,11 +104,12 @@ body:             |
     liveins: $s0
     ; CHECK-LABEL: name: test_s32
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: [[CLZWr:%[0-9]+]]:gpr32 = CLZWr [[COPY1]]
-    ; CHECK: $s0 = COPY [[CLZWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[CLZWr:%[0-9]+]]:gpr32 = CLZWr [[COPY1]]
+    ; CHECK-NEXT: $s0 = COPY [[CLZWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %2:gpr(s32) = COPY %0(s32)
     %1:gpr(s32) = G_CTLZ %2(s32)
@@ -125,10 +129,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v16s8
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[CLZv16i8_:%[0-9]+]]:fpr128 = CLZv16i8 [[COPY]]
-    ; CHECK: $q0 = COPY [[CLZv16i8_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[CLZv16i8_:%[0-9]+]]:fpr128 = CLZv16i8 [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[CLZv16i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<16 x s8>) = COPY $q0
     %1:fpr(<16 x s8>) = G_CTLZ %0(<16 x s8>)
     $q0 = COPY %1(<16 x s8>)
@@ -147,10 +152,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8s16
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[CLZv8i16_:%[0-9]+]]:fpr128 = CLZv8i16 [[COPY]]
-    ; CHECK: $q0 = COPY [[CLZv8i16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[CLZv8i16_:%[0-9]+]]:fpr128 = CLZv8i16 [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[CLZv8i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = G_CTLZ %0(<8 x s16>)
     $q0 = COPY %1(<8 x s16>)
@@ -169,10 +175,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4s32
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[CLZv4i32_:%[0-9]+]]:fpr128 = CLZv4i32 [[COPY]]
-    ; CHECK: $q0 = COPY [[CLZv4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[CLZv4i32_:%[0-9]+]]:fpr128 = CLZv4i32 [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[CLZv4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = G_CTLZ %0(<4 x s32>)
     $q0 = COPY %1(<4 x s32>)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ctpop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ctpop.mir
index 08de2770b8496..d6d41615c37ea 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ctpop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ctpop.mir
@@ -13,10 +13,11 @@ body:             |
 
     ; CHECK-LABEL: name: CNTv8i8
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %ctpop:fpr64 = CNTv8i8 %copy
-    ; CHECK: $d0 = COPY %ctpop
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %ctpop:fpr64 = CNTv8i8 %copy
+    ; CHECK-NEXT: $d0 = COPY %ctpop
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<8 x s8>) = COPY $d0
     %ctpop:fpr(<8 x s8>) = G_CTPOP %copy(<8 x s8>)
     $d0 = COPY %ctpop(<8 x s8>)
@@ -34,10 +35,11 @@ body:             |
 
     ; CHECK-LABEL: name: CNTv16i8
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %ctpop:fpr128 = CNTv16i8 %copy
-    ; CHECK: $q0 = COPY %ctpop
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %ctpop:fpr128 = CNTv16i8 %copy
+    ; CHECK-NEXT: $q0 = COPY %ctpop
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<16 x s8>) = COPY $q0
     %ctpop:fpr(<16 x s8>) = G_CTPOP %copy(<16 x s8>)
     $q0 = COPY %ctpop(<16 x s8>)

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-cttz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-cttz.mir
index 914dab6a29a5f..0458d9f6ce283 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-cttz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-cttz.mir
@@ -19,6 +19,7 @@ body:             |
     ; CHECK-NEXT: [[CLZWr:%[0-9]+]]:gpr32 = CLZWr [[RBITWr]]
     ; CHECK-NEXT: $w0 = COPY [[CLZWr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ;
     ; CHECK-CSSC-LABEL: name: s32
     ; CHECK-CSSC: liveins: $w0
     ; CHECK-CSSC-NEXT: {{  $}}
@@ -49,6 +50,7 @@ body:             |
     ; CHECK-NEXT: [[CLZXr:%[0-9]+]]:gpr64 = CLZXr [[RBITXr]]
     ; CHECK-NEXT: $x0 = COPY [[CLZXr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    ;
     ; CHECK-CSSC-LABEL: name: s64
     ; CHECK-CSSC: liveins: $x0
     ; CHECK-CSSC-NEXT: {{  $}}

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
index e015d35916ac3..237e158d55311 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
@@ -48,7 +48,7 @@ body: |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY]], [[COPY]]
     ; CHECK-NEXT: $w0 = COPY [[ADDWrr]]
-    ; CHECK-NEXT: DBG_VALUE [[ADDWrr]], $noreg, !7, !DIExpression(), debug-location !9
+    ; CHECK-NEXT: DBG_VALUE [[ADDWrr]], $noreg, !7, !DIExpression(),  debug-location !9
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_ADD %0, %0
     $w0 = COPY %1(s32)
@@ -65,7 +65,7 @@ body: |
     ; CHECK-LABEL: name: test_dbg_value_dead
     ; CHECK: liveins: $w0
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: DBG_VALUE %1:gpr64, $noreg, !7, !DIExpression(), debug-location !9
+    ; CHECK-NEXT: DBG_VALUE %1:gpr64, $noreg, !7, !DIExpression(),  debug-location !9
     %0:gpr(s32) = COPY $w0
     %1:gpr(s64) = G_ZEXT %0:gpr(s32)
     DBG_VALUE %1(s64), $noreg, !7, !DIExpression(), debug-location !9

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ext.mir
index 38697342459b0..4d76bdf405b3d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ext.mir
@@ -16,10 +16,11 @@ body:             |
 
     ; CHECK-LABEL: name: v8s8_EXTv8i8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %v1:fpr64 = COPY $d0
-    ; CHECK: %v2:fpr64 = COPY $d1
-    ; CHECK: %shuf:fpr64 = EXTv8i8 %v1, %v2, 3
-    ; CHECK: $d0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr64 = COPY $d0
+    ; CHECK-NEXT: %v2:fpr64 = COPY $d1
+    ; CHECK-NEXT: %shuf:fpr64 = EXTv8i8 %v1, %v2, 3
+    ; CHECK-NEXT: $d0 = COPY %shuf
     %v1:fpr(<8 x s8>) = COPY $d0
     %v2:fpr(<8 x s8>) = COPY $d1
     %3:gpr(s32) = G_CONSTANT i32 3
@@ -39,10 +40,11 @@ body:             |
 
     ; CHECK-LABEL: name: v16s8_EXTv16i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %v1:fpr128 = COPY $q0
-    ; CHECK: %v2:fpr128 = COPY $q1
-    ; CHECK: %shuf:fpr128 = EXTv16i8 %v1, %v2, 3
-    ; CHECK: $q0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %v2:fpr128 = COPY $q1
+    ; CHECK-NEXT: %shuf:fpr128 = EXTv16i8 %v1, %v2, 3
+    ; CHECK-NEXT: $q0 = COPY %shuf
     %v1:fpr(<16 x s8>) = COPY $q0
     %v2:fpr(<16 x s8>) = COPY $q1
     %3:gpr(s32) = G_CONSTANT i32 3
@@ -62,10 +64,11 @@ body:             |
 
     ; CHECK-LABEL: name: v4s16_EXTv8i8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %v1:fpr64 = COPY $d0
-    ; CHECK: %v2:fpr64 = COPY $d1
-    ; CHECK: %shuf:fpr64 = EXTv8i8 %v1, %v2, 6
-    ; CHECK: $d0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr64 = COPY $d0
+    ; CHECK-NEXT: %v2:fpr64 = COPY $d1
+    ; CHECK-NEXT: %shuf:fpr64 = EXTv8i8 %v1, %v2, 6
+    ; CHECK-NEXT: $d0 = COPY %shuf
     %v1:fpr(<4 x s16>) = COPY $d0
     %v2:fpr(<4 x s16>) = COPY $d1
     %3:gpr(s32) = G_CONSTANT i32 6
@@ -85,10 +88,11 @@ body:             |
 
     ; CHECK-LABEL: name: v8s16_EXTv16i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %v1:fpr128 = COPY $q0
-    ; CHECK: %v2:fpr128 = COPY $q1
-    ; CHECK: %shuf:fpr128 = EXTv16i8 %v2, %v1, 10
-    ; CHECK: $q0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %v2:fpr128 = COPY $q1
+    ; CHECK-NEXT: %shuf:fpr128 = EXTv16i8 %v2, %v1, 10
+    ; CHECK-NEXT: $q0 = COPY %shuf
     %v1:fpr(<8 x s16>) = COPY $q0
     %v2:fpr(<8 x s16>) = COPY $q1
     %3:gpr(s32) = G_CONSTANT i32 10
@@ -109,10 +113,11 @@ body:             |
 
     ; CHECK-LABEL: name: v4s32_EXTv16i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %v1:fpr128 = COPY $q0
-    ; CHECK: %v2:fpr128 = COPY $q1
-    ; CHECK: %shuf:fpr128 = EXTv16i8 %v1, %v2, 12
-    ; CHECK: $q0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %v2:fpr128 = COPY $q1
+    ; CHECK-NEXT: %shuf:fpr128 = EXTv16i8 %v1, %v2, 12
+    ; CHECK-NEXT: $q0 = COPY %shuf
     %v1:fpr(<4 x s32>) = COPY $q0
     %v2:fpr(<4 x s32>) = COPY $q1
     %3:gpr(s32) = G_CONSTANT i32 12
@@ -132,10 +137,11 @@ body:             |
 
     ; CHECK-LABEL: name: v2s32_EXTv8i8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: %v1:fpr64 = COPY $d0
-    ; CHECK: %v2:fpr64 = COPY $d1
-    ; CHECK: %shuf:fpr64 = EXTv8i8 %v1, %v2, 2
-    ; CHECK: $d0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr64 = COPY $d0
+    ; CHECK-NEXT: %v2:fpr64 = COPY $d1
+    ; CHECK-NEXT: %shuf:fpr64 = EXTv8i8 %v1, %v2, 2
+    ; CHECK-NEXT: $d0 = COPY %shuf
     %v1:fpr(<2 x s32>) = COPY $d0
     %v2:fpr(<2 x s32>) = COPY $d1
     %3:gpr(s32) = G_CONSTANT i32 2
@@ -155,10 +161,11 @@ body:             |
 
     ; CHECK-LABEL: name: v2s64_EXTv16i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: %v1:fpr128 = COPY $q0
-    ; CHECK: %v2:fpr128 = COPY $q1
-    ; CHECK: %shuf:fpr128 = EXTv16i8 %v1, %v2, 2
-    ; CHECK: $q0 = COPY %shuf
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %v1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %v2:fpr128 = COPY $q1
+    ; CHECK-NEXT: %shuf:fpr128 = EXTv16i8 %v1, %v2, 2
+    ; CHECK-NEXT: $q0 = COPY %shuf
     %v1:fpr(<2 x s64>) = COPY $q0
     %v2:fpr(<2 x s64>) = COPY $q1
     %3:gpr(s32) = G_CONSTANT i32 2

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
index a282c0f83d561..5ba0353e23f76 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-extload.mir
@@ -19,9 +19,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: aextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16) from %ir.addr)
-    ; CHECK: $w0 = COPY [[T0]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16) from %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDRHHui]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_LOAD %0 :: (load (s16) from %ir.addr)
     $w0 = COPY %1(s32)
@@ -36,11 +38,13 @@ body:             |
   bb.0:
     liveins: $x0
 
-    ; CHECK-LABEL: name: aextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16) from %ir.addr)
-    ; CHECK: [[T1:%[0-9]+]]:gpr32all = COPY [[T0]]
-    ; CHECK: $w0 = COPY [[T1]]
+    ; CHECK-LABEL: name: aextload_s32_from_s16_not_combined
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16) from %ir.addr)
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRHHui]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_LOAD %0 :: (load (s16) from %ir.addr)
     %2:gpr(s32) = G_ANYEXT %1

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt-with-extend.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt-with-extend.mir
index 4e5416a8b8548..55dfbcd7a88a8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt-with-extend.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-extract-vector-elt-with-extend.mir
@@ -12,10 +12,11 @@ body: |
 
     ; CHECK-LABEL: name: si64
     ; CHECK: liveins: $q0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SMOVvi32to64_:%[0-9]+]]:gpr64 = SMOVvi32to64 [[COPY]], 1
-    ; CHECK: $x0 = COPY [[SMOVvi32to64_]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SMOVvi32to64_:%[0-9]+]]:gpr64 = SMOVvi32to64 [[COPY]], 1
+    ; CHECK-NEXT: $x0 = COPY [[SMOVvi32to64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:fpr(<4 x s32>) = COPY $q0
     %3:gpr(s64) = G_CONSTANT i64 1
     %2:fpr(s32) = G_EXTRACT_VECTOR_ELT %0(<4 x s32>), %3(s64)
@@ -36,12 +37,13 @@ body: |
 
     ; CHECK-LABEL: name: si64_2
     ; CHECK: liveins: $d0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[SMOVvi32to64_:%[0-9]+]]:gpr64 = SMOVvi32to64 [[INSERT_SUBREG]], 1
-    ; CHECK: $x0 = COPY [[SMOVvi32to64_]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[SMOVvi32to64_:%[0-9]+]]:gpr64 = SMOVvi32to64 [[INSERT_SUBREG]], 1
+    ; CHECK-NEXT: $x0 = COPY [[SMOVvi32to64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:fpr(<2 x s32>) = COPY $d0
     %3:gpr(s64) = G_CONSTANT i64 1
     %2:fpr(s32) = G_EXTRACT_VECTOR_ELT %0(<2 x s32>), %3(s64)
@@ -62,11 +64,12 @@ body: |
 
     ; CHECK-LABEL: name: zi64
     ; CHECK: liveins: $q0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UMOVvi32_:%[0-9]+]]:gpr32 = UMOVvi32 [[COPY]], 1
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[UMOVvi32_]], %subreg.sub_32
-    ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UMOVvi32_:%[0-9]+]]:gpr32 = UMOVvi32 [[COPY]], 1
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[UMOVvi32_]], %subreg.sub_32
+    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:fpr(<4 x s32>) = COPY $q0
     %3:gpr(s64) = G_CONSTANT i64 1
     %2:fpr(s32) = G_EXTRACT_VECTOR_ELT %0(<4 x s32>), %3(s64)
@@ -87,13 +90,14 @@ body: |
 
     ; CHECK-LABEL: name: zi64_2
     ; CHECK: liveins: $d0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[UMOVvi32_:%[0-9]+]]:gpr32 = UMOVvi32 [[INSERT_SUBREG]], 1
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[UMOVvi32_]], %subreg.sub_32
-    ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[UMOVvi32_:%[0-9]+]]:gpr32 = UMOVvi32 [[INSERT_SUBREG]], 1
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[UMOVvi32_]], %subreg.sub_32
+    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
      %0:fpr(<2 x s32>) = COPY $d0
     %3:gpr(s64) = G_CONSTANT i64 1
     %2:fpr(s32) = G_EXTRACT_VECTOR_ELT %0(<2 x s32>), %3(s64)
@@ -114,10 +118,11 @@ body: |
 
     ; CHECK-LABEL: name: si32
     ; CHECK: liveins: $q0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SMOVvi16to32_:%[0-9]+]]:gpr32 = SMOVvi16to32 [[COPY]], 1
-    ; CHECK: $w0 = COPY [[SMOVvi16to32_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SMOVvi16to32_:%[0-9]+]]:gpr32 = SMOVvi16to32 [[COPY]], 1
+    ; CHECK-NEXT: $w0 = COPY [[SMOVvi16to32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<8 x s16>) = COPY $q0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s16) = G_EXTRACT_VECTOR_ELT %0(<8 x s16>), %4(s64)
@@ -138,10 +143,11 @@ body: |
 
     ; CHECK-LABEL: name: zi32
     ; CHECK: liveins: $q0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UMOVvi16_:%[0-9]+]]:gpr32 = UMOVvi16 [[COPY]], 1
-    ; CHECK: $w0 = COPY [[UMOVvi16_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UMOVvi16_:%[0-9]+]]:gpr32 = UMOVvi16 [[COPY]], 1
+    ; CHECK-NEXT: $w0 = COPY [[UMOVvi16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<8 x s16>) = COPY $q0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s16) = G_EXTRACT_VECTOR_ELT %0(<8 x s16>), %4(s64)
@@ -162,12 +168,13 @@ body: |
 
     ; CHECK-LABEL: name: si32_2
     ; CHECK: liveins: $d0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[SMOVvi16to32_:%[0-9]+]]:gpr32 = SMOVvi16to32 [[INSERT_SUBREG]], 1
-    ; CHECK: $w0 = COPY [[SMOVvi16to32_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[SMOVvi16to32_:%[0-9]+]]:gpr32 = SMOVvi16to32 [[INSERT_SUBREG]], 1
+    ; CHECK-NEXT: $w0 = COPY [[SMOVvi16to32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<4 x s16>) = COPY $d0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s16) = G_EXTRACT_VECTOR_ELT %0(<4 x s16>), %4(s64)
@@ -188,12 +195,13 @@ body: |
 
     ; CHECK-LABEL: name: zi32_2
     ; CHECK: liveins: $d0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[UMOVvi16_:%[0-9]+]]:gpr32 = UMOVvi16 [[INSERT_SUBREG]], 1
-    ; CHECK: $w0 = COPY [[UMOVvi16_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[UMOVvi16_:%[0-9]+]]:gpr32 = UMOVvi16 [[INSERT_SUBREG]], 1
+    ; CHECK-NEXT: $w0 = COPY [[UMOVvi16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<4 x s16>) = COPY $d0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s16) = G_EXTRACT_VECTOR_ELT %0(<4 x s16>), %4(s64)
@@ -214,10 +222,11 @@ body: |
 
     ; CHECK-LABEL: name: si16
     ; CHECK: liveins: $q0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SMOVvi8to32_:%[0-9]+]]:gpr32 = SMOVvi8to32 [[COPY]], 1
-    ; CHECK: $w0 = COPY [[SMOVvi8to32_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SMOVvi8to32_:%[0-9]+]]:gpr32 = SMOVvi8to32 [[COPY]], 1
+    ; CHECK-NEXT: $w0 = COPY [[SMOVvi8to32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<16 x s8>) = COPY $q0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s8) = G_EXTRACT_VECTOR_ELT %0(<16 x s8>), %4(s64)
@@ -237,10 +246,11 @@ body: |
 
     ; CHECK-LABEL: name: zi16
     ; CHECK: liveins: $q0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UMOVvi8_:%[0-9]+]]:gpr32 = UMOVvi8 [[COPY]], 1
-    ; CHECK: $w0 = COPY [[UMOVvi8_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UMOVvi8_:%[0-9]+]]:gpr32 = UMOVvi8 [[COPY]], 1
+    ; CHECK-NEXT: $w0 = COPY [[UMOVvi8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<16 x s8>) = COPY $q0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s8) = G_EXTRACT_VECTOR_ELT %0(<16 x s8>), %4(s64)
@@ -261,12 +271,13 @@ body: |
 
     ; CHECK-LABEL: name: si16_2
     ; CHECK: liveins: $d0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[SMOVvi8to32_:%[0-9]+]]:gpr32 = SMOVvi8to32 [[INSERT_SUBREG]], 1
-    ; CHECK: $w0 = COPY [[SMOVvi8to32_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[SMOVvi8to32_:%[0-9]+]]:gpr32 = SMOVvi8to32 [[INSERT_SUBREG]], 1
+    ; CHECK-NEXT: $w0 = COPY [[SMOVvi8to32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<8 x s8>) = COPY $d0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s8) = G_EXTRACT_VECTOR_ELT %0(<8 x s8>), %4(s64)
@@ -286,12 +297,13 @@ body: |
 
     ; CHECK-LABEL: name: zi16_2
     ; CHECK: liveins: $d0, $w0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
-    ; CHECK: [[UMOVvi8_:%[0-9]+]]:gpr32 = UMOVvi8 [[INSERT_SUBREG]], 1
-    ; CHECK: $w0 = COPY [[UMOVvi8_]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[UMOVvi8_:%[0-9]+]]:gpr32 = UMOVvi8 [[INSERT_SUBREG]], 1
+    ; CHECK-NEXT: $w0 = COPY [[UMOVvi8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:fpr(<8 x s8>) = COPY $d0
     %4:gpr(s64) = G_CONSTANT i64 1
     %3:fpr(s8) = G_EXTRACT_VECTOR_ELT %0(<8 x s8>), %4(s64)
@@ -308,6 +320,12 @@ regBankSelected: true
 tracksRegLiveness: true
 body:             |
   bb.1.entry:
+    ; CHECK-LABEL: name: skip_anyext_to_16
+    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr8 = COPY [[DEF]].bsub
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[COPY]], %subreg.bsub
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %5:fpr(<16 x s8>) = G_IMPLICIT_DEF
     %12:gpr(s64) = G_CONSTANT i64 0
     %4:fpr(s8) = G_EXTRACT_VECTOR_ELT %5(<16 x s8>), %12(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-faddp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-faddp.mir
index 167d9b1db4989..20a37f2573569 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-faddp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-faddp.mir
@@ -16,10 +16,11 @@ body:             |
 
     ; CHECK-LABEL: name: f64_faddp
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FADDPv2i64p:%[0-9]+]]:fpr64 = nofpexcept FADDPv2i64p [[COPY]]
-    ; CHECK: $d0 = COPY [[FADDPv2i64p]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FADDPv2i64p:%[0-9]+]]:fpr64 = nofpexcept FADDPv2i64p [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FADDPv2i64p]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s64>) = COPY $q0
     %6:gpr(s64) = G_CONSTANT i64 0
     %7:fpr(s64) = G_EXTRACT_VECTOR_ELT %0(<2 x s64>), %6(s64)
@@ -46,10 +47,11 @@ body:             |
 
     ; CHECK-LABEL: name: f32_faddp
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FADDPv2i32p:%[0-9]+]]:fpr32 = nofpexcept FADDPv2i32p [[COPY]]
-    ; CHECK: $s0 = COPY [[FADDPv2i32p]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FADDPv2i32p:%[0-9]+]]:fpr32 = nofpexcept FADDPv2i32p [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FADDPv2i32p]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(<2 x s32>) = COPY $d0
     %6:gpr(s64) = G_CONSTANT i64 0
     %7:fpr(s32) = G_EXTRACT_VECTOR_ELT %0(<2 x s32>), %6(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fcmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fcmp.mir
index dd7487df40dbb..62d55a270b5e2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fcmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fcmp.mir
@@ -16,11 +16,12 @@ body:             |
 
     ; CHECK-LABEL: name: zero
     ; CHECK: liveins: $s0, $s1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $s0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $s0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
@@ -42,12 +43,13 @@ body:             |
 
     ; CHECK-LABEL: name: notzero
     ; CHECK: liveins: $s0, $s1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FMOVSi:%[0-9]+]]:fpr32 = FMOVSi 112
-    ; CHECK: nofpexcept FCMPSrr [[COPY]], [[FMOVSi]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $s0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FMOVSi:%[0-9]+]]:fpr32 = FMOVSi 112
+    ; CHECK-NEXT: nofpexcept FCMPSrr [[COPY]], [[FMOVSi]], implicit-def $nzcv, implicit $fpcr
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $s0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 1.000000e+00
@@ -69,12 +71,13 @@ body:             |
 
     ; CHECK-LABEL: name: notzero_s64
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMOVDi:%[0-9]+]]:fpr64 = FMOVDi 112
-    ; CHECK: nofpexcept FCMPDrr [[COPY]], [[FMOVDi]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $s0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMOVDi:%[0-9]+]]:fpr64 = FMOVDi 112
+    ; CHECK-NEXT: nofpexcept FCMPDrr [[COPY]], [[FMOVDi]], implicit-def $nzcv, implicit $fpcr
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $s0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = COPY $d1
     %2:fpr(s64) = G_FCONSTANT double 1.000000e+00
@@ -96,11 +99,12 @@ body:             |
 
     ; CHECK-LABEL: name: zero_s64
     ; CHECK: liveins: $d0, $d1, $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $s0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: nofpexcept FCMPDri [[COPY]], implicit-def $nzcv, implicit $fpcr
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $s0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = COPY $d1
     %2:fpr(s64) = G_FCONSTANT double 0.000000e+00
@@ -121,11 +125,12 @@ body:             |
 
     ; CHECK-LABEL: name: zero_lhs
     ; CHECK: liveins: $s0, $s1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK: $s0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv, implicit $fpcr
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
+    ; CHECK-NEXT: $s0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
@@ -146,12 +151,13 @@ body:             |
 
     ; CHECK-LABEL: name: zero_lhs_not_commutative_pred
     ; CHECK: liveins: $s0, $s1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK: nofpexcept FCMPSrr [[FMOVS0_]], [[COPY]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
-    ; CHECK: $s0 = COPY [[CSINCWr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
+    ; CHECK-NEXT: nofpexcept FCMPSrr [[FMOVS0_]], [[COPY]], implicit-def $nzcv, implicit $fpcr
+    ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
+    ; CHECK-NEXT: $s0 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fma.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fma.mir
index 5bde8c398af05..a487c16f839d2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fma.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fma.mir
@@ -23,11 +23,13 @@ body:             |
     liveins: $w0, $w1, $w2
 
     ; CHECK-LABEL: name: FMADDSrrr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $w2
-    ; CHECK: [[FMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FMADDSrrr [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: $w0 = COPY [[FMADDSrrr]]
+    ; CHECK: liveins: $w0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $w2
+    ; CHECK-NEXT: [[FMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FMADDSrrr [[COPY]], [[COPY1]], [[COPY2]], implicit $fpcr
+    ; CHECK-NEXT: $w0 = COPY [[FMADDSrrr]]
     %0(s32) = COPY $w0
     %1(s32) = COPY $w1
     %2(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fmaximum.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fmaximum.mir
index 481d0f1d6b9c4..fb04eff54e041 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fmaximum.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fmaximum.mir
@@ -16,7 +16,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %a:fpr16 = COPY $h0
     ; CHECK-NEXT: %b:fpr16 = COPY $h1
-    ; CHECK-NEXT: %select_me:fpr16 = nofpexcept FMAXHrr %a, %b
+    ; CHECK-NEXT: %select_me:fpr16 = nofpexcept FMAXHrr %a, %b, implicit $fpcr
     ; CHECK-NEXT: $h0 = COPY %select_me
     ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %a:fpr(s16) = COPY $h0
@@ -40,7 +40,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %a:fpr32 = COPY $s0
     ; CHECK-NEXT: %b:fpr32 = COPY $s1
-    ; CHECK-NEXT: %select_me:fpr32 = nofpexcept FMAXSrr %a, %b
+    ; CHECK-NEXT: %select_me:fpr32 = nofpexcept FMAXSrr %a, %b, implicit $fpcr
     ; CHECK-NEXT: $s0 = COPY %select_me
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %a:fpr(s32) = COPY $s0
@@ -64,7 +64,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %a:fpr64 = COPY $d0
     ; CHECK-NEXT: %b:fpr64 = COPY $d1
-    ; CHECK-NEXT: %select_me:fpr64 = nofpexcept FMAXDrr %a, %b
+    ; CHECK-NEXT: %select_me:fpr64 = nofpexcept FMAXDrr %a, %b, implicit $fpcr
     ; CHECK-NEXT: $d0 = COPY %select_me
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %a:fpr(s64) = COPY $d0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fminimum.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fminimum.mir
index 5ffda6d557a28..457a6ef1d123f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fminimum.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fminimum.mir
@@ -16,7 +16,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %a:fpr16 = COPY $h0
     ; CHECK-NEXT: %b:fpr16 = COPY $h1
-    ; CHECK-NEXT: %select_me:fpr16 = nofpexcept FMINHrr %a, %b
+    ; CHECK-NEXT: %select_me:fpr16 = nofpexcept FMINHrr %a, %b, implicit $fpcr
     ; CHECK-NEXT: $h0 = COPY %select_me
     ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %a:fpr(s16) = COPY $h0
@@ -40,7 +40,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %a:fpr32 = COPY $s0
     ; CHECK-NEXT: %b:fpr32 = COPY $s1
-    ; CHECK-NEXT: %select_me:fpr32 = nofpexcept FMINSrr %a, %b
+    ; CHECK-NEXT: %select_me:fpr32 = nofpexcept FMINSrr %a, %b, implicit $fpcr
     ; CHECK-NEXT: $s0 = COPY %select_me
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %a:fpr(s32) = COPY $s0
@@ -64,7 +64,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %a:fpr64 = COPY $d0
     ; CHECK-NEXT: %b:fpr64 = COPY $d1
-    ; CHECK-NEXT: %select_me:fpr64 = nofpexcept FMINDrr %a, %b
+    ; CHECK-NEXT: %select_me:fpr64 = nofpexcept FMINDrr %a, %b, implicit $fpcr
     ; CHECK-NEXT: $d0 = COPY %select_me
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %a:fpr(s64) = COPY $d0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fmul-indexed.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fmul-indexed.mir
index 2aca52ed8a40e..491835a5079d7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fmul-indexed.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fmul-indexed.mir
@@ -18,14 +18,15 @@ body:             |
 
     ; CHECK-LABEL: name: v2s32_fmul_indexed
     ; CHECK: liveins: $d0, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY1]], 0 :: (load (<2 x s32>), align 4)
-    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[LDRDui]], %subreg.dsub
-    ; CHECK: [[FMULv2i32_indexed:%[0-9]+]]:fpr64 = nofpexcept FMULv2i32_indexed [[COPY]], [[INSERT_SUBREG]], 0
-    ; CHECK: $d0 = COPY [[FMULv2i32_indexed]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY1]], 0 :: (load (<2 x s32>), align 4)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[LDRDui]], %subreg.dsub
+    ; CHECK-NEXT: [[FMULv2i32_indexed:%[0-9]+]]:fpr64 = nofpexcept FMULv2i32_indexed [[COPY]], [[INSERT_SUBREG]], 0, implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FMULv2i32_indexed]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:gpr(p0) = COPY $x0
     %2:fpr(<2 x s32>) = G_LOAD %1(p0) :: (load (<2 x s32>), align 4)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
index 0890f746fa0fa..0ee0d9587e88b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
@@ -15,9 +15,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: fptrunc_s16_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]]
-    ; CHECK: $h0 = COPY [[FCVTHSr]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FCVTHSr]]
     %0(s32) = COPY $s0
     %1(s16) = G_FPTRUNC %0
     $h0 = COPY %1(s16)
@@ -37,9 +39,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fptrunc_s16_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTHDr:%[0-9]+]]:fpr16 = nofpexcept FCVTHDr [[COPY]]
-    ; CHECK: $h0 = COPY [[FCVTHDr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTHDr:%[0-9]+]]:fpr16 = nofpexcept FCVTHDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FCVTHDr]]
     %0(s64) = COPY $d0
     %1(s16) = G_FPTRUNC %0
     $h0 = COPY %1(s16)
@@ -59,9 +63,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fptrunc_s32_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTSDr:%[0-9]+]]:fpr32 = nofpexcept FCVTSDr [[COPY]]
-    ; CHECK: $s0 = COPY [[FCVTSDr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTSDr:%[0-9]+]]:fpr32 = nofpexcept FCVTSDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FCVTSDr]]
     %0(s64) = COPY $d0
     %1(s32) = G_FPTRUNC %0
     $s0 = COPY %1(s32)
@@ -80,9 +86,11 @@ body:             |
   bb.0:
     liveins: $d0
     ; CHECK-LABEL: name: fptrunc_v4s16_v4s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTNv4i16_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv4i16 [[COPY]]
-    ; CHECK: $d0 = COPY [[FCVTNv4i16_]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTNv4i16_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv4i16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FCVTNv4i16_]]
     %0(<4 x s32>) = COPY $q0
     %1(<4 x s16>) = G_FPTRUNC %0
     $d0 = COPY %1(<4 x s16>)
@@ -101,9 +109,11 @@ body:             |
   bb.0:
     liveins: $q0
     ; CHECK-LABEL: name: fptrunc_v2s32_v2s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTNv2i32_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv2i32 [[COPY]]
-    ; CHECK: $d0 = COPY [[FCVTNv2i32_]]
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTNv2i32_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv2i32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FCVTNv2i32_]]
     %0(<2 x s64>) = COPY $q0
     %1(<2 x s32>) = G_FPTRUNC %0
     $d0 = COPY %1(<2 x s32>)
@@ -123,9 +133,11 @@ body:             |
     liveins: $h0
 
     ; CHECK-LABEL: name: fpext_s32_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[FCVTSHr:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY]]
-    ; CHECK: $s0 = COPY [[FCVTSHr]]
+    ; CHECK: liveins: $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FCVTSHr]]
     %0(s16) = COPY $h0
     %1(s32) = G_FPEXT %0
     $s0 = COPY %1(s32)
@@ -145,9 +157,11 @@ body:             |
     liveins: $h0
 
     ; CHECK-LABEL: name: fpext_s64_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[FCVTDHr:%[0-9]+]]:fpr64 = nofpexcept FCVTDHr [[COPY]]
-    ; CHECK: $d0 = COPY [[FCVTDHr]]
+    ; CHECK: liveins: $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[FCVTDHr:%[0-9]+]]:fpr64 = nofpexcept FCVTDHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FCVTDHr]]
     %0(s16) = COPY $h0
     %1(s64) = G_FPEXT %0
     $d0 = COPY %1(s64)
@@ -167,9 +181,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fpext_s64_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FCVTDSr:%[0-9]+]]:fpr64 = nofpexcept FCVTDSr [[COPY]]
-    ; CHECK: $d0 = COPY [[FCVTDSr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FCVTDSr:%[0-9]+]]:fpr64 = nofpexcept FCVTDSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FCVTDSr]]
     %0(s32) = COPY $s0
     %1(s64) = G_FPEXT %0
     $d0 = COPY %1(s64)
@@ -188,9 +204,11 @@ body:             |
   bb.0:
     liveins: $d0
     ; CHECK-LABEL: name: fpext_v4s32_v4s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTLv4i16_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv4i16 [[COPY]]
-    ; CHECK: $q0 = COPY [[FCVTLv4i16_]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTLv4i16_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv4i16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FCVTLv4i16_]]
     %0(<4 x s16>) = COPY $d0
     %1(<4 x s32>) = G_FPEXT %0
     $q0 = COPY %1(<4 x s32>)
@@ -209,9 +227,11 @@ body:             |
   bb.0:
     liveins: $d0
     ; CHECK-LABEL: name: fpext_v2s64_v2s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTLv2i32_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv2i32 [[COPY]]
-    ; CHECK: $q0 = COPY [[FCVTLv2i32_]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTLv2i32_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv2i32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FCVTLv2i32_]]
     %0(<2 x s32>) = COPY $d0
     %1(<2 x s64>) = G_FPEXT %0
     $q0 = COPY %1(<2 x s64>)
@@ -231,9 +251,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: sitofp_s32_s32_fpr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[SCVTFUWSri:%[0-9]+]]:fpr32 = nofpexcept SCVTFUWSri [[COPY]]
-    ; CHECK: $s0 = COPY [[SCVTFUWSri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[SCVTFUWSri:%[0-9]+]]:fpr32 = nofpexcept SCVTFUWSri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[SCVTFUWSri]]
     %0(s32) = COPY $w0
     %1(s32) = G_SITOFP %0
     $s0 = COPY %1(s32)
@@ -253,9 +275,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: sitofp_s32_s32_fpr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[SCVTFv1i32_:%[0-9]+]]:fpr32 = nofpexcept SCVTFv1i32 [[COPY]]
-    ; CHECK: $s0 = COPY [[SCVTFv1i32_]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[SCVTFv1i32_:%[0-9]+]]:fpr32 = nofpexcept SCVTFv1i32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[SCVTFv1i32_]]
     %0(s32) = COPY $s0
     %1(s32) = G_SITOFP %0
     $s0 = COPY %1(s32)
@@ -275,9 +299,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: uitofp_s32_s32_fpr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[UCVTFv1i32_:%[0-9]+]]:fpr32 = nofpexcept UCVTFv1i32 [[COPY]]
-    ; CHECK: $s0 = COPY [[UCVTFv1i32_]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[UCVTFv1i32_:%[0-9]+]]:fpr32 = nofpexcept UCVTFv1i32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[UCVTFv1i32_]]
     %0(s32) = COPY $s0
     %1(s32) = G_UITOFP %0
     $s0 = COPY %1(s32)
@@ -297,9 +323,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sitofp_s32_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[SCVTFUXSri:%[0-9]+]]:fpr32 = nofpexcept SCVTFUXSri [[COPY]]
-    ; CHECK: $s0 = COPY [[SCVTFUXSri]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[SCVTFUXSri:%[0-9]+]]:fpr32 = nofpexcept SCVTFUXSri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[SCVTFUXSri]]
     %0(s64) = COPY $x0
     %1(s32) = G_SITOFP %0
     $s0 = COPY %1(s32)
@@ -319,9 +347,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: sitofp_s64_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[SCVTFUWDri:%[0-9]+]]:fpr64 = nofpexcept SCVTFUWDri [[COPY]]
-    ; CHECK: $d0 = COPY [[SCVTFUWDri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[SCVTFUWDri:%[0-9]+]]:fpr64 = nofpexcept SCVTFUWDri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[SCVTFUWDri]]
     %0(s32) = COPY $w0
     %1(s64) = G_SITOFP %0
     $d0 = COPY %1(s64)
@@ -341,10 +371,12 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: sitofp_s64_s32_fpr_both
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: [[SCVTFUWDri:%[0-9]+]]:fpr64 = nofpexcept SCVTFUWDri [[COPY1]]
-    ; CHECK: $d0 = COPY [[SCVTFUWDri]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
+    ; CHECK-NEXT: [[SCVTFUWDri:%[0-9]+]]:fpr64 = nofpexcept SCVTFUWDri [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[SCVTFUWDri]]
     %0(s32) = COPY $s0
     %1(s64) = G_SITOFP %0
     $d0 = COPY %1(s64)
@@ -364,9 +396,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sitofp_s64_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[SCVTFUXDri:%[0-9]+]]:fpr64 = nofpexcept SCVTFUXDri [[COPY]]
-    ; CHECK: $d0 = COPY [[SCVTFUXDri]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[SCVTFUXDri:%[0-9]+]]:fpr64 = nofpexcept SCVTFUXDri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[SCVTFUXDri]]
     %0(s64) = COPY $x0
     %1(s64) = G_SITOFP %0
     $d0 = COPY %1(s64)
@@ -386,9 +420,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: uitofp_s32_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[UCVTFUWSri:%[0-9]+]]:fpr32 = nofpexcept UCVTFUWSri [[COPY]]
-    ; CHECK: $s0 = COPY [[UCVTFUWSri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[UCVTFUWSri:%[0-9]+]]:fpr32 = nofpexcept UCVTFUWSri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[UCVTFUWSri]]
     %0(s32) = COPY $w0
     %1(s32) = G_UITOFP %0
     $s0 = COPY %1(s32)
@@ -408,9 +444,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: uitofp_s32_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[UCVTFUXSri:%[0-9]+]]:fpr32 = nofpexcept UCVTFUXSri [[COPY]]
-    ; CHECK: $s0 = COPY [[UCVTFUXSri]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[UCVTFUXSri:%[0-9]+]]:fpr32 = nofpexcept UCVTFUXSri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[UCVTFUXSri]]
     %0(s64) = COPY $x0
     %1(s32) = G_UITOFP %0
     $s0 = COPY %1(s32)
@@ -430,9 +468,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: uitofp_s64_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[UCVTFUWDri:%[0-9]+]]:fpr64 = nofpexcept UCVTFUWDri [[COPY]]
-    ; CHECK: $d0 = COPY [[UCVTFUWDri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[UCVTFUWDri:%[0-9]+]]:fpr64 = nofpexcept UCVTFUWDri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[UCVTFUWDri]]
     %0(s32) = COPY $w0
     %1(s64) = G_UITOFP %0
     $d0 = COPY %1(s64)
@@ -452,9 +492,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: uitofp_s64_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[UCVTFUXDri:%[0-9]+]]:fpr64 = nofpexcept UCVTFUXDri [[COPY]]
-    ; CHECK: $d0 = COPY [[UCVTFUXDri]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[UCVTFUXDri:%[0-9]+]]:fpr64 = nofpexcept UCVTFUXDri [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[UCVTFUXDri]]
     %0(s64) = COPY $x0
     %1(s64) = G_UITOFP %0
     $d0 = COPY %1(s64)
@@ -474,9 +516,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: fptosi_s32_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FCVTZSUWSr:%[0-9]+]]:gpr32 = nofpexcept FCVTZSUWSr [[COPY]]
-    ; CHECK: $w0 = COPY [[FCVTZSUWSr]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FCVTZSUWSr:%[0-9]+]]:gpr32 = nofpexcept FCVTZSUWSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $w0 = COPY [[FCVTZSUWSr]]
     %0(s32) = COPY $s0
     %1(s32) = G_FPTOSI %0
     $w0 = COPY %1(s32)
@@ -496,9 +540,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fptosi_s32_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTZSUWDr:%[0-9]+]]:gpr32 = nofpexcept FCVTZSUWDr [[COPY]]
-    ; CHECK: $w0 = COPY [[FCVTZSUWDr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTZSUWDr:%[0-9]+]]:gpr32 = nofpexcept FCVTZSUWDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $w0 = COPY [[FCVTZSUWDr]]
     %0(s64) = COPY $d0
     %1(s32) = G_FPTOSI %0
     $w0 = COPY %1(s32)
@@ -518,9 +564,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: fptosi_s64_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FCVTZSUXSr:%[0-9]+]]:gpr64 = nofpexcept FCVTZSUXSr [[COPY]]
-    ; CHECK: $x0 = COPY [[FCVTZSUXSr]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FCVTZSUXSr:%[0-9]+]]:gpr64 = nofpexcept FCVTZSUXSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $x0 = COPY [[FCVTZSUXSr]]
     %0(s32) = COPY $s0
     %1(s64) = G_FPTOSI %0
     $x0 = COPY %1(s64)
@@ -540,9 +588,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fptosi_s64_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTZSUXDr:%[0-9]+]]:gpr64 = nofpexcept FCVTZSUXDr [[COPY]]
-    ; CHECK: $x0 = COPY [[FCVTZSUXDr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTZSUXDr:%[0-9]+]]:gpr64 = nofpexcept FCVTZSUXDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $x0 = COPY [[FCVTZSUXDr]]
     %0(s64) = COPY $d0
     %1(s64) = G_FPTOSI %0
     $x0 = COPY %1(s64)
@@ -562,9 +612,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: fptoui_s32_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FCVTZUUWSr:%[0-9]+]]:gpr32 = nofpexcept FCVTZUUWSr [[COPY]]
-    ; CHECK: $w0 = COPY [[FCVTZUUWSr]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FCVTZUUWSr:%[0-9]+]]:gpr32 = nofpexcept FCVTZUUWSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $w0 = COPY [[FCVTZUUWSr]]
     %0(s32) = COPY $s0
     %1(s32) = G_FPTOUI %0
     $w0 = COPY %1(s32)
@@ -584,9 +636,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fptoui_s32_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTZUUWDr:%[0-9]+]]:gpr32 = nofpexcept FCVTZUUWDr [[COPY]]
-    ; CHECK: $w0 = COPY [[FCVTZUUWDr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTZUUWDr:%[0-9]+]]:gpr32 = nofpexcept FCVTZUUWDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $w0 = COPY [[FCVTZUUWDr]]
     %0(s64) = COPY $d0
     %1(s32) = G_FPTOUI %0
     $w0 = COPY %1(s32)
@@ -606,9 +660,11 @@ body:             |
     liveins: $s0
 
     ; CHECK-LABEL: name: fptoui_s64_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FCVTZUUXSr:%[0-9]+]]:gpr64 = nofpexcept FCVTZUUXSr [[COPY]]
-    ; CHECK: $x0 = COPY [[FCVTZUUXSr]]
+    ; CHECK: liveins: $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FCVTZUUXSr:%[0-9]+]]:gpr64 = nofpexcept FCVTZUUXSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $x0 = COPY [[FCVTZUUXSr]]
     %0(s32) = COPY $s0
     %1(s64) = G_FPTOUI %0
     $x0 = COPY %1(s64)
@@ -628,9 +684,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: fptoui_s64_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTZUUXDr:%[0-9]+]]:gpr64 = nofpexcept FCVTZUUXDr [[COPY]]
-    ; CHECK: $x0 = COPY [[FCVTZUUXDr]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTZUUXDr:%[0-9]+]]:gpr64 = nofpexcept FCVTZUUXDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $x0 = COPY [[FCVTZUUXDr]]
     %0(s64) = COPY $d0
     %1(s64) = G_FPTOUI %0
     $x0 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-fp16-fconstant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-fp16-fconstant.mir
index 18f907813de52..5b6726d6e5bf3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-fp16-fconstant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-fp16-fconstant.mir
@@ -10,8 +10,8 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: positive_zero
     ; CHECK: [[FMOVH0_:%[0-9]+]]:fpr16 = FMOVH0
-    ; CHECK: $h0 = COPY [[FMOVH0_]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: $h0 = COPY [[FMOVH0_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = G_FCONSTANT half 0.0
     $h0 = COPY %0(s16)
     RET_ReallyLR implicit $h0
@@ -25,8 +25,8 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: one
     ; CHECK: [[FMOVHi:%[0-9]+]]:fpr16 = FMOVHi 112
-    ; CHECK: $h0 = COPY [[FMOVHi]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: $h0 = COPY [[FMOVHi]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = G_FCONSTANT half 1.0
     $h0 = COPY %0(s16)
     RET_ReallyLR implicit $h0
@@ -40,9 +40,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: constant_pool_load
     ; CHECK: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
-    ; CHECK: [[LDRHui:%[0-9]+]]:fpr16 = LDRHui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s16) from constant-pool)
-    ; CHECK: $h0 = COPY [[LDRHui]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: [[LDRHui:%[0-9]+]]:fpr16 = LDRHui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s16) from constant-pool)
+    ; CHECK-NEXT: $h0 = COPY [[LDRHui]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = G_FCONSTANT half 0xH000B
     $h0 = COPY %0(s16)
     RET_ReallyLR implicit $h0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-frint-nofp16.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-frint-nofp16.mir
index b6e831b73df97..fe4adfac8e721 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-frint-nofp16.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-frint-nofp16.mir
@@ -17,10 +17,10 @@ body:             |
     ; CHECK: liveins: $h0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK-NEXT: %1:fpr32 = nofpexcept FCVTSHr [[COPY]], implicit $fpcr
-    ; CHECK-NEXT: %2:fpr32 = nofpexcept FRINTXSr %1, implicit $fpcr
-    ; CHECK-NEXT: %3:fpr16 = nofpexcept FCVTHSr %2, implicit $fpcr
-    ; CHECK-NEXT: $h0 = COPY %3
+    ; CHECK-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FCVTHSr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = COPY $h0
     %2:fpr(s32) = G_FPEXT %0(s16)
@@ -55,28 +55,28 @@ body:             |
     ; CHECK-NEXT: [[DUPi16_:%[0-9]+]]:fpr16 = DUPi16 [[INSERT_SUBREG]], 1
     ; CHECK-NEXT: [[DUPi16_1:%[0-9]+]]:fpr16 = DUPi16 [[INSERT_SUBREG1]], 2
     ; CHECK-NEXT: [[DUPi16_2:%[0-9]+]]:fpr16 = DUPi16 [[INSERT_SUBREG2]], 3
-    ; CHECK-NEXT: %5:fpr32 = nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
-    ; CHECK-NEXT: %6:fpr32 = nofpexcept FRINTXSr %5, implicit $fpcr
-    ; CHECK-NEXT: %7:fpr16 = nofpexcept FCVTHSr %6, implicit $fpcr
-    ; CHECK-NEXT: %8:fpr32 = nofpexcept FCVTSHr [[DUPi16_]], implicit $fpcr
-    ; CHECK-NEXT: %9:fpr32 = nofpexcept FRINTXSr %8, implicit $fpcr
-    ; CHECK-NEXT: %10:fpr16 = nofpexcept FCVTHSr %9, implicit $fpcr
-    ; CHECK-NEXT: %11:fpr32 = nofpexcept FCVTSHr [[DUPi16_1]], implicit $fpcr
-    ; CHECK-NEXT: %12:fpr32 = nofpexcept FRINTXSr %11, implicit $fpcr
-    ; CHECK-NEXT: %13:fpr16 = nofpexcept FCVTHSr %12, implicit $fpcr
-    ; CHECK-NEXT: %14:fpr32 = nofpexcept FCVTSHr [[DUPi16_2]], implicit $fpcr
-    ; CHECK-NEXT: %15:fpr32 = nofpexcept FRINTXSr %14, implicit $fpcr
-    ; CHECK-NEXT: %16:fpr16 = nofpexcept FCVTHSr %15, implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr1:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr1:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr1]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr1:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr1]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr2:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_1]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr2:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr2]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr2:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr2]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr3:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_2]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr3:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr3]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr3:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr3]], implicit $fpcr
     ; CHECK-NEXT: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], %7, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[FCVTHSr]], %subreg.hsub
     ; CHECK-NEXT: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], %10, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[FCVTHSr1]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane:%[0-9]+]]:fpr128 = INSvi16lane [[INSERT_SUBREG3]], 1, [[INSERT_SUBREG4]], 0
     ; CHECK-NEXT: [[DEF5:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], %13, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], [[FCVTHSr2]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane1:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane]], 2, [[INSERT_SUBREG5]], 0
     ; CHECK-NEXT: [[DEF6:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], %16, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], [[FCVTHSr3]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane2:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane1]], 3, [[INSERT_SUBREG6]], 0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[INSvi16lane2]].dsub
     ; CHECK-NEXT: $d0 = COPY [[COPY2]]
@@ -123,52 +123,52 @@ body:             |
     ; CHECK-NEXT: [[DUPi16_4:%[0-9]+]]:fpr16 = DUPi16 [[COPY]], 5
     ; CHECK-NEXT: [[DUPi16_5:%[0-9]+]]:fpr16 = DUPi16 [[COPY]], 6
     ; CHECK-NEXT: [[DUPi16_6:%[0-9]+]]:fpr16 = DUPi16 [[COPY]], 7
-    ; CHECK-NEXT: %9:fpr32 = nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
-    ; CHECK-NEXT: %10:fpr32 = nofpexcept FRINTXSr %9, implicit $fpcr
-    ; CHECK-NEXT: %11:fpr16 = nofpexcept FCVTHSr %10, implicit $fpcr
-    ; CHECK-NEXT: %12:fpr32 = nofpexcept FCVTSHr [[DUPi16_]], implicit $fpcr
-    ; CHECK-NEXT: %13:fpr32 = nofpexcept FRINTXSr %12, implicit $fpcr
-    ; CHECK-NEXT: %14:fpr16 = nofpexcept FCVTHSr %13, implicit $fpcr
-    ; CHECK-NEXT: %15:fpr32 = nofpexcept FCVTSHr [[DUPi16_1]], implicit $fpcr
-    ; CHECK-NEXT: %16:fpr32 = nofpexcept FRINTXSr %15, implicit $fpcr
-    ; CHECK-NEXT: %17:fpr16 = nofpexcept FCVTHSr %16, implicit $fpcr
-    ; CHECK-NEXT: %18:fpr32 = nofpexcept FCVTSHr [[DUPi16_2]], implicit $fpcr
-    ; CHECK-NEXT: %19:fpr32 = nofpexcept FRINTXSr %18, implicit $fpcr
-    ; CHECK-NEXT: %20:fpr16 = nofpexcept FCVTHSr %19, implicit $fpcr
-    ; CHECK-NEXT: %21:fpr32 = nofpexcept FCVTSHr [[DUPi16_3]], implicit $fpcr
-    ; CHECK-NEXT: %22:fpr32 = nofpexcept FRINTXSr %21, implicit $fpcr
-    ; CHECK-NEXT: %23:fpr16 = nofpexcept FCVTHSr %22, implicit $fpcr
-    ; CHECK-NEXT: %24:fpr32 = nofpexcept FCVTSHr [[DUPi16_4]], implicit $fpcr
-    ; CHECK-NEXT: %25:fpr32 = nofpexcept FRINTXSr %24, implicit $fpcr
-    ; CHECK-NEXT: %26:fpr16 = nofpexcept FCVTHSr %25, implicit $fpcr
-    ; CHECK-NEXT: %27:fpr32 = nofpexcept FCVTSHr [[DUPi16_5]], implicit $fpcr
-    ; CHECK-NEXT: %28:fpr32 = nofpexcept FRINTXSr %27, implicit $fpcr
-    ; CHECK-NEXT: %29:fpr16 = nofpexcept FCVTHSr %28, implicit $fpcr
-    ; CHECK-NEXT: %30:fpr32 = nofpexcept FCVTSHr [[DUPi16_6]], implicit $fpcr
-    ; CHECK-NEXT: %31:fpr32 = nofpexcept FRINTXSr %30, implicit $fpcr
-    ; CHECK-NEXT: %32:fpr16 = nofpexcept FCVTHSr %31, implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr1:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr1:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr1]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr1:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr1]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr2:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_1]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr2:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr2]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr2:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr2]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr3:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_2]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr3:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr3]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr3:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr3]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr4:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_3]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr4:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr4]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr4:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr4]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr5:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_4]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr5:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr5]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr5:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr5]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr6:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_5]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr6:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr6]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr6:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr6]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTSHr7:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[DUPi16_6]], implicit $fpcr
+    ; CHECK-NEXT: [[FRINTXSr7:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[FCVTSHr7]], implicit $fpcr
+    ; CHECK-NEXT: [[FCVTHSr7:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr [[FRINTXSr7]], implicit $fpcr
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], %11, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[FCVTHSr]], %subreg.hsub
     ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], %14, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[FCVTHSr1]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane:%[0-9]+]]:fpr128 = INSvi16lane [[INSERT_SUBREG]], 1, [[INSERT_SUBREG1]], 0
     ; CHECK-NEXT: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], %17, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[FCVTHSr2]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane1:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane]], 2, [[INSERT_SUBREG2]], 0
     ; CHECK-NEXT: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], %20, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[FCVTHSr3]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane2:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane1]], 3, [[INSERT_SUBREG3]], 0
     ; CHECK-NEXT: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], %23, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG4:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[FCVTHSr4]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane3:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane2]], 4, [[INSERT_SUBREG4]], 0
     ; CHECK-NEXT: [[DEF5:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], %26, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG5:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF5]], [[FCVTHSr5]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane4:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane3]], 5, [[INSERT_SUBREG5]], 0
     ; CHECK-NEXT: [[DEF6:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], %29, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG6:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF6]], [[FCVTHSr6]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane5:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane4]], 6, [[INSERT_SUBREG6]], 0
     ; CHECK-NEXT: [[DEF7:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK-NEXT: [[INSERT_SUBREG7:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF7]], %32, %subreg.hsub
+    ; CHECK-NEXT: [[INSERT_SUBREG7:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF7]], [[FCVTHSr7]], %subreg.hsub
     ; CHECK-NEXT: [[INSvi16lane6:%[0-9]+]]:fpr128 = INSvi16lane [[INSvi16lane5]], 7, [[INSERT_SUBREG7]], 0
     ; CHECK-NEXT: $q0 = COPY [[INSvi16lane6]]
     ; CHECK-NEXT: RET_ReallyLR implicit $q0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-frint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-frint.mir
index a0856baece9f6..737c31cd81796 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-frint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-frint.mir
@@ -15,10 +15,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f16.rint
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[FRINTXHr:%[0-9]+]]:fpr16 = nofpexcept FRINTXHr [[COPY]]
-    ; CHECK: $h0 = COPY [[FRINTXHr]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[FRINTXHr:%[0-9]+]]:fpr16 = nofpexcept FRINTXHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FRINTXHr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = COPY $h0
     %1:fpr(s16) = G_FRINT %0
     $h0 = COPY %1(s16)
@@ -38,10 +39,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.rint
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FRINTXSr:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[COPY]]
-    ; CHECK: $s0 = COPY [[FRINTXSr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FRINTXSr:%[0-9]+]]:fpr32 = nofpexcept FRINTXSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FRINTXSr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_FRINT %0
     $s0 = COPY %1(s32)
@@ -61,10 +63,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.rint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTXDr:%[0-9]+]]:fpr64 = nofpexcept FRINTXDr [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTXDr]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTXDr:%[0-9]+]]:fpr64 = nofpexcept FRINTXDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTXDr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = G_FRINT %0
     $d0 = COPY %1(s64)
@@ -84,10 +87,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f32.rint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTXv4f32_:%[0-9]+]]:fpr128 = nofpexcept FRINTXv4f32 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTXv4f32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTXv4f32_:%[0-9]+]]:fpr128 = nofpexcept FRINTXv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTXv4f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = G_FRINT %0
     $q0 = COPY %1(<4 x s32>)
@@ -107,10 +111,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.rint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTXv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTXv2f64 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTXv2f64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTXv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTXv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTXv2f64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = G_FRINT %0
     $q0 = COPY %1(<2 x s64>)
@@ -130,10 +135,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.rint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTXv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTXv4f16 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTXv4f16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTXv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTXv4f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTXv4f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = G_FRINT %0
     $d0 = COPY %1(<4 x s16>)
@@ -153,10 +159,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.rint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTXv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTXv8f16 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTXv8f16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTXv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTXv8f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTXv8f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = G_FRINT %0
     $q0 = COPY %1(<8 x s16>)
@@ -176,10 +183,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.rint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTXv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTXv2f32 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTXv2f32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTXv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTXv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTXv2f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = G_FRINT %0
     $d0 = COPY %1(<2 x s32>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir
index 4995024a7229d..3a616c9d3c346 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-tiny.mir
@@ -31,15 +31,15 @@ body:             |
   bb.1:
     ; CHECK-LABEL: name: gv_tiny
     ; CHECK: [[ADR:%[0-9]+]]:gpr64 = ADR @foo1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY [[ADR]]
-    ; CHECK: [[ADR1:%[0-9]+]]:gpr64 = ADR @foo2
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[ADR1]]
-    ; CHECK: STRWui $wzr, %stack.0.retval, 0 :: (store (s32) into %ir.retval)
-    ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load (s32) from @foo1)
-    ; CHECK: [[LDRWui1:%[0-9]+]]:gpr32 = LDRWui [[COPY1]], 0 :: (load (s32) from @foo2)
-    ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRWui]], [[LDRWui1]]
-    ; CHECK: $w0 = COPY [[ADDWrr]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY [[ADR]]
+    ; CHECK-NEXT: [[ADR1:%[0-9]+]]:gpr64 = ADR @foo2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[ADR1]]
+    ; CHECK-NEXT: STRWui $wzr, %stack.0.retval, 0 :: (store (s32) into %ir.retval)
+    ; CHECK-NEXT: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load (s32) from @foo1)
+    ; CHECK-NEXT: [[LDRWui1:%[0-9]+]]:gpr32 = LDRWui [[COPY1]], 0 :: (load (s32) from @foo2)
+    ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRWui]], [[LDRWui1]]
+    ; CHECK-NEXT: $w0 = COPY [[ADDWrr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:gpr(s32) = G_CONSTANT i32 0
     %4:gpr(p0) = G_GLOBAL_VALUE @foo1
     %3:gpr(p0) = COPY %4(p0)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir
index 0e0ed70fb955f..9f02f31a15e32 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir
@@ -35,6 +35,7 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -1234
     ; CHECK-NEXT: $w0 = COPY [[MOVi32imm]]
+    ;
     ; CHECK-TINY-LABEL: name: imm_s32_gpr
     ; CHECK-TINY: liveins: $w0, $w1
     ; CHECK-TINY-NEXT: {{  $}}
@@ -63,6 +64,7 @@ body:             |
     ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1234
     ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
     ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
+    ;
     ; CHECK-TINY-LABEL: name: imm_s64_gpr
     ; CHECK-TINY: liveins: $w0, $w1
     ; CHECK-TINY-NEXT: {{  $}}
@@ -90,16 +92,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
     ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s64) from constant-pool)
-    ; CHECK-NEXT: %2:fpr64 = nofpexcept FADDDrr [[COPY]], [[LDRDui]]
-    ; CHECK-NEXT: $d0 = COPY %2
+    ; CHECK-NEXT: [[FADDDrr:%[0-9]+]]:fpr64 = nofpexcept FADDDrr [[COPY]], [[LDRDui]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FADDDrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
+    ;
     ; CHECK-TINY-LABEL: name: test_f64_cp
     ; CHECK-TINY: liveins: $d0
     ; CHECK-TINY-NEXT: {{  $}}
     ; CHECK-TINY-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-TINY-NEXT: [[LDRDl:%[0-9]+]]:fpr64 = LDRDl %const.0 :: (load (s64) from constant-pool)
-    ; CHECK-TINY-NEXT: %2:fpr64 = nofpexcept FADDDrr [[COPY]], [[LDRDl]]
-    ; CHECK-TINY-NEXT: $d0 = COPY %2
+    ; CHECK-TINY-NEXT: [[FADDDrr:%[0-9]+]]:fpr64 = nofpexcept FADDDrr [[COPY]], [[LDRDl]], implicit $fpcr
+    ; CHECK-TINY-NEXT: $d0 = COPY [[FADDDrr]]
     ; CHECK-TINY-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = G_FCONSTANT double 0x3FEFF7CED916872B
@@ -125,16 +128,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
     ; CHECK-NEXT: [[LDRSui:%[0-9]+]]:fpr32 = LDRSui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s32) from constant-pool)
-    ; CHECK-NEXT: %2:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSui]]
-    ; CHECK-NEXT: $s0 = COPY %2
+    ; CHECK-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSui]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FADDSrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
+    ;
     ; CHECK-TINY-LABEL: name: test_f32_cp_optsize
     ; CHECK-TINY: liveins: $s0
     ; CHECK-TINY-NEXT: {{  $}}
     ; CHECK-TINY-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-TINY-NEXT: [[LDRSl:%[0-9]+]]:fpr32 = LDRSl %const.0 :: (load (s32) from constant-pool)
-    ; CHECK-TINY-NEXT: %2:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSl]]
-    ; CHECK-TINY-NEXT: $s0 = COPY %2
+    ; CHECK-TINY-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSl]], implicit $fpcr
+    ; CHECK-TINY-NEXT: $s0 = COPY [[FADDSrr]]
     ; CHECK-TINY-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_FCONSTANT float 0x3FDB267DE0000000
@@ -160,16 +164,17 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
     ; CHECK-NEXT: [[LDRSui:%[0-9]+]]:fpr32 = LDRSui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s32) from constant-pool)
-    ; CHECK-NEXT: %2:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSui]]
-    ; CHECK-NEXT: $s0 = COPY %2
+    ; CHECK-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSui]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FADDSrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
+    ;
     ; CHECK-TINY-LABEL: name: test_f32_cp_minsize
     ; CHECK-TINY: liveins: $s0
     ; CHECK-TINY-NEXT: {{  $}}
     ; CHECK-TINY-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-TINY-NEXT: [[LDRSl:%[0-9]+]]:fpr32 = LDRSl %const.0 :: (load (s32) from constant-pool)
-    ; CHECK-TINY-NEXT: %2:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSl]]
-    ; CHECK-TINY-NEXT: $s0 = COPY %2
+    ; CHECK-TINY-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr [[COPY]], [[LDRSl]], implicit $fpcr
+    ; CHECK-TINY-NEXT: $s0 = COPY [[FADDSrr]]
     ; CHECK-TINY-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_FCONSTANT float 0x3FDB267DE0000000

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
index cdbd34f94548d..ada185c2c9dc0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
@@ -20,8 +20,8 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: implicit_def
     ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
-    ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[DEF]], [[DEF]]
-    ; CHECK: $w0 = COPY [[ADDWrr]]
+    ; CHECK-NEXT: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[DEF]], [[DEF]]
+    ; CHECK-NEXT: $w0 = COPY [[ADDWrr]]
     %0(s32) = G_IMPLICIT_DEF
     %1(s32) = G_ADD %0, %0
     $w0 = COPY %1(s32)
@@ -38,8 +38,8 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: implicit_def_copy
     ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY [[DEF]]
-    ; CHECK: $w0 = COPY [[COPY]]
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY [[DEF]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY]]
     %0(s32) = G_IMPLICIT_DEF
     %1(s32) = COPY %0(s32)
     $w0 = COPY %1(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir
index 51c2c163b2caa..f4675e1b752aa 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir
@@ -11,14 +11,16 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: insert_gprx
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64 = IMPLICIT_DEF
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
-    ; CHECK: [[BFMXri:%[0-9]+]]:gpr64 = BFMXri [[DEF]], [[SUBREG_TO_REG]], 0, 31
-    ; CHECK: [[SUBREG_TO_REG1:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
-    ; CHECK: [[BFMXri1:%[0-9]+]]:gpr64 = BFMXri [[DEF]], [[SUBREG_TO_REG1]], 51, 31
-    ; CHECK: $x0 = COPY [[BFMXri]]
-    ; CHECK: $x1 = COPY [[BFMXri1]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[BFMXri:%[0-9]+]]:gpr64 = BFMXri [[DEF]], [[SUBREG_TO_REG]], 0, 31
+    ; CHECK-NEXT: [[SUBREG_TO_REG1:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[BFMXri1:%[0-9]+]]:gpr64 = BFMXri [[DEF]], [[SUBREG_TO_REG1]], 51, 31
+    ; CHECK-NEXT: $x0 = COPY [[BFMXri]]
+    ; CHECK-NEXT: $x1 = COPY [[BFMXri1]]
     %0:gpr(s32) = COPY $w0
 
     %1:gpr(s64) = G_IMPLICIT_DEF
@@ -40,12 +42,14 @@ body:             |
   bb.0:
     liveins: $w0, $w1
     ; CHECK-LABEL: name: insert_gprw
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
-    ; CHECK: [[BFMWri:%[0-9]+]]:gpr32 = BFMWri [[DEF]], [[COPY]], 0, 15
-    ; CHECK: [[BFMWri1:%[0-9]+]]:gpr32 = BFMWri [[BFMWri]], [[COPY]], 16, 15
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[BFMWri1]]
-    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[BFMWri:%[0-9]+]]:gpr32 = BFMWri [[DEF]], [[COPY]], 0, 15
+    ; CHECK-NEXT: [[BFMWri1:%[0-9]+]]:gpr32 = BFMWri [[BFMWri]], [[COPY]], 16, 15
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[BFMWri1]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %1:gpr(s32) = COPY $w0
     %2:gpr(s32) = COPY $w1
     %3:gpr(s16) = G_TRUNC %1(s32)
@@ -67,13 +71,15 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: extract_gprs
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 0, 31
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[UBFMXri]].sub_32
-    ; CHECK: [[UBFMXri1:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 13, 44
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[UBFMXri1]].sub_32
-    ; CHECK: $w0 = COPY [[COPY1]]
-    ; CHECK: $w1 = COPY [[COPY2]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 0, 31
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[UBFMXri]].sub_32
+    ; CHECK-NEXT: [[UBFMXri1:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 13, 44
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY [[UBFMXri1]].sub_32
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+    ; CHECK-NEXT: $w1 = COPY [[COPY2]]
     %0:gpr(s64) = COPY $x0
 
     %1:gpr(s32) = G_EXTRACT %0, 0
@@ -94,15 +100,17 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: extract_gprw
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
-    ; CHECK: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 15, 30
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[UBFMWri]]
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
-    ; CHECK: $h0 = COPY [[COPY2]]
-    ; CHECK: [[COPY3:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
-    ; CHECK: [[COPY4:%[0-9]+]]:fpr16 = COPY [[COPY3]].hsub
-    ; CHECK: $h1 = COPY [[COPY4]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
+    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 15, 30
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[UBFMWri]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY [[COPY1]].hsub
+    ; CHECK-NEXT: $h0 = COPY [[COPY2]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:fpr16 = COPY [[COPY3]].hsub
+    ; CHECK-NEXT: $h1 = COPY [[COPY4]]
     %0:gpr(s32) = COPY $w0
 
     %1:gpr(s16) = G_EXTRACT %0, 0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
index efe15c53d2f5d..e3712db51ad3a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
@@ -41,10 +41,12 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: anyext_s64_from_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64all = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
-    ; CHECK: $x0 = COPY [[INSERT_SUBREG]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64all = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: $x0 = COPY [[INSERT_SUBREG]]
     %0(s32) = COPY $w0
     %1(s64) = G_ANYEXT %0
     $x0 = COPY %1(s64)
@@ -64,9 +66,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: anyext_s32_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
-    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s32) = G_ANYEXT %0
@@ -89,10 +93,11 @@ body:             |
 
     ; CHECK-LABEL: name: anyext_v8s16_from_v8s8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[USHLLv8i8_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[USHLLv8i8_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_ANYEXT %0(<8 x s8>)
     $q0 = COPY %1(<8 x s16>)
@@ -115,10 +120,11 @@ body:             |
 
     ; CHECK-LABEL: name: anyext_v4s32_from_v4s16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[USHLLv4i16_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[USHLLv4i16_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_ANYEXT %0(<4 x s16>)
     $q0 = COPY %1(<4 x s32>)
@@ -141,10 +147,11 @@ body:             |
 
     ; CHECK-LABEL: name: anyext_v2s64_from_v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[USHLLv2i32_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[USHLLv2i32_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_ANYEXT %0(<2 x s32>)
     $q0 = COPY %1(<2 x s64>)
@@ -165,10 +172,12 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: zext_s64_from_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[COPY]], 0
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
-    ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[ORRWrs:%[0-9]+]]:gpr32 = ORRWrs $wzr, [[COPY]], 0
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[ORRWrs]], %subreg.sub_32
+    ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
     %0(s32) = COPY $w0
     %1(s64) = G_ZEXT %0
     $x0 = COPY %1(s64)
@@ -188,9 +197,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: zext_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
-    ; CHECK: $w0 = COPY [[UBFMWri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
+    ; CHECK-NEXT: $w0 = COPY [[UBFMWri]]
     %2:gpr(s32) = COPY $w0
     %0(s16) = G_TRUNC %2
     %1(s32) = G_ZEXT %0
@@ -211,9 +222,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: zext_s32_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
-    ; CHECK: $w0 = COPY [[UBFMWri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
+    ; CHECK-NEXT: $w0 = COPY [[UBFMWri]]
     %2:gpr(s32) = COPY $w0
     %0(s16) = G_TRUNC %2
     %1(s32) = G_ZEXT %0
@@ -234,10 +247,12 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: zext_s16_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 7
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[UBFMWri]]
-    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 7
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[UBFMWri]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s16) = G_ZEXT %0
@@ -261,10 +276,11 @@ body:             |
 
     ; CHECK-LABEL: name: zext_v8s16_from_v8s8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[USHLLv8i8_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[USHLLv8i8_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_ZEXT %0(<8 x s8>)
     $q0 = COPY %1(<8 x s16>)
@@ -288,10 +304,11 @@ body:             |
 
     ; CHECK-LABEL: name: zext_v4s32_from_v4s16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[USHLLv4i16_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[USHLLv4i16_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_ZEXT %0(<4 x s16>)
     $q0 = COPY %1(<4 x s32>)
@@ -314,10 +331,11 @@ body:             |
 
     ; CHECK-LABEL: name: zext_v2s64_from_v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[USHLLv2i32_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[USHLLv2i32_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_ZEXT %0(<2 x s32>)
     $q0 = COPY %1(<2 x s64>)
@@ -338,11 +356,13 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: sext_s64_from_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
-    ; CHECK: [[SBFMXri:%[0-9]+]]:gpr64 = SBFMXri [[INSERT_SUBREG]], 0, 31
-    ; CHECK: $x0 = COPY [[SBFMXri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[SBFMXri:%[0-9]+]]:gpr64 = SBFMXri [[INSERT_SUBREG]], 0, 31
+    ; CHECK-NEXT: $x0 = COPY [[SBFMXri]]
     %0(s32) = COPY $w0
     %1(s64) = G_SEXT %0
     $x0 = COPY %1(s64)
@@ -362,9 +382,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: sext_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 15
-    ; CHECK: $w0 = COPY [[SBFMWri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 15
+    ; CHECK-NEXT: $w0 = COPY [[SBFMWri]]
     %2:gpr(s32) = COPY $w0
     %0(s16) = G_TRUNC %2
     %1(s32) = G_SEXT %0
@@ -385,9 +407,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: sext_s32_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7
-    ; CHECK: $w0 = COPY [[SBFMWri]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7
+    ; CHECK-NEXT: $w0 = COPY [[SBFMWri]]
     %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s32) = G_SEXT %0
@@ -408,10 +432,12 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: sext_s16_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SBFMWri]]
-    ; CHECK: $w0 = COPY [[COPY1]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SBFMWri]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s16) = G_SEXT %0
@@ -435,10 +461,11 @@ body:             |
 
     ; CHECK-LABEL: name: sext_v8s16_from_v8s8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSHLLv8i8_shift:%[0-9]+]]:fpr128 = SSHLLv8i8_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[SSHLLv8i8_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSHLLv8i8_shift:%[0-9]+]]:fpr128 = SSHLLv8i8_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[SSHLLv8i8_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_SEXT %0(<8 x s8>)
     $q0 = COPY %1(<8 x s16>)
@@ -462,10 +489,11 @@ body:             |
 
     ; CHECK-LABEL: name: sext_v4s32_from_v4s16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSHLLv4i16_shift:%[0-9]+]]:fpr128 = SSHLLv4i16_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[SSHLLv4i16_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSHLLv4i16_shift:%[0-9]+]]:fpr128 = SSHLLv4i16_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[SSHLLv4i16_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_SEXT %0(<4 x s16>)
     $q0 = COPY %1(<4 x s32>)
@@ -488,10 +516,11 @@ body:             |
 
     ; CHECK-LABEL: name: sext_v2s64_from_v2s32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSHLLv2i32_shift:%[0-9]+]]:fpr128 = SSHLLv2i32_shift [[COPY]], 0
-    ; CHECK: $q0 = COPY [[SSHLLv2i32_shift]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSHLLv2i32_shift:%[0-9]+]]:fpr128 = SSHLLv2i32_shift [[COPY]], 0
+    ; CHECK-NEXT: $q0 = COPY [[SSHLLv2i32_shift]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_SEXT %0(<2 x s32>)
     $q0 = COPY %1(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
index 6c0cf42553ff0..2dc8c9d4772df 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
@@ -24,10 +24,12 @@ body:             |
     liveins: $w0, $w1
 
     ; CHECK-LABEL: name: sdiv_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[COPY]], [[COPY1]]
-    ; CHECK: $w0 = COPY [[SDIVWr]]
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[SDIVWr]]
     %0(s32) = COPY $w0
     %1(s32) = COPY $w1
     %2(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.sdiv.i32), %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir
index a1a0b6ffae48d..7d17336ad7543 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir
@@ -13,11 +13,13 @@ body:             |
     liveins: $q0, $q1
 
     ; CHECK-LABEL: name: aesmc_aese
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[T0:%[0-9]+]]:fpr128 = AESErr [[COPY]], [[COPY1]]
-    ; CHECK: [[T1:%[0-9]+]]:fpr128 = AESMCrrTied [[T0]]
-    ; CHECK: $q0 = COPY [[T1]]
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[AESErr:%[0-9]+]]:fpr128 = AESErr [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[AESMCrrTied:%[0-9]+]]:fpr128 = AESMCrrTied [[AESErr]]
+    ; CHECK-NEXT: $q0 = COPY [[AESMCrrTied]]
     %0:fpr(<16 x s8>) = COPY $q0
     %1:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.aarch64.crypto.aese), %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-round.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-round.mir
index 140d189a3af1d..53f2fbb328460 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-round.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-round.mir
@@ -15,10 +15,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.intrinsic_round
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTADr:%[0-9]+]]:fpr64 = nofpexcept FRINTADr [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTADr]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTADr:%[0-9]+]]:fpr64 = nofpexcept FRINTADr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTADr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = G_INTRINSIC_ROUND %0
     $d0 = COPY %1(s64)
@@ -38,10 +39,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.intrinsic_round
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FRINTASr:%[0-9]+]]:fpr32 = nofpexcept FRINTASr [[COPY]]
-    ; CHECK: $s0 = COPY [[FRINTASr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FRINTASr:%[0-9]+]]:fpr32 = nofpexcept FRINTASr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FRINTASr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_INTRINSIC_ROUND %0
     $s0 = COPY %1(s32)
@@ -63,10 +65,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f16.intrinsic_round
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[FRINTAHr:%[0-9]+]]:fpr16 = nofpexcept FRINTAHr [[COPY]]
-    ; CHECK: $h0 = COPY [[FRINTAHr]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[FRINTAHr:%[0-9]+]]:fpr16 = nofpexcept FRINTAHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FRINTAHr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = COPY $h0
     %1:fpr(s16) = G_INTRINSIC_ROUND %0
     $h0 = COPY %1(s16)
@@ -88,10 +91,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.intrinsic_round
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTAv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTAv4f16 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTAv4f16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTAv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTAv4f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTAv4f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = G_INTRINSIC_ROUND %0
     $d0 = COPY %1(<4 x s16>)
@@ -113,10 +117,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.intrinsic_round
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTAv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTAv8f16 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTAv8f16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTAv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTAv8f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTAv8f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = G_INTRINSIC_ROUND %0
     $q0 = COPY %1(<8 x s16>)
@@ -138,10 +143,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.intrinsic_round
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTAv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTAv2f32 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTAv2f32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTAv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTAv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTAv2f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = G_INTRINSIC_ROUND %0
     $d0 = COPY %1(<2 x s32>)
@@ -163,10 +169,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f32.intrinsic_round
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTAv4f32_:%[0-9]+]]:fpr128 = nofpexcept FRINTAv4f32 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTAv4f32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTAv4f32_:%[0-9]+]]:fpr128 = nofpexcept FRINTAv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTAv4f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = G_INTRINSIC_ROUND %0
     $q0 = COPY %1(<4 x s32>)
@@ -188,10 +195,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.intrinsic_round
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTAv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTAv2f64 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTAv2f64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTAv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTAv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTAv2f64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = G_INTRINSIC_ROUND %0
     $q0 = COPY %1(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-trunc.mir
index b76f0b0bff2d3..57f8ed8b459fc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-intrinsic-trunc.mir
@@ -15,10 +15,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.intrinsic_trunc
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTZDr:%[0-9]+]]:fpr64 = nofpexcept FRINTZDr [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTZDr]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTZDr:%[0-9]+]]:fpr64 = nofpexcept FRINTZDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTZDr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = G_INTRINSIC_TRUNC %0
     $d0 = COPY %1(s64)
@@ -38,10 +39,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.intrinsic_trunc
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FRINTZSr:%[0-9]+]]:fpr32 = nofpexcept FRINTZSr [[COPY]]
-    ; CHECK: $s0 = COPY [[FRINTZSr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FRINTZSr:%[0-9]+]]:fpr32 = nofpexcept FRINTZSr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FRINTZSr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_INTRINSIC_TRUNC %0
     $s0 = COPY %1(s32)
@@ -63,10 +65,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f16.intrinsic_trunc
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[FRINTZHr:%[0-9]+]]:fpr16 = nofpexcept FRINTZHr [[COPY]]
-    ; CHECK: $h0 = COPY [[FRINTZHr]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[FRINTZHr:%[0-9]+]]:fpr16 = nofpexcept FRINTZHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FRINTZHr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = COPY $h0
     %1:fpr(s16) = G_INTRINSIC_TRUNC %0
     $h0 = COPY %1(s16)
@@ -88,10 +91,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.intrinsic_trunc
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTZv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTZv4f16 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTZv4f16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTZv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTZv4f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTZv4f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = G_INTRINSIC_TRUNC %0
     $d0 = COPY %1(<4 x s16>)
@@ -113,10 +117,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.intrinsic_trunc
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTZv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTZv8f16 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTZv8f16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTZv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTZv8f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTZv8f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = G_INTRINSIC_TRUNC %0
     $q0 = COPY %1(<8 x s16>)
@@ -138,10 +143,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.intrinsic_trunc
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTZv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTZv2f32 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTZv2f32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTZv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTZv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTZv2f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = G_INTRINSIC_TRUNC %0
     $d0 = COPY %1(<2 x s32>)
@@ -163,10 +169,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f32.intrinsic_trunc
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTZv4f32_:%[0-9]+]]:fpr128 = nofpexcept FRINTZv4f32 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTZv4f32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTZv4f32_:%[0-9]+]]:fpr128 = nofpexcept FRINTZv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTZv4f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = G_INTRINSIC_TRUNC %0
     $q0 = COPY %1(<4 x s32>)
@@ -188,10 +195,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.intrinsic_trunc
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTZv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTZv2f64 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTZv2f64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTZv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTZv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTZv2f64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = G_INTRINSIC_TRUNC %0
     $q0 = COPY %1(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir
index 940485ea0b427..933dcb9628314 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir
@@ -11,13 +11,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov8b
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov8b:%[0-9]+]]:dd = LD2Twov8b %ptr :: (load (<8 x s64>))
-    ; CHECK: %dst1:fpr64 = COPY [[LD2Twov8b]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD2Twov8b]].dsub1
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov8b:%[0-9]+]]:dd = LD2Twov8b %ptr :: (load (<8 x s64>))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD2Twov8b]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD2Twov8b]].dsub1
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<8 x s8>), %dst2:fpr(<8 x s8>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<8 x s64>))
     $d0 = COPY %dst1(<8 x s8>)
@@ -34,13 +35,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov16b
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov16b:%[0-9]+]]:qq = LD2Twov16b %ptr :: (load (<16 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD2Twov16b]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD2Twov16b]].qsub1
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov16b:%[0-9]+]]:qq = LD2Twov16b %ptr :: (load (<16 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD2Twov16b]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD2Twov16b]].qsub1
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<16 x s8>), %dst2:fpr(<16 x s8>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<16 x s64>))
     $q0 = COPY %dst1(<16 x s8>)
@@ -57,13 +59,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov4h
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov4h:%[0-9]+]]:dd = LD2Twov4h %ptr :: (load (<4 x s64>))
-    ; CHECK: %dst1:fpr64 = COPY [[LD2Twov4h]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD2Twov4h]].dsub1
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov4h:%[0-9]+]]:dd = LD2Twov4h %ptr :: (load (<4 x s64>))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD2Twov4h]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD2Twov4h]].dsub1
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<4 x s16>), %dst2:fpr(<4 x s16>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<4 x s64>))
     $d0 = COPY %dst1(<4 x s16>)
@@ -80,13 +83,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov8h
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov8h:%[0-9]+]]:qq = LD2Twov8h %ptr :: (load (<8 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD2Twov8h]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD2Twov8h]].qsub1
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov8h:%[0-9]+]]:qq = LD2Twov8h %ptr :: (load (<8 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD2Twov8h]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD2Twov8h]].qsub1
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<8 x s16>), %dst2:fpr(<8 x s16>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<8 x s64>))
     $q0 = COPY %dst1(<8 x s16>)
@@ -103,13 +107,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov2s
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov2s:%[0-9]+]]:dd = LD2Twov2s %ptr :: (load (<2 x s64>))
-    ; CHECK: %dst1:fpr64 = COPY [[LD2Twov2s]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD2Twov2s]].dsub1
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov2s:%[0-9]+]]:dd = LD2Twov2s %ptr :: (load (<2 x s64>))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD2Twov2s]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD2Twov2s]].dsub1
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<2 x s32>), %dst2:fpr(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<2 x s64>))
     $d0 = COPY %dst1(<2 x s32>)
@@ -126,13 +131,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov4s
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov4s:%[0-9]+]]:qq = LD2Twov4s %ptr :: (load (<4 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD2Twov4s]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD2Twov4s]].qsub1
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov4s:%[0-9]+]]:qq = LD2Twov4s %ptr :: (load (<4 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD2Twov4s]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD2Twov4s]].qsub1
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<4 x s32>), %dst2:fpr(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<4 x s64>))
     $q0 = COPY %dst1(<4 x s32>)
@@ -149,13 +155,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov2d_s64
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov2d:%[0-9]+]]:qq = LD2Twov2d %ptr :: (load (<2 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD2Twov2d]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD2Twov2d]].qsub1
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov2d:%[0-9]+]]:qq = LD2Twov2d %ptr :: (load (<2 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD2Twov2d]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD2Twov2d]].qsub1
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<2 x s64>), %dst2:fpr(<2 x s64>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<2 x s64>))
     $q0 = COPY %dst1(<2 x s64>)
@@ -172,13 +179,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD2Twov2d_p0
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD2Twov2d:%[0-9]+]]:qq = LD2Twov2d %ptr :: (load (<2 x p0>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD2Twov2d]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD2Twov2d]].qsub1
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD2Twov2d:%[0-9]+]]:qq = LD2Twov2d %ptr :: (load (<2 x p0>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD2Twov2d]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD2Twov2d]].qsub1
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<2 x p0>), %dst2:fpr(<2 x p0>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<2 x p0>))
     $q0 = COPY %dst1(<2 x p0>)
@@ -195,13 +203,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD1Twov1d_s64
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD1Twov1d:%[0-9]+]]:dd = LD1Twov1d %ptr :: (load (s64))
-    ; CHECK: %dst1:fpr64 = COPY [[LD1Twov1d]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD1Twov1d]].dsub1
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD1Twov1d:%[0-9]+]]:dd = LD1Twov1d %ptr :: (load (s64))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD1Twov1d]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD1Twov1d]].dsub1
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(s64), %dst2:fpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (s64))
     $d0 = COPY %dst1(s64)
@@ -218,13 +227,14 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD1Twov1d_p0
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD1Twov1d:%[0-9]+]]:dd = LD1Twov1d %ptr :: (load (p0))
-    ; CHECK: %dst1:fpr64 = COPY [[LD1Twov1d]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD1Twov1d]].dsub1
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD1Twov1d:%[0-9]+]]:dd = LD1Twov1d %ptr :: (load (p0))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD1Twov1d]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD1Twov1d]].dsub1
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(p0), %dst2:fpr(p0) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (p0))
     $d0 = COPY %dst1(p0)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ld4.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ld4.mir
index ec956ded6efff..03e271e15e991 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ld4.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ld4.mir
@@ -11,17 +11,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv8b
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv8b:%[0-9]+]]:dddd = LD4Fourv8b %ptr :: (load (<8 x s64>))
-    ; CHECK: %dst1:fpr64 = COPY [[LD4Fourv8b]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD4Fourv8b]].dsub1
-    ; CHECK: %dst3:fpr64 = COPY [[LD4Fourv8b]].dsub2
-    ; CHECK: %dst4:fpr64 = COPY [[LD4Fourv8b]].dsub3
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: $d2 = COPY %dst3
-    ; CHECK: $d3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv8b:%[0-9]+]]:dddd = LD4Fourv8b %ptr :: (load (<8 x s64>))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD4Fourv8b]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD4Fourv8b]].dsub1
+    ; CHECK-NEXT: %dst3:fpr64 = COPY [[LD4Fourv8b]].dsub2
+    ; CHECK-NEXT: %dst4:fpr64 = COPY [[LD4Fourv8b]].dsub3
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: $d2 = COPY %dst3
+    ; CHECK-NEXT: $d3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<8 x s8>), %dst2:fpr(<8 x s8>), %dst3:fpr(<8 x s8>), %dst4:fpr(<8 x s8>)= G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<8 x s64>))
     $d0 = COPY %dst1(<8 x s8>)
@@ -40,17 +41,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv16b
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv16b:%[0-9]+]]:qqqq = LD4Fourv16b %ptr :: (load (<16 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD4Fourv16b]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD4Fourv16b]].qsub1
-    ; CHECK: %dst3:fpr128 = COPY [[LD4Fourv16b]].qsub2
-    ; CHECK: %dst4:fpr128 = COPY [[LD4Fourv16b]].qsub3
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: $q2 = COPY %dst3
-    ; CHECK: $q3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv16b:%[0-9]+]]:qqqq = LD4Fourv16b %ptr :: (load (<16 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD4Fourv16b]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD4Fourv16b]].qsub1
+    ; CHECK-NEXT: %dst3:fpr128 = COPY [[LD4Fourv16b]].qsub2
+    ; CHECK-NEXT: %dst4:fpr128 = COPY [[LD4Fourv16b]].qsub3
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: $q2 = COPY %dst3
+    ; CHECK-NEXT: $q3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<16 x s8>), %dst2:fpr(<16 x s8>), %dst3:fpr(<16 x s8>), %dst4:fpr(<16 x s8>)  = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<16 x s64>))
     $q0 = COPY %dst1(<16 x s8>)
@@ -69,17 +71,18 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: LD4Fourv4h
     ; CHECK: liveins: $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv4h:%[0-9]+]]:dddd = LD4Fourv4h %ptr :: (load (<4 x s64>))
-    ; CHECK: %dst1:fpr64 = COPY [[LD4Fourv4h]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD4Fourv4h]].dsub1
-    ; CHECK: %dst3:fpr64 = COPY [[LD4Fourv4h]].dsub2
-    ; CHECK: %dst4:fpr64 = COPY [[LD4Fourv4h]].dsub3
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: $d2 = COPY %dst3
-    ; CHECK: $d3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv4h:%[0-9]+]]:dddd = LD4Fourv4h %ptr :: (load (<4 x s64>))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD4Fourv4h]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD4Fourv4h]].dsub1
+    ; CHECK-NEXT: %dst3:fpr64 = COPY [[LD4Fourv4h]].dsub2
+    ; CHECK-NEXT: %dst4:fpr64 = COPY [[LD4Fourv4h]].dsub3
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: $d2 = COPY %dst3
+    ; CHECK-NEXT: $d3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<4 x s16>), %dst2:fpr(<4 x s16>), %dst3:fpr(<4 x s16>), %dst4:fpr(<4 x s16>)  = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<4 x s64>))
     $d0 = COPY %dst1(<4 x s16>)
@@ -98,17 +101,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv8h
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv8h:%[0-9]+]]:qqqq = LD4Fourv8h %ptr :: (load (<8 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD4Fourv8h]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD4Fourv8h]].qsub1
-    ; CHECK: %dst3:fpr128 = COPY [[LD4Fourv8h]].qsub2
-    ; CHECK: %dst4:fpr128 = COPY [[LD4Fourv8h]].qsub3
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: $q2 = COPY %dst3
-    ; CHECK: $q3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv8h:%[0-9]+]]:qqqq = LD4Fourv8h %ptr :: (load (<8 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD4Fourv8h]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD4Fourv8h]].qsub1
+    ; CHECK-NEXT: %dst3:fpr128 = COPY [[LD4Fourv8h]].qsub2
+    ; CHECK-NEXT: %dst4:fpr128 = COPY [[LD4Fourv8h]].qsub3
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: $q2 = COPY %dst3
+    ; CHECK-NEXT: $q3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<8 x s16>), %dst2:fpr(<8 x s16>), %dst3:fpr(<8 x s16>), %dst4:fpr(<8 x s16>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<8 x s64>))
     $q0 = COPY %dst1(<8 x s16>)
@@ -127,17 +131,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv2s
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv2s:%[0-9]+]]:dddd = LD4Fourv2s %ptr :: (load (<2 x s64>))
-    ; CHECK: %dst1:fpr64 = COPY [[LD4Fourv2s]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD4Fourv2s]].dsub1
-    ; CHECK: %dst3:fpr64 = COPY [[LD4Fourv2s]].dsub2
-    ; CHECK: %dst4:fpr64 = COPY [[LD4Fourv2s]].dsub3
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: $d2 = COPY %dst3
-    ; CHECK: $d3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv2s:%[0-9]+]]:dddd = LD4Fourv2s %ptr :: (load (<2 x s64>))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD4Fourv2s]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD4Fourv2s]].dsub1
+    ; CHECK-NEXT: %dst3:fpr64 = COPY [[LD4Fourv2s]].dsub2
+    ; CHECK-NEXT: %dst4:fpr64 = COPY [[LD4Fourv2s]].dsub3
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: $d2 = COPY %dst3
+    ; CHECK-NEXT: $d3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<2 x s32>), %dst2:fpr(<2 x s32>), %dst3:fpr(<2 x s32>), %dst4:fpr(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<2 x s64>))
     $d0 = COPY %dst1(<2 x s32>)
@@ -156,17 +161,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv4s
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv4s:%[0-9]+]]:qqqq = LD4Fourv4s %ptr :: (load (<4 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD4Fourv4s]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD4Fourv4s]].qsub1
-    ; CHECK: %dst3:fpr128 = COPY [[LD4Fourv4s]].qsub2
-    ; CHECK: %dst4:fpr128 = COPY [[LD4Fourv4s]].qsub3
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: $q2 = COPY %dst3
-    ; CHECK: $q3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv4s:%[0-9]+]]:qqqq = LD4Fourv4s %ptr :: (load (<4 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD4Fourv4s]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD4Fourv4s]].qsub1
+    ; CHECK-NEXT: %dst3:fpr128 = COPY [[LD4Fourv4s]].qsub2
+    ; CHECK-NEXT: %dst4:fpr128 = COPY [[LD4Fourv4s]].qsub3
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: $q2 = COPY %dst3
+    ; CHECK-NEXT: $q3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<4 x s32>), %dst2:fpr(<4 x s32>), %dst3:fpr(<4 x s32>), %dst4:fpr(<4 x s32>)= G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<4 x s64>))
     $q0 = COPY %dst1(<4 x s32>)
@@ -185,17 +191,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv2d_v2s64
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv2d:%[0-9]+]]:qqqq = LD4Fourv2d %ptr :: (load (<2 x s64>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD4Fourv2d]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD4Fourv2d]].qsub1
-    ; CHECK: %dst3:fpr128 = COPY [[LD4Fourv2d]].qsub2
-    ; CHECK: %dst4:fpr128 = COPY [[LD4Fourv2d]].qsub3
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: $q2 = COPY %dst3
-    ; CHECK: $q3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv2d:%[0-9]+]]:qqqq = LD4Fourv2d %ptr :: (load (<2 x s64>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD4Fourv2d]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD4Fourv2d]].qsub1
+    ; CHECK-NEXT: %dst3:fpr128 = COPY [[LD4Fourv2d]].qsub2
+    ; CHECK-NEXT: %dst4:fpr128 = COPY [[LD4Fourv2d]].qsub3
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: $q2 = COPY %dst3
+    ; CHECK-NEXT: $q3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<2 x s64>), %dst2:fpr(<2 x s64>), %dst3:fpr(<2 x s64>), %dst4:fpr(<2 x s64>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<2 x s64>))
     $q0 = COPY %dst1(<2 x s64>)
@@ -214,17 +221,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD4Fourv2d_v2p0
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD4Fourv2d:%[0-9]+]]:qqqq = LD4Fourv2d %ptr :: (load (<2 x p0>))
-    ; CHECK: %dst1:fpr128 = COPY [[LD4Fourv2d]].qsub0
-    ; CHECK: %dst2:fpr128 = COPY [[LD4Fourv2d]].qsub1
-    ; CHECK: %dst3:fpr128 = COPY [[LD4Fourv2d]].qsub2
-    ; CHECK: %dst4:fpr128 = COPY [[LD4Fourv2d]].qsub3
-    ; CHECK: $q0 = COPY %dst1
-    ; CHECK: $q1 = COPY %dst2
-    ; CHECK: $q2 = COPY %dst3
-    ; CHECK: $q3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD4Fourv2d:%[0-9]+]]:qqqq = LD4Fourv2d %ptr :: (load (<2 x p0>))
+    ; CHECK-NEXT: %dst1:fpr128 = COPY [[LD4Fourv2d]].qsub0
+    ; CHECK-NEXT: %dst2:fpr128 = COPY [[LD4Fourv2d]].qsub1
+    ; CHECK-NEXT: %dst3:fpr128 = COPY [[LD4Fourv2d]].qsub2
+    ; CHECK-NEXT: %dst4:fpr128 = COPY [[LD4Fourv2d]].qsub3
+    ; CHECK-NEXT: $q0 = COPY %dst1
+    ; CHECK-NEXT: $q1 = COPY %dst2
+    ; CHECK-NEXT: $q2 = COPY %dst3
+    ; CHECK-NEXT: $q3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1, implicit $q2, implicit $q3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(<2 x p0>), %dst2:fpr(<2 x p0>), %dst3:fpr(<2 x p0>), %dst4:fpr(<2 x p0>)  = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (<2 x p0>))
     $q0 = COPY %dst1(<2 x p0>)
@@ -243,17 +251,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD1Fourv1d_s64
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD1Fourv1d:%[0-9]+]]:dddd = LD1Fourv1d %ptr :: (load (s64))
-    ; CHECK: %dst1:fpr64 = COPY [[LD1Fourv1d]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD1Fourv1d]].dsub1
-    ; CHECK: %dst3:fpr64 = COPY [[LD1Fourv1d]].dsub2
-    ; CHECK: %dst4:fpr64 = COPY [[LD1Fourv1d]].dsub3
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: $d2 = COPY %dst3
-    ; CHECK: $d3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD1Fourv1d:%[0-9]+]]:dddd = LD1Fourv1d %ptr :: (load (s64))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD1Fourv1d]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD1Fourv1d]].dsub1
+    ; CHECK-NEXT: %dst3:fpr64 = COPY [[LD1Fourv1d]].dsub2
+    ; CHECK-NEXT: %dst4:fpr64 = COPY [[LD1Fourv1d]].dsub3
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: $d2 = COPY %dst3
+    ; CHECK-NEXT: $d3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(s64), %dst2:fpr(s64), %dst3:fpr(s64), %dst4:fpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (s64))
     $d0 = COPY %dst1(s64)
@@ -272,17 +281,18 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: LD1Fourv1d_p0
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: [[LD1Fourv1d:%[0-9]+]]:dddd = LD1Fourv1d %ptr :: (load (p0))
-    ; CHECK: %dst1:fpr64 = COPY [[LD1Fourv1d]].dsub0
-    ; CHECK: %dst2:fpr64 = COPY [[LD1Fourv1d]].dsub1
-    ; CHECK: %dst3:fpr64 = COPY [[LD1Fourv1d]].dsub2
-    ; CHECK: %dst4:fpr64 = COPY [[LD1Fourv1d]].dsub3
-    ; CHECK: $d0 = COPY %dst1
-    ; CHECK: $d1 = COPY %dst2
-    ; CHECK: $d2 = COPY %dst3
-    ; CHECK: $d3 = COPY %dst4
-    ; CHECK: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LD1Fourv1d:%[0-9]+]]:dddd = LD1Fourv1d %ptr :: (load (p0))
+    ; CHECK-NEXT: %dst1:fpr64 = COPY [[LD1Fourv1d]].dsub0
+    ; CHECK-NEXT: %dst2:fpr64 = COPY [[LD1Fourv1d]].dsub1
+    ; CHECK-NEXT: %dst3:fpr64 = COPY [[LD1Fourv1d]].dsub2
+    ; CHECK-NEXT: %dst4:fpr64 = COPY [[LD1Fourv1d]].dsub3
+    ; CHECK-NEXT: $d0 = COPY %dst1
+    ; CHECK-NEXT: $d1 = COPY %dst2
+    ; CHECK-NEXT: $d2 = COPY %dst3
+    ; CHECK-NEXT: $d3 = COPY %dst4
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0, implicit $d1, implicit $d2, implicit $d3
     %ptr:gpr(p0) = COPY $x0
     %dst1:fpr(p0), %dst2:fpr(p0), %dst3:fpr(p0), %dst4:fpr(p0) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld4), %ptr(p0) :: (load (p0))
     $d0 = COPY %dst1(p0)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
index d4c1f23ca7ab0..79b0e678a62de 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldaxr-intrin.mir
@@ -19,11 +19,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_acquire_i8
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDAXRB:%[0-9]+]]:gpr32 = LDAXRB [[COPY]] :: (volatile load (s8) from %ir.addr)
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDAXRB]], %subreg.sub_32
-    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDAXRB:%[0-9]+]]:gpr32 = LDAXRB [[COPY]] :: (volatile load (s8) from %ir.addr)
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDAXRB]], %subreg.sub_32
+    ; CHECK-NEXT: $x1 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldaxr), %0(p0) :: (volatile load (s8) from %ir.addr)
     $x1 = COPY %1
@@ -41,11 +42,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_acquire_i16
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDAXRH:%[0-9]+]]:gpr32 = LDAXRH [[COPY]] :: (volatile load (s16) from %ir.addr)
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDAXRH]], %subreg.sub_32
-    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDAXRH:%[0-9]+]]:gpr32 = LDAXRH [[COPY]] :: (volatile load (s16) from %ir.addr)
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDAXRH]], %subreg.sub_32
+    ; CHECK-NEXT: $x1 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldaxr), %0(p0) :: (volatile load (s16) from %ir.addr)
     $x1 = COPY %1
@@ -63,11 +65,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_acquire_i32
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDAXRW:%[0-9]+]]:gpr32 = LDAXRW [[COPY]] :: (volatile load (s32) from %ir.addr)
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDAXRW]], %subreg.sub_32
-    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDAXRW:%[0-9]+]]:gpr32 = LDAXRW [[COPY]] :: (volatile load (s32) from %ir.addr)
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDAXRW]], %subreg.sub_32
+    ; CHECK-NEXT: $x1 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldaxr), %0(p0) :: (volatile load (s32) from %ir.addr)
     $x1 = COPY %1
@@ -84,10 +87,12 @@ body:             |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: test_load_acquire_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDAXRX:%[0-9]+]]:gpr64 = LDAXRX [[COPY]] :: (volatile load (s64) from %ir.addr)
-    ; CHECK: $x1 = COPY [[LDAXRX]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDAXRX:%[0-9]+]]:gpr64 = LDAXRX [[COPY]] :: (volatile load (s64) from %ir.addr)
+    ; CHECK-NEXT: $x1 = COPY [[LDAXRX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldaxr), %0(p0) :: (volatile load (s64) from %ir.addr)
     $x1 = COPY %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
index bad2e045f38ac..93d3aec10235f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ldxr-intrin.mir
@@ -18,11 +18,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_i8
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDXRB:%[0-9]+]]:gpr32 = LDXRB [[COPY]] :: (volatile load (s8) from %ir.addr)
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRB]], %subreg.sub_32
-    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDXRB:%[0-9]+]]:gpr32 = LDXRB [[COPY]] :: (volatile load (s8) from %ir.addr)
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRB]], %subreg.sub_32
+    ; CHECK-NEXT: $x1 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s8) from %ir.addr)
     $x1 = COPY %1(s64)
@@ -40,11 +41,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_i16
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDXRH:%[0-9]+]]:gpr32 = LDXRH [[COPY]] :: (volatile load (s16) from %ir.addr)
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRH]], %subreg.sub_32
-    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDXRH:%[0-9]+]]:gpr32 = LDXRH [[COPY]] :: (volatile load (s16) from %ir.addr)
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRH]], %subreg.sub_32
+    ; CHECK-NEXT: $x1 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s16) from %ir.addr)
     $x1 = COPY %1(s64)
@@ -62,11 +64,12 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_i32
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDXRW:%[0-9]+]]:gpr32 = LDXRW [[COPY]] :: (volatile load (s32) from %ir.addr)
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRW]], %subreg.sub_32
-    ; CHECK: $x1 = COPY [[SUBREG_TO_REG]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDXRW:%[0-9]+]]:gpr32 = LDXRW [[COPY]] :: (volatile load (s32) from %ir.addr)
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDXRW]], %subreg.sub_32
+    ; CHECK-NEXT: $x1 = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s32) from %ir.addr)
     $x1 = COPY %1(s64)
@@ -85,10 +88,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: test_load_i64
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDXRX:%[0-9]+]]:gpr64 = LDXRX [[COPY]] :: (volatile load (s64) from %ir.addr)
-    ; CHECK: $x1 = COPY [[LDXRX]]
-    ; CHECK: RET_ReallyLR implicit $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDXRX:%[0-9]+]]:gpr64 = LDXRX [[COPY]] :: (volatile load (s64) from %ir.addr)
+    ; CHECK-NEXT: $x1 = COPY [[LDXRX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x1
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), %0(p0) :: (volatile load (s64) from %ir.addr)
     $x1 = COPY %1(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
index 7c9c61db4de99..b5e204433c4ad 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-load-store-vector-of-ptr.mir
@@ -32,10 +32,11 @@ body:             |
 
     ; CHECK-LABEL: name: store_v2p0
     ; CHECK: liveins: $q0, $x0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: STRQui [[COPY]], [[COPY1]], 0 :: (store (<2 x s64>) into %ir.ptr)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: STRQui [[COPY]], [[COPY1]], 0 :: (store (<2 x s64>) into %ir.ptr)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:fpr(<2 x p0>) = COPY $q0
     %1:gpr(p0) = COPY $x0
     %2:fpr(<2 x s64>) = G_BITCAST %0(<2 x p0>)
@@ -60,10 +61,11 @@ body:             |
 
     ; CHECK-LABEL: name: load_v2p0
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<2 x s64>) from %ir.ptr)
-    ; CHECK: $q0 = COPY [[LDRQui]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<2 x s64>) from %ir.ptr)
+    ; CHECK-NEXT: $q0 = COPY [[LDRQui]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:gpr(p0) = COPY $x0
     %2:fpr(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>) from %ir.ptr)
     %1:fpr(<2 x p0>) = G_BITCAST %2(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
index 20e1a93fe2e63..bd97890866fd5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-load.mir
@@ -792,7 +792,7 @@ regBankSelected: true
 body:             |
   bb.0:
     liveins: $q0, $x0
-    ;This test should not select an LD1 instruction as there is a store instruction between G_INSERT_VECTOR_ELT and G_LOAD 
+    ;This test should not select an LD1 instruction as there is a store instruction between G_INSERT_VECTOR_ELT and G_LOAD
     ; CHECK-LABEL: name: load_s32_gpr_GIM
     ; CHECK: liveins: $q0, $x0
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-imm.mir
index e70d71c37726b..58f3acc0d5495 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-imm.mir
@@ -11,10 +11,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: logical_imm_64_and
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[COPY]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[COPY]], 4096
+    ; CHECK-NEXT: $x0 = COPY [[ANDXri]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_AND %0, %1:gpr(s64)
@@ -31,10 +32,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: logical_imm_64_or
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[ORRXri:%[0-9]+]]:gpr64sp = ORRXri [[COPY]], 4096
-    ; CHECK: $x0 = COPY [[ORRXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[ORRXri:%[0-9]+]]:gpr64sp = ORRXri [[COPY]], 4096
+    ; CHECK-NEXT: $x0 = COPY [[ORRXri]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_OR %0, %1:gpr(s64)
@@ -51,10 +53,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: logical_imm_64_xor
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[EORXri:%[0-9]+]]:gpr64sp = EORXri [[COPY]], 4096
-    ; CHECK: $x0 = COPY [[EORXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[EORXri:%[0-9]+]]:gpr64sp = EORXri [[COPY]], 4096
+    ; CHECK-NEXT: $x0 = COPY [[EORXri]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_XOR %0, %1:gpr(s64)
@@ -71,10 +74,11 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: logical_imm_32_and
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[COPY]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[COPY]], 0
+    ; CHECK-NEXT: $w0 = COPY [[ANDWri]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_AND %0, %1:gpr(s32)
@@ -91,10 +95,11 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: logical_imm_32_or
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[ORRWri:%[0-9]+]]:gpr32sp = ORRWri [[COPY]], 0
-    ; CHECK: $w0 = COPY [[ORRWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[ORRWri:%[0-9]+]]:gpr32sp = ORRWri [[COPY]], 0
+    ; CHECK-NEXT: $w0 = COPY [[ORRWri]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_OR %0, %1:gpr(s32)
@@ -111,10 +116,11 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: logical_imm_32_xor
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[EORWri:%[0-9]+]]:gpr32sp = EORWri [[COPY]], 0
-    ; CHECK: $w0 = COPY [[EORWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[EORWri:%[0-9]+]]:gpr32sp = EORWri [[COPY]], 0
+    ; CHECK-NEXT: $w0 = COPY [[EORWri]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_XOR %0, %1:gpr(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir
index 96a35714d1608..391069de8f0ca 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir
@@ -14,10 +14,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: and_xor_bicxrs
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[BICXrs:%[0-9]+]]:gpr64 = BICXrs [[COPY]], [[COPY]], 8
-    ; CHECK: $x0 = COPY [[BICXrs]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[BICXrs:%[0-9]+]]:gpr64 = BICXrs [[COPY]], [[COPY]], 8
+    ; CHECK-NEXT: $x0 = COPY [[BICXrs]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_CONSTANT i64 -1
@@ -37,10 +38,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: or_xor_ornxrs
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[ORNXrs:%[0-9]+]]:gpr64 = ORNXrs [[COPY]], [[COPY]], 8
-    ; CHECK: $x0 = COPY [[ORNXrs]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[ORNXrs:%[0-9]+]]:gpr64 = ORNXrs [[COPY]], [[COPY]], 8
+    ; CHECK-NEXT: $x0 = COPY [[ORNXrs]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_CONSTANT i64 -1
@@ -60,10 +62,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: xor_xor_eonxrs
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[EONXrs:%[0-9]+]]:gpr64 = EONXrs [[COPY]], [[COPY]], 8
-    ; CHECK: $x0 = COPY [[EONXrs]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[EONXrs:%[0-9]+]]:gpr64 = EONXrs [[COPY]], [[COPY]], 8
+    ; CHECK-NEXT: $x0 = COPY [[EONXrs]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_CONSTANT i64 -1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-mul.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-mul.mir
index f6622566a59a9..2e803b613bea1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-mul.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-mul.mir
@@ -20,7 +20,9 @@ body:             |
     ; def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
     ;             (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
     ; CHECK-LABEL: name: mul_i64_sext_imm32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 3
     ; CHECK-NEXT: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY]], [[MOVi32imm]], $xzr
     ; CHECK-NEXT: $x0 = COPY [[SMADDLrrr]]

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-muladd.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
index c44714c17debb..13aa001311a2a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
@@ -26,11 +26,13 @@ body:             |
     liveins: $x0, $w1, $w2
 
     ; CHECK-LABEL: name: SMADDLrrr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
-    ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $x0 = COPY [[SMADDLrrr]]
+    ; CHECK: liveins: $x0, $w1, $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
+    ; CHECK-NEXT: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY1]], [[COPY2]], [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[SMADDLrrr]]
     %0(s64) = COPY $x0
     %1(s32) = COPY $w1
     %2(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-nearbyint.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-nearbyint.mir
index 864d2e8fdcba8..9a181adfc0b31 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-nearbyint.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-nearbyint.mir
@@ -15,10 +15,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v4f16.nearbyint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTIv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTIv4f16 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTIv4f16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTIv4f16_:%[0-9]+]]:fpr64 = nofpexcept FRINTIv4f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTIv4f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = G_FNEARBYINT %0
     $d0 = COPY %1(<4 x s16>)
@@ -38,10 +39,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v8f16.nearbyint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTIv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTIv8f16 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTIv8f16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTIv8f16_:%[0-9]+]]:fpr128 = nofpexcept FRINTIv8f16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTIv8f16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = G_FNEARBYINT %0
     $q0 = COPY %1(<8 x s16>)
@@ -61,10 +63,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f32.nearbyint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTIv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTIv2f32 [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTIv2f32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTIv2f32_:%[0-9]+]]:fpr64 = nofpexcept FRINTIv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTIv2f32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = G_FNEARBYINT %0
     $d0 = COPY %1(<2 x s32>)
@@ -84,10 +87,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_v2f64.nearbyint
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FRINTIv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTIv2f64 [[COPY]]
-    ; CHECK: $q0 = COPY [[FRINTIv2f64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FRINTIv2f64_:%[0-9]+]]:fpr128 = nofpexcept FRINTIv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY [[FRINTIv2f64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = G_FNEARBYINT %0
     $q0 = COPY %1(<2 x s64>)
@@ -107,10 +111,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f32.nearbyint
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FRINTISr:%[0-9]+]]:fpr32 = nofpexcept FRINTISr [[COPY]]
-    ; CHECK: $s0 = COPY [[FRINTISr]]
-    ; CHECK: RET_ReallyLR implicit $s0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FRINTISr:%[0-9]+]]:fpr32 = nofpexcept FRINTISr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $s0 = COPY [[FRINTISr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_FNEARBYINT %0
     $s0 = COPY %1(s32)
@@ -130,10 +135,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f64.nearbyint
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FRINTIDr:%[0-9]+]]:fpr64 = nofpexcept FRINTIDr [[COPY]]
-    ; CHECK: $d0 = COPY [[FRINTIDr]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FRINTIDr:%[0-9]+]]:fpr64 = nofpexcept FRINTIDr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $d0 = COPY [[FRINTIDr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(s64) = COPY $d0
     %1:fpr(s64) = G_FNEARBYINT %0
     $d0 = COPY %1(s64)
@@ -153,10 +159,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_f16.nearbyint
     ; CHECK: liveins: $h0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
-    ; CHECK: [[FRINTIHr:%[0-9]+]]:fpr16 = nofpexcept FRINTIHr [[COPY]]
-    ; CHECK: $h0 = COPY [[FRINTIHr]]
-    ; CHECK: RET_ReallyLR implicit $h0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
+    ; CHECK-NEXT: [[FRINTIHr:%[0-9]+]]:fpr16 = nofpexcept FRINTIHr [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $h0 = COPY [[FRINTIHr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h0
     %0:fpr(s16) = COPY $h0
     %1:fpr(s16) = G_FNEARBYINT %0
     $h0 = COPY %1(s16)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
index 690ebc401a98b..c38e4a8707e1a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
@@ -23,9 +23,11 @@ body:             |
     liveins: $d0
 
     ; CHECK-LABEL: name: vcvtfxu2fp_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[UCVTFd:%[0-9]+]]:fpr64 = UCVTFd [[COPY]], 12
-    ; CHECK: $d1 = COPY [[UCVTFd]]
+    ; CHECK: liveins: $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[UCVTFd:%[0-9]+]]:fpr64 = UCVTFd [[COPY]], 12
+    ; CHECK-NEXT: $d1 = COPY [[UCVTFd]]
     %0(s64) = COPY $d0
     %1(s32) = G_CONSTANT i32 12
     %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vector-fcmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vector-fcmp.mir
index daf84b5cf07e9..265b12338f840 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vector-fcmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-neon-vector-fcmp.mir
@@ -11,10 +11,10 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmeq
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %rhs:fpr128 = COPY $q1
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMEQv2f64 %lhs, %rhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %rhs:fpr128 = COPY $q1
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMEQv2f64 %lhs, %rhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %rhs:fpr(<2 x s64>) = COPY $q1
     %fcmp:fpr(<2 x s64>) = G_FCMEQ %lhs, %rhs(<2 x s64>)
@@ -31,10 +31,10 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmge
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %rhs:fpr128 = COPY $q1
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMGEv2f64 %lhs, %rhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %rhs:fpr128 = COPY $q1
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMGEv2f64 %lhs, %rhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %rhs:fpr(<2 x s64>) = COPY $q1
     %fcmp:fpr(<2 x s64>) = G_FCMGE %lhs, %rhs(<2 x s64>)
@@ -51,10 +51,10 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmgt
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %rhs:fpr128 = COPY $q1
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMGTv2f64 %lhs, %rhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %rhs:fpr128 = COPY $q1
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMGTv2f64 %lhs, %rhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %rhs:fpr(<2 x s64>) = COPY $q1
     %fcmp:fpr(<2 x s64>) = G_FCMGT %lhs, %rhs(<2 x s64>)
@@ -71,9 +71,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmeqz
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMEQv2i64rz %lhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMEQv2i64rz %lhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %zero_vec:fpr(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
@@ -91,9 +91,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmgez
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMGEv2i64rz %lhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMGEv2i64rz %lhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %zero_vec:fpr(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
@@ -111,9 +111,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmgtz
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMGTv2i64rz %lhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMGTv2i64rz %lhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %zero_vec:fpr(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
@@ -131,9 +131,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmlez
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMLEv2i64rz %lhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMLEv2i64rz %lhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %zero_vec:fpr(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)
@@ -151,9 +151,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fcmltz
     ; CHECK: %lhs:fpr128 = COPY $q0
-    ; CHECK: %fcmp:fpr128 = nofpexcept FCMLTv2i64rz %lhs
-    ; CHECK: $q0 = COPY %fcmp
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: %fcmp:fpr128 = nofpexcept FCMLTv2i64rz %lhs, implicit $fpcr
+    ; CHECK-NEXT: $q0 = COPY %fcmp
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %lhs:fpr(<2 x s64>) = COPY $q0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %zero_vec:fpr(<2 x s64>) = G_BUILD_VECTOR %zero(s64), %zero(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
index bb838ff81c1a6..2bdf585d91286 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
@@ -53,10 +53,11 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: main
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
-    ; CHECK: [[EORWri:%[0-9]+]]:gpr32sp = EORWri [[ORNWrr]], 0
-    ; CHECK: $w0 = COPY [[EORWri]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
+    ; CHECK-NEXT: [[EORWri:%[0-9]+]]:gpr32sp = EORWri [[ORNWrr]], 0
+    ; CHECK-NEXT: $w0 = COPY [[EORWri]]
     %0(s32) = G_CONSTANT i32 -1
     %3(s32) = G_CONSTANT i32 1
     %1(s32) = COPY $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir
index 5496e7ed8d7b0..d2caee5683f99 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-reduce-add.mir
@@ -14,13 +14,14 @@ body:             |
 
     ; CHECK-LABEL: name: add_B
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<16 x s8>))
-    ; CHECK: [[ADDVv16i8v:%[0-9]+]]:fpr8 = ADDVv16i8v [[LDRQui]]
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv16i8v]], %subreg.bsub
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
-    ; CHECK: $w0 = COPY [[COPY1]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<16 x s8>))
+    ; CHECK-NEXT: [[ADDVv16i8v:%[0-9]+]]:fpr8 = ADDVv16i8v [[LDRQui]]
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv16i8v]], %subreg.bsub
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(p0) = COPY $x0
     %1:fpr(<16 x s8>) = G_LOAD %0(p0) :: (load (<16 x s8>))
     %2:fpr(s8) = G_VECREDUCE_ADD %1(<16 x s8>)
@@ -44,13 +45,14 @@ body:             |
 
     ; CHECK-LABEL: name: add_H
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<8 x s16>))
-    ; CHECK: [[ADDVv8i16v:%[0-9]+]]:fpr16 = ADDVv8i16v [[LDRQui]]
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv8i16v]], %subreg.hsub
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
-    ; CHECK: $w0 = COPY [[COPY1]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<8 x s16>))
+    ; CHECK-NEXT: [[ADDVv8i16v:%[0-9]+]]:fpr16 = ADDVv8i16v [[LDRQui]]
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[ADDVv8i16v]], %subreg.hsub
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(p0) = COPY $x0
     %1:fpr(<8 x s16>) = G_LOAD %0(p0) :: (load (<8 x s16>))
     %2:fpr(s16) = G_VECREDUCE_ADD %1(<8 x s16>)
@@ -74,11 +76,12 @@ body:             |
 
     ; CHECK-LABEL: name: add_S
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<4 x s32>))
-    ; CHECK: [[ADDVv4i32v:%[0-9]+]]:fpr32 = ADDVv4i32v [[LDRQui]]
-    ; CHECK: $w0 = COPY [[ADDVv4i32v]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<4 x s32>))
+    ; CHECK-NEXT: [[ADDVv4i32v:%[0-9]+]]:fpr32 = ADDVv4i32v [[LDRQui]]
+    ; CHECK-NEXT: $w0 = COPY [[ADDVv4i32v]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(p0) = COPY $x0
     %1:fpr(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>))
     %2:fpr(s32) = G_VECREDUCE_ADD %1(<4 x s32>)
@@ -100,12 +103,13 @@ body:             |
 
     ; CHECK-LABEL: name: add_S_v2i32
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 0 :: (load (<2 x s32>))
-    ; CHECK: [[ADDPv2i32_:%[0-9]+]]:fpr64 = ADDPv2i32 [[LDRDui]], [[LDRDui]]
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[ADDPv2i32_]].ssub
-    ; CHECK: $w0 = COPY [[COPY1]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 0 :: (load (<2 x s32>))
+    ; CHECK-NEXT: [[ADDPv2i32_:%[0-9]+]]:fpr64 = ADDPv2i32 [[LDRDui]], [[LDRDui]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[ADDPv2i32_]].ssub
+    ; CHECK-NEXT: $w0 = COPY [[COPY1]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(p0) = COPY $x0
     %1:fpr(<2 x s32>) = G_LOAD %0(p0) :: (load (<2 x s32>))
     %2:fpr(s32) = G_VECREDUCE_ADD %1(<2 x s32>)
@@ -127,11 +131,12 @@ body:             |
 
     ; CHECK-LABEL: name: add_D
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<2 x s64>))
-    ; CHECK: [[ADDPv2i64p:%[0-9]+]]:fpr64 = ADDPv2i64p [[LDRQui]]
-    ; CHECK: $x0 = COPY [[ADDPv2i64p]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (<2 x s64>))
+    ; CHECK-NEXT: [[ADDPv2i64p:%[0-9]+]]:fpr64 = ADDPv2i64p [[LDRQui]]
+    ; CHECK-NEXT: $x0 = COPY [[ADDPv2i64p]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(p0) = COPY $x0
     %1:fpr(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>))
     %2:fpr(s64) = G_VECREDUCE_ADD %1(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
index e5d1c325f38f1..24afde32e12a8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
@@ -16,15 +16,18 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: lr_other_block
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $w0, $x0, $lr
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK: bb.1:
-  ; CHECK:   $lr = COPY [[COPY]]
-  ; CHECK:   XPACLRI implicit-def $lr, implicit $lr
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK:   $x0 = COPY [[COPY1]]
-  ; CHECK:   RET_ReallyLR implicit $x0
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0, $x0, $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   B %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   $lr = COPY [[COPY]]
+  ; CHECK-NEXT:   XPACLRI implicit-def $lr, implicit $lr
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   $x0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
   ; LR should be added as a livein to the entry block.
 
   bb.0:
@@ -45,15 +48,18 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: already_live_in
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $w0, $x0, $lr
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK: bb.1:
-  ; CHECK:   $lr = COPY [[COPY]]
-  ; CHECK:   XPACLRI implicit-def $lr, implicit $lr
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK:   $x0 = COPY [[COPY1]]
-  ; CHECK:   RET_ReallyLR implicit $x0
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0, $x0, $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   B %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   $lr = COPY [[COPY]]
+  ; CHECK-NEXT:   XPACLRI implicit-def $lr, implicit $lr
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   $x0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   RET_ReallyLR implicit $x0
   ; We should not have LR listed as a livein twice.
 
   bb.0:
@@ -76,18 +82,21 @@ tracksRegLiveness: true
 body:             |
   ; CHECK-LABEL: name: multi_use
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $w0, $x0, $lr
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK:   $lr = COPY [[COPY]]
-  ; CHECK:   XPACLRI implicit-def $lr, implicit $lr
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK: bb.1:
-  ; CHECK:   $x0 = COPY [[COPY1]]
-  ; CHECK:   $lr = COPY [[COPY]]
-  ; CHECK:   XPACLRI implicit-def $lr, implicit $lr
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr64 = COPY $lr
-  ; CHECK:   RET_ReallyLR implicit [[COPY2]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0, $x0, $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   $lr = COPY [[COPY]]
+  ; CHECK-NEXT:   XPACLRI implicit-def $lr, implicit $lr
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   B %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   $x0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   $lr = COPY [[COPY]]
+  ; CHECK-NEXT:   XPACLRI implicit-def $lr, implicit $lr
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr64 = COPY $lr
+  ; CHECK-NEXT:   RET_ReallyLR implicit [[COPY2]]
   bb.0:
     liveins: $w0, $x0, $lr
     %0:gpr(p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), 0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-rev.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-rev.mir
index e2779dfd84fda..2d3051ec09e1b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-rev.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-rev.mir
@@ -22,10 +22,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: rev64_REV64v2i32
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %rev:fpr64 = REV64v2i32 %copy
-    ; CHECK: $d0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %rev:fpr64 = REV64v2i32 %copy
+    ; CHECK-NEXT: $d0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<2 x s32>) = COPY $d0
     %rev:fpr(<2 x s32>) = G_REV64 %copy
     $d0 = COPY %rev(<2 x s32>)
@@ -43,10 +44,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: rev64_REV64v4i16
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %rev:fpr64 = REV64v4i16 %copy
-    ; CHECK: $d0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %rev:fpr64 = REV64v4i16 %copy
+    ; CHECK-NEXT: $d0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<4 x s16>) = COPY $d0
     %rev:fpr(<4 x s16>) = G_REV64 %copy
     $d0 = COPY %rev(<4 x s16>)
@@ -64,10 +66,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev64_REV64v4i32
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %rev:fpr128 = REV64v4i32 %copy
-    ; CHECK: $q0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %rev:fpr128 = REV64v4i32 %copy
+    ; CHECK-NEXT: $q0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<4 x s32>) = COPY $q0
     %rev:fpr(<4 x s32>) = G_REV64 %copy
     $q0 = COPY %rev(<4 x s32>)
@@ -85,10 +88,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev64_REV64v8i8
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %rev:fpr64 = REV64v8i8 %copy
-    ; CHECK: $d0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %rev:fpr64 = REV64v8i8 %copy
+    ; CHECK-NEXT: $d0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<8 x s8>) = COPY $d0
     %rev:fpr(<8 x s8>) = G_REV64 %copy
     $d0 = COPY %rev(<8 x s8>)
@@ -106,10 +110,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev64_REV64v8i16
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %rev:fpr128 = REV64v8i16 %copy
-    ; CHECK: $q0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %rev:fpr128 = REV64v8i16 %copy
+    ; CHECK-NEXT: $q0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<8 x s16>) = COPY $q0
     %rev:fpr(<8 x s16>) = G_REV64 %copy
     $q0 = COPY %rev(<8 x s16>)
@@ -127,10 +132,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev64_REV64v16i8
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %rev:fpr128 = REV64v16i8 %copy
-    ; CHECK: $q0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %rev:fpr128 = REV64v16i8 %copy
+    ; CHECK-NEXT: $q0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<16 x s8>) = COPY $q0
     %rev:fpr(<16 x s8>) = G_REV64 %copy
     $q0 = COPY %rev(<16 x s8>)
@@ -148,10 +154,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: rev32_REV32v4i16
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %rev:fpr64 = REV32v4i16 %copy
-    ; CHECK: $d0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %rev:fpr64 = REV32v4i16 %copy
+    ; CHECK-NEXT: $d0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<4 x s16>) = COPY $d0
     %rev:fpr(<4 x s16>) = G_REV32 %copy
     $d0 = COPY %rev(<4 x s16>)
@@ -169,10 +176,11 @@ body:             |
     liveins: $d0
     ; CHECK-LABEL: name: rev32_REV32v8i8
     ; CHECK: liveins: $d0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %rev:fpr64 = REV32v8i8 %copy
-    ; CHECK: $d0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %rev:fpr64 = REV32v8i8 %copy
+    ; CHECK-NEXT: $d0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<8 x s8>) = COPY $d0
     %rev:fpr(<8 x s8>) = G_REV32 %copy
     $d0 = COPY %rev(<8 x s8>)
@@ -190,10 +198,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev32_REV32v8i16
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %rev:fpr128 = REV32v8i16 %copy
-    ; CHECK: $q0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %rev:fpr128 = REV32v8i16 %copy
+    ; CHECK-NEXT: $q0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<8 x s16>) = COPY $q0
     %rev:fpr(<8 x s16>) = G_REV32 %copy
     $q0 = COPY %rev(<8 x s16>)
@@ -211,10 +220,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev32_REV32v16i8
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %rev:fpr128 = REV32v16i8 %copy
-    ; CHECK: $q0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %rev:fpr128 = REV32v16i8 %copy
+    ; CHECK-NEXT: $q0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<16 x s8>) = COPY $q0
     %rev:fpr(<16 x s8>) = G_REV32 %copy
     $q0 = COPY %rev(<16 x s8>)
@@ -232,10 +242,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev16_REV16v8i8
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr64 = COPY $d0
-    ; CHECK: %rev:fpr64 = REV16v8i8 %copy
-    ; CHECK: $d0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr64 = COPY $d0
+    ; CHECK-NEXT: %rev:fpr64 = REV16v8i8 %copy
+    ; CHECK-NEXT: $d0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %copy:fpr(<8 x s8>) = COPY $d0
     %rev:fpr(<8 x s8>) = G_REV16 %copy
     $d0 = COPY %rev(<8 x s8>)
@@ -253,10 +264,11 @@ body:             |
     liveins: $q0
     ; CHECK-LABEL: name: rev16_REV16v16i8
     ; CHECK: liveins: $q0
-    ; CHECK: %copy:fpr128 = COPY $q0
-    ; CHECK: %rev:fpr128 = REV16v16i8 %copy
-    ; CHECK: $q0 = COPY %rev
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:fpr128 = COPY $q0
+    ; CHECK-NEXT: %rev:fpr128 = REV16v16i8 %copy
+    ; CHECK-NEXT: $q0 = COPY %rev
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %copy:fpr(<16 x s8>) = COPY $q0
     %rev:fpr(<16 x s8>) = G_REV16 %copy
     $q0 = COPY %rev(<16 x s8>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-rotate.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-rotate.mir
index 3775ecea63b97..2fb4df2a61dca 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-rotate.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-rotate.mir
@@ -18,11 +18,12 @@ body:             |
 
     ; CHECK-LABEL: name: fold_ror_eor
     ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[EXTRWrri:%[0-9]+]]:gpr32 = EXTRWrri [[COPY]], [[COPY]], 11
-    ; CHECK: [[EORWrs:%[0-9]+]]:gpr32 = EORWrs [[EXTRWrri]], [[COPY]], 198
-    ; CHECK: $w0 = COPY [[EORWrs]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[EXTRWrri:%[0-9]+]]:gpr32 = EXTRWrri [[COPY]], [[COPY]], 11
+    ; CHECK-NEXT: [[EORWrs:%[0-9]+]]:gpr32 = EORWrs [[EXTRWrri]], [[COPY]], 198
+    ; CHECK-NEXT: $w0 = COPY [[EORWrs]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %13:gpr(s64) = G_CONSTANT i64 6
     %2:gpr(s32) = G_ROTR %0, %13(s64)
@@ -51,11 +52,12 @@ body:             |
 
     ; CHECK-LABEL: name: fold_ror_eor_rhs_only
     ; CHECK: liveins: $w0, $w1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[EORWrs:%[0-9]+]]:gpr32 = EORWrs [[COPY1]], [[COPY]], 198
-    ; CHECK: $w0 = COPY [[EORWrs]]
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[EORWrs:%[0-9]+]]:gpr32 = EORWrs [[COPY1]], [[COPY]], 198
+    ; CHECK-NEXT: $w0 = COPY [[EORWrs]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
     %9:gpr(s64) = G_CONSTANT i64 6

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
index 42017e96e5a63..fe272c68d450f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
@@ -68,7 +68,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWri when we can fold in a constant.
-    ;
     ; CHECK-LABEL: name: saddo_s32_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -93,7 +92,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWrs when we can fold in a shift.
-    ;
     ; CHECK-LABEL: name: saddo_s32_shifted
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -121,7 +119,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get SUBSWri when we can fold in a negative constant.
-    ;
     ; CHECK-LABEL: name: saddo_s32_neg_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
index ae11f1b1209a9..bbf6239b510cf 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-sbfx.mir
@@ -12,10 +12,11 @@ body:             |
     liveins: $w0
     ; CHECK-LABEL: name: sbfx_s32
     ; CHECK: liveins: $w0
-    ; CHECK: %copy:gpr32 = COPY $w0
-    ; CHECK: %sbfx:gpr32 = SBFMWri %copy, 0, 9
-    ; CHECK: $w0 = COPY %sbfx
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr32 = COPY $w0
+    ; CHECK-NEXT: %sbfx:gpr32 = SBFMWri %copy, 0, 9
+    ; CHECK-NEXT: $w0 = COPY %sbfx
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %cst1:gpr(s32) = G_CONSTANT i32 0
     %cst2:gpr(s32) = G_CONSTANT i32 10
@@ -34,10 +35,11 @@ body:             |
     liveins: $x0
     ; CHECK-LABEL: name: sbfx_s64
     ; CHECK: liveins: $x0
-    ; CHECK: %copy:gpr64 = COPY $x0
-    ; CHECK: %sbfx:gpr64 = SBFMXri %copy, 0, 9
-    ; CHECK: $x0 = COPY %sbfx
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr64 = COPY $x0
+    ; CHECK-NEXT: %sbfx:gpr64 = SBFMXri %copy, 0, 9
+    ; CHECK-NEXT: $x0 = COPY %sbfx
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy:gpr(s64) = COPY $x0
     %cst1:gpr(s64) = G_CONSTANT i64 0
     %cst2:gpr(s64) = G_CONSTANT i64 10
@@ -58,10 +60,11 @@ body:             |
 
     ; CHECK-LABEL: name: sbfx_s32_31_1
     ; CHECK: liveins: $w0
-    ; CHECK: %copy:gpr32 = COPY $w0
-    ; CHECK: %sbfx:gpr32 = SBFMWri %copy, 31, 31
-    ; CHECK: $w0 = COPY %sbfx
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %copy:gpr32 = COPY $w0
+    ; CHECK-NEXT: %sbfx:gpr32 = SBFMWri %copy, 31, 31
+    ; CHECK-NEXT: $w0 = COPY %sbfx
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %cst1:gpr(s32) = G_CONSTANT i32 31
     %cst2:gpr(s32) = G_CONSTANT i32 1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
index dafeee4d981f8..dc213520f320d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-merge.mir
@@ -21,12 +21,14 @@ body:             |
     liveins: $w0, $w1
 
     ; CHECK-LABEL: name: gmerge_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY $w1
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
-    ; CHECK: [[SUBREG_TO_REG1:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_32
-    ; CHECK: [[BFMXri:%[0-9]+]]:gpr64 = BFMXri [[SUBREG_TO_REG]], [[SUBREG_TO_REG1]], 32, 31
-    ; CHECK: $x0 = COPY [[BFMXri]]
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY $w1
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[SUBREG_TO_REG1:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_32
+    ; CHECK-NEXT: [[BFMXri:%[0-9]+]]:gpr64 = BFMXri [[SUBREG_TO_REG]], [[SUBREG_TO_REG1]], 32, 31
+    ; CHECK-NEXT: $x0 = COPY [[BFMXri]]
     %0(s32) = COPY $w0
     %1(s32) = COPY $w1
     %2(s64) = G_MERGE_VALUES %0(s32), %1(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
index 91e9971bb3f7a..8f3a8f761ae4d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-sextload.mir
@@ -18,9 +18,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRSHWui [[COPY]], 0 :: (load (s16) from %ir.addr)
-    ; CHECK: $w0 = COPY [[T0]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRSHWui:%[0-9]+]]:gpr32 = LDRSHWui [[COPY]], 0 :: (load (s16) from %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY [[LDRSHWui]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_SEXTLOAD %0 :: (load (s16) from %ir.addr)
     $w0 = COPY %1(s32)
@@ -36,10 +38,12 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: sextload_s32_from_s16_not_combined
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16) from %ir.addr)
-    ; CHECK: [[T1:%[0-9]+]]:gpr32 = SBFMWri [[T0]], 0, 15
-    ; CHECK: $w0 = COPY [[T1]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16) from %ir.addr)
+    ; CHECK-NEXT: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[LDRHHui]], 0, 15
+    ; CHECK-NEXT: $w0 = COPY [[SBFMWri]]
     %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_LOAD %0 :: (load (s16) from %ir.addr)
     %2:gpr(s32) = G_SEXT %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-shufflevec-undef-mask-elt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-shufflevec-undef-mask-elt.mir
index 5f280ae2e3024..a5ac0b7ff8626 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-shufflevec-undef-mask-elt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-shufflevec-undef-mask-elt.mir
@@ -17,24 +17,25 @@ body:             |
 
     ; CHECK-LABEL: name: shuffle_undef_mask_elt
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
-    ; CHECK: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[DEF]], %subreg.ssub
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[INSERT_SUBREG]].dsub
-    ; CHECK: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
-    ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0
-    ; CHECK: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY]], %subreg.dsub
-    ; CHECK: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY1]], %subreg.dsub
-    ; CHECK: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG2]], 0
-    ; CHECK: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[LDRDui]], %subreg.dsub
-    ; CHECK: [[TBLv16i8One:%[0-9]+]]:fpr128 = TBLv16i8One [[INSvi64lane]], [[INSERT_SUBREG3]]
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[TBLv16i8One]].dsub
-    ; CHECK: $d0 = COPY [[COPY2]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF1]], [[DEF]], %subreg.ssub
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY [[INSERT_SUBREG]].dsub
+    ; CHECK-NEXT: [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) %const.0
+    ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s64) from constant-pool)
+    ; CHECK-NEXT: [[DEF2:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF2]], [[COPY]], %subreg.dsub
+    ; CHECK-NEXT: [[DEF3:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG2:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF3]], [[COPY1]], %subreg.dsub
+    ; CHECK-NEXT: [[INSvi64lane:%[0-9]+]]:fpr128 = INSvi64lane [[INSERT_SUBREG1]], 1, [[INSERT_SUBREG2]], 0
+    ; CHECK-NEXT: [[DEF4:%[0-9]+]]:fpr128 = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG3:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF4]], [[LDRDui]], %subreg.dsub
+    ; CHECK-NEXT: [[TBLv16i8One:%[0-9]+]]:fpr128 = TBLv16i8One [[INSvi64lane]], [[INSERT_SUBREG3]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[TBLv16i8One]].dsub
+    ; CHECK-NEXT: $d0 = COPY [[COPY2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %6:gpr(s32) = G_IMPLICIT_DEF
     %7:gpr(s32) = G_IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
index 5a47a4515930c..700a7e298ec5f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
@@ -68,7 +68,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWri when we can fold in a constant.
-    ;
     ; CHECK-LABEL: name: ssubo_s32_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -93,7 +92,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWrs when we can fold in a shift.
-    ;
     ; CHECK-LABEL: name: ssubo_s32_shifted
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -121,7 +119,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get SUBSWri when we can fold in a negative constant.
-    ;
     ; CHECK-LABEL: name: ssubo_s32_neg_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-st2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-st2.mir
index 2b1ec43e30058..3503b8e13c588 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-st2.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-st2.mir
@@ -13,12 +13,13 @@ body:             |
 
     ; CHECK-LABEL: name: v8i8_ST2Twov8b
     ; CHECK: liveins: $d0, $d1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr64 = COPY $d0
-    ; CHECK: %src2:fpr64 = COPY $d1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
-    ; CHECK: ST2Twov8b [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr64 = COPY $d0
+    ; CHECK-NEXT: %src2:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
+    ; CHECK-NEXT: ST2Twov8b [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<8 x s8>) = COPY $d0
     %src2:fpr(<8 x s8>) = COPY $d1
@@ -38,12 +39,13 @@ body:             |
 
     ; CHECK-LABEL: name: v16i8_ST2Twov16b
     ; CHECK: liveins: $q0, $q1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr128 = COPY $q0
-    ; CHECK: %src2:fpr128 = COPY $q1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
-    ; CHECK: ST2Twov16b [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %src2:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
+    ; CHECK-NEXT: ST2Twov16b [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<16 x s8>) = COPY $q0
     %src2:fpr(<16 x s8>) = COPY $q1
@@ -63,12 +65,13 @@ body:             |
 
     ; CHECK-LABEL: name: v4i16_ST2Twov4h
     ; CHECK: liveins: $d0, $d1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr64 = COPY $d0
-    ; CHECK: %src2:fpr64 = COPY $d1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
-    ; CHECK: ST2Twov4h [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr64 = COPY $d0
+    ; CHECK-NEXT: %src2:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
+    ; CHECK-NEXT: ST2Twov4h [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<4 x s16>) = COPY $d0
     %src2:fpr(<4 x s16>) = COPY $d1
@@ -88,12 +91,13 @@ body:             |
 
     ; CHECK-LABEL: name: v8i16_ST2Twov8h
     ; CHECK: liveins: $q0, $q1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr128 = COPY $q0
-    ; CHECK: %src2:fpr128 = COPY $q1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
-    ; CHECK: ST2Twov8h [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %src2:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
+    ; CHECK-NEXT: ST2Twov8h [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<8 x s16>) = COPY $q0
     %src2:fpr(<8 x s16>) = COPY $q1
@@ -112,12 +116,13 @@ body:             |
 
     ; CHECK-LABEL: name: v2i32_ST2Twov2s
     ; CHECK: liveins: $d0, $d1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr64 = COPY $d0
-    ; CHECK: %src2:fpr64 = COPY $d1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
-    ; CHECK: ST2Twov2s [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr64 = COPY $d0
+    ; CHECK-NEXT: %src2:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
+    ; CHECK-NEXT: ST2Twov2s [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<2 x s32>) = COPY $d0
     %src2:fpr(<2 x s32>) = COPY $d1
@@ -136,12 +141,13 @@ body:             |
 
     ; CHECK-LABEL: name: v4i32_ST2Twov4s
     ; CHECK: liveins: $q0, $q1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr128 = COPY $q0
-    ; CHECK: %src2:fpr128 = COPY $q1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
-    ; CHECK: ST2Twov4s [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %src2:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
+    ; CHECK-NEXT: ST2Twov4s [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<4 x s32>) = COPY $q0
     %src2:fpr(<4 x s32>) = COPY $q1
@@ -160,12 +166,13 @@ body:             |
 
     ; CHECK-LABEL: name: v2i64_ST2Twov2d_s64_elts
     ; CHECK: liveins: $q0, $q1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr128 = COPY $q0
-    ; CHECK: %src2:fpr128 = COPY $q1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
-    ; CHECK: ST2Twov2d [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %src2:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
+    ; CHECK-NEXT: ST2Twov2d [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<2 x s64>) = COPY $q0
     %src2:fpr(<2 x s64>) = COPY $q1
@@ -184,12 +191,13 @@ body:             |
 
     ; CHECK-LABEL: name: v2i64_ST2Twov2d_s64_p0_elts
     ; CHECK: liveins: $q0, $q1, $x0
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:fpr128 = COPY $q0
-    ; CHECK: %src2:fpr128 = COPY $q1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
-    ; CHECK: ST2Twov2d [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:fpr128 = COPY $q0
+    ; CHECK-NEXT: %src2:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:qq = REG_SEQUENCE %src1, %subreg.qsub0, %src2, %subreg.qsub1
+    ; CHECK-NEXT: ST2Twov2d [[REG_SEQUENCE]], %ptr :: (store (<4 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:fpr(<2 x p0>) = COPY $q0
     %src2:fpr(<2 x p0>) = COPY $q1
@@ -208,12 +216,13 @@ body:             |
 
     ; CHECK-LABEL: name: v1i64_ST1Twov1d_s64
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:gpr64all = COPY $x0
-    ; CHECK: %src2:gpr64all = COPY $x1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
-    ; CHECK: ST1Twov1d [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:gpr64all = COPY $x0
+    ; CHECK-NEXT: %src2:gpr64all = COPY $x1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
+    ; CHECK-NEXT: ST1Twov1d [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:gpr(s64) = COPY $x0
     %src2:gpr(s64) = COPY $x1
@@ -232,12 +241,13 @@ body:             |
 
     ; CHECK-LABEL: name: v1i64_ST1Twov1d_p0
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %ptr:gpr64sp = COPY $x0
-    ; CHECK: %src1:gpr64all = COPY $x0
-    ; CHECK: %src2:gpr64all = COPY $x1
-    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
-    ; CHECK: ST1Twov1d [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %ptr:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %src1:gpr64all = COPY $x0
+    ; CHECK-NEXT: %src2:gpr64all = COPY $x1
+    ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:dd = REG_SEQUENCE %src1, %subreg.dsub0, %src2, %subreg.dsub1
+    ; CHECK-NEXT: ST1Twov1d [[REG_SEQUENCE]], %ptr :: (store (<2 x s64>))
+    ; CHECK-NEXT: RET_ReallyLR
     %ptr:gpr(p0) = COPY $x0
     %src1:gpr(p0) = COPY $x0
     %src2:gpr(p0) = COPY $x1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
index 0eb8521424ddb..fd2cc71dd9040 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-stlxr-intrin.mir
@@ -25,11 +25,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_release_i64
     ; CHECK: liveins: $w0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: early-clobber %2:gpr32 = STLXRX [[COPY]], [[COPY1]] :: (volatile store (s64) into %ir.addr)
-    ; CHECK: $w0 = COPY %2
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: early-clobber %2:gpr32 = STLXRX [[COPY]], [[COPY1]] :: (volatile store (s64) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %2
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = COPY $x2
     %3:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stlxr), %1(s64), %2(p0) :: (volatile store (s64) into %ir.addr)
@@ -48,11 +49,12 @@ body:             |
     liveins: $w0, $w1, $x2
     ; CHECK-LABEL: name: test_store_release_i32
     ; CHECK: liveins: $w0, $w1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: early-clobber %3:gpr32 = STLXRW [[COPY]], [[COPY1]] :: (volatile store (s32) into %ir.addr)
-    ; CHECK: $w0 = COPY %3
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: early-clobber %3:gpr32 = STLXRW [[COPY]], [[COPY1]] :: (volatile store (s32) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %3
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(p0) = COPY $x2
     %3:gpr(s64) = G_ZEXT %1(s32)
@@ -73,14 +75,15 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_release_i8
     ; CHECK: liveins: $w0, $w1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
-    ; CHECK: early-clobber %5:gpr32 = STLXRB [[COPY2]], [[COPY1]] :: (volatile store (s8) into %ir.addr)
-    ; CHECK: $w0 = COPY %5
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
+    ; CHECK-NEXT: early-clobber %5:gpr32 = STLXRB [[COPY2]], [[COPY1]] :: (volatile store (s8) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %5
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %3:gpr(s32) = COPY $w1
     %2:gpr(p0) = COPY $x2
     %6:gpr(s64) = G_CONSTANT i64 255
@@ -103,14 +106,15 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_release_i16
     ; CHECK: liveins: $w0, $w1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
-    ; CHECK: early-clobber %5:gpr32 = STLXRH [[COPY2]], [[COPY1]] :: (volatile store (s16) into %ir.addr)
-    ; CHECK: $w0 = COPY %5
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
+    ; CHECK-NEXT: early-clobber %5:gpr32 = STLXRH [[COPY2]], [[COPY1]] :: (volatile store (s16) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %5
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %3:gpr(s32) = COPY $w1
     %2:gpr(p0) = COPY $x2
     %6:gpr(s64) = G_CONSTANT i64 65535

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
index 489868ba5cf5d..5953e5773bfa3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-store-truncating-float.mir
@@ -45,10 +45,11 @@ body:             |
 
     ; CHECK-LABEL: name: truncating_f32
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]].ssub
-    ; CHECK: STRSui [[COPY1]], %stack.0.alloca, 0 :: (store (s32) into %ir.alloca)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]].ssub
+    ; CHECK-NEXT: STRSui [[COPY1]], %stack.0.alloca, 0 :: (store (s32) into %ir.alloca)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:fpr(s64) = COPY $d0
     %1:gpr(p0) = G_FRAME_INDEX %stack.0.alloca
     G_STORE %0(s64), %1(p0) :: (store (s32) into %ir.alloca)
@@ -74,10 +75,11 @@ body:             |
 
     ; CHECK-LABEL: name: truncating_f16
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY [[COPY]].hsub
-    ; CHECK: STRHui [[COPY1]], %stack.0.alloca, 0 :: (store (s16) into %ir.alloca)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY [[COPY]].hsub
+    ; CHECK-NEXT: STRHui [[COPY1]], %stack.0.alloca, 0 :: (store (s16) into %ir.alloca)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:fpr(s64) = COPY $d0
     %1:gpr(p0) = G_FRAME_INDEX %stack.0.alloca
     G_STORE %0(s64), %1(p0) :: (store (s16) into %ir.alloca)
@@ -103,10 +105,11 @@ body:             |
 
     ; CHECK-LABEL: name: truncating_f8
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr8 = COPY [[COPY]].bsub
-    ; CHECK: STRBui [[COPY1]], %stack.0.alloca, 0 :: (store (s8) into %ir.alloca)
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr8 = COPY [[COPY]].bsub
+    ; CHECK-NEXT: STRBui [[COPY1]], %stack.0.alloca, 0 :: (store (s8) into %ir.alloca)
+    ; CHECK-NEXT: RET_ReallyLR
     %0:fpr(s64) = COPY $d0
     %1:gpr(p0) = G_FRAME_INDEX %stack.0.alloca
     G_STORE %0(s64), %1(p0) :: (store (s8) into %ir.alloca)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
index bafd4113301c8..ade4322f5d87e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-store.mir
@@ -61,7 +61,9 @@ body:             |
     liveins: $x0, $x1
 
     ; CHECK-LABEL: name: store_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: STRXui [[COPY1]], [[COPY]], 0 :: (store (s64) into %ir.addr)
     %0(p0) = COPY $x0
@@ -84,7 +86,9 @@ body:             |
     liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRWui [[COPY1]], [[COPY]], 0 :: (store (s32) into %ir.addr)
     %0(p0) = COPY $x0
@@ -107,7 +111,9 @@ body:             |
     liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_s16_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRHHui [[COPY1]], [[COPY]], 0 :: (store (s16) into %ir.addr)
     %0(p0) = COPY $x0
@@ -131,7 +137,9 @@ body:             |
     liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_s8_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRBBui [[COPY1]], [[COPY]], 0 :: (store (s8) into %ir.addr)
     %0(p0) = COPY $x0
@@ -155,7 +163,9 @@ body:             |
     liveins: $x0, $x1
 
     ; CHECK-LABEL: name: store_zero_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: STRXui $xzr, [[COPY]], 0 :: (store (s64) into %ir.addr)
     %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 0
@@ -177,7 +187,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: store_zero_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: STRWui $wzr, [[COPY]], 0 :: (store (s32) into %ir.addr)
     %0(p0) = COPY $x0
     %1(s32) = G_CONSTANT i32 0
@@ -193,7 +205,9 @@ body:             |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: store_zero_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: STRHHui $wzr, [[COPY]], 0 :: (store (s16))
     %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_CONSTANT i16 0
@@ -209,7 +223,9 @@ body:             |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: store_zero_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: STRBBui $wzr, [[COPY]], 0 :: (store (s8))
     %0:gpr(p0) = COPY $x0
     %1:gpr(s8) = G_CONSTANT i8 0
@@ -224,7 +240,9 @@ body:             |
   bb.0:
     liveins: $x0
     ; CHECK-LABEL: name: store_zero_look_through_cst
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: STRXui $xzr, [[COPY]], 0 :: (store (s64) into %ir.addr)
     %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 0
@@ -249,7 +267,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: store_fi_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
     ; CHECK-NEXT: STRXui [[COPY1]], %stack.0.ptr0, 0 :: (store (p0))
     %0(p0) = COPY $x0
@@ -273,7 +293,9 @@ body:             |
     liveins: $x0, $x1
 
     ; CHECK-LABEL: name: store_gep_128_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: STRXui [[COPY1]], [[COPY]], 16 :: (store (s64) into %ir.addr)
     %0(p0) = COPY $x0
@@ -299,7 +321,9 @@ body:             |
     liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_gep_512_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRWui [[COPY1]], [[COPY]], 128 :: (store (s32) into %ir.addr)
     %0(p0) = COPY $x0
@@ -325,7 +349,9 @@ body:             |
     liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_gep_64_s16_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRHHui [[COPY1]], [[COPY]], 32 :: (store (s16) into %ir.addr)
     %0(p0) = COPY $x0
@@ -352,7 +378,9 @@ body:             |
     liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_gep_1_s8_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: STRBBui [[COPY1]], [[COPY]], 1 :: (store (s8) into %ir.addr)
     %0(p0) = COPY $x0
@@ -377,7 +405,9 @@ body:             |
     liveins: $x0, $d1
 
     ; CHECK-LABEL: name: store_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: STRDui [[COPY1]], [[COPY]], 0 :: (store (s64) into %ir.addr)
     %0(p0) = COPY $x0
@@ -400,7 +430,9 @@ body:             |
     liveins: $x0, $s1
 
     ; CHECK-LABEL: name: store_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: STRSui [[COPY1]], [[COPY]], 0 :: (store (s32) into %ir.addr)
     %0(p0) = COPY $x0
@@ -425,7 +457,9 @@ body:             |
     liveins: $x0, $d1
 
     ; CHECK-LABEL: name: store_gep_8_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: STRDui [[COPY1]], [[COPY]], 1 :: (store (s64) into %ir.addr)
     %0(p0) = COPY $x0
@@ -451,7 +485,9 @@ body:             |
     liveins: $x0, $s1
 
     ; CHECK-LABEL: name: store_gep_8_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $s1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK-NEXT: STRSui [[COPY1]], [[COPY]], 2 :: (store (s32) into %ir.addr)
     %0(p0) = COPY $x0
@@ -474,7 +510,9 @@ body:             |
     liveins: $x0, $d1
 
     ; CHECK-LABEL: name: store_v2s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK-NEXT: STRDui [[COPY1]], [[COPY]], 0 :: (store (<2 x s32>) into %ir.addr)
     %0(p0) = COPY $x0
@@ -495,7 +533,9 @@ body:             |
   bb.0:
     liveins: $x0, $d1
     ; CHECK-LABEL: name: store_v2s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $d1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
     ; CHECK-NEXT: STRQui [[COPY1]], [[COPY]], 0 :: (store (<2 x s64>) into %ir.addr, align 8)
     %0(p0) = COPY $x0
@@ -683,7 +723,9 @@ body:             |
     liveins: $x0, $w1, $x2
 
     ; CHECK-LABEL: name: truncstores
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0, $w1, $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: %val32:gpr32 = COPY $w1
     ; CHECK-NEXT: %val64:gpr64 = COPY $x2
     ; CHECK-NEXT: STRBBui %val32, [[COPY]], 0 :: (store (s8))

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
index dbddaf5689264..31ba705fdadb6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
@@ -20,14 +20,15 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_i8
     ; CHECK: liveins: $w0, $w1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
-    ; CHECK: early-clobber %5:gpr32 = STXRB [[COPY2]], [[COPY1]] :: (volatile store (s8) into %ir.addr)
-    ; CHECK: $w0 = COPY %5
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
+    ; CHECK-NEXT: early-clobber %5:gpr32 = STXRB [[COPY2]], [[COPY1]] :: (volatile store (s8) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %5
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %3:gpr(s32) = COPY $w1
     %2:gpr(p0) = COPY $x2
     %6:gpr(s64) = G_CONSTANT i64 255
@@ -51,14 +52,15 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_i16
     ; CHECK: liveins: $w0, $w1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
-    ; CHECK: early-clobber %5:gpr32 = STXRH [[COPY2]], [[COPY1]] :: (volatile store (s16) into %ir.addr)
-    ; CHECK: $w0 = COPY %5
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
+    ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[COPY]], %subreg.sub_32
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32 = COPY [[INSERT_SUBREG]].sub_32
+    ; CHECK-NEXT: early-clobber %5:gpr32 = STXRH [[COPY2]], [[COPY1]] :: (volatile store (s16) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %5
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %3:gpr(s32) = COPY $w1
     %2:gpr(p0) = COPY $x2
     %6:gpr(s64) = G_CONSTANT i64 65535
@@ -82,11 +84,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_i32
     ; CHECK: liveins: $w0, $w1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: early-clobber %3:gpr32 = STXRW [[COPY]], [[COPY1]] :: (volatile store (s32) into %ir.addr)
-    ; CHECK: $w0 = COPY %3
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: early-clobber %3:gpr32 = STXRW [[COPY]], [[COPY1]] :: (volatile store (s32) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %3
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:gpr(s32) = COPY $w1
     %2:gpr(p0) = COPY $x2
     %3:gpr(s64) = G_ZEXT %1(s32)
@@ -108,11 +111,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_store_i64
     ; CHECK: liveins: $w0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
-    ; CHECK: early-clobber %2:gpr32 = STXRX [[COPY]], [[COPY1]] :: (volatile store (s64) into %ir.addr)
-    ; CHECK: $w0 = COPY %2
-    ; CHECK: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK-NEXT: early-clobber %2:gpr32 = STXRX [[COPY]], [[COPY1]] :: (volatile store (s64) into %ir.addr)
+    ; CHECK-NEXT: $w0 = COPY %2
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = COPY $x2
     %3:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stxr), %1(s64), %2(p0) :: (volatile store (s64) into %ir.addr)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
index 25ecce4dd92bf..38addfabfe04b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trap.mir
@@ -25,7 +25,7 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: foo
     ; CHECK: BRK 1
-    ; CHECK: RET_ReallyLR
+    ; CHECK-NEXT: RET_ReallyLR
     G_TRAP
     RET_ReallyLR
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trn.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trn.mir
index be735c15fd916..816eaf53e0d89 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trn.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trn.mir
@@ -17,11 +17,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: TRN1v2i32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[TRN1v2i32_:%[0-9]+]]:fpr64 = TRN1v2i32 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN1v2i32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[TRN1v2i32_:%[0-9]+]]:fpr64 = TRN1v2i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN1v2i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<2 x s32>) = G_TRN1 %0, %1
@@ -40,11 +41,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN1v2i64
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN1v2i64_:%[0-9]+]]:fpr128 = TRN1v2i64 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN1v2i64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN1v2i64_:%[0-9]+]]:fpr128 = TRN1v2i64 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN1v2i64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = COPY $q1
     %2:fpr(<2 x s64>) = G_TRN1 %0, %1
@@ -63,11 +65,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: TRN1v4i16
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[TRN1v4i16_:%[0-9]+]]:fpr64 = TRN1v4i16 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN1v4i16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[TRN1v4i16_:%[0-9]+]]:fpr64 = TRN1v4i16 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN1v4i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = COPY $d1
     %2:fpr(<4 x s16>) = G_TRN1 %0, %1
@@ -86,11 +89,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN1v4i32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN1v4i32_:%[0-9]+]]:fpr128 = TRN1v4i32 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN1v4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN1v4i32_:%[0-9]+]]:fpr128 = TRN1v4i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN1v4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = G_TRN1 %0, %1
@@ -109,11 +113,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: TRN1v8i8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[TRN1v8i8_:%[0-9]+]]:fpr64 = TRN1v8i8 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN1v8i8_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[TRN1v8i8_:%[0-9]+]]:fpr64 = TRN1v8i8 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN1v8i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s8>) = COPY $d1
     %2:fpr(<8 x s8>) = G_TRN1 %0, %1
@@ -132,11 +137,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN1v8i16
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN1v8i16_:%[0-9]+]]:fpr128 = TRN1v8i16 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN1v8i16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN1v8i16_:%[0-9]+]]:fpr128 = TRN1v8i16 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN1v8i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = COPY $q1
     %2:fpr(<8 x s16>) = G_TRN1 %0, %1
@@ -155,11 +161,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN1v16i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN1v16i8_:%[0-9]+]]:fpr128 = TRN1v16i8 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN1v16i8_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN1v16i8_:%[0-9]+]]:fpr128 = TRN1v16i8 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN1v16i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<16 x s8>) = COPY $q0
     %1:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = G_TRN1 %0, %1
@@ -178,11 +185,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: TRN2v2i32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[TRN2v2i32_:%[0-9]+]]:fpr64 = TRN2v2i32 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN2v2i32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[TRN2v2i32_:%[0-9]+]]:fpr64 = TRN2v2i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN2v2i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<2 x s32>) = G_TRN2 %0, %1
@@ -201,11 +209,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN2v2i64
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN2v2i64_:%[0-9]+]]:fpr128 = TRN2v2i64 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN2v2i64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN2v2i64_:%[0-9]+]]:fpr128 = TRN2v2i64 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN2v2i64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = COPY $q1
     %2:fpr(<2 x s64>) = G_TRN2 %0, %1
@@ -224,11 +233,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: TRN2v4i16
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[TRN2v4i16_:%[0-9]+]]:fpr64 = TRN2v4i16 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN2v4i16_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[TRN2v4i16_:%[0-9]+]]:fpr64 = TRN2v4i16 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN2v4i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s16>) = COPY $d1
     %2:fpr(<4 x s16>) = G_TRN2 %0, %1
@@ -247,11 +257,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN2v4i32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN2v4i32_:%[0-9]+]]:fpr128 = TRN2v4i32 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN2v4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN2v4i32_:%[0-9]+]]:fpr128 = TRN2v4i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN2v4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = G_TRN2 %0, %1
@@ -270,11 +281,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: TRN2v8i8
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[TRN2v8i8_:%[0-9]+]]:fpr64 = TRN2v8i8 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[TRN2v8i8_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[TRN2v8i8_:%[0-9]+]]:fpr64 = TRN2v8i8 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[TRN2v8i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s8>) = COPY $d1
     %2:fpr(<8 x s8>) = G_TRN2 %0, %1
@@ -293,11 +305,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN2v8i16
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN2v8i16_:%[0-9]+]]:fpr128 = TRN2v8i16 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN2v8i16_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN2v8i16_:%[0-9]+]]:fpr128 = TRN2v8i16 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN2v8i16_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<8 x s16>) = COPY $q0
     %1:fpr(<8 x s16>) = COPY $q1
     %2:fpr(<8 x s16>) = G_TRN2 %0, %1
@@ -316,11 +329,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: TRN2v16i8
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[TRN2v16i8_:%[0-9]+]]:fpr128 = TRN2v16i8 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[TRN2v16i8_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[TRN2v16i8_:%[0-9]+]]:fpr128 = TRN2v16i8 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[TRN2v16i8_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<16 x s8>) = COPY $q0
     %1:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = G_TRN2 %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
index 374b5a109d8d1..6c91a374e67f7 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
@@ -25,7 +25,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: trunc_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32sp = COPY [[COPY]].sub_32
     ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %0(s64) = COPY $x0
@@ -47,7 +49,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: trunc_s8_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
     ; CHECK-NEXT: $w0 = COPY [[COPY2]]
@@ -71,7 +75,9 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: trunc_s8_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
     ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %0(s32) = COPY $w0
@@ -93,7 +99,9 @@ body:             |
     liveins: $q0
 
     ; CHECK-LABEL: name: trunc_s64_s128
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]].dsub
     ; CHECK-NEXT: $x0 = COPY [[COPY1]]
     %0(s128) = COPY $q0
@@ -114,7 +122,9 @@ body:             |
     liveins: $q0
 
     ; CHECK-LABEL: name: trunc_s32_s128
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: liveins: $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]].ssub
     ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %0(s128) = COPY $q0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
index 55984b116e1ca..9dcd085fc4fd5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
@@ -68,7 +68,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWri when we can fold in a constant.
-    ;
     ; CHECK-LABEL: name: uaddo_s32_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -93,7 +92,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWrs when we can fold in a shift.
-    ;
     ; CHECK-LABEL: name: uaddo_s32_shifted
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -121,7 +119,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get SUBSWri when we can fold in a negative constant.
-    ;
     ; CHECK-LABEL: name: uaddo_s32_neg_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
index 1fad9d41aa404..a9295194779c8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
@@ -68,7 +68,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWri when we can fold in a constant.
-    ;
     ; CHECK-LABEL: name: usubo_s32_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -93,7 +92,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get ADDSWrs when we can fold in a shift.
-    ;
     ; CHECK-LABEL: name: usubo_s32_shifted
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}
@@ -121,7 +119,6 @@ body:             |
   bb.1.entry:
     liveins: $w0, $w1, $x2
     ; Check that we get SUBSWri when we can fold in a negative constant.
-    ;
     ; CHECK-LABEL: name: usubo_s32_neg_imm
     ; CHECK: liveins: $w0, $w1, $x2
     ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-uzp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-uzp.mir
index 1d5affea6c10e..83ca512889cc1 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-uzp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-uzp.mir
@@ -16,11 +16,12 @@ body:             |
 
     ; CHECK-LABEL: name: uzp1_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[UZP1v4i32_:%[0-9]+]]:fpr128 = UZP1v4i32 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[UZP1v4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[UZP1v4i32_:%[0-9]+]]:fpr128 = UZP1v4i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[UZP1v4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = G_UZP1 %0, %1
@@ -39,11 +40,12 @@ body:             |
 
     ; CHECK-LABEL: name: uzp2_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[UZP2v4i32_:%[0-9]+]]:fpr128 = UZP2v4i32 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[UZP2v4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[UZP2v4i32_:%[0-9]+]]:fpr128 = UZP2v4i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[UZP2v4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
   %0:fpr(<4 x s32>) = COPY $q0
   %1:fpr(<4 x s32>) = COPY $q1
   %2:fpr(<4 x s32>) = G_UZP2 %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-with-no-legality-check.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-with-no-legality-check.mir
index 93d799f12c907..f49d8e733156d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-with-no-legality-check.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-with-no-legality-check.mir
@@ -19,10 +19,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule14_id188_at_idx1067
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (s128))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRQui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (s128))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRQui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(s128) = G_LOAD %1(p0) :: (load (s128))
     $noreg = PATCHABLE_RET %0(s128)
@@ -46,11 +47,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule21_id2237_at_idx1449
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRDui [[COPY1]], [[COPY2]], 0 :: (store (<8 x s8>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRDui [[COPY1]], [[COPY2]], 0 :: (store (<8 x s8>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d1
     %0:fpr(<8 x s8>) = COPY $d0
     G_STORE %0(<8 x s8>), %1(p0) :: (store (<8 x s8>))
@@ -75,11 +77,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule22_id2238_at_idx1505
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRDui [[COPY1]], [[COPY2]], 0 :: (store (<4 x s16>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRDui [[COPY1]], [[COPY2]], 0 :: (store (<4 x s16>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d1
     %0:fpr(<4 x s16>) = COPY $d0
     G_STORE %0(<4 x s16>), %1(p0) :: (store (<4 x s16>))
@@ -104,11 +107,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule27_id2243_at_idx1781
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<4 x s32>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<4 x s32>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d0
     %0:fpr(<4 x s32>) = COPY $q0
     G_STORE %0(<4 x s32>), %1(p0) :: (store (<4 x s32>))
@@ -133,11 +137,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule28_id2244_at_idx1837
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<2 x s64>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<2 x s64>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d0
     %0:fpr(<2 x s64>) = COPY $q0
     G_STORE %0(<2 x s64>), %1(p0) :: (store (<2 x s64>))
@@ -162,11 +167,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule29_id2245_at_idx1893
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<16 x s8>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<16 x s8>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d0
     %0:fpr(<16 x s8>) = COPY $q0
     G_STORE %0(<16 x s8>), %1(p0) :: (store (<16 x s8>))
@@ -191,11 +197,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule30_id2246_at_idx1949
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<8 x s16>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRQui [[COPY1]], [[COPY2]], 0 :: (store (<8 x s16>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d0
     %0:fpr(<8 x s16>) = COPY $q0
     G_STORE %0(<8 x s16>), %1(p0) :: (store (<8 x s16>))
@@ -220,11 +227,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule34_id2250_at_idx2173
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: STRQui [[COPY1]], [[COPY2]], 0 :: (store (s128))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: STRQui [[COPY1]], [[COPY2]], 0 :: (store (s128))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:fpr(p0) = COPY $d0
     %0:fpr(s128) = COPY $q0
     G_STORE %0(s128), %1(p0) :: (store (s128))
@@ -250,9 +258,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule92_id2150_at_idx7770
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load (s8))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRBBui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load (s8))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRBBui]]
     %2:gpr(p0) = COPY $x0
     %0:gpr(s32) = G_LOAD %2(p0) :: (load (s8))
     $noreg = PATCHABLE_RET %0(s32)
@@ -277,11 +286,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule96_id2146_at_idx8070
     ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[LDRBui:%[0-9]+]]:fpr8 = LDRBui [[COPY]], 0 :: (load (s8))
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[LDRBui]]
-    ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 0, 7
-    ; CHECK: $noreg = PATCHABLE_RET [[UBFMWri]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[LDRBui:%[0-9]+]]:fpr8 = LDRBui [[COPY]], 0 :: (load (s8))
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[LDRBui]]
+    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY1]], 0, 7
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UBFMWri]]
     %2:gpr(p0) = COPY $x0
     %0:fpr(s8) = G_LOAD %2(p0) :: (load (s8))
     %1:gpr(s32) = G_ZEXT %0(s8)
@@ -305,10 +315,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule129_id2130_at_idx10828
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY1]], 0 :: (load (<8 x s8>))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRDui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY1]], 0 :: (load (<8 x s8>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRDui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(<8 x s8>) = G_LOAD %1(p0) :: (load (<8 x s8>))
     $noreg = PATCHABLE_RET %0(<8 x s8>)
@@ -331,10 +342,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule130_id2131_at_idx10884
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY1]], 0 :: (load (<4 x s16>))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRDui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY1]], 0 :: (load (<4 x s16>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRDui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(<4 x s16>) = G_LOAD %1(p0) :: (load (<4 x s16>))
     $noreg = PATCHABLE_RET %0(<4 x s16>)
@@ -357,10 +369,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule135_id2136_at_idx11160
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<4 x s32>))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRQui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<4 x s32>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRQui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(<4 x s32>) = G_LOAD %1(p0) :: (load (<4 x s32>))
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -383,10 +396,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule136_id2137_at_idx11216
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<2 x s64>))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRQui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<2 x s64>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRQui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(<2 x s64>) = G_LOAD %1(p0) :: (load (<2 x s64>))
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -409,10 +423,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule137_id2138_at_idx11272
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<16 x s8>))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRQui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<16 x s8>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRQui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(<16 x s8>) = G_LOAD %1(p0) :: (load (<16 x s8>))
     $noreg = PATCHABLE_RET %0(<16 x s8>)
@@ -435,10 +450,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule138_id2139_at_idx11328
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
-    ; CHECK: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<8 x s16>))
-    ; CHECK: $noreg = PATCHABLE_RET [[LDRQui]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[COPY]]
+    ; CHECK-NEXT: [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY1]], 0 :: (load (<8 x s16>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[LDRQui]]
     %1:fpr(p0) = COPY $d0
     %0:fpr(<8 x s16>) = G_LOAD %1(p0) :: (load (<8 x s16>))
     $noreg = PATCHABLE_RET %0(<8 x s16>)
@@ -467,11 +483,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule339_id2369_at_idx26608
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FNMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMADDSrrr [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMADDSrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FNMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMADDSrrr [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMADDSrrr]]
     %5:fpr(s32) = COPY $s2
     %4:fpr(s32) = COPY $s1
     %3:fpr(s32) = COPY $s0
@@ -504,11 +521,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule340_id2370_at_idx26714
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNMADDDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMADDDrrr [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMADDDrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNMADDDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMADDDrrr [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMADDDrrr]]
     %5:fpr(s64) = COPY $d2
     %4:fpr(s64) = COPY $d1
     %3:fpr(s64) = COPY $d0
@@ -541,11 +559,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule341_id2371_at_idx26820
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FNMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMADDSrrr [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMADDSrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FNMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMADDSrrr [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMADDSrrr]]
     %5:fpr(s32) = COPY $s2
     %4:fpr(s32) = COPY $s1
     %3:fpr(s32) = COPY $s0
@@ -578,11 +597,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule342_id2372_at_idx26926
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNMADDDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMADDDrrr [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMADDDrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNMADDDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMADDDrrr [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMADDDrrr]]
     %5:fpr(s64) = COPY $d2
     %4:fpr(s64) = COPY $d1
     %3:fpr(s64) = COPY $d0
@@ -613,10 +633,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule343_id1266_at_idx27032
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SADDLv8i8_v8i16_:%[0-9]+]]:fpr128 = SADDLv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDLv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SADDLv8i8_v8i16_:%[0-9]+]]:fpr128 = SADDLv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDLv8i8_v8i16_]]
     %4:fpr(<8 x s8>) = COPY $d1
     %3:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_SEXT %4(<8 x s8>)
@@ -646,10 +667,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule344_id1268_at_idx27128
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SADDLv4i16_v4i32_:%[0-9]+]]:fpr128 = SADDLv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDLv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SADDLv4i16_v4i32_:%[0-9]+]]:fpr128 = SADDLv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDLv4i16_v4i32_]]
     %4:fpr(<4 x s16>) = COPY $d1
     %3:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_SEXT %4(<4 x s16>)
@@ -679,10 +701,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule345_id1270_at_idx27224
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SADDLv2i32_v2i64_:%[0-9]+]]:fpr128 = SADDLv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDLv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SADDLv2i32_v2i64_:%[0-9]+]]:fpr128 = SADDLv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDLv2i32_v2i64_]]
     %4:fpr(<2 x s32>) = COPY $d1
     %3:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_SEXT %4(<2 x s32>)
@@ -712,10 +735,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule346_id1326_at_idx27320
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[UADDLv8i8_v8i16_:%[0-9]+]]:fpr128 = UADDLv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDLv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[UADDLv8i8_v8i16_:%[0-9]+]]:fpr128 = UADDLv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDLv8i8_v8i16_]]
     %4:fpr(<8 x s8>) = COPY $d1
     %3:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_ZEXT %4(<8 x s8>)
@@ -745,10 +769,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule347_id1328_at_idx27416
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[UADDLv4i16_v4i32_:%[0-9]+]]:fpr128 = UADDLv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDLv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[UADDLv4i16_v4i32_:%[0-9]+]]:fpr128 = UADDLv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDLv4i16_v4i32_]]
     %4:fpr(<4 x s16>) = COPY $d1
     %3:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_ZEXT %4(<4 x s16>)
@@ -778,10 +803,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule348_id1330_at_idx27512
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[UADDLv2i32_v2i64_:%[0-9]+]]:fpr128 = UADDLv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDLv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[UADDLv2i32_v2i64_:%[0-9]+]]:fpr128 = UADDLv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDLv2i32_v2i64_]]
     %4:fpr(<2 x s32>) = COPY $d1
     %3:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_ZEXT %4(<2 x s32>)
@@ -811,10 +837,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule349_id1308_at_idx27608
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSUBLv8i8_v8i16_:%[0-9]+]]:fpr128 = SSUBLv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SSUBLv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSUBLv8i8_v8i16_:%[0-9]+]]:fpr128 = SSUBLv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSUBLv8i8_v8i16_]]
     %4:fpr(<8 x s8>) = COPY $d1
     %3:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_SEXT %4(<8 x s8>)
@@ -844,10 +871,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule350_id1310_at_idx27704
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSUBLv4i16_v4i32_:%[0-9]+]]:fpr128 = SSUBLv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SSUBLv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSUBLv4i16_v4i32_:%[0-9]+]]:fpr128 = SSUBLv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSUBLv4i16_v4i32_]]
     %4:fpr(<4 x s16>) = COPY $d1
     %3:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_SEXT %4(<4 x s16>)
@@ -877,10 +905,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule351_id1312_at_idx27800
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSUBLv2i32_v2i64_:%[0-9]+]]:fpr128 = SSUBLv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SSUBLv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSUBLv2i32_v2i64_:%[0-9]+]]:fpr128 = SSUBLv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSUBLv2i32_v2i64_]]
     %4:fpr(<2 x s32>) = COPY $d1
     %3:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_SEXT %4(<2 x s32>)
@@ -910,10 +939,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule352_id1356_at_idx27896
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USUBLv8i8_v8i16_:%[0-9]+]]:fpr128 = USUBLv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[USUBLv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USUBLv8i8_v8i16_:%[0-9]+]]:fpr128 = USUBLv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USUBLv8i8_v8i16_]]
     %4:fpr(<8 x s8>) = COPY $d1
     %3:fpr(<8 x s8>) = COPY $d0
     %1:fpr(<8 x s16>) = G_ZEXT %4(<8 x s8>)
@@ -943,10 +973,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule353_id1358_at_idx27992
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USUBLv4i16_v4i32_:%[0-9]+]]:fpr128 = USUBLv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[USUBLv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USUBLv4i16_v4i32_:%[0-9]+]]:fpr128 = USUBLv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USUBLv4i16_v4i32_]]
     %4:fpr(<4 x s16>) = COPY $d1
     %3:fpr(<4 x s16>) = COPY $d0
     %1:fpr(<4 x s32>) = G_ZEXT %4(<4 x s16>)
@@ -976,10 +1007,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule354_id1360_at_idx28088
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USUBLv2i32_v2i64_:%[0-9]+]]:fpr128 = USUBLv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[USUBLv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USUBLv2i32_v2i64_:%[0-9]+]]:fpr128 = USUBLv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USUBLv2i32_v2i64_]]
     %4:fpr(<2 x s32>) = COPY $d1
     %3:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s64>) = G_ZEXT %4(<2 x s32>)
@@ -1010,11 +1042,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule928_id2367_at_idx60019
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FMSUBSrrr:%[0-9]+]]:fpr32 = nofpexcept FMSUBSrrr [[COPY]], [[COPY2]], [[COPY1]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMSUBSrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FMSUBSrrr:%[0-9]+]]:fpr32 = nofpexcept FMSUBSrrr [[COPY]], [[COPY2]], [[COPY1]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMSUBSrrr]]
     %4:fpr(s32) = COPY $s2
     %3:fpr(s32) = COPY $s1
     %2:fpr(s32) = COPY $s0
@@ -1045,11 +1078,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule929_id2368_at_idx60105
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMSUBDrrr:%[0-9]+]]:fpr64 = nofpexcept FMSUBDrrr [[COPY]], [[COPY2]], [[COPY1]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMSUBDrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMSUBDrrr:%[0-9]+]]:fpr64 = nofpexcept FMSUBDrrr [[COPY]], [[COPY2]], [[COPY1]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMSUBDrrr]]
     %4:fpr(s64) = COPY $d2
     %3:fpr(s64) = COPY $d1
     %2:fpr(s64) = COPY $d0
@@ -1080,11 +1114,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule930_id2446_at_idx60191
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMLSv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMLSv2f32 [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLSv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMLSv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMLSv2f32 [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLSv2f32_]]
     %4:fpr(<2 x s32>) = COPY $d2
     %3:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<2 x s32>) = COPY $d0
@@ -1115,11 +1150,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule931_id2447_at_idx60277
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMLSv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMLSv4f32 [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLSv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMLSv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMLSv4f32 [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLSv4f32_]]
     %4:fpr(<4 x s32>) = COPY $q2
     %3:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = COPY $q0
@@ -1150,11 +1186,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule932_id2448_at_idx60363
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMLSv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMLSv2f64 [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLSv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMLSv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMLSv2f64 [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLSv2f64_]]
     %4:fpr(<2 x s64>) = COPY $q2
     %3:fpr(<2 x s64>) = COPY $q1
     %2:fpr(<2 x s64>) = COPY $q0
@@ -1185,11 +1222,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule934_id429_at_idx60537
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FMSUBSrrr:%[0-9]+]]:fpr32 = nofpexcept FMSUBSrrr [[COPY2]], [[COPY]], [[COPY1]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMSUBSrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FMSUBSrrr:%[0-9]+]]:fpr32 = nofpexcept FMSUBSrrr [[COPY2]], [[COPY]], [[COPY1]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMSUBSrrr]]
     %4:fpr(s32) = COPY $s2
     %3:fpr(s32) = COPY $s1
     %2:fpr(s32) = COPY $s0
@@ -1220,11 +1258,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule935_id430_at_idx60625
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMSUBDrrr:%[0-9]+]]:fpr64 = nofpexcept FMSUBDrrr [[COPY2]], [[COPY]], [[COPY1]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMSUBDrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMSUBDrrr:%[0-9]+]]:fpr64 = nofpexcept FMSUBDrrr [[COPY2]], [[COPY]], [[COPY1]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMSUBDrrr]]
     %4:fpr(s64) = COPY $d2
     %3:fpr(s64) = COPY $d1
     %2:fpr(s64) = COPY $d0
@@ -1255,11 +1294,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule938_id899_at_idx60889
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMLSv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMLSv2f32 [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLSv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMLSv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMLSv2f32 [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLSv2f32_]]
     %4:fpr(<2 x s32>) = COPY $d2
     %3:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<2 x s32>) = COPY $d0
@@ -1290,11 +1330,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule939_id900_at_idx60977
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMLSv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMLSv4f32 [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLSv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMLSv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMLSv4f32 [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLSv4f32_]]
     %4:fpr(<4 x s32>) = COPY $q2
     %3:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = COPY $q0
@@ -1325,11 +1366,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule940_id901_at_idx61065
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMLSv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMLSv2f64 [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLSv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMLSv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMLSv2f64 [[COPY1]], [[COPY2]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLSv2f64_]]
     %4:fpr(<2 x s64>) = COPY $q2
     %3:fpr(<2 x s64>) = COPY $q1
     %2:fpr(<2 x s64>) = COPY $q0
@@ -1360,11 +1402,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule942_id435_at_idx61241
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FNMSUBSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMSUBSrrr [[COPY2]], [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMSUBSrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FNMSUBSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMSUBSrrr [[COPY2]], [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMSUBSrrr]]
     %4:fpr(s32) = COPY $s2
     %3:fpr(s32) = COPY $s1
     %2:fpr(s32) = COPY $s0
@@ -1395,11 +1438,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule943_id436_at_idx61329
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNMSUBDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMSUBDrrr [[COPY2]], [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMSUBDrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNMSUBDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMSUBDrrr [[COPY2]], [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMSUBDrrr]]
     %4:fpr(s64) = COPY $d2
     %3:fpr(s64) = COPY $d1
     %2:fpr(s64) = COPY $d0
@@ -1430,12 +1474,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule944_id3803_at_idx61417
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv8i8_:%[0-9]+]]:fpr64 = ADDv8i8 [[MULv8i8_]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv8i8_:%[0-9]+]]:fpr64 = ADDv8i8 [[MULv8i8_]], [[COPY2]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv8i8_]]
     %4:fpr(<8 x s8>) = COPY $d2
     %3:fpr(<8 x s8>) = COPY $d1
     %2:fpr(<8 x s8>) = COPY $d0
@@ -1466,12 +1511,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule945_id3804_at_idx61505
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv16i8_:%[0-9]+]]:fpr128 = ADDv16i8 [[MULv16i8_]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv16i8_:%[0-9]+]]:fpr128 = ADDv16i8 [[MULv16i8_]], [[COPY2]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv16i8_]]
     %4:fpr(<16 x s8>) = COPY $q2
     %3:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = COPY $q0
@@ -1502,12 +1548,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule946_id3805_at_idx61593
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[MULv4i16_]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[MULv4i16_]], [[COPY2]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv4i16_]]
     %4:fpr(<4 x s16>) = COPY $d2
     %3:fpr(<4 x s16>) = COPY $d1
     %2:fpr(<4 x s16>) = COPY $d0
@@ -1538,12 +1585,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule947_id3806_at_idx61681
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv8i16_:%[0-9]+]]:fpr128 = ADDv8i16 [[MULv8i16_]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv8i16_:%[0-9]+]]:fpr128 = ADDv8i16 [[MULv8i16_]], [[COPY2]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv8i16_]]
     %4:fpr(<8 x s16>) = COPY $q2
     %3:fpr(<8 x s16>) = COPY $q1
     %2:fpr(<8 x s16>) = COPY $q0
@@ -1572,10 +1620,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule950_id3869_at_idx61945
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = SADDWv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDWv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = SADDWv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDWv8i8_v8i16_]]
     %3:fpr(<8 x s8>) = COPY $d0
     %2:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_SEXT %3(<8 x s8>)
@@ -1603,10 +1652,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule951_id3871_at_idx62021
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = SADDWv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDWv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = SADDWv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDWv4i16_v4i32_]]
     %3:fpr(<4 x s16>) = COPY $d0
     %2:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_SEXT %3(<4 x s16>)
@@ -1634,10 +1684,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule952_id3873_at_idx62097
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = SADDWv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDWv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = SADDWv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDWv2i32_v2i64_]]
     %3:fpr(<2 x s32>) = COPY $d0
     %2:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_SEXT %3(<2 x s32>)
@@ -1665,10 +1716,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule953_id3887_at_idx62173
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = UADDWv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDWv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = UADDWv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDWv8i8_v8i16_]]
     %3:fpr(<8 x s8>) = COPY $d0
     %2:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_ZEXT %3(<8 x s8>)
@@ -1696,10 +1748,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule954_id3889_at_idx62249
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = UADDWv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDWv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = UADDWv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDWv4i16_v4i32_]]
     %3:fpr(<4 x s16>) = COPY $d0
     %2:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_ZEXT %3(<4 x s16>)
@@ -1727,10 +1780,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule955_id3891_at_idx62325
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = UADDWv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDWv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = UADDWv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDWv2i32_v2i64_]]
     %3:fpr(<2 x s32>) = COPY $d0
     %2:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_ZEXT %3(<2 x s32>)
@@ -1760,12 +1814,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule956_id927_at_idx62401
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv8i8_:%[0-9]+]]:fpr64 = ADDv8i8 [[COPY2]], [[MULv8i8_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv8i8_:%[0-9]+]]:fpr64 = ADDv8i8 [[COPY2]], [[MULv8i8_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv8i8_]]
     %4:fpr(<8 x s8>) = COPY $d2
     %3:fpr(<8 x s8>) = COPY $d1
     %2:fpr(<8 x s8>) = COPY $d0
@@ -1796,12 +1851,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule957_id928_at_idx62489
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv16i8_:%[0-9]+]]:fpr128 = ADDv16i8 [[COPY2]], [[MULv16i8_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv16i8_:%[0-9]+]]:fpr128 = ADDv16i8 [[COPY2]], [[MULv16i8_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv16i8_]]
     %4:fpr(<16 x s8>) = COPY $q2
     %3:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = COPY $q0
@@ -1832,12 +1888,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule958_id929_at_idx62577
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[COPY2]], [[MULv4i16_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[COPY2]], [[MULv4i16_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv4i16_]]
     %4:fpr(<4 x s16>) = COPY $d2
     %3:fpr(<4 x s16>) = COPY $d1
     %2:fpr(<4 x s16>) = COPY $d0
@@ -1868,12 +1925,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule959_id930_at_idx62665
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
-    ; CHECK: [[ADDv8i16_:%[0-9]+]]:fpr128 = ADDv8i16 [[COPY2]], [[MULv8i16_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[ADDv8i16_:%[0-9]+]]:fpr128 = ADDv8i16 [[COPY2]], [[MULv8i16_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv8i16_]]
     %4:fpr(<8 x s16>) = COPY $q2
     %3:fpr(<8 x s16>) = COPY $q1
     %2:fpr(<8 x s16>) = COPY $q0
@@ -1902,10 +1960,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule962_id1272_at_idx62929
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = SADDWv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDWv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = SADDWv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDWv8i8_v8i16_]]
     %3:fpr(<8 x s8>) = COPY $d0
     %2:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_SEXT %3(<8 x s8>)
@@ -1933,10 +1992,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule963_id1274_at_idx63005
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = SADDWv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDWv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = SADDWv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDWv4i16_v4i32_]]
     %3:fpr(<4 x s16>) = COPY $d0
     %2:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_SEXT %3(<4 x s16>)
@@ -1964,10 +2024,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule964_id1276_at_idx63081
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = SADDWv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SADDWv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = SADDWv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SADDWv2i32_v2i64_]]
     %3:fpr(<2 x s32>) = COPY $d0
     %2:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_SEXT %3(<2 x s32>)
@@ -1995,10 +2056,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule965_id1332_at_idx63157
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = UADDWv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDWv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UADDWv8i8_v8i16_:%[0-9]+]]:fpr128 = UADDWv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDWv8i8_v8i16_]]
     %3:fpr(<8 x s8>) = COPY $d0
     %2:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_ZEXT %3(<8 x s8>)
@@ -2026,10 +2088,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule966_id1334_at_idx63233
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = UADDWv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDWv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UADDWv4i16_v4i32_:%[0-9]+]]:fpr128 = UADDWv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDWv4i16_v4i32_]]
     %3:fpr(<4 x s16>) = COPY $d0
     %2:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_ZEXT %3(<4 x s16>)
@@ -2057,10 +2120,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule967_id1336_at_idx63309
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = UADDWv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UADDWv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UADDWv2i32_v2i64_:%[0-9]+]]:fpr128 = UADDWv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UADDWv2i32_v2i64_]]
     %3:fpr(<2 x s32>) = COPY $d0
     %2:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_ZEXT %3(<2 x s32>)
@@ -2090,12 +2154,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule977_id933_at_idx64051
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: [[SUBv8i8_:%[0-9]+]]:fpr64 = SUBv8i8 [[COPY2]], [[MULv8i8_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[SUBv8i8_:%[0-9]+]]:fpr64 = SUBv8i8 [[COPY2]], [[MULv8i8_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv8i8_]]
     %4:fpr(<8 x s8>) = COPY $d2
     %3:fpr(<8 x s8>) = COPY $d1
     %2:fpr(<8 x s8>) = COPY $d0
@@ -2126,12 +2191,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule978_id934_at_idx64139
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: [[SUBv16i8_:%[0-9]+]]:fpr128 = SUBv16i8 [[COPY2]], [[MULv16i8_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[SUBv16i8_:%[0-9]+]]:fpr128 = SUBv16i8 [[COPY2]], [[MULv16i8_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv16i8_]]
     %4:fpr(<16 x s8>) = COPY $q2
     %3:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = COPY $q0
@@ -2162,12 +2228,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule979_id935_at_idx64227
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
-    ; CHECK: [[SUBv4i16_:%[0-9]+]]:fpr64 = SUBv4i16 [[COPY2]], [[MULv4i16_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[SUBv4i16_:%[0-9]+]]:fpr64 = SUBv4i16 [[COPY2]], [[MULv4i16_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv4i16_]]
     %4:fpr(<4 x s16>) = COPY $d2
     %3:fpr(<4 x s16>) = COPY $d1
     %2:fpr(<4 x s16>) = COPY $d0
@@ -2198,12 +2265,13 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule980_id936_at_idx64315
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
-    ; CHECK: [[SUBv8i16_:%[0-9]+]]:fpr128 = SUBv8i16 [[COPY2]], [[MULv8i16_]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: [[SUBv8i16_:%[0-9]+]]:fpr128 = SUBv8i16 [[COPY2]], [[MULv8i16_]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv8i16_]]
     %4:fpr(<8 x s16>) = COPY $q2
     %3:fpr(<8 x s16>) = COPY $q1
     %2:fpr(<8 x s16>) = COPY $q0
@@ -2232,10 +2300,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule983_id1314_at_idx64579
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SSUBWv8i8_v8i16_:%[0-9]+]]:fpr128 = SSUBWv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SSUBWv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SSUBWv8i8_v8i16_:%[0-9]+]]:fpr128 = SSUBWv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSUBWv8i8_v8i16_]]
     %3:fpr(<8 x s8>) = COPY $d0
     %2:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_SEXT %3(<8 x s8>)
@@ -2263,10 +2332,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule984_id1316_at_idx64655
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SSUBWv4i16_v4i32_:%[0-9]+]]:fpr128 = SSUBWv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SSUBWv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SSUBWv4i16_v4i32_:%[0-9]+]]:fpr128 = SSUBWv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSUBWv4i16_v4i32_]]
     %3:fpr(<4 x s16>) = COPY $d0
     %2:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_SEXT %3(<4 x s16>)
@@ -2294,10 +2364,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule985_id1318_at_idx64731
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SSUBWv2i32_v2i64_:%[0-9]+]]:fpr128 = SSUBWv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SSUBWv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SSUBWv2i32_v2i64_:%[0-9]+]]:fpr128 = SSUBWv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSUBWv2i32_v2i64_]]
     %3:fpr(<2 x s32>) = COPY $d0
     %2:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_SEXT %3(<2 x s32>)
@@ -2325,10 +2396,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule986_id1362_at_idx64807
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[USUBWv8i8_v8i16_:%[0-9]+]]:fpr128 = USUBWv8i8_v8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[USUBWv8i8_v8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[USUBWv8i8_v8i16_:%[0-9]+]]:fpr128 = USUBWv8i8_v8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USUBWv8i8_v8i16_]]
     %3:fpr(<8 x s8>) = COPY $d0
     %2:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_ZEXT %3(<8 x s8>)
@@ -2356,10 +2428,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule987_id1364_at_idx64883
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[USUBWv4i16_v4i32_:%[0-9]+]]:fpr128 = USUBWv4i16_v4i32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[USUBWv4i16_v4i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[USUBWv4i16_v4i32_:%[0-9]+]]:fpr128 = USUBWv4i16_v4i32 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USUBWv4i16_v4i32_]]
     %3:fpr(<4 x s16>) = COPY $d0
     %2:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_ZEXT %3(<4 x s16>)
@@ -2387,10 +2460,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule988_id1366_at_idx64959
     ; CHECK: liveins: $q0, $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[USUBWv2i32_v2i64_:%[0-9]+]]:fpr128 = USUBWv2i32_v2i64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[USUBWv2i32_v2i64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[USUBWv2i32_v2i64_:%[0-9]+]]:fpr128 = USUBWv2i32_v2i64 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USUBWv2i32_v2i64_]]
     %3:fpr(<2 x s32>) = COPY $d0
     %2:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_ZEXT %3(<2 x s32>)
@@ -2420,11 +2494,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule990_id432_at_idx65123
     ; CHECK: liveins: $s0, $s1, $s2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FNMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMADDSrrr [[COPY2]], [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMADDSrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FNMADDSrrr:%[0-9]+]]:fpr32 = nofpexcept FNMADDSrrr [[COPY2]], [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMADDSrrr]]
     %4:fpr(s32) = COPY $s2
     %3:fpr(s32) = COPY $s1
     %2:fpr(s32) = COPY $s0
@@ -2455,11 +2530,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule991_id433_at_idx65211
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNMADDDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMADDDrrr [[COPY2]], [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMADDDrrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNMADDDrrr:%[0-9]+]]:fpr64 = nofpexcept FNMADDDrrr [[COPY2]], [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMADDDrrr]]
     %4:fpr(s64) = COPY $d2
     %3:fpr(s64) = COPY $d1
     %2:fpr(s64) = COPY $d0
@@ -2488,10 +2564,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule993_id420_at_idx65375
     ; CHECK: liveins: $s0, $s1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FNMULSrr:%[0-9]+]]:fpr32 = nofpexcept FNMULSrr [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMULSrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FNMULSrr:%[0-9]+]]:fpr32 = nofpexcept FNMULSrr [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMULSrr]]
     %3:fpr(s32) = COPY $s1
     %2:fpr(s32) = COPY $s0
     %0:fpr(s32) = G_FMUL %2, %3
@@ -2519,10 +2596,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule994_id421_at_idx65451
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNMULDrr:%[0-9]+]]:fpr64 = nofpexcept FNMULDrr [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNMULDrr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNMULDrr:%[0-9]+]]:fpr64 = nofpexcept FNMULDrr [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNMULDrr]]
     %3:fpr(s64) = COPY $d1
     %2:fpr(s64) = COPY $d0
     %0:fpr(s64) = G_FMUL %2, %3
@@ -2548,11 +2626,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1230_id2969_at_idx81784
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64all = COPY $x0
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[COPY1]]
-    ; CHECK: ST1Onev8b [[COPY2]], [[COPY]] :: (store (<8 x s8>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[COPY1]]
+    ; CHECK-NEXT: ST1Onev8b [[COPY2]], [[COPY]] :: (store (<8 x s8>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:gpr(p0) = COPY $x1
     %0:gpr(<8 x s8>) = COPY $x0
     G_STORE %0(<8 x s8>), %1(p0) :: (store (<8 x s8>))
@@ -2577,11 +2656,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1231_id2970_at_idx81816
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64all = COPY $x0
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY [[COPY1]]
-    ; CHECK: ST1Onev4h [[COPY2]], [[COPY]] :: (store (<4 x s16>))
-    ; CHECK: $noreg = PATCHABLE_RET
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY [[COPY1]]
+    ; CHECK-NEXT: ST1Onev4h [[COPY2]], [[COPY]] :: (store (<4 x s16>))
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET
     %1:gpr(p0) = COPY $x1
     %0:gpr(<4 x s16>) = COPY $x0
     G_STORE %0(<4 x s16>), %1(p0) :: (store (<4 x s16>))
@@ -2609,11 +2689,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1239_id894_at_idx82201
     ; CHECK: liveins: $d0, $d1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMLAv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMLAv2f32 [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLAv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMLAv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMLAv2f32 [[COPY]], [[COPY1]], [[COPY2]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLAv2f32_]]
     %3:fpr(<2 x s32>) = COPY $d2
     %2:fpr(<2 x s32>) = COPY $d1
     %1:fpr(<2 x s32>) = COPY $d0
@@ -2642,11 +2723,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1240_id895_at_idx82269
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMLAv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMLAv4f32 [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLAv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMLAv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMLAv4f32 [[COPY]], [[COPY1]], [[COPY2]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLAv4f32_]]
     %3:fpr(<4 x s32>) = COPY $q2
     %2:fpr(<4 x s32>) = COPY $q1
     %1:fpr(<4 x s32>) = COPY $q0
@@ -2675,11 +2757,12 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1241_id896_at_idx82337
     ; CHECK: liveins: $q0, $q1, $q2
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMLAv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMLAv2f64 [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMLAv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q2
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMLAv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMLAv2f64 [[COPY]], [[COPY1]], [[COPY2]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMLAv2f64_]]
     %3:fpr(<2 x s64>) = COPY $q2
     %2:fpr(<2 x s64>) = COPY $q1
     %1:fpr(<2 x s64>) = COPY $q0
@@ -2706,10 +2789,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1244_id751_at_idx82487
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ADDv8i8_:%[0-9]+]]:fpr64 = ADDv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ADDv8i8_:%[0-9]+]]:fpr64 = ADDv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv8i8_]]
     %2:fpr(<8 x s8>) = COPY $d1
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s8>) = G_ADD %1, %2
@@ -2735,10 +2819,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1245_id752_at_idx82530
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ADDv16i8_:%[0-9]+]]:fpr128 = ADDv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ADDv16i8_:%[0-9]+]]:fpr128 = ADDv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv16i8_]]
     %2:fpr(<16 x s8>) = COPY $q1
     %1:fpr(<16 x s8>) = COPY $q0
     %0:fpr(<16 x s8>) = G_ADD %1, %2
@@ -2764,10 +2849,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1246_id753_at_idx82573
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ADDv4i16_:%[0-9]+]]:fpr64 = ADDv4i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv4i16_]]
     %2:fpr(<4 x s16>) = COPY $d1
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s16>) = G_ADD %1, %2
@@ -2793,10 +2879,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1247_id754_at_idx82616
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ADDv8i16_:%[0-9]+]]:fpr128 = ADDv8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ADDv8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ADDv8i16_:%[0-9]+]]:fpr128 = ADDv8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ADDv8i16_]]
     %2:fpr(<8 x s16>) = COPY $q1
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_ADD %1, %2
@@ -2822,10 +2909,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1254_id1162_at_idx82913
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ANDv8i8_:%[0-9]+]]:fpr64 = ANDv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ANDv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ANDv8i8_:%[0-9]+]]:fpr64 = ANDv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ANDv8i8_]]
     %2:fpr(<8 x s8>) = COPY $d1
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s8>) = G_AND %1, %2
@@ -2851,10 +2939,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1255_id1163_at_idx82956
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ANDv16i8_:%[0-9]+]]:fpr128 = ANDv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ANDv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ANDv16i8_:%[0-9]+]]:fpr128 = ANDv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ANDv16i8_]]
     %2:fpr(<16 x s8>) = COPY $q1
     %1:fpr(<16 x s8>) = COPY $q0
     %0:fpr(<16 x s8>) = G_AND %1, %2
@@ -2880,10 +2969,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1256_id1751_at_idx82999
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ANDv8i8_:%[0-9]+]]:fpr64 = ANDv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ANDv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ANDv8i8_:%[0-9]+]]:fpr64 = ANDv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ANDv8i8_]]
     %2:fpr(<4 x s16>) = COPY $d1
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s16>) = G_AND %1, %2
@@ -2909,10 +2999,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1259_id1754_at_idx83128
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ANDv16i8_:%[0-9]+]]:fpr128 = ANDv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ANDv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ANDv16i8_:%[0-9]+]]:fpr128 = ANDv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ANDv16i8_]]
     %2:fpr(<8 x s16>) = COPY $q1
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_AND %1, %2
@@ -2938,10 +3029,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1268_id829_at_idx83513
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FADDv2f32_:%[0-9]+]]:fpr64 = nofpexcept FADDv2f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FADDv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FADDv2f32_:%[0-9]+]]:fpr64 = nofpexcept FADDv2f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FADDv2f32_]]
     %2:fpr(<2 x s32>) = COPY $d1
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FADD %1, %2
@@ -2967,10 +3059,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1269_id830_at_idx83556
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FADDv4f32_:%[0-9]+]]:fpr128 = nofpexcept FADDv4f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FADDv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FADDv4f32_:%[0-9]+]]:fpr128 = nofpexcept FADDv4f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FADDv4f32_]]
     %2:fpr(<4 x s32>) = COPY $q1
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FADD %1, %2
@@ -2996,10 +3089,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1270_id831_at_idx83599
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FADDv2f64_:%[0-9]+]]:fpr128 = nofpexcept FADDv2f64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FADDv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FADDv2f64_:%[0-9]+]]:fpr128 = nofpexcept FADDv2f64 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FADDv2f64_]]
     %2:fpr(<2 x s64>) = COPY $q1
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FADD %1, %2
@@ -3025,10 +3119,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1276_id849_at_idx83857
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FDIVv2f32_:%[0-9]+]]:fpr64 = nofpexcept FDIVv2f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FDIVv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FDIVv2f32_:%[0-9]+]]:fpr64 = nofpexcept FDIVv2f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FDIVv2f32_]]
     %2:fpr(<2 x s32>) = COPY $d1
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FDIV %1, %2
@@ -3054,10 +3149,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1277_id850_at_idx83900
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FDIVv4f32_:%[0-9]+]]:fpr128 = nofpexcept FDIVv4f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FDIVv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FDIVv4f32_:%[0-9]+]]:fpr128 = nofpexcept FDIVv4f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FDIVv4f32_]]
     %2:fpr(<4 x s32>) = COPY $q1
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FDIV %1, %2
@@ -3083,10 +3179,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1278_id851_at_idx83943
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FDIVv2f64_:%[0-9]+]]:fpr128 = nofpexcept FDIVv2f64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FDIVv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FDIVv2f64_:%[0-9]+]]:fpr128 = nofpexcept FDIVv2f64 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FDIVv2f64_]]
     %2:fpr(<2 x s64>) = COPY $q1
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FDIV %1, %2
@@ -3112,10 +3209,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1284_id909_at_idx84201
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FMULv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMULv2f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMULv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FMULv2f32_:%[0-9]+]]:fpr64 = nofpexcept FMULv2f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMULv2f32_]]
     %2:fpr(<2 x s32>) = COPY $d1
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FMUL %1, %2
@@ -3141,10 +3239,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1285_id910_at_idx84244
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMULv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMULv4f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMULv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMULv4f32_:%[0-9]+]]:fpr128 = nofpexcept FMULv4f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMULv4f32_]]
     %2:fpr(<4 x s32>) = COPY $q1
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FMUL %1, %2
@@ -3170,10 +3269,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1286_id911_at_idx84287
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FMULv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMULv2f64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FMULv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FMULv2f64_:%[0-9]+]]:fpr128 = nofpexcept FMULv2f64 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FMULv2f64_]]
     %2:fpr(<2 x s64>) = COPY $q1
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FMUL %1, %2
@@ -3199,10 +3299,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1292_id924_at_idx84545
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FSUBv2f32_:%[0-9]+]]:fpr64 = nofpexcept FSUBv2f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FSUBv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FSUBv2f32_:%[0-9]+]]:fpr64 = nofpexcept FSUBv2f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FSUBv2f32_]]
     %2:fpr(<2 x s32>) = COPY $d1
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FSUB %1, %2
@@ -3228,10 +3329,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1293_id925_at_idx84588
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FSUBv4f32_:%[0-9]+]]:fpr128 = nofpexcept FSUBv4f32 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FSUBv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FSUBv4f32_:%[0-9]+]]:fpr128 = nofpexcept FSUBv4f32 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FSUBv4f32_]]
     %2:fpr(<4 x s32>) = COPY $q1
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FSUB %1, %2
@@ -3257,10 +3359,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1294_id926_at_idx84631
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FSUBv2f64_:%[0-9]+]]:fpr128 = nofpexcept FSUBv2f64 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FSUBv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FSUBv2f64_:%[0-9]+]]:fpr128 = nofpexcept FSUBv2f64 [[COPY1]], [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FSUBv2f64_]]
     %2:fpr(<2 x s64>) = COPY $q1
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FSUB %1, %2
@@ -3286,10 +3389,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1296_id939_at_idx84715
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[MULv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv8i8_:%[0-9]+]]:fpr64 = MULv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[MULv8i8_]]
     %2:fpr(<8 x s8>) = COPY $d1
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s8>) = G_MUL %1, %2
@@ -3315,10 +3419,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1297_id940_at_idx84758
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[MULv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv16i8_:%[0-9]+]]:fpr128 = MULv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[MULv16i8_]]
     %2:fpr(<16 x s8>) = COPY $q1
     %1:fpr(<16 x s8>) = COPY $q0
     %0:fpr(<16 x s8>) = G_MUL %1, %2
@@ -3344,10 +3449,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1298_id941_at_idx84801
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[MULv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[MULv4i16_:%[0-9]+]]:fpr64 = MULv4i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[MULv4i16_]]
     %2:fpr(<4 x s16>) = COPY $d1
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s16>) = G_MUL %1, %2
@@ -3373,10 +3479,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1299_id942_at_idx84844
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[MULv8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[MULv8i16_:%[0-9]+]]:fpr128 = MULv8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[MULv8i16_]]
     %2:fpr(<8 x s16>) = COPY $q1
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_MUL %1, %2
@@ -3402,10 +3509,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1304_id1174_at_idx85055
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ORRv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ORRv8i8_]]
     %2:fpr(<8 x s8>) = COPY $d1
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s8>) = G_OR %1, %2
@@ -3431,10 +3539,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1305_id1175_at_idx85098
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ORRv16i8_:%[0-9]+]]:fpr128 = ORRv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ORRv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ORRv16i8_:%[0-9]+]]:fpr128 = ORRv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ORRv16i8_]]
     %2:fpr(<16 x s8>) = COPY $q1
     %1:fpr(<16 x s8>) = COPY $q0
     %0:fpr(<16 x s8>) = G_OR %1, %2
@@ -3460,10 +3569,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1306_id1827_at_idx85141
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ORRv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ORRv8i8_]]
     %2:fpr(<4 x s16>) = COPY $d1
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s16>) = G_OR %1, %2
@@ -3489,10 +3599,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1309_id1830_at_idx85270
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[ORRv16i8_:%[0-9]+]]:fpr128 = ORRv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[ORRv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[ORRv16i8_:%[0-9]+]]:fpr128 = ORRv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[ORRv16i8_]]
     %2:fpr(<8 x s16>) = COPY $q1
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_OR %1, %2
@@ -3518,10 +3629,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1315_id1051_at_idx85522
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SUBv8i8_:%[0-9]+]]:fpr64 = SUBv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SUBv8i8_:%[0-9]+]]:fpr64 = SUBv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv8i8_]]
     %2:fpr(<8 x s8>) = COPY $d1
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s8>) = G_SUB %1, %2
@@ -3547,10 +3659,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1316_id1052_at_idx85565
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SUBv16i8_:%[0-9]+]]:fpr128 = SUBv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SUBv16i8_:%[0-9]+]]:fpr128 = SUBv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv16i8_]]
     %2:fpr(<16 x s8>) = COPY $q1
     %1:fpr(<16 x s8>) = COPY $q0
     %0:fpr(<16 x s8>) = G_SUB %1, %2
@@ -3576,10 +3689,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1317_id1053_at_idx85608
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SUBv4i16_:%[0-9]+]]:fpr64 = SUBv4i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SUBv4i16_:%[0-9]+]]:fpr64 = SUBv4i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv4i16_]]
     %2:fpr(<4 x s16>) = COPY $d1
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s16>) = G_SUB %1, %2
@@ -3605,10 +3719,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1318_id1054_at_idx85651
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SUBv8i16_:%[0-9]+]]:fpr128 = SUBv8i16 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SUBv8i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SUBv8i16_:%[0-9]+]]:fpr128 = SUBv8i16 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SUBv8i16_]]
     %2:fpr(<8 x s16>) = COPY $q1
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_SUB %1, %2
@@ -3634,10 +3749,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1329_id1170_at_idx86118
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[EORv8i8_:%[0-9]+]]:fpr64 = EORv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[EORv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[EORv8i8_:%[0-9]+]]:fpr64 = EORv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[EORv8i8_]]
     %2:fpr(<8 x s8>) = COPY $d1
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s8>) = G_XOR %1, %2
@@ -3663,10 +3779,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1330_id1171_at_idx86161
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[EORv16i8_:%[0-9]+]]:fpr128 = EORv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[EORv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[EORv16i8_:%[0-9]+]]:fpr128 = EORv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[EORv16i8_]]
     %2:fpr(<16 x s8>) = COPY $q1
     %1:fpr(<16 x s8>) = COPY $q0
     %0:fpr(<16 x s8>) = G_XOR %1, %2
@@ -3692,10 +3809,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1331_id1791_at_idx86204
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[EORv8i8_:%[0-9]+]]:fpr64 = EORv8i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[EORv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[EORv8i8_:%[0-9]+]]:fpr64 = EORv8i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[EORv8i8_]]
     %2:fpr(<4 x s16>) = COPY $d1
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s16>) = G_XOR %1, %2
@@ -3721,10 +3839,11 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1334_id1794_at_idx86333
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[EORv16i8_:%[0-9]+]]:fpr128 = EORv16i8 [[COPY1]], [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[EORv16i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[EORv16i8_:%[0-9]+]]:fpr128 = EORv16i8 [[COPY1]], [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[EORv16i8_]]
     %2:fpr(<8 x s16>) = COPY $q1
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s16>) = G_XOR %1, %2
@@ -3748,9 +3867,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1337_id2925_at_idx86462
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[USHLLv8i8_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USHLLv8i8_shift]]
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s16>) = G_ANYEXT %1(<8 x s8>)
     $noreg = PATCHABLE_RET %0(<8 x s16>)
@@ -3773,9 +3893,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1338_id2928_at_idx86507
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[USHLLv4i16_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USHLLv4i16_shift]]
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s32>) = G_ANYEXT %1(<4 x s16>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -3798,9 +3919,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1339_id2931_at_idx86552
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[USHLLv2i32_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USHLLv2i32_shift]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s64>) = G_ANYEXT %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -3823,9 +3945,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1582_id372_at_idx97075
     ; CHECK: liveins: $s0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
-    ; CHECK: [[FNEGSr:%[0-9]+]]:fpr32 = FNEGSr [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNEGSr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[FNEGSr:%[0-9]+]]:fpr32 = FNEGSr [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNEGSr]]
     %1:fpr(s32) = COPY $s0
     %0:fpr(s32) = G_FNEG %1
     $noreg = PATCHABLE_RET %0(s32)
@@ -3848,9 +3971,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1583_id373_at_idx97110
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNEGDr:%[0-9]+]]:fpr64 = FNEGDr [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNEGDr]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNEGDr:%[0-9]+]]:fpr64 = FNEGDr [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNEGDr]]
     %1:fpr(s64) = COPY $d0
     %0:fpr(s64) = G_FNEG %1
     $noreg = PATCHABLE_RET %0(s64)
@@ -3873,9 +3997,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1586_id597_at_idx97215
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FNEGv2f32_:%[0-9]+]]:fpr64 = FNEGv2f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNEGv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FNEGv2f32_:%[0-9]+]]:fpr64 = FNEGv2f32 [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNEGv2f32_]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FNEG %1
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -3898,9 +4023,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1587_id598_at_idx97250
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FNEGv4f32_:%[0-9]+]]:fpr128 = FNEGv4f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNEGv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FNEGv4f32_:%[0-9]+]]:fpr128 = FNEGv4f32 [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNEGv4f32_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FNEG %1
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -3923,9 +4049,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1588_id599_at_idx97285
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FNEGv2f64_:%[0-9]+]]:fpr128 = FNEGv2f64 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FNEGv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FNEGv2f64_:%[0-9]+]]:fpr128 = FNEGv2f64 [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FNEGv2f64_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FNEG %1
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -3948,9 +4075,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1592_id2383_at_idx97425
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTLv2i32_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv2i32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTLv2i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTLv2i32_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv2i32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTLv2i32_]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s64>) = G_FPEXT %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -3973,9 +4101,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1593_id2385_at_idx97458
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTLv4i16_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv4i16 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTLv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTLv4i16_:%[0-9]+]]:fpr128 = nofpexcept FCVTLv4i16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTLv4i16_]]
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s32>) = G_FPEXT %1(<4 x s16>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -3998,9 +4127,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1602_id587_at_idx97771
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTZSv2f32_:%[0-9]+]]:fpr64 = nofpexcept FCVTZSv2f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTZSv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTZSv2f32_:%[0-9]+]]:fpr64 = nofpexcept FCVTZSv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTZSv2f32_]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FPTOSI %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -4023,9 +4153,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1603_id588_at_idx97806
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTZSv4f32_:%[0-9]+]]:fpr128 = nofpexcept FCVTZSv4f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTZSv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTZSv4f32_:%[0-9]+]]:fpr128 = nofpexcept FCVTZSv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTZSv4f32_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FPTOSI %1(<4 x s32>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -4048,9 +4179,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1604_id589_at_idx97841
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTZSv2f64_:%[0-9]+]]:fpr128 = nofpexcept FCVTZSv2f64 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTZSv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTZSv2f64_:%[0-9]+]]:fpr128 = nofpexcept FCVTZSv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTZSv2f64_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FPTOSI %1(<2 x s64>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -4073,9 +4205,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1613_id592_at_idx98156
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[FCVTZUv2f32_:%[0-9]+]]:fpr64 = nofpexcept FCVTZUv2f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTZUv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[FCVTZUv2f32_:%[0-9]+]]:fpr64 = nofpexcept FCVTZUv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTZUv2f32_]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_FPTOUI %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -4098,9 +4231,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1614_id593_at_idx98191
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTZUv4f32_:%[0-9]+]]:fpr128 = nofpexcept FCVTZUv4f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTZUv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTZUv4f32_:%[0-9]+]]:fpr128 = nofpexcept FCVTZUv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTZUv4f32_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_FPTOUI %1(<4 x s32>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -4123,9 +4257,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1615_id594_at_idx98226
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTZUv2f64_:%[0-9]+]]:fpr128 = nofpexcept FCVTZUv2f64 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTZUv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTZUv2f64_:%[0-9]+]]:fpr128 = nofpexcept FCVTZUv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTZUv2f64_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_FPTOUI %1(<2 x s64>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -4148,9 +4283,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1619_id2389_at_idx98366
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTNv2i32_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv2i32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTNv2i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTNv2i32_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv2i32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTNv2i32_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s32>) = G_FPTRUNC %1(<2 x s64>)
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -4173,9 +4309,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1620_id2390_at_idx98399
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[FCVTNv4i16_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv4i16 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[FCVTNv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[FCVTNv4i16_:%[0-9]+]]:fpr64 = nofpexcept FCVTNv4i16 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[FCVTNv4i16_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s16>) = G_FPTRUNC %1(<4 x s32>)
     $noreg = PATCHABLE_RET %0(<4 x s16>)
@@ -4198,9 +4335,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1621_id2923_at_idx98432
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSHLLv8i8_shift:%[0-9]+]]:fpr128 = SSHLLv8i8_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[SSHLLv8i8_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSHLLv8i8_shift:%[0-9]+]]:fpr128 = SSHLLv8i8_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSHLLv8i8_shift]]
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s16>) = G_SEXT %1(<8 x s8>)
     $noreg = PATCHABLE_RET %0(<8 x s16>)
@@ -4223,9 +4361,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1622_id2926_at_idx98477
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSHLLv4i16_shift:%[0-9]+]]:fpr128 = SSHLLv4i16_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[SSHLLv4i16_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSHLLv4i16_shift:%[0-9]+]]:fpr128 = SSHLLv4i16_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSHLLv4i16_shift]]
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s32>) = G_SEXT %1(<4 x s16>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -4248,9 +4387,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1623_id2929_at_idx98522
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SSHLLv2i32_shift:%[0-9]+]]:fpr128 = SSHLLv2i32_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[SSHLLv2i32_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SSHLLv2i32_shift:%[0-9]+]]:fpr128 = SSHLLv2i32_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SSHLLv2i32_shift]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s64>) = G_SEXT %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -4273,9 +4413,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1632_id687_at_idx98847
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[SCVTFv2f32_:%[0-9]+]]:fpr64 = nofpexcept SCVTFv2f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SCVTFv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[SCVTFv2f32_:%[0-9]+]]:fpr64 = nofpexcept SCVTFv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SCVTFv2f32_]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_SITOFP %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -4298,9 +4439,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1633_id688_at_idx98882
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SCVTFv4f32_:%[0-9]+]]:fpr128 = nofpexcept SCVTFv4f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SCVTFv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SCVTFv4f32_:%[0-9]+]]:fpr128 = nofpexcept SCVTFv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SCVTFv4f32_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_SITOFP %1(<4 x s32>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -4323,9 +4465,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1634_id689_at_idx98917
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[SCVTFv2f64_:%[0-9]+]]:fpr128 = nofpexcept SCVTFv2f64 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[SCVTFv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[SCVTFv2f64_:%[0-9]+]]:fpr128 = nofpexcept SCVTFv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[SCVTFv2f64_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_SITOFP %1(<2 x s64>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -4348,9 +4491,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1635_id748_at_idx98952
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[XTNv8i8_:%[0-9]+]]:fpr64 = XTNv8i8 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[XTNv8i8_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[XTNv8i8_:%[0-9]+]]:fpr64 = XTNv8i8 [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[XTNv8i8_]]
     %1:fpr(<8 x s16>) = COPY $q0
     %0:fpr(<8 x s8>) = G_TRUNC %1(<8 x s16>)
     $noreg = PATCHABLE_RET %0(<8 x s8>)
@@ -4373,9 +4517,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1636_id749_at_idx98987
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[XTNv4i16_:%[0-9]+]]:fpr64 = XTNv4i16 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[XTNv4i16_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[XTNv4i16_:%[0-9]+]]:fpr64 = XTNv4i16 [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[XTNv4i16_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s16>) = G_TRUNC %1(<4 x s32>)
     $noreg = PATCHABLE_RET %0(<4 x s16>)
@@ -4398,9 +4543,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1637_id750_at_idx99022
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[XTNv2i32_:%[0-9]+]]:fpr64 = XTNv2i32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[XTNv2i32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[XTNv2i32_:%[0-9]+]]:fpr64 = XTNv2i32 [[COPY]]
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[XTNv2i32_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s32>) = G_TRUNC %1(<2 x s64>)
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -4423,9 +4569,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1647_id731_at_idx99386
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[UCVTFv2f32_:%[0-9]+]]:fpr64 = nofpexcept UCVTFv2f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UCVTFv2f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[UCVTFv2f32_:%[0-9]+]]:fpr64 = nofpexcept UCVTFv2f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UCVTFv2f32_]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s32>) = G_UITOFP %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s32>)
@@ -4448,9 +4595,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1648_id732_at_idx99421
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UCVTFv4f32_:%[0-9]+]]:fpr128 = nofpexcept UCVTFv4f32 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UCVTFv4f32_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UCVTFv4f32_:%[0-9]+]]:fpr128 = nofpexcept UCVTFv4f32 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UCVTFv4f32_]]
     %1:fpr(<4 x s32>) = COPY $q0
     %0:fpr(<4 x s32>) = G_UITOFP %1(<4 x s32>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -4473,9 +4621,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1649_id733_at_idx99456
     ; CHECK: liveins: $q0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[UCVTFv2f64_:%[0-9]+]]:fpr128 = nofpexcept UCVTFv2f64 [[COPY]]
-    ; CHECK: $noreg = PATCHABLE_RET [[UCVTFv2f64_]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[UCVTFv2f64_:%[0-9]+]]:fpr128 = nofpexcept UCVTFv2f64 [[COPY]], implicit $fpcr
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[UCVTFv2f64_]]
     %1:fpr(<2 x s64>) = COPY $q0
     %0:fpr(<2 x s64>) = G_UITOFP %1(<2 x s64>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)
@@ -4498,9 +4647,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1650_id2924_at_idx99491
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[USHLLv8i8_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv8i8_shift:%[0-9]+]]:fpr128 = USHLLv8i8_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USHLLv8i8_shift]]
     %1:fpr(<8 x s8>) = COPY $d0
     %0:fpr(<8 x s16>) = G_ZEXT %1(<8 x s8>)
     $noreg = PATCHABLE_RET %0(<8 x s16>)
@@ -4523,9 +4673,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1651_id2927_at_idx99536
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[USHLLv4i16_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv4i16_shift:%[0-9]+]]:fpr128 = USHLLv4i16_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USHLLv4i16_shift]]
     %1:fpr(<4 x s16>) = COPY $d0
     %0:fpr(<4 x s32>) = G_ZEXT %1(<4 x s16>)
     $noreg = PATCHABLE_RET %0(<4 x s32>)
@@ -4548,9 +4699,10 @@ body:             |
 
     ; CHECK-LABEL: name: test_rule1652_id2930_at_idx99581
     ; CHECK: liveins: $d0
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
-    ; CHECK: $noreg = PATCHABLE_RET [[USHLLv2i32_shift]]
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[USHLLv2i32_shift:%[0-9]+]]:fpr128 = USHLLv2i32_shift [[COPY]], 0
+    ; CHECK-NEXT: $noreg = PATCHABLE_RET [[USHLLv2i32_shift]]
     %1:fpr(<2 x s32>) = COPY $d0
     %0:fpr(<2 x s64>) = G_ZEXT %1(<2 x s32>)
     $noreg = PATCHABLE_RET %0(<2 x s64>)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
index 5b39ade02774b..6465e9a8c4e80 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
@@ -29,10 +29,12 @@ body:             |
     liveins: $w0, $w1
 
     ; CHECK-LABEL: name: xor_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
-    ; CHECK: [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[COPY]], [[COPY1]]
-    ; CHECK: $w0 = COPY [[EORWrr]]
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK-NEXT: [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $w0 = COPY [[EORWrr]]
     %0(s32) = COPY $w0
     %1(s32) = COPY $w1
     %2(s32) = G_XOR %0, %1
@@ -55,10 +57,12 @@ body:             |
     liveins: $x0, $x1
 
     ; CHECK-LABEL: name: xor_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[EORXrr:%[0-9]+]]:gpr64 = EORXrr [[COPY]], [[COPY1]]
-    ; CHECK: $x0 = COPY [[EORXrr]]
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[EORXrr:%[0-9]+]]:gpr64 = EORXrr [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $x0 = COPY [[EORXrr]]
     %0(s64) = COPY $x0
     %1(s64) = COPY $x1
     %2(s64) = G_XOR %0, %1
@@ -82,9 +86,11 @@ body:             |
     liveins: $w0
 
     ; CHECK-LABEL: name: xor_constant_n1_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
-    ; CHECK: $w0 = COPY [[ORNWrr]]
+    ; CHECK: liveins: $w0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK-NEXT: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
+    ; CHECK-NEXT: $w0 = COPY [[ORNWrr]]
     %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 -1
     %2(s32) = G_XOR %0, %1
@@ -107,9 +113,11 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: xor_constant_n1_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY]]
-    ; CHECK: $x0 = COPY [[ORNXrr]]
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY]]
+    ; CHECK-NEXT: $x0 = COPY [[ORNXrr]]
     %0(s64) = COPY $x0
     %1(s64) = G_CONSTANT i64 -1
     %2(s64) = G_XOR %0, %1
@@ -131,11 +139,15 @@ registers:
 body:             |
   ; CHECK-LABEL: name: xor_constant_n1_s32_gpr_2bb
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK: bb.1:
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-  ; CHECK:   [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
-  ; CHECK:   $w0 = COPY [[ORNWrr]]
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $w0, $w1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   B %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK-NEXT:   [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
+  ; CHECK-NEXT:   $w0 = COPY [[ORNWrr]]
   bb.0:
     liveins: $w0, $w1
     successors: %bb.1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-zext-as-copy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-zext-as-copy.mir
index 1e15322f41f80..d3636c1380d2e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-zext-as-copy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-zext-as-copy.mir
@@ -20,7 +20,9 @@ body:             |
     liveins: $x0, $x1
 
     ; CHECK-LABEL: name: zext_of_load_copy
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64common = IMPLICIT_DEF
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64common = IMPLICIT_DEF
     ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[DEF]], 0 :: (load (s8))
     ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDRBBui]], %subreg.sub_32
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-zextload.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
index 5b36d7ae5c91c..bbf573c3a6c81 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-zextload.mir
@@ -10,7 +10,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16))
     ; CHECK-NEXT: $w0 = COPY [[LDRHHui]]
     %0:gpr(p0) = COPY $x0
@@ -26,7 +28,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16_not_combined
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16))
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRHHui]]
     ; CHECK-NEXT: $w0 = COPY [[COPY1]]
@@ -44,7 +48,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: i32_to_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load (s32))
     ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDRWui]], %subreg.sub_32
     ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
@@ -64,7 +70,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: i16_to_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16))
     ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDRHHui]], %subreg.sub_32
     ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
@@ -84,7 +92,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: i8_to_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load (s8))
     ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDRBBui]], %subreg.sub_32
     ; CHECK-NEXT: $x0 = COPY [[SUBREG_TO_REG]]
@@ -104,7 +114,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: i8_to_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load (s8))
     ; CHECK-NEXT: $w0 = COPY [[LDRBBui]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -123,7 +135,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: i16_to_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load (s16))
     ; CHECK-NEXT: $w0 = COPY [[LDRHHui]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -143,7 +157,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s8_atomic_unordered
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load unordered (s8))
     ; CHECK-NEXT: $w0 = COPY [[LDRBBui]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -163,7 +179,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s8_atomic_monotonic
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load monotonic (s8))
     ; CHECK-NEXT: $w0 = COPY [[LDRBBui]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -183,7 +201,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s8_atomic_acquire
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire (s8))
     ; CHECK-NEXT: $w0 = COPY [[LDARB]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -203,7 +223,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s8_atomic_seq_cst
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load seq_cst (s8))
     ; CHECK-NEXT: $w0 = COPY [[LDARB]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -223,7 +245,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16_atomic_unordered
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load unordered (s16))
     ; CHECK-NEXT: $w0 = COPY [[LDRHHui]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -243,7 +267,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16_atomic_monotonic
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load monotonic (s16))
     ; CHECK-NEXT: $w0 = COPY [[LDRHHui]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -263,7 +289,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16_atomic_acquire
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDARH:%[0-9]+]]:gpr32 = LDARH [[COPY]] :: (load acquire (s16))
     ; CHECK-NEXT: $w0 = COPY [[LDARH]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
@@ -283,7 +311,9 @@ body:             |
     liveins: $x0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16_atomic_seq_cst
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: liveins: $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK-NEXT: [[LDARH:%[0-9]+]]:gpr32 = LDARH [[COPY]] :: (load seq_cst (s16))
     ; CHECK-NEXT: $w0 = COPY [[LDARH]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-zip.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-zip.mir
index 49bda326b63f3..da25425708244 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-zip.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-zip.mir
@@ -17,11 +17,12 @@ body:             |
 
     ; CHECK-LABEL: name: zip1_v2s32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[ZIP1v2i32_:%[0-9]+]]:fpr64 = ZIP1v2i32 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[ZIP1v2i32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[ZIP1v2i32_:%[0-9]+]]:fpr64 = ZIP1v2i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[ZIP1v2i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<2 x s32>) = G_ZIP1 %0, %1
@@ -40,11 +41,12 @@ body:             |
 
     ; CHECK-LABEL: name: zip1_v2s64
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[ZIP1v2i64_:%[0-9]+]]:fpr128 = ZIP1v2i64 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[ZIP1v2i64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[ZIP1v2i64_:%[0-9]+]]:fpr128 = ZIP1v2i64 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[ZIP1v2i64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = COPY $q1
     %2:fpr(<2 x s64>) = G_ZIP1 %0, %1
@@ -62,11 +64,12 @@ body:             |
     liveins: $q0, $q1
     ; CHECK-LABEL: name: zip1_v4s32
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[ZIP1v4i32_:%[0-9]+]]:fpr128 = ZIP1v4i32 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[ZIP1v4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[ZIP1v4i32_:%[0-9]+]]:fpr128 = ZIP1v4i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[ZIP1v4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = G_ZIP1 %0, %1
@@ -85,11 +88,12 @@ body:             |
 
     ; CHECK-LABEL: name: zip2_v2s32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK: [[ZIP2v2i32_:%[0-9]+]]:fpr64 = ZIP2v2i32 [[COPY]], [[COPY1]]
-    ; CHECK: $d0 = COPY [[ZIP2v2i32_]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
+    ; CHECK-NEXT: [[ZIP2v2i32_:%[0-9]+]]:fpr64 = ZIP2v2i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $d0 = COPY [[ZIP2v2i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:fpr(<2 x s32>) = COPY $d0
     %1:fpr(<2 x s32>) = COPY $d1
     %2:fpr(<2 x s32>) = G_ZIP2 %0, %1
@@ -108,11 +112,12 @@ body:             |
 
     ; CHECK-LABEL: name: zip2_v2s64
     ; CHECK: liveins: $q0, $q1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[ZIP2v2i64_:%[0-9]+]]:fpr128 = ZIP2v2i64 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[ZIP2v2i64_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[ZIP2v2i64_:%[0-9]+]]:fpr128 = ZIP2v2i64 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[ZIP2v2i64_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<2 x s64>) = COPY $q0
     %1:fpr(<2 x s64>) = COPY $q1
     %2:fpr(<2 x s64>) = G_ZIP2 %0, %1
@@ -130,11 +135,12 @@ body:             |
     liveins: $d0, $d1
     ; CHECK-LABEL: name: zip2_v4s32
     ; CHECK: liveins: $d0, $d1
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
-    ; CHECK: [[ZIP2v4i32_:%[0-9]+]]:fpr128 = ZIP2v4i32 [[COPY]], [[COPY1]]
-    ; CHECK: $q0 = COPY [[ZIP2v4i32_]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
+    ; CHECK-NEXT: [[ZIP2v4i32_:%[0-9]+]]:fpr128 = ZIP2v4i32 [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: $q0 = COPY [[ZIP2v4i32_]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:fpr(<4 x s32>) = COPY $q0
     %1:fpr(<4 x s32>) = COPY $q1
     %2:fpr(<4 x s32>) = G_ZIP2 %0, %1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir b/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
index a158f2466dce7..3de720f34f511 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
@@ -18,13 +18,16 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: no_tbnz
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   %reg:gpr32 = COPY $w0
-  ; CHECK:   [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %reg, 1, implicit-def $nzcv
-  ; CHECK:   Bcc 1, %bb.1, implicit $nzcv
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %reg:gpr32 = COPY $w0
+  ; CHECK-NEXT:   [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %reg, 1, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 1, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     liveins: $w0
     successors: %bb.0, %bb.1
@@ -41,13 +44,16 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: no_cbz
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   %reg:gpr32sp = COPY $w0
-  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 0, 0, implicit-def $nzcv
-  ; CHECK:   Bcc 0, %bb.1, implicit $nzcv
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %reg:gpr32sp = COPY $w0
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 0, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   Bcc 0, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     liveins: $w0
     successors: %bb.0, %bb.1
@@ -66,14 +72,17 @@ regBankSelected: true
 body:             |
   ; CHECK-LABEL: name: fp
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   %reg0:fpr32 = COPY $s0
-  ; CHECK:   %reg1:fpr32 = COPY $s1
-  ; CHECK:   FCMPSrr %reg0, %reg1, implicit-def $nzcv
-  ; CHECK:   Bcc 0, %bb.1, implicit $nzcv
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
+  ; CHECK-NEXT:   liveins: $s0, $s1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   %reg0:fpr32 = COPY $s0
+  ; CHECK-NEXT:   %reg1:fpr32 = COPY $s1
+  ; CHECK-NEXT:   nofpexcept FCMPSrr %reg0, %reg1, implicit-def $nzcv, implicit $fpcr
+  ; CHECK-NEXT:   Bcc 0, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   B %bb.0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     liveins: $s0, $s1
     successors: %bb.0, %bb.1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-merging-debug.mir b/llvm/test/CodeGen/AArch64/GlobalISel/store-merging-debug.mir
index 2f4dbb7d47af5..56b3428a1bdf8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-merging-debug.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-merging-debug.mir
@@ -101,24 +101,24 @@ body:             |
     ; CHECK-LABEL: name: test_simple_4xs16
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0, debug-location !11
-    ; CHECK-NEXT: DBG_VALUE [[COPY]](p0), $noreg, !9, !DIExpression(), debug-location !11
-    ; CHECK-NEXT: DBG_VALUE %1:_(s16), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 2, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %4:_(s16), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %7:_(s16), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %10:_(s16), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE 0, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 6, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %2:_(s64), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 7, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %3:_(p0), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 8, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE 1, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 9, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %5:_(s64), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 10, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %6:_(p0), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 11, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE 2, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 12, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %8:_(s64), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 13, column: 1, scope: !5)
-    ; CHECK-NEXT: DBG_VALUE %9:_(p0), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 14, column: 1, scope: !5)
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0,  debug-location !11
+    ; CHECK-NEXT: DBG_VALUE [[COPY]](p0), $noreg, !9, !DIExpression(),  debug-location !11
+    ; CHECK-NEXT: DBG_VALUE %1:_(s16), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 2, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %4:_(s16), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 3, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %7:_(s16), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 4, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %10:_(s16), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 5, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE 0, $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 6, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %2:_(s64), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 7, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %3:_(p0), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 8, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE 1, $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 9, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %5:_(s64), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 10, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %6:_(p0), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 11, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE 2, $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 12, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %8:_(s64), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 13, column: 1, scope: !5)
+    ; CHECK-NEXT: DBG_VALUE %9:_(p0), $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 14, column: 1, scope: !5)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 3940688328982532
-    ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0), debug-location !DILocation(line: 9, scope: !5) :: (store (s64), align 2)
-    ; CHECK-NEXT: DBG_VALUE 3, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 15, column: 1, scope: !5)
+    ; CHECK-NEXT: G_STORE [[C]](s64), [[COPY]](p0),  debug-location !DILocation(line: 9, scope: !5) :: (store (s64), align 2)
+    ; CHECK-NEXT: DBG_VALUE 3, $noreg, !9, !DIExpression(),  debug-location !DILocation(line: 15, column: 1, scope: !5)
     ; CHECK-NEXT: RET_ReallyLR debug-location !DILocation(line: 16, column: 1, scope: !5)
     %0:_(p0) = COPY $x0, debug-location !11
     DBG_VALUE %0(p0), $noreg, !9, !DIExpression(), debug-location !11

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/store-wro-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/store-wro-addressing-modes.mir
index 43004835007a4..d8ca48555afed 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/store-wro-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/store-wro-addressing-modes.mir
@@ -12,10 +12,11 @@ body:             |
     liveins: $x0, $x1, $w2
     ; CHECK-LABEL: name: strwrow
     ; CHECK: liveins: $x0, $x1, $w2
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %dst:gpr32 = COPY $w2
-    ; CHECK: STRWroW %dst, %base, %foo, 1, 1 :: (store (s32))
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %dst:gpr32 = COPY $w2
+    ; CHECK-NEXT: STRWroW %dst, %base, %foo, 1, 1 :: (store (s32))
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -37,10 +38,11 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: strxrow
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %dst:gpr64 = COPY $x2
-    ; CHECK: STRXroW %dst, %base, %foo, 1, 1 :: (store (s64))
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %dst:gpr64 = COPY $x2
+    ; CHECK-NEXT: STRXroW %dst, %base, %foo, 1, 1 :: (store (s64))
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)
@@ -62,11 +64,12 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: strxrow_p0
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %base:gpr64sp = COPY $x0
-    ; CHECK: %foo:gpr32 = COPY $w1
-    ; CHECK: %dst:gpr64all = COPY $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %dst
-    ; CHECK: STRXroW [[COPY]], %base, %foo, 1, 1 :: (store (p0))
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %base:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %foo:gpr32 = COPY $w1
+    ; CHECK-NEXT: %dst:gpr64all = COPY $x2
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY %dst
+    ; CHECK-NEXT: STRXroW [[COPY]], %base, %foo, 1, 1 :: (store (p0))
     %base:gpr(p0) = COPY $x0
     %foo:gpr(s32) = COPY $w1
     %ext:gpr(s64) = G_SEXT %foo(s32)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
index e7167207da558..94a551307fd82 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
@@ -9,15 +9,17 @@ tracksRegLiveness: true
 body: |
   ; CHECK-LABEL: name: test_128_fpr_truncation
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $x0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-  ; CHECK:   [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (s128))
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRQui]].ssub
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
-  ; CHECK:   TBNZW [[COPY2]], 0, %bb.1
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+  ; CHECK-NEXT:   [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (s128))
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[LDRQui]].ssub
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr32 = COPY [[COPY1]]
+  ; CHECK-NEXT:   TBNZW [[COPY2]], 0, %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     liveins: $x0
     %1:gpr(p0) = COPY $x0
@@ -42,15 +44,17 @@ tracksRegLiveness: true
 body: |
   ; CHECK-LABEL: name: no_trunc
   ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: $x0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-  ; CHECK:   [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (s128))
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr64all = COPY [[LDRQui]].dsub
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
-  ; CHECK:   TBNZX [[COPY2]], 33, %bb.1
-  ; CHECK: bb.1:
-  ; CHECK:   RET_ReallyLR
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+  ; CHECK-NEXT:   [[LDRQui:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0 :: (load (s128))
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr64all = COPY [[LDRQui]].dsub
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+  ; CHECK-NEXT:   TBNZX [[COPY2]], 33, %bb.1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   RET_ReallyLR
   bb.0:
     liveins: $x0
     %1:gpr(p0) = COPY $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/xro-addressing-mode-constant.mir b/llvm/test/CodeGen/AArch64/GlobalISel/xro-addressing-mode-constant.mir
index 58cc627f5485d..0f430dcc8f5c8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/xro-addressing-mode-constant.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/xro-addressing-mode-constant.mir
@@ -16,10 +16,8 @@ body:             |
     liveins: $x0
     ; Check that we use the XRO addressing mode when the constant cannot be
     ; represented using an add + lsl.
-    ;
     ; cst = 0000000111000000
     ; cst & 000fffffff000000 != 0
-    ;
     ; CHECK-LABEL: name: use_xro_cannot_encode_add_lsl
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -45,11 +43,9 @@ body:             |
     liveins: $x0
     ; Check that we use the XRO addressing mode when the constant can be
     ; represented using a single movk.
-    ;
     ; cst = 000000000000f000
     ; cst & 000fffffff000000 == 0
     ; cst & ffffffffffff0fff != 0
-    ;
     ; CHECK-LABEL: name: use_xro_preferred_mov
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -75,7 +71,6 @@ body:             |
   bb.0:
     liveins: $x0
     ; Check that this works even if we have a negative immediate.
-    ;
     ; CHECK-LABEL: name: use_xro_negative_imm
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -100,7 +95,6 @@ body:             |
   bb.0:
     liveins: $x0
     ; Immediates that can be encoded in a LDRXui should be skipped.
-    ;
     ; CHECK-LABEL: name: dont_use_xro_selectable_imm
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -124,7 +118,6 @@ body:             |
   bb.0:
     liveins: $x0
     ; Immediates that can be encoded in a LDRXui should be skipped.
-    ;
     ; CHECK-LABEL: name: dont_use_xro_selectable_negative_imm
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -148,7 +141,6 @@ body:             |
   bb.0:
     liveins: $x0
     ; Immediates that can be encoded in a LDRXui should be skipped.
-    ;
     ; CHECK-LABEL: name: dont_use_xro_zero
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -173,7 +165,6 @@ body:             |
     liveins: $x0
     ; Check that we skip constants which can be encoded in an add.
     ; 17 is in [0x0, 0xfff]
-    ;
     ; CHECK-LABEL: name: dont_use_xro_in_range
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -198,12 +189,10 @@ body:             |
     liveins: $x0
     ; Check that we skip when we have an add with an lsl which cannot be
     ; represented as a movk.
-    ;
     ; cst = 0x0000000000111000
     ; cst & 000fffffff000000 = 0
     ; cst & ffffffffff00ffff != 0
     ; cst & ffffffffffff0fff != 0
-    ;
     ; CHECK-LABEL: name: dont_use_xro_add_lsl
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}


        


More information about the llvm-commits mailing list