[llvm] [AArch64] Use isTBLMask(M, VT) as part of the shuffle mask check (PR #79058)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jan 24 15:49:34 PST 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/79058

From 69ee672278fee08476beeedf656b0aa598b5723f Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Sat, 20 Jan 2024 22:05:15 -0500
Subject: [PATCH 1/6] [AArch64] Use isTBLMask(M, VT) as part of the shuffle
 mask check

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 19 ++++++++++++++-----
 llvm/lib/Target/ARM/ARMTargetTransformInfo.h  |  3 ++-
 2 files changed, 16 insertions(+), 6 deletions(-)
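
A note on why the predicate can be this permissive: TBL performs a per-byte
table lookup, and any index past the end of the table yields 0 rather than an
undefined value, so every 8- or 16-element byte mask is representable. Below
is a minimal standalone sketch of that semantics (plain C++ for illustration,
not LLVM code; all names and values here are made up):

#include <array>
#include <cstdint>
#include <cstdio>

// Emulate "tbl vd.16b, { vn.16b }, vm.16b" with a one-register table:
// each destination byte selects Table[Idx] when Idx is in range, else 0.
static std::array<uint8_t, 16> tbl1(const std::array<uint8_t, 16> &Table,
                                    const std::array<uint8_t, 16> &Idx) {
  std::array<uint8_t, 16> Dst{};
  for (int I = 0; I != 16; ++I)
    Dst[I] = Idx[I] < 16 ? Table[Idx[I]] : 0; // out-of-range index -> 0
  return Dst;
}

int main() {
  std::array<uint8_t, 16> Table;
  for (int I = 0; I != 16; ++I)
    Table[I] = 0xA0 + I;
  // Keep the even-numbered bytes, zero the odd ones: 0xFF is out of range,
  // so those lanes become 0. This is the kind of mask that otherwise needs
  // a separate movi + and/bic pair.
  std::array<uint8_t, 16> Idx = {0, 0xFF, 2,  0xFF, 4,  0xFF, 6,  0xFF,
                                 8, 0xFF, 10, 0xFF, 12, 0xFF, 14, 0xFF};
  for (uint8_t B : tbl1(Table, Idx))
    printf("%02x ", B); // a0 00 a2 00 a4 00 ...
  printf("\n");
  return 0;
}

This is also why the old FIXME in isShuffleMaskLegal can be replaced by the
two size/type checks in the new isTBLMask above.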

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 332fb37655288c..8ae758603089aa 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11471,6 +11471,8 @@ static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
 // vector sources of the shuffle are different.
 static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
                       unsigned &Imm) {
+  ReverseEXT = false;
+
   // Look for the first non-undef element.
   const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
 
@@ -11508,6 +11510,14 @@ static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
   return true;
 }
 
+static bool isTBLMask(ArrayRef<int> M, EVT VT) {
+  // We can handle <16 x i8> and <8 x i8> vector shuffles. If the index in the
+  // mask is out of range, then 0 is placed into the resulting vector. So pretty
+  // much any mask of 16 or 8 elements can work here.
+  return (VT == MVT::v8i8 && M.size() == 8) ||
+         (VT == MVT::v16i8 && M.size() == 16);
+}
+
 /// isREVMask - Check if a vector shuffle corresponds to a REV
 /// instruction with the specified blocksize.  (The order of the elements
 /// within each block of the vector is reversed.)
@@ -11542,7 +11552,7 @@ static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
     return false;
   WhichResult = (M[0] == 0 ? 0 : 1);
   unsigned Idx = WhichResult * NumElts / 2;
-  for (unsigned i = 0; i != NumElts; i += 2) {
+  for (unsigned i = 0; i < NumElts; i += 2) {
     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
       return false;
@@ -12257,7 +12267,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                        DAG.getConstant(8, dl, MVT::i32));
   }
 
-  bool ReverseEXT = false;
+  bool ReverseEXT;
   unsigned Imm;
   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
     if (ReverseEXT)
@@ -13755,8 +13765,7 @@ bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
 
   return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
           isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
-          isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
-          // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
+          isEXTMask(M, VT, DummyBool, DummyUnsigned) || isTBLMask(M, VT) ||
           isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
           isZIPMask(M, VT, DummyUnsigned) ||
           isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
@@ -26641,7 +26650,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
     return convertFromScalableVector(DAG, VT, Op);
   }
 
-  bool ReverseEXT = false;
+  bool ReverseEXT;
   unsigned Imm;
   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
       Imm == VT.getVectorNumElements() - 1) {
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index bb4b321b530091..e83d8b830a43cd 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -347,6 +347,7 @@ inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
   if (EltSz != 8 && EltSz != 16 && EltSz != 32)
     return false;
 
+  unsigned NumElts = VT.getVectorNumElements();
   unsigned BlockElts = M[0] + 1;
   // If the first shuffle index is UNDEF, be optimistic.
   if (M[0] < 0)
@@ -355,7 +356,7 @@ inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
     return false;
 
-  for (unsigned i = 0, e = M.size(); i < e; ++i) {
+  for (unsigned i = 0; i < NumElts; ++i) {
     if (M[i] < 0)
       continue; // ignore UNDEF indices
     if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))

From 619d320ed588ca1460040669d76b44e01035feff Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Wed, 24 Jan 2024 16:15:41 -0500
Subject: [PATCH 2/6] Regenerate test checks affected by TBL shuffle lowering

---
 llvm/test/Analysis/CostModel/ARM/shuffle.ll   |  12 +-
 llvm/test/CodeGen/AArch64/aarch-multipart.ll  |   8 +
 llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll  |   9 +-
 .../AArch64/aarch64-fix-cortex-a53-835769.ll  | 481 ++++++++++++
 llvm/test/CodeGen/AArch64/aarch64-load-ext.ll |  23 +-
 .../CodeGen/AArch64/aarch64-loop-gep-opt.ll   |   1 +
 llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll  |  46 +-
 .../CodeGen/AArch64/aarch64-named-reg-x18.ll  |   1 +
 .../aarch64-neon-vector-insert-uaddlv.ll      |  56 +-
 llvm/test/CodeGen/AArch64/aarch64-smull.ll    | 271 ++++---
 llvm/test/CodeGen/AArch64/aarch64-tbz.ll      |  38 +
 llvm/test/CodeGen/AArch64/active_lane_mask.ll |  44 +-
 .../AArch64/arm64-2011-10-18-LdStOptBug.ll    |  23 +-
 ...rm64-2012-05-07-DAGCombineVectorExtract.ll |  11 +-
 .../arm64-2012-07-11-InstrEmitterBug.ll       |   1 +
 .../arm64-AnInfiniteLoopInDAGCombine.ll       |   1 +
 llvm/test/CodeGen/AArch64/arm64-aapcs.ll      | 123 ++-
 .../CodeGen/AArch64/arm64-anyregcc-crash.ll   |   1 +
 llvm/test/CodeGen/AArch64/arm64-anyregcc.ll   |   3 +
 llvm/test/CodeGen/AArch64/arm64-arith.ll      |  90 +--
 llvm/test/CodeGen/AArch64/arm64-bcc.ll        |   4 +-
 .../AArch64/arm64-big-endian-varargs.ll       |  48 +-
 .../AArch64/arm64-big-endian-vector-caller.ll | 177 +----
 .../CodeGen/AArch64/arm64-call-tailcalls.ll   |  42 +-
 .../test/CodeGen/AArch64/arm64-collect-loh.ll | 738 +++++++++++++++++-
 llvm/test/CodeGen/AArch64/arm64-crypto.ll     |  79 +-
 .../AArch64/arm64-dagcombiner-convergence.ll  |   1 +
 .../AArch64/arm64-extload-knownzero.ll        |  25 +-
 .../CodeGen/AArch64/arm64-fast-isel-br.ll     |  15 +-
 .../CodeGen/AArch64/arm64-fast-isel-gv.ll     |  22 +-
 .../AArch64/arm64-fast-isel-indirectbr.ll     |   8 +-
 .../AArch64/arm64-fast-isel-intrinsic.ll      | 216 ++---
 llvm/test/CodeGen/AArch64/arm64-fcmp-opt.ll   | 204 +++--
 llvm/test/CodeGen/AArch64/arm64-fmax-safe.ll  |  33 +-
 .../CodeGen/AArch64/arm64-fold-address.ll     |  12 +-
 .../test/CodeGen/AArch64/arm64-fp-imm-size.ll |  27 +-
 .../CodeGen/AArch64/arm64-fp128-folding.ll    |  11 +-
 .../arm64-homogeneous-prolog-epilog.ll        |  84 ++
 .../AArch64/arm64-indexed-vector-ldst.ll      |  32 +-
 .../AArch64/arm64-inline-asm-error-J.ll       |   1 +
 .../AArch64/arm64-inline-asm-error-K.ll       |   1 +
 .../AArch64/arm64-inline-asm-error-N.ll       |   1 +
 llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll     |  25 +
 llvm/test/CodeGen/AArch64/arm64-leaf.ll       |  13 +-
 llvm/test/CodeGen/AArch64/arm64-mte.ll        | 359 ++++++---
 llvm/test/CodeGen/AArch64/arm64-neg.ll        |  41 +-
 .../test/CodeGen/AArch64/arm64-neon-2velem.ll |   7 -
 .../test/CodeGen/AArch64/arm64-neon-3vdiff.ll | 112 ++-
 .../CodeGen/AArch64/arm64-platform-reg.ll     | 186 +++--
 .../CodeGen/AArch64/arm64-prefetch-new.ll     |  16 +-
 llvm/test/CodeGen/AArch64/arm64-prefetch.ll   |  28 +-
 .../arm64-register-offset-addressing.ll       |  21 +-
 .../CodeGen/AArch64/arm64-regress-opt-cmp.mir |   3 +
 .../AArch64/arm64-reserve-call-saved-reg.ll   |  42 +-
 .../arm64-reserved-arg-reg-call-error.ll      |   1 +
 llvm/test/CodeGen/AArch64/arm64-sincos.ll     | 116 ++-
 .../test/CodeGen/AArch64/arm64-sli-sri-opt.ll |  32 +-
 .../arm64-spill-remarks-treshold-hotness.ll   | 130 +++
 .../CodeGen/AArch64/arm64-stackmap-nops.ll    |   7 +-
 llvm/test/CodeGen/AArch64/arm64-stacksave.ll  |   1 +
 llvm/test/CodeGen/AArch64/arm64-stp-aa.ll     |  48 ++
 llvm/test/CodeGen/AArch64/arm64-stur.ll       |  52 +-
 .../test/CodeGen/AArch64/arm64-subsections.ll |   4 +
 llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll |  21 +-
 .../CodeGen/AArch64/arm64-tls-initial-exec.ll |  47 +-
 llvm/test/CodeGen/AArch64/arm64-uminv.ll      |  47 +-
 llvm/test/CodeGen/AArch64/arm64-vaddlv.ll     |  19 +-
 .../CodeGen/AArch64/arm64-variadic-aapcs.ll   | 136 ++--
 llvm/test/CodeGen/AArch64/arm64-vmovn.ll      | 205 ++---
 llvm/test/CodeGen/AArch64/arm64-vqsub.ll      | 113 ++-
 llvm/test/CodeGen/AArch64/arm64-vsetcc_fp.ll  |   8 +-
 .../AArch64/arm64-zero-cycle-zeroing.ll       | 533 +++++++++++--
 llvm/test/CodeGen/AArch64/arm64-zeroreg.ll    | 143 +++-
 llvm/test/CodeGen/AArch64/arm64-zext.ll       |   8 +-
 llvm/test/CodeGen/AArch64/irg.ll              |  31 +-
 .../AArch64/neon-scalar-by-elem-fma.ll        | 335 +++++---
 .../AArch64/sme2-intrinsics-sqdmulh.ll        |   1 +
 .../CodeGen/AArch64/wineh-save-lrpair3.mir    |   3 +
 78 files changed, 4301 insertions(+), 1617 deletions(-)
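
Most of the churn below is mechanical: the affected tests are regenerated with
update_llc_test_checks.py, and shuffles that isShuffleMaskLegal now reports as
legal TBL masks lower to a constant-pool index vector fed to tbl instead of
movi/bic masking. As a rough illustration of the pattern (a plain C++ sketch
assuming little-endian byte order; the diff elides the actual constant-pool
contents, so the index vector here is hypothetical), a <4 x i8> -> <4 x i16>
zero-extend such as fzext_v4i16 amounts to this byte lookup:

#include <cstdint>
#include <cstdio>

int main() {
  // Source bytes sit in lanes 0..3 of the 16-byte table register;
  // the remaining lanes are zero-initialized don't-cares.
  uint8_t Table[16] = {0x11, 0x22, 0x33, 0x44};
  // Interleave each source byte with a zero: 0xFF is out of range for a
  // 16-byte table, so TBL writes 0 into those lanes.
  uint8_t Idx[8] = {0, 0xFF, 1, 0xFF, 2, 0xFF, 3, 0xFF};
  uint8_t Dst[8];
  for (int I = 0; I != 8; ++I)
    Dst[I] = Idx[I] < 16 ? Table[Idx[I]] : 0;
  for (uint8_t B : Dst)
    printf("%02x ", B); // 11 00 22 00 33 00 44 00, i.e. the i16s 0x11..0x44
  printf("\n");
  return 0;
}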

diff --git a/llvm/test/Analysis/CostModel/ARM/shuffle.ll b/llvm/test/Analysis/CostModel/ARM/shuffle.ll
index 14f16252581aab..fdb280de175b92 100644
--- a/llvm/test/Analysis/CostModel/ARM/shuffle.ll
+++ b/llvm/test/Analysis/CostModel/ARM/shuffle.ll
@@ -103,11 +103,11 @@ define void @reverse() {
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v2i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -254,11 +254,11 @@ define void @select() {
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v2i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 0, i32 17, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -333,7 +333,7 @@ define void @vrev2() {
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
diff --git a/llvm/test/CodeGen/AArch64/aarch-multipart.ll b/llvm/test/CodeGen/AArch64/aarch-multipart.ll
index fd42d6e8cd8eba..830033945216db 100644
--- a/llvm/test/CodeGen/AArch64/aarch-multipart.ll
+++ b/llvm/test/CodeGen/AArch64/aarch-multipart.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -o - | FileCheck %s
 
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
@@ -9,6 +10,13 @@ declare <4 x double> @user_func(<4 x double>) #1
 ; CHECK-LABEL: caller_function
 ; CHECK: ret
 define void @caller_function(<4 x double>, <4 x double>, <4 x double>, <4 x double>, <4 x double>) #1 {
+; CHECK-LABEL: caller_function:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    ldp q0, q1, [sp, #16]
+; CHECK-NEXT:    bl user_func
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
 entry:
   %r = call <4 x double> @user_func(<4 x double> %4)
   ret void
diff --git a/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
index 2dde251cbc02f9..258c39d4fb7185 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dup-ext.ll
@@ -116,11 +116,12 @@ entry:
 define <2 x i64> @dupzext_v2i16_v2i64(i16 %src, <2 x i16> %b) {
 ; CHECK-LABEL: dupzext_v2i16_v2i64:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    rev32 v0.4h, v0.4h
 ; CHECK-NEXT:    and w8, w0, #0xffff
-; CHECK-NEXT:    dup v2.2s, w8
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    umull v0.2d, v2.2s, v0.2s
+; CHECK-NEXT:    trn2 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    dup v1.2s, w8
+; CHECK-NEXT:    umull v0.2d, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
 entry:
     %in = zext i16 %src to i64
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
index 296435adc8de52..f508281d85effa 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; REQUIRES: asserts
 ; The regression tests need to test for order of emitted instructions, and
 ; therefore, the tests are a bit fragile/reliant on instruction scheduling. The
@@ -21,6 +22,24 @@ target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-gnu"
 
 define i64 @f_load_madd_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_madd_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    madd x0, x8, x1, x0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_madd_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    madd x0, x8, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_madd_64:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr x8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    madd x0, x8, x1, x0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i64, ptr %c, align 8
   %mul = mul nsw i64 %0, %b
@@ -40,6 +59,23 @@ entry:
 
 
 define i32 @f_load_madd_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_madd_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    madd w0, w8, w1, w0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_madd_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    madd w0, w8, w1, w0
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_madd_32:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr w8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    madd w0, w8, w1, w0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i32, ptr %c, align 4
   %mul = mul nsw i32 %0, %b
@@ -55,6 +91,24 @@ entry:
 
 
 define i64 @f_load_msub_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_msub_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    msub x0, x8, x1, x0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_msub_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    msub x0, x8, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_msub_64:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr x8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    msub x0, x8, x1, x0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i64, ptr %c, align 8
   %mul = mul nsw i64 %0, %b
@@ -71,6 +125,23 @@ entry:
 
 
 define i32 @f_load_msub_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_msub_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    msub w0, w8, w1, w0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_msub_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    msub w0, w8, w1, w0
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_msub_32:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr w8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    msub w0, w8, w1, w0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i32, ptr %c, align 4
   %mul = mul nsw i32 %0, %b
@@ -86,6 +157,23 @@ entry:
 
 
 define i64 @f_load_mul_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_mul_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    mul x0, x8, x1
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_mul_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mul x0, x8, x1
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_mul_64:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr x8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    mul x0, x8, x1
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i64, ptr %c, align 8
   %mul = mul nsw i64 %0, %b
@@ -100,6 +188,23 @@ entry:
 
 
 define i32 @f_load_mul_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_mul_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    mul w0, w8, w1
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_mul_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mul w0, w8, w1
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_mul_32:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr w8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    mul w0, w8, w1
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i32, ptr %c, align 4
   %mul = mul nsw i32 %0, %b
@@ -114,6 +219,23 @@ entry:
 
 
 define i64 @f_load_mneg_64(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_mneg_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    mneg x0, x1, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_mneg_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mneg x0, x1, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_mneg_64:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr x8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    mneg x0, x1, x8
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i64, ptr %c, align 8
   %mul = sub i64 0, %b
@@ -132,6 +254,23 @@ entry:
 
 
 define i32 @f_load_mneg_32(i32 %a, i32 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: f_load_mneg_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    mneg w0, w1, w8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_mneg_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mneg w0, w1, w8
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: f_load_mneg_32:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr w8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:    mneg w0, w1, w8
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i32, ptr %c, align 4
   %mul = sub i32 0, %b
@@ -149,6 +288,20 @@ entry:
 
 
 define i64 @f_load_smaddl(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_smaddl:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldrsw x8, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    smaddl x9, w2, w1, x0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_smaddl:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldrsw x8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    smaddl x9, w2, w1, x0
+; CHECK-NOWORKAROUND-NEXT:    add x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
@@ -169,6 +322,20 @@ entry:
 
 
 define i64 @f_load_smsubl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_smsubl_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldrsw x8, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    smsubl x9, w2, w1, x0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_smsubl_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldrsw x8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    smsubl x9, w2, w1, x0
+; CHECK-NOWORKAROUND-NEXT:    add x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
@@ -189,6 +356,19 @@ entry:
 
 
 define i64 @f_load_smull(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_smull:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldrsw x8, [x3]
+; CHECK-NEXT:    smull x9, w2, w1
+; CHECK-NEXT:    sdiv x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_smull:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldrsw x8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    smull x9, w2, w1
+; CHECK-NOWORKAROUND-NEXT:    sdiv x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
@@ -207,6 +387,27 @@ entry:
 
 
 define i64 @f_load_smnegl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_smnegl_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    sxtw x8, w2
+; CHECK-NEXT:    sxtw x9, w1
+; CHECK-NEXT:    ldrsw x10, [x3]
+; CHECK-NEXT:    mneg x8, x9, x8
+; CHECK-NEXT:    sdiv x0, x8, x10
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_smnegl_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-NOWORKAROUND-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NOWORKAROUND-NEXT:    sxtw x8, w2
+; CHECK-NOWORKAROUND-NEXT:    sxtw x9, w1
+; CHECK-NOWORKAROUND-NEXT:    ldrsw x10, [x3]
+; CHECK-NOWORKAROUND-NEXT:    mneg x8, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    sdiv x0, x8, x10
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = sext i32 %b to i64
   %conv1 = sext i32 %c to i64
@@ -224,6 +425,20 @@ entry:
 
 
 define i64 @f_load_umaddl(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_umaddl:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    umaddl x9, w2, w1, x0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_umaddl:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    umaddl x9, w2, w1, x0
+; CHECK-NOWORKAROUND-NEXT:    add x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
@@ -244,6 +459,20 @@ entry:
 
 
 define i64 @f_load_umsubl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_umsubl_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    umsubl x9, w2, w1, x0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_umsubl_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    umsubl x9, w2, w1, x0
+; CHECK-NOWORKAROUND-NEXT:    add x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
@@ -264,6 +493,19 @@ entry:
 
 
 define i64 @f_load_umull(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_umull:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x3]
+; CHECK-NEXT:    umull x9, w2, w1
+; CHECK-NEXT:    udiv x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_umull:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    umull x9, w2, w1
+; CHECK-NOWORKAROUND-NEXT:    udiv x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
@@ -282,6 +524,23 @@ entry:
 
 
 define i64 @f_load_umnegl_64(i64 %a, i32 %b, i32 %c, ptr nocapture readonly %d) #0 {
+; CHECK-LABEL: f_load_umnegl_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, w2
+; CHECK-NEXT:    mov w9, w1
+; CHECK-NEXT:    ldr w10, [x3]
+; CHECK-NEXT:    mneg x8, x9, x8
+; CHECK-NEXT:    udiv x0, x8, x10
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_load_umnegl_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    mov w8, w2
+; CHECK-NOWORKAROUND-NEXT:    mov w9, w1
+; CHECK-NOWORKAROUND-NEXT:    ldr w10, [x3]
+; CHECK-NOWORKAROUND-NEXT:    mneg x8, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    udiv x0, x8, x10
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %conv = zext i32 %b to i64
   %conv1 = zext i32 %c to i64
@@ -299,6 +558,22 @@ entry:
 
 
 define i64 @f_store_madd_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_store_madd_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x9, [x2]
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    str x8, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    madd x0, x9, x1, x0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_store_madd_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x9, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mov x8, x0
+; CHECK-NOWORKAROUND-NEXT:    str x8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    madd x0, x9, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i64, ptr %cp, align 8
   store i64 %a, ptr %e, align 8
@@ -316,6 +591,21 @@ entry:
 
 
 define i32 @f_store_madd_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_store_madd_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w9, [x2]
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    str w8, [x3]
+; CHECK-NEXT:    madd w0, w9, w1, w0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_store_madd_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w9, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mov w8, w0
+; CHECK-NOWORKAROUND-NEXT:    str w8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    madd w0, w9, w1, w0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i32, ptr %cp, align 4
   store i32 %a, ptr %e, align 4
@@ -332,6 +622,22 @@ entry:
 
 
 define i64 @f_store_msub_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_store_msub_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x9, [x2]
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    str x8, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    msub x0, x9, x1, x0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_store_msub_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x9, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mov x8, x0
+; CHECK-NOWORKAROUND-NEXT:    str x8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    msub x0, x9, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i64, ptr %cp, align 8
   store i64 %a, ptr %e, align 8
@@ -349,6 +655,21 @@ entry:
 
 
 define i32 @f_store_msub_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_store_msub_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w9, [x2]
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    str w8, [x3]
+; CHECK-NEXT:    msub w0, w9, w1, w0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_store_msub_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w9, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mov w8, w0
+; CHECK-NOWORKAROUND-NEXT:    str w8, [x3]
+; CHECK-NOWORKAROUND-NEXT:    msub w0, w9, w1, w0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i32, ptr %cp, align 4
   store i32 %a, ptr %e, align 4
@@ -365,6 +686,23 @@ entry:
 
 
 define i64 @f_store_mul_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_store_mul_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    str x9, [x3]
+; CHECK-NEXT:    mul x8, x8, x1
+; CHECK-NEXT:    mov x0, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_store_mul_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mov x9, x0
+; CHECK-NOWORKAROUND-NEXT:    str x9, [x3]
+; CHECK-NOWORKAROUND-NEXT:    mul x8, x8, x1
+; CHECK-NOWORKAROUND-NEXT:    mov x0, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i64, ptr %cp, align 8
   store i64 %a, ptr %e, align 8
@@ -380,6 +718,23 @@ entry:
 
 
 define i32 @f_store_mul_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_store_mul_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    mov w9, w0
+; CHECK-NEXT:    str w9, [x3]
+; CHECK-NEXT:    mul w8, w8, w1
+; CHECK-NEXT:    mov w0, w8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_store_mul_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    mov w9, w0
+; CHECK-NOWORKAROUND-NEXT:    str w9, [x3]
+; CHECK-NOWORKAROUND-NEXT:    mul w8, w8, w1
+; CHECK-NOWORKAROUND-NEXT:    mov w0, w8
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i32, ptr %cp, align 4
   store i32 %a, ptr %e, align 4
@@ -395,6 +750,20 @@ entry:
 
 
 define i64 @f_prefetch_madd_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_prefetch_madd_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    prfm pldl1strm, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    madd x0, x8, x1, x0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_madd_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    prfm pldl1strm, [x3]
+; CHECK-NOWORKAROUND-NEXT:    madd x0, x8, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i64, ptr %cp, align 8
   tail call void @llvm.prefetch(ptr %e, i32 0, i32 0, i32 1)
@@ -413,6 +782,19 @@ entry:
 declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) #2
 
 define i32 @f_prefetch_madd_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_prefetch_madd_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    prfm pstl1strm, [x3]
+; CHECK-NEXT:    madd w0, w8, w1, w0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_madd_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    prfm pstl1strm, [x3]
+; CHECK-NOWORKAROUND-NEXT:    madd w0, w8, w1, w0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i32, ptr %cp, align 4
   tail call void @llvm.prefetch(ptr %e, i32 1, i32 0, i32 1)
@@ -428,6 +810,20 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	madd
 
 define i64 @f_prefetch_msub_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_prefetch_msub_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    prfm pldl3keep, [x3]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    msub x0, x8, x1, x0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_msub_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    prfm pldl3keep, [x3]
+; CHECK-NOWORKAROUND-NEXT:    msub x0, x8, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i64, ptr %cp, align 8
   tail call void @llvm.prefetch(ptr %e, i32 0, i32 1, i32 1)
@@ -444,6 +840,19 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
 define i32 @f_prefetch_msub_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_prefetch_msub_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    prfm pstl3keep, [x3]
+; CHECK-NEXT:    msub w0, w8, w1, w0
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_msub_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    prfm pstl3keep, [x3]
+; CHECK-NOWORKAROUND-NEXT:    msub w0, w8, w1, w0
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i32, ptr %cp, align 4
   tail call void @llvm.prefetch(ptr %e, i32 1, i32 1, i32 1)
@@ -459,6 +868,19 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	msub
 
 define i64 @f_prefetch_mul_64(i64 %a, i64 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_prefetch_mul_64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    prfm pldl1keep, [x3]
+; CHECK-NEXT:    mul x0, x8, x1
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_mul_64:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    prfm pldl1keep, [x3]
+; CHECK-NOWORKAROUND-NEXT:    mul x0, x8, x1
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i64, ptr %cp, align 8
   tail call void @llvm.prefetch(ptr %e, i32 0, i32 3, i32 1)
@@ -473,6 +895,19 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
 define i32 @f_prefetch_mul_32(i32 %a, i32 %b, ptr nocapture readonly %cp, ptr nocapture %e) #1 {
+; CHECK-LABEL: f_prefetch_mul_32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr w8, [x2]
+; CHECK-NEXT:    prfm pstl1keep, [x3]
+; CHECK-NEXT:    mul w0, w8, w1
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: f_prefetch_mul_32:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr w8, [x2]
+; CHECK-NOWORKAROUND-NEXT:    prfm pstl1keep, [x3]
+; CHECK-NOWORKAROUND-NEXT:    mul w0, w8, w1
+; CHECK-NOWORKAROUND-NEXT:    ret
 entry:
   %0 = load i32, ptr %cp, align 4
   tail call void @llvm.prefetch(ptr %e, i32 1, i32 3, i32 1)
@@ -487,6 +922,39 @@ entry:
 ; CHECK-NOWORKAROUND-NEXT:	mul
 
 define i64 @fall_through(i64 %a, i64 %b, ptr nocapture readonly %c) #0 {
+; CHECK-LABEL: fall_through:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr x8, [x2]
+; CHECK-NEXT:    nop
+; CHECK-NEXT:  .Ltmp0: // Block address taken
+; CHECK-NEXT:  // %bb.1: // %block1
+; CHECK-NEXT:    madd x8, x8, x1, x0
+; CHECK-NEXT:    adrp x9, .Ltmp0
+; CHECK-NEXT:    add x9, x9, :lo12:.Ltmp0
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
+;
+; CHECK-NOWORKAROUND-LABEL: fall_through:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    ldr x8, [x2]
+; CHECK-NOWORKAROUND-NEXT:  .Ltmp0: // Block address taken
+; CHECK-NOWORKAROUND-NEXT:  // %bb.1: // %block1
+; CHECK-NOWORKAROUND-NEXT:    madd x8, x8, x1, x0
+; CHECK-NOWORKAROUND-NEXT:    adrp x9, .Ltmp0
+; CHECK-NOWORKAROUND-NEXT:    add x9, x9, :lo12:.Ltmp0
+; CHECK-NOWORKAROUND-NEXT:    add x0, x9, x8
+; CHECK-NOWORKAROUND-NEXT:    ret
+;
+; CHECK-BASIC-PASS-DISABLED-LABEL: fall_through:
+; CHECK-BASIC-PASS-DISABLED:       // %bb.0: // %entry
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ldr x8, [x2]
+; CHECK-BASIC-PASS-DISABLED-NEXT:  .Ltmp0: // Block address taken
+; CHECK-BASIC-PASS-DISABLED-NEXT:  // %bb.1: // %block1
+; CHECK-BASIC-PASS-DISABLED-NEXT:    madd x8, x8, x1, x0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    adrp x9, .Ltmp0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    add x9, x9, :lo12:.Ltmp0
+; CHECK-BASIC-PASS-DISABLED-NEXT:    add x0, x9, x8
+; CHECK-BASIC-PASS-DISABLED-NEXT:    ret
 entry:
   %0 = load i64, ptr %c, align 8
   br label %block1
@@ -512,6 +980,19 @@ block1:
 
 ; No checks for this, just check it doesn't crash
 define i32 @crash_check(ptr nocapture readnone %data) #0 {
+; CHECK-LABEL: crash_check:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    .p2align 4, , 8
+; CHECK-NEXT:  .LBB29_1: // %while.cond
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    b .LBB29_1
+;
+; CHECK-NOWORKAROUND-LABEL: crash_check:
+; CHECK-NOWORKAROUND:       // %bb.0: // %entry
+; CHECK-NOWORKAROUND-NEXT:    .p2align 4, , 8
+; CHECK-NOWORKAROUND-NEXT:  .LBB29_1: // %while.cond
+; CHECK-NOWORKAROUND-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NOWORKAROUND-NEXT:    b .LBB29_1
 entry:
   br label %while.cond
 
diff --git a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
index 945a73b05f1bae..1a61f51fc93b98 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
@@ -202,17 +202,25 @@ define <4 x i32> @fzext_v4i32(ptr %a) {
 ; CHECK-LE-LABEL: fzext_v4i32:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
+; CHECK-LE-NEXT:    adrp x8, .LCPI9_0
+; CHECK-LE-NEXT:    ldr q1, [x8, :lo12:.LCPI9_0]
 ; CHECK-LE-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-LE-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-LE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-LE-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: fzext_v4i32:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr s0, [x0]
+; CHECK-BE-NEXT:    adrp x8, .LCPI9_0
+; CHECK-BE-NEXT:    add x8, x8, :lo12:.LCPI9_0
+; CHECK-BE-NEXT:    ld1 { v1.16b }, [x8]
 ; CHECK-BE-NEXT:    rev32 v0.8b, v0.8b
 ; CHECK-BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-BE-NEXT:    ushll v0.4s, v0.4h, #0
-; CHECK-BE-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-BE-NEXT:    rev32 v0.16b, v0.16b
+; CHECK-BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-BE-NEXT:    rev64 v0.16b, v0.16b
 ; CHECK-BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-BE-NEXT:    ret
   %x = load <4 x i8>, ptr %a
@@ -358,16 +366,25 @@ define <4 x i16> @fzext_v4i16(ptr %a) {
 ; CHECK-LE-LABEL: fzext_v4i16:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ldr s0, [x0]
+; CHECK-LE-NEXT:    adrp x8, .LCPI16_0
+; CHECK-LE-NEXT:    ldr d1, [x8, :lo12:.LCPI16_0]
 ; CHECK-LE-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-LE-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-LE-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-LE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-LE-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: fzext_v4i16:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ldr s0, [x0]
+; CHECK-BE-NEXT:    adrp x8, .LCPI16_0
+; CHECK-BE-NEXT:    add x8, x8, :lo12:.LCPI16_0
+; CHECK-BE-NEXT:    ld1 { v1.8b }, [x8]
 ; CHECK-BE-NEXT:    rev32 v0.8b, v0.8b
 ; CHECK-BE-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
+; CHECK-BE-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-BE-NEXT:    rev16 v0.16b, v0.16b
+; CHECK-BE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-BE-NEXT:    rev64 v0.8b, v0.8b
 ; CHECK-BE-NEXT:    ret
   %x = load <4 x i8>, ptr %a
   %y = zext <4 x i8> %x to <4 x i16>
diff --git a/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
index c32e0b9b3e7aa7..d36d8a349869c5 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-loop-gep-opt.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -O3 -aarch64-enable-gep-opt=true  -print-after=codegenprepare -mcpu=cortex-a53 < %s >%t 2>&1 && FileCheck <%t %s
 ; REQUIRES: asserts
 target triple = "aarch64--linux-android"
diff --git a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
index 194fe5be40c2bd..1caaf27eb716d6 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -1185,8 +1185,11 @@ entry:
 define i8 @uminv_v2i8(<2 x i8> %a) {
 ; CHECK-SD-LABEL: uminv_v2i8:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI60_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI60_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    uminp v0.2s, v0.2s, v0.2s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1211,10 +1214,13 @@ define i8 @uminv_v3i8(<3 x i8> %a) {
 ; CHECK-SD-LABEL: uminv_v3i8:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    movi d0, #0xff00ff00ff00ff
+; CHECK-SD-NEXT:    adrp x8, .LCPI61_0
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI61_0]
 ; CHECK-SD-NEXT:    mov v0.h[0], w0
 ; CHECK-SD-NEXT:    mov v0.h[1], w1
 ; CHECK-SD-NEXT:    mov v0.h[2], w2
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    uminv h0, v0.4h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1236,7 +1242,11 @@ entry:
 define i8 @uminv_v4i8(<4 x i8> %a) {
 ; CHECK-SD-LABEL: uminv_v4i8:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI62_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI62_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    uminv h0, v0.4h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1300,8 +1310,9 @@ entry:
 define i16 @uminv_v2i16(<2 x i16> %a) {
 ; CHECK-SD-LABEL: uminv_v2i16:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SD-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-SD-NEXT:    trn2 v0.4h, v0.4h, v1.4h
 ; CHECK-SD-NEXT:    uminp v0.2s, v0.2s, v0.2s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1548,8 +1559,11 @@ entry:
 define i8 @umaxv_v2i8(<2 x i8> %a) {
 ; CHECK-SD-LABEL: umaxv_v2i8:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI79_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI79_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    umaxp v0.2s, v0.2s, v0.2s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1574,10 +1588,13 @@ define i8 @umaxv_v3i8(<3 x i8> %a) {
 ; CHECK-SD-LABEL: umaxv_v3i8:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT:    adrp x8, .LCPI80_0
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI80_0]
 ; CHECK-SD-NEXT:    mov v0.h[0], w0
 ; CHECK-SD-NEXT:    mov v0.h[1], w1
 ; CHECK-SD-NEXT:    mov v0.h[2], w2
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    umaxv h0, v0.4h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1599,7 +1616,11 @@ entry:
 define i8 @umaxv_v4i8(<4 x i8> %a) {
 ; CHECK-SD-LABEL: umaxv_v4i8:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI81_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI81_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    umaxv h0, v0.4h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -1663,8 +1684,9 @@ entry:
 define i16 @umaxv_v2i16(<2 x i16> %a) {
 ; CHECK-SD-LABEL: umaxv_v2i16:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SD-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-SD-NEXT:    trn2 v0.4h, v0.4h, v1.4h
 ; CHECK-SD-NEXT:    umaxp v0.2s, v0.2s, v0.2s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/aarch64-named-reg-x18.ll b/llvm/test/CodeGen/AArch64/aarch64-named-reg-x18.ll
index 9074f2c108af33..0344ff26aa3c23 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-named-reg-x18.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-named-reg-x18.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64-fuchsia -o - %s
 
 define void @set_x18(i64 %x) {
diff --git a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
index 3c8aca5145261d..77918cd8b2bdac 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-neon-vector-insert-uaddlv.ll
@@ -274,14 +274,29 @@ define void @insert_vec_v16i8_uaddlv_from_v8i8(ptr %0) {
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
 ; CHECK-NEXT:    movi.2d v2, #0000000000000000
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x8, lCPI13_1 at PAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr d3, [x8, lCPI13_1 at PAGEOFF]
+; CHECK-NEXT:  Lloh2:
+; CHECK-NEXT:    adrp x8, lCPI13_0 at PAGE
 ; CHECK-NEXT:    uaddlv.8b h1, v0
-; CHECK-NEXT:    stp q0, q0, [x0, #32]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v3
+; CHECK-NEXT:  Lloh3:
+; CHECK-NEXT:    ldr d3, [x8, lCPI13_0 at PAGEOFF]
 ; CHECK-NEXT:    mov.h v2[0], v1[0]
-; CHECK-NEXT:    bic.4h v2, #255, lsl #8
+; CHECK-NEXT:    ushll.4s v0, v0, #0
+; CHECK-NEXT:    ucvtf.4s v0, v0
+; CHECK-NEXT:    mov.d v2[1], v2[0]
+; CHECK-NEXT:    stp q0, q0, [x0, #32]
+; CHECK-NEXT:    tbl.8b v2, { v2 }, v3
 ; CHECK-NEXT:    ushll.4s v2, v2, #0
 ; CHECK-NEXT:    ucvtf.4s v2, v2
 ; CHECK-NEXT:    stp q2, q0, [x0]
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh2, Lloh3
+; CHECK-NEXT:    .loh AdrpAdrp Lloh0, Lloh2
+; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 
 entry:
   %vaddlv = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> zeroinitializer)
@@ -296,14 +311,20 @@ define void @insert_vec_v8i8_uaddlv_from_v8i8(ptr %0) {
 ; CHECK-LABEL: insert_vec_v8i8_uaddlv_from_v8i8:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
+; CHECK-NEXT:  Lloh4:
+; CHECK-NEXT:    adrp x8, lCPI14_0 at PAGE
 ; CHECK-NEXT:    stp xzr, xzr, [x0, #16]
+; CHECK-NEXT:  Lloh5:
+; CHECK-NEXT:    ldr d2, [x8, lCPI14_0 at PAGEOFF]
 ; CHECK-NEXT:    uaddlv.8b h1, v0
 ; CHECK-NEXT:    mov.h v0[0], v1[0]
-; CHECK-NEXT:    bic.4h v0, #7, lsl #8
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    ucvtf.4s v0, v0
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh4, Lloh5
 
 entry:
   %vaddlv = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> zeroinitializer)
@@ -426,14 +447,20 @@ define void @insert_vec_v8i8_uaddlv_from_v4i32(ptr %0) {
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
 ; CHECK-NEXT:    movi.2d v1, #0000000000000000
+; CHECK-NEXT:  Lloh6:
+; CHECK-NEXT:    adrp x8, lCPI20_0 at PAGE
 ; CHECK-NEXT:    stp xzr, xzr, [x0, #16]
 ; CHECK-NEXT:    uaddlv.4s d0, v0
 ; CHECK-NEXT:    mov.h v1[0], v0[0]
-; CHECK-NEXT:    bic.4h v1, #255, lsl #8
-; CHECK-NEXT:    ushll.4s v0, v1, #0
+; CHECK-NEXT:  Lloh7:
+; CHECK-NEXT:    ldr d0, [x8, lCPI20_0 at PAGEOFF]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    tbl.8b v0, { v1 }, v0
+; CHECK-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-NEXT:    ucvtf.4s v0, v0
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh6, Lloh7
 
 entry:
   %vaddlv = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> zeroinitializer)
@@ -449,15 +476,30 @@ define void @insert_vec_v16i8_uaddlv_from_v4i32(ptr %0) {
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
 ; CHECK-NEXT:    movi.2d v1, #0000000000000000
+; CHECK-NEXT:  Lloh8:
+; CHECK-NEXT:    adrp x8, lCPI21_1 at PAGE
+; CHECK-NEXT:  Lloh9:
+; CHECK-NEXT:    ldr d2, [x8, lCPI21_1 at PAGEOFF]
+; CHECK-NEXT:  Lloh10:
+; CHECK-NEXT:    adrp x8, lCPI21_0 at PAGE
 ; CHECK-NEXT:    uaddlv.4s d0, v0
 ; CHECK-NEXT:    mov.h v1[0], v0[0]
 ; CHECK-NEXT:    movi.2d v0, #0000000000000000
-; CHECK-NEXT:    bic.4h v1, #255, lsl #8
-; CHECK-NEXT:    stp q0, q0, [x0, #32]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:  Lloh11:
+; CHECK-NEXT:    ldr d2, [x8, lCPI21_0 at PAGEOFF]
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
+; CHECK-NEXT:    ushll.4s v0, v0, #0
+; CHECK-NEXT:    ucvtf.4s v0, v0
 ; CHECK-NEXT:    ushll.4s v1, v1, #0
 ; CHECK-NEXT:    ucvtf.4s v1, v1
+; CHECK-NEXT:    stp q0, q0, [x0, #32]
 ; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh10, Lloh11
+; CHECK-NEXT:    .loh AdrpAdrp Lloh8, Lloh10
+; CHECK-NEXT:    .loh AdrpLdr Lloh8, Lloh9
 
 entry:
   %vaddlv = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> zeroinitializer)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index dbc5417e23133d..87da6c81db4eb9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -192,9 +192,14 @@ define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_zext_v4i16_v4i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
-; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    ldr d2, [x1]
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-NEXT:    sshll v1.4s, v2.4h, #0
+; CHECK-NEXT:    mul v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    ret
   %load.A = load <4 x i8>, ptr %A
   %load.B = load <4 x i16>, ptr %B
@@ -207,27 +212,37 @@ define <4 x i32> @smull_zext_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 define <2 x i64> @smull_zext_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-NEON-LABEL: smull_zext_v2i32_v2i64:
 ; CHECK-NEON:       // %bb.0:
-; CHECK-NEON-NEXT:    ldr d0, [x1]
 ; CHECK-NEON-NEXT:    ldrh w9, [x0]
 ; CHECK-NEON-NEXT:    ldrh w10, [x0, #2]
-; CHECK-NEON-NEXT:    sshll v0.2d, v0.2s, #0
-; CHECK-NEON-NEXT:    fmov x11, d0
-; CHECK-NEON-NEXT:    mov x8, v0.d[1]
-; CHECK-NEON-NEXT:    smull x9, w9, w11
-; CHECK-NEON-NEXT:    smull x8, w10, w8
+; CHECK-NEON-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-NEON-NEXT:    ldr d2, [x1]
 ; CHECK-NEON-NEXT:    fmov d0, x9
+; CHECK-NEON-NEXT:    mov v0.d[1], x10
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-NEON-NEXT:    sshll v1.2d, v2.2s, #0
+; CHECK-NEON-NEXT:    fmov x11, d1
+; CHECK-NEON-NEXT:    mov x9, v1.d[1]
+; CHECK-NEON-NEXT:    fmov x10, d0
+; CHECK-NEON-NEXT:    mov x8, v0.d[1]
+; CHECK-NEON-NEXT:    mul x10, x10, x11
+; CHECK-NEON-NEXT:    mul x8, x8, x9
+; CHECK-NEON-NEXT:    fmov d0, x10
 ; CHECK-NEON-NEXT:    mov v0.d[1], x8
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: smull_zext_v2i32_v2i64:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldrh w8, [x0]
-; CHECK-SVE-NEXT:    ptrue p0.d, vl2
 ; CHECK-SVE-NEXT:    ldrh w9, [x0, #2]
-; CHECK-SVE-NEXT:    ldr d0, [x1]
-; CHECK-SVE-NEXT:    fmov d1, x8
-; CHECK-SVE-NEXT:    sshll v0.2d, v0.2s, #0
-; CHECK-SVE-NEXT:    mov v1.d[1], x9
+; CHECK-SVE-NEXT:    ptrue p0.d, vl2
+; CHECK-SVE-NEXT:    ldr d2, [x1]
+; CHECK-SVE-NEXT:    fmov d0, x8
+; CHECK-SVE-NEXT:    adrp x8, .LCPI7_0
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-SVE-NEXT:    mov v0.d[1], x9
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-SVE-NEXT:    sshll v1.2d, v2.2s, #0
 ; CHECK-SVE-NEXT:    mul z0.d, p0/m, z0.d, z1.d
 ; CHECK-SVE-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-SVE-NEXT:    ret
@@ -359,16 +374,20 @@ define <8 x i16> @amull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    ldr d0, [x0]
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
+; CHECK-NEON-NEXT:    adrp x8, .LCPI12_0
 ; CHECK-NEON-NEXT:    smull v0.8h, v0.8b, v1.8b
-; CHECK-NEON-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull_v8i8_v8i16:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldr d0, [x0]
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
+; CHECK-SVE-NEXT:    adrp x8, .LCPI12_0
 ; CHECK-SVE-NEXT:    smull v0.8h, v0.8b, v1.8b
-; CHECK-SVE-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull_v8i8_v8i16:
@@ -391,20 +410,22 @@ define <8 x i16> @amull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 define <4 x i32> @amull_v4i16_v4i32(ptr %A, ptr %B) nounwind {
 ; CHECK-NEON-LABEL: amull_v4i16_v4i32:
 ; CHECK-NEON:       // %bb.0:
-; CHECK-NEON-NEXT:    ldr d1, [x0]
-; CHECK-NEON-NEXT:    ldr d2, [x1]
-; CHECK-NEON-NEXT:    movi v0.2d, #0x00ffff0000ffff
-; CHECK-NEON-NEXT:    smull v1.4s, v1.4h, v2.4h
-; CHECK-NEON-NEXT:    and v0.16b, v1.16b, v0.16b
+; CHECK-NEON-NEXT:    ldr d0, [x0]
+; CHECK-NEON-NEXT:    ldr d1, [x1]
+; CHECK-NEON-NEXT:    adrp x8, .LCPI13_0
+; CHECK-NEON-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI13_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull_v4i16_v4i32:
 ; CHECK-SVE:       // %bb.0:
-; CHECK-SVE-NEXT:    ldr d1, [x0]
-; CHECK-SVE-NEXT:    ldr d2, [x1]
-; CHECK-SVE-NEXT:    movi v0.2d, #0x00ffff0000ffff
-; CHECK-SVE-NEXT:    smull v1.4s, v1.4h, v2.4h
-; CHECK-SVE-NEXT:    and v0.16b, v1.16b, v0.16b
+; CHECK-SVE-NEXT:    ldr d0, [x0]
+; CHECK-SVE-NEXT:    ldr d1, [x1]
+; CHECK-SVE-NEXT:    adrp x8, .LCPI13_0
+; CHECK-SVE-NEXT:    smull v0.4s, v0.4h, v1.4h
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI13_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull_v4i16_v4i32:
@@ -429,18 +450,20 @@ define <2 x i64> @amull_v2i32_v2i64(ptr %A, ptr %B) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    ldr d1, [x0]
 ; CHECK-NEON-NEXT:    ldr d2, [x1]
-; CHECK-NEON-NEXT:    movi v0.2d, #0x000000ffffffff
+; CHECK-NEON-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEON-NEXT:    smull v1.2d, v1.2s, v2.2s
-; CHECK-NEON-NEXT:    and v0.16b, v1.16b, v0.16b
+; CHECK-NEON-NEXT:    rev64 v1.4s, v1.4s
+; CHECK-NEON-NEXT:    trn2 v0.4s, v1.4s, v0.4s
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull_v2i32_v2i64:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldr d1, [x0]
 ; CHECK-SVE-NEXT:    ldr d2, [x1]
-; CHECK-SVE-NEXT:    movi v0.2d, #0x000000ffffffff
+; CHECK-SVE-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-SVE-NEXT:    smull v1.2d, v1.2s, v2.2s
-; CHECK-SVE-NEXT:    and v0.16b, v1.16b, v0.16b
+; CHECK-SVE-NEXT:    rev64 v1.4s, v1.4s
+; CHECK-SVE-NEXT:    trn2 v0.4s, v1.4s, v0.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull_v2i32_v2i64:
@@ -573,18 +596,22 @@ define <8 x i16> @amlal_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    ldr q0, [x0]
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
+; CHECK-NEON-NEXT:    adrp x8, .LCPI21_0
 ; CHECK-NEON-NEXT:    ldr d2, [x2]
 ; CHECK-NEON-NEXT:    smlal v0.8h, v1.8b, v2.8b
-; CHECK-NEON-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI21_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amlal_v8i8_v8i16:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldr q0, [x0]
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
+; CHECK-SVE-NEXT:    adrp x8, .LCPI21_0
 ; CHECK-SVE-NEXT:    ldr d2, [x2]
 ; CHECK-SVE-NEXT:    smlal v0.8h, v1.8b, v2.8b
-; CHECK-SVE-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI21_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amlal_v8i8_v8i16:
@@ -612,20 +639,22 @@ define <4 x i32> @amlal_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    ldr q0, [x0]
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
+; CHECK-NEON-NEXT:    adrp x8, .LCPI22_0
 ; CHECK-NEON-NEXT:    ldr d2, [x2]
 ; CHECK-NEON-NEXT:    smlal v0.4s, v1.4h, v2.4h
-; CHECK-NEON-NEXT:    movi v1.2d, #0x00ffff0000ffff
-; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI22_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amlal_v4i16_v4i32:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldr q0, [x0]
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
+; CHECK-SVE-NEXT:    adrp x8, .LCPI22_0
 ; CHECK-SVE-NEXT:    ldr d2, [x2]
 ; CHECK-SVE-NEXT:    smlal v0.4s, v1.4h, v2.4h
-; CHECK-SVE-NEXT:    movi v1.2d, #0x00ffff0000ffff
-; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI22_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amlal_v4i16_v4i32:
@@ -655,8 +684,9 @@ define <2 x i64> @amlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
 ; CHECK-NEON-NEXT:    ldr d2, [x2]
 ; CHECK-NEON-NEXT:    smlal v0.2d, v1.2s, v2.2s
-; CHECK-NEON-NEXT:    movi v1.2d, #0x000000ffffffff
-; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEON-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEON-NEXT:    trn2 v0.4s, v0.4s, v1.4s
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amlal_v2i32_v2i64:
@@ -665,8 +695,9 @@ define <2 x i64> @amlal_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
 ; CHECK-SVE-NEXT:    ldr d2, [x2]
 ; CHECK-SVE-NEXT:    smlal v0.2d, v1.2s, v2.2s
-; CHECK-SVE-NEXT:    movi v1.2d, #0x000000ffffffff
-; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SVE-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-SVE-NEXT:    trn2 v0.4s, v0.4s, v1.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amlal_v2i32_v2i64:
@@ -802,18 +833,22 @@ define <8 x i16> @amlsl_v8i8_v8i16(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    ldr q0, [x0]
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
+; CHECK-NEON-NEXT:    adrp x8, .LCPI30_0
 ; CHECK-NEON-NEXT:    ldr d2, [x2]
 ; CHECK-NEON-NEXT:    smlsl v0.8h, v1.8b, v2.8b
-; CHECK-NEON-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI30_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amlsl_v8i8_v8i16:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldr q0, [x0]
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
+; CHECK-SVE-NEXT:    adrp x8, .LCPI30_0
 ; CHECK-SVE-NEXT:    ldr d2, [x2]
 ; CHECK-SVE-NEXT:    smlsl v0.8h, v1.8b, v2.8b
-; CHECK-SVE-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI30_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amlsl_v8i8_v8i16:
@@ -841,20 +876,22 @@ define <4 x i32> @amlsl_v4i16_v4i32(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    ldr q0, [x0]
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
+; CHECK-NEON-NEXT:    adrp x8, .LCPI31_0
 ; CHECK-NEON-NEXT:    ldr d2, [x2]
 ; CHECK-NEON-NEXT:    smlsl v0.4s, v1.4h, v2.4h
-; CHECK-NEON-NEXT:    movi v1.2d, #0x00ffff0000ffff
-; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI31_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amlsl_v4i16_v4i32:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    ldr q0, [x0]
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
+; CHECK-SVE-NEXT:    adrp x8, .LCPI31_0
 ; CHECK-SVE-NEXT:    ldr d2, [x2]
 ; CHECK-SVE-NEXT:    smlsl v0.4s, v1.4h, v2.4h
-; CHECK-SVE-NEXT:    movi v1.2d, #0x00ffff0000ffff
-; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI31_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amlsl_v4i16_v4i32:
@@ -884,8 +921,9 @@ define <2 x i64> @amlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-NEON-NEXT:    ldr d1, [x1]
 ; CHECK-NEON-NEXT:    ldr d2, [x2]
 ; CHECK-NEON-NEXT:    smlsl v0.2d, v1.2s, v2.2s
-; CHECK-NEON-NEXT:    movi v1.2d, #0x000000ffffffff
-; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEON-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEON-NEXT:    trn2 v0.4s, v0.4s, v1.4s
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amlsl_v2i32_v2i64:
@@ -894,8 +932,9 @@ define <2 x i64> @amlsl_v2i32_v2i64(ptr %A, ptr %B, ptr %C) nounwind {
 ; CHECK-SVE-NEXT:    ldr d1, [x1]
 ; CHECK-SVE-NEXT:    ldr d2, [x2]
 ; CHECK-SVE-NEXT:    smlsl v0.2d, v1.2s, v2.2s
-; CHECK-SVE-NEXT:    movi v1.2d, #0x000000ffffffff
-; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SVE-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-SVE-NEXT:    trn2 v0.4s, v0.4s, v1.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amlsl_v2i32_v2i64:
@@ -1154,15 +1193,19 @@ define <8 x i16> @amull_extvec_v8i8_v8i16(<8 x i8> %arg) nounwind {
 ; CHECK-NEON-LABEL: amull_extvec_v8i8_v8i16:
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    movi v1.8b, #12
+; CHECK-NEON-NEXT:    adrp x8, .LCPI41_0
 ; CHECK-NEON-NEXT:    smull v0.8h, v0.8b, v1.8b
-; CHECK-NEON-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI41_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull_extvec_v8i8_v8i16:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    movi v1.8b, #12
+; CHECK-SVE-NEXT:    adrp x8, .LCPI41_0
 ; CHECK-SVE-NEXT:    smull v0.8h, v0.8b, v1.8b
-; CHECK-SVE-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI41_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull_extvec_v8i8_v8i16:
@@ -1184,18 +1227,20 @@ define <4 x i32> @amull_extvec_v4i16_v4i32(<4 x i16> %arg) nounwind {
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    mov w8, #1234 // =0x4d2
 ; CHECK-NEON-NEXT:    dup v1.4h, w8
+; CHECK-NEON-NEXT:    adrp x8, .LCPI42_0
 ; CHECK-NEON-NEXT:    smull v0.4s, v0.4h, v1.4h
-; CHECK-NEON-NEXT:    movi v1.2d, #0x00ffff0000ffff
-; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    ldr q1, [x8, :lo12:.LCPI42_0]
+; CHECK-NEON-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull_extvec_v4i16_v4i32:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    mov w8, #1234 // =0x4d2
 ; CHECK-SVE-NEXT:    dup v1.4h, w8
+; CHECK-SVE-NEXT:    adrp x8, .LCPI42_0
 ; CHECK-SVE-NEXT:    smull v0.4s, v0.4h, v1.4h
-; CHECK-SVE-NEXT:    movi v1.2d, #0x00ffff0000ffff
-; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    ldr q1, [x8, :lo12:.LCPI42_0]
+; CHECK-SVE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull_extvec_v4i16_v4i32:
@@ -1219,8 +1264,9 @@ define <2 x i64> @amull_extvec_v2i32_v2i64(<2 x i32> %arg) nounwind {
 ; CHECK-NEON-NEXT:    mov w8, #1234 // =0x4d2
 ; CHECK-NEON-NEXT:    dup v1.2s, w8
 ; CHECK-NEON-NEXT:    smull v0.2d, v0.2s, v1.2s
-; CHECK-NEON-NEXT:    movi v1.2d, #0x000000ffffffff
-; CHECK-NEON-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEON-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEON-NEXT:    trn2 v0.4s, v0.4s, v1.4s
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull_extvec_v2i32_v2i64:
@@ -1228,8 +1274,9 @@ define <2 x i64> @amull_extvec_v2i32_v2i64(<2 x i32> %arg) nounwind {
 ; CHECK-SVE-NEXT:    mov w8, #1234 // =0x4d2
 ; CHECK-SVE-NEXT:    dup v1.2s, w8
 ; CHECK-SVE-NEXT:    smull v0.2d, v0.2s, v1.2s
-; CHECK-SVE-NEXT:    movi v1.2d, #0x000000ffffffff
-; CHECK-SVE-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SVE-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-SVE-NEXT:    trn2 v0.4s, v0.4s, v1.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull_extvec_v2i32_v2i64:
@@ -1509,19 +1556,21 @@ define <16 x i16> @amull2_i8(<16 x i8> %arg1, <16 x i8> %arg2) {
 ; CHECK-NEON-LABEL: amull2_i8:
 ; CHECK-NEON:       // %bb.0:
 ; CHECK-NEON-NEXT:    smull v2.8h, v0.8b, v1.8b
-; CHECK-NEON-NEXT:    smull2 v1.8h, v0.16b, v1.16b
-; CHECK-NEON-NEXT:    bic v2.8h, #255, lsl #8
-; CHECK-NEON-NEXT:    bic v1.8h, #255, lsl #8
-; CHECK-NEON-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEON-NEXT:    smull2 v0.8h, v0.16b, v1.16b
+; CHECK-NEON-NEXT:    adrp x8, .LCPI53_0
+; CHECK-NEON-NEXT:    ldr q3, [x8, :lo12:.LCPI53_0]
+; CHECK-NEON-NEXT:    tbl v1.16b, { v0.16b }, v3.16b
+; CHECK-NEON-NEXT:    tbl v0.16b, { v2.16b }, v3.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull2_i8:
 ; CHECK-SVE:       // %bb.0:
 ; CHECK-SVE-NEXT:    smull v2.8h, v0.8b, v1.8b
-; CHECK-SVE-NEXT:    smull2 v1.8h, v0.16b, v1.16b
-; CHECK-SVE-NEXT:    bic v2.8h, #255, lsl #8
-; CHECK-SVE-NEXT:    bic v1.8h, #255, lsl #8
-; CHECK-SVE-NEXT:    mov v0.16b, v2.16b
+; CHECK-SVE-NEXT:    smull2 v0.8h, v0.16b, v1.16b
+; CHECK-SVE-NEXT:    adrp x8, .LCPI53_0
+; CHECK-SVE-NEXT:    ldr q3, [x8, :lo12:.LCPI53_0]
+; CHECK-SVE-NEXT:    tbl v1.16b, { v0.16b }, v3.16b
+; CHECK-SVE-NEXT:    tbl v0.16b, { v2.16b }, v3.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull2_i8:
@@ -1542,20 +1591,22 @@ define <16 x i16> @amull2_i8(<16 x i8> %arg1, <16 x i8> %arg2) {
 define <8 x i32> @amull2_i16(<8 x i16> %arg1, <8 x i16> %arg2) {
 ; CHECK-NEON-LABEL: amull2_i16:
 ; CHECK-NEON:       // %bb.0:
-; CHECK-NEON-NEXT:    movi v2.2d, #0x00ffff0000ffff
-; CHECK-NEON-NEXT:    smull v3.4s, v0.4h, v1.4h
+; CHECK-NEON-NEXT:    smull v2.4s, v0.4h, v1.4h
 ; CHECK-NEON-NEXT:    smull2 v0.4s, v0.8h, v1.8h
-; CHECK-NEON-NEXT:    and v1.16b, v0.16b, v2.16b
-; CHECK-NEON-NEXT:    and v0.16b, v3.16b, v2.16b
+; CHECK-NEON-NEXT:    adrp x8, .LCPI54_0
+; CHECK-NEON-NEXT:    ldr q3, [x8, :lo12:.LCPI54_0]
+; CHECK-NEON-NEXT:    tbl v1.16b, { v0.16b }, v3.16b
+; CHECK-NEON-NEXT:    tbl v0.16b, { v2.16b }, v3.16b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull2_i16:
 ; CHECK-SVE:       // %bb.0:
-; CHECK-SVE-NEXT:    movi v2.2d, #0x00ffff0000ffff
-; CHECK-SVE-NEXT:    smull v3.4s, v0.4h, v1.4h
+; CHECK-SVE-NEXT:    smull v2.4s, v0.4h, v1.4h
 ; CHECK-SVE-NEXT:    smull2 v0.4s, v0.8h, v1.8h
-; CHECK-SVE-NEXT:    and v1.16b, v0.16b, v2.16b
-; CHECK-SVE-NEXT:    and v0.16b, v3.16b, v2.16b
+; CHECK-SVE-NEXT:    adrp x8, .LCPI54_0
+; CHECK-SVE-NEXT:    ldr q3, [x8, :lo12:.LCPI54_0]
+; CHECK-SVE-NEXT:    tbl v1.16b, { v0.16b }, v3.16b
+; CHECK-SVE-NEXT:    tbl v0.16b, { v2.16b }, v3.16b
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull2_i16:
@@ -1576,20 +1627,24 @@ define <8 x i32> @amull2_i16(<8 x i16> %arg1, <8 x i16> %arg2) {
 define <4 x i64> @amull2_i32(<4 x i32> %arg1, <4 x i32> %arg2) {
 ; CHECK-NEON-LABEL: amull2_i32:
 ; CHECK-NEON:       // %bb.0:
-; CHECK-NEON-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEON-NEXT:    smull v3.2d, v0.2s, v1.2s
 ; CHECK-NEON-NEXT:    smull2 v0.2d, v0.4s, v1.4s
-; CHECK-NEON-NEXT:    and v1.16b, v0.16b, v2.16b
-; CHECK-NEON-NEXT:    and v0.16b, v3.16b, v2.16b
+; CHECK-NEON-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEON-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEON-NEXT:    rev64 v3.4s, v3.4s
+; CHECK-NEON-NEXT:    trn2 v1.4s, v0.4s, v2.4s
+; CHECK-NEON-NEXT:    trn2 v0.4s, v3.4s, v2.4s
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: amull2_i32:
 ; CHECK-SVE:       // %bb.0:
-; CHECK-SVE-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-SVE-NEXT:    smull v3.2d, v0.2s, v1.2s
 ; CHECK-SVE-NEXT:    smull2 v0.2d, v0.4s, v1.4s
-; CHECK-SVE-NEXT:    and v1.16b, v0.16b, v2.16b
-; CHECK-SVE-NEXT:    and v0.16b, v3.16b, v2.16b
+; CHECK-SVE-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-SVE-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-SVE-NEXT:    rev64 v3.4s, v3.4s
+; CHECK-SVE-NEXT:    trn2 v1.4s, v0.4s, v2.4s
+; CHECK-SVE-NEXT:    trn2 v0.4s, v3.4s, v2.4s
 ; CHECK-SVE-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: amull2_i32:
@@ -1611,14 +1666,12 @@ define <4 x i64> @amull2_i32(<4 x i32> %arg1, <4 x i32> %arg2) {
 define <8 x i16> @umull_and_v8i16(<8 x i8> %src1, <8 x i16> %src2) {
 ; CHECK-NEON-LABEL: umull_and_v8i16:
 ; CHECK-NEON:       // %bb.0: // %entry
-; CHECK-NEON-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-NEON-NEXT:    xtn v1.8b, v1.8h
 ; CHECK-NEON-NEXT:    umull v0.8h, v0.8b, v1.8b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: umull_and_v8i16:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-SVE-NEXT:    xtn v1.8b, v1.8h
 ; CHECK-SVE-NEXT:    umull v0.8h, v0.8b, v1.8b
 ; CHECK-SVE-NEXT:    ret
@@ -1640,14 +1693,12 @@ entry:
 define <8 x i16> @umull_and_v8i16_c(<8 x i8> %src1, <8 x i16> %src2) {
 ; CHECK-NEON-LABEL: umull_and_v8i16_c:
 ; CHECK-NEON:       // %bb.0: // %entry
-; CHECK-NEON-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-NEON-NEXT:    xtn v1.8b, v1.8h
 ; CHECK-NEON-NEXT:    umull v0.8h, v1.8b, v0.8b
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: umull_and_v8i16_c:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-SVE-NEXT:    xtn v1.8b, v1.8h
 ; CHECK-SVE-NEXT:    umull v0.8h, v1.8b, v0.8b
 ; CHECK-SVE-NEXT:    ret
@@ -1710,7 +1761,6 @@ define <8 x i16> @umull_smaller_v8i16(<8 x i4> %src1, <8 x i16> %src2) {
 ; CHECK-NEON-LABEL: umull_smaller_v8i16:
 ; CHECK-NEON:       // %bb.0: // %entry
 ; CHECK-NEON-NEXT:    movi v2.8b, #15
-; CHECK-NEON-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-NEON-NEXT:    xtn v1.8b, v1.8h
 ; CHECK-NEON-NEXT:    and v0.8b, v0.8b, v2.8b
 ; CHECK-NEON-NEXT:    umull v0.8h, v0.8b, v1.8b
@@ -1719,7 +1769,6 @@ define <8 x i16> @umull_smaller_v8i16(<8 x i4> %src1, <8 x i16> %src2) {
 ; CHECK-SVE-LABEL: umull_smaller_v8i16:
 ; CHECK-SVE:       // %bb.0: // %entry
 ; CHECK-SVE-NEXT:    movi v2.8b, #15
-; CHECK-SVE-NEXT:    bic v1.8h, #255, lsl #8
 ; CHECK-SVE-NEXT:    xtn v1.8b, v1.8h
 ; CHECK-SVE-NEXT:    and v0.8b, v0.8b, v2.8b
 ; CHECK-SVE-NEXT:    umull v0.8h, v0.8b, v1.8b
@@ -1744,16 +1793,18 @@ entry:
 define <4 x i32> @umull_and_v4i32(<4 x i16> %src1, <4 x i32> %src2) {
 ; CHECK-NEON-LABEL: umull_and_v4i32:
 ; CHECK-NEON:       // %bb.0: // %entry
-; CHECK-NEON-NEXT:    movi v2.2d, #0x0000ff000000ff
-; CHECK-NEON-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEON-NEXT:    adrp x8, .LCPI61_0
+; CHECK-NEON-NEXT:    ldr q2, [x8, :lo12:.LCPI61_0]
+; CHECK-NEON-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
 ; CHECK-NEON-NEXT:    xtn v1.4h, v1.4s
 ; CHECK-NEON-NEXT:    umull v0.4s, v0.4h, v1.4h
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: umull_and_v4i32:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    movi v2.2d, #0x0000ff000000ff
-; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-SVE-NEXT:    adrp x8, .LCPI61_0
+; CHECK-SVE-NEXT:    ldr q2, [x8, :lo12:.LCPI61_0]
+; CHECK-SVE-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
 ; CHECK-SVE-NEXT:    xtn v1.4h, v1.4s
 ; CHECK-SVE-NEXT:    umull v0.4s, v0.4h, v1.4h
 ; CHECK-SVE-NEXT:    ret
@@ -1775,9 +1826,10 @@ entry:
 define <8 x i32> @umull_and_v8i32(<8 x i16> %src1, <8 x i32> %src2) {
 ; CHECK-NEON-LABEL: umull_and_v8i32:
 ; CHECK-NEON:       // %bb.0: // %entry
-; CHECK-NEON-NEXT:    movi v3.2d, #0x0000ff000000ff
-; CHECK-NEON-NEXT:    and v2.16b, v2.16b, v3.16b
-; CHECK-NEON-NEXT:    and v1.16b, v1.16b, v3.16b
+; CHECK-NEON-NEXT:    adrp x8, .LCPI62_0
+; CHECK-NEON-NEXT:    ldr q3, [x8, :lo12:.LCPI62_0]
+; CHECK-NEON-NEXT:    tbl v2.16b, { v2.16b }, v3.16b
+; CHECK-NEON-NEXT:    tbl v1.16b, { v1.16b }, v3.16b
 ; CHECK-NEON-NEXT:    uzp1 v2.8h, v1.8h, v2.8h
 ; CHECK-NEON-NEXT:    umull2 v1.4s, v0.8h, v2.8h
 ; CHECK-NEON-NEXT:    umull v0.4s, v0.4h, v2.4h
@@ -1785,9 +1837,10 @@ define <8 x i32> @umull_and_v8i32(<8 x i16> %src1, <8 x i32> %src2) {
 ;
 ; CHECK-SVE-LABEL: umull_and_v8i32:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    movi v3.2d, #0x0000ff000000ff
-; CHECK-SVE-NEXT:    and v2.16b, v2.16b, v3.16b
-; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v3.16b
+; CHECK-SVE-NEXT:    adrp x8, .LCPI62_0
+; CHECK-SVE-NEXT:    ldr q3, [x8, :lo12:.LCPI62_0]
+; CHECK-SVE-NEXT:    tbl v2.16b, { v2.16b }, v3.16b
+; CHECK-SVE-NEXT:    tbl v1.16b, { v1.16b }, v3.16b
 ; CHECK-SVE-NEXT:    uzp1 v2.8h, v1.8h, v2.8h
 ; CHECK-SVE-NEXT:    umull2 v1.4s, v0.8h, v2.8h
 ; CHECK-SVE-NEXT:    umull v0.4s, v0.4h, v2.4h
@@ -1848,16 +1901,18 @@ entry:
 define <2 x i64> @umull_and_v2i64(<2 x i32> %src1, <2 x i64> %src2) {
 ; CHECK-NEON-LABEL: umull_and_v2i64:
 ; CHECK-NEON:       // %bb.0: // %entry
-; CHECK-NEON-NEXT:    movi v2.2d, #0x000000000000ff
-; CHECK-NEON-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEON-NEXT:    adrp x8, .LCPI64_0
+; CHECK-NEON-NEXT:    ldr q2, [x8, :lo12:.LCPI64_0]
+; CHECK-NEON-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
 ; CHECK-NEON-NEXT:    xtn v1.2s, v1.2d
 ; CHECK-NEON-NEXT:    umull v0.2d, v0.2s, v1.2s
 ; CHECK-NEON-NEXT:    ret
 ;
 ; CHECK-SVE-LABEL: umull_and_v2i64:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    movi v2.2d, #0x000000000000ff
-; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-SVE-NEXT:    adrp x8, .LCPI64_0
+; CHECK-SVE-NEXT:    ldr q2, [x8, :lo12:.LCPI64_0]
+; CHECK-SVE-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
 ; CHECK-SVE-NEXT:    xtn v1.2s, v1.2d
 ; CHECK-SVE-NEXT:    umull v0.2d, v0.2s, v1.2s
 ; CHECK-SVE-NEXT:    ret
@@ -1888,9 +1943,10 @@ entry:
 define <4 x i64> @umull_and_v4i64(<4 x i32> %src1, <4 x i64> %src2) {
 ; CHECK-NEON-LABEL: umull_and_v4i64:
 ; CHECK-NEON:       // %bb.0: // %entry
-; CHECK-NEON-NEXT:    movi v3.2d, #0x000000000000ff
-; CHECK-NEON-NEXT:    and v2.16b, v2.16b, v3.16b
-; CHECK-NEON-NEXT:    and v1.16b, v1.16b, v3.16b
+; CHECK-NEON-NEXT:    adrp x8, .LCPI65_0
+; CHECK-NEON-NEXT:    ldr q3, [x8, :lo12:.LCPI65_0]
+; CHECK-NEON-NEXT:    tbl v2.16b, { v2.16b }, v3.16b
+; CHECK-NEON-NEXT:    tbl v1.16b, { v1.16b }, v3.16b
 ; CHECK-NEON-NEXT:    uzp1 v2.4s, v1.4s, v2.4s
 ; CHECK-NEON-NEXT:    umull2 v1.2d, v0.4s, v2.4s
 ; CHECK-NEON-NEXT:    umull v0.2d, v0.2s, v2.2s
@@ -1898,9 +1954,10 @@ define <4 x i64> @umull_and_v4i64(<4 x i32> %src1, <4 x i64> %src2) {
 ;
 ; CHECK-SVE-LABEL: umull_and_v4i64:
 ; CHECK-SVE:       // %bb.0: // %entry
-; CHECK-SVE-NEXT:    movi v3.2d, #0x000000000000ff
-; CHECK-SVE-NEXT:    and v2.16b, v2.16b, v3.16b
-; CHECK-SVE-NEXT:    and v1.16b, v1.16b, v3.16b
+; CHECK-SVE-NEXT:    adrp x8, .LCPI65_0
+; CHECK-SVE-NEXT:    ldr q3, [x8, :lo12:.LCPI65_0]
+; CHECK-SVE-NEXT:    tbl v2.16b, { v2.16b }, v3.16b
+; CHECK-SVE-NEXT:    tbl v1.16b, { v1.16b }, v3.16b
 ; CHECK-SVE-NEXT:    uzp1 v2.4s, v1.4s, v2.4s
 ; CHECK-SVE-NEXT:    umull2 v1.2d, v0.4s, v2.4s
 ; CHECK-SVE-NEXT:    umull v0.2d, v0.2s, v2.2s
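
The recurring change in aarch64-smull.ll above is that a lane-masking
and/bic following a widening multiply is now selected as a single tbl
driven by an index vector loaded from the constant pool. The .LCPI*_0
contents are not visible in the diff, but for a one-register tbl any
index byte of 16 or more writes zero, so a mask that keeps the low byte
of every halfword corresponds to a shuffle of the shape sketched below
(hypothetical function name; the exact constants the patch emits may
differ):

  ; Keep byte 0 of each i16 lane and zero byte 1; indices that read from
  ; the zero vector model tbl's out-of-range-index-produces-zero rule.
  define <16 x i8> @keep_low_bytes(<16 x i8> %v) {
    %r = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer,
         <16 x i32> <i32 0, i32 16, i32 2, i32 16, i32 4, i32 16,
                     i32 6, i32 16, i32 8, i32 16, i32 10, i32 16,
                     i32 12, i32 16, i32 14, i32 16>
    ret <16 x i8> %r
  }
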
diff --git a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
index 28629a8c2f0dd3..73cf5bbaedc385 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-tbz.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnueabi < %s | FileCheck %s
 ; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnueabi -cgp-verify-bfi-updates=true < %s | FileCheck %s
 
@@ -9,6 +10,15 @@
 
 ; CHECK: b
 define void @test1(i64 %A, i64 %B) {
+; CHECK-LABEL: test1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    tbz w1, #3, .LBB0_3
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    tbz w0, #2, .LBB0_3
+; CHECK-NEXT:  // %bb.2: // %if.then2
+; CHECK-NEXT:    b foo
+; CHECK-NEXT:  .LBB0_3: // %if.end3
+; CHECK-NEXT:    ret
 entry:
   %and = and i64 %A, 4
   %notlhs = icmp eq i64 %and, 0
@@ -32,6 +42,16 @@ if.end3:                                          ; preds = %if.then2, %entry
 ; CHECK-NOT: cbz x{{[0-9]+}}, .LBB1_3
 
 define void @test2(i64 %A, ptr readonly %B) #0 {
+; CHECK-LABEL: test2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x1, .LBB1_3
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    tbz w0, #3, .LBB1_3
+; CHECK-NEXT:  // %bb.2: // %if.then2
+; CHECK-NEXT:    ldr x1, [x1]
+; CHECK-NEXT:    b foo
+; CHECK-NEXT:  .LBB1_3: // %if.end3
+; CHECK-NEXT:    ret
 entry:
   %tobool = icmp eq ptr %B, null
   %and = and i64 %A, 8
@@ -55,6 +75,15 @@ if.end3:                                          ; preds = %entry, %if.then2
 ; CHECK-NOT: and x{{[0-9]+}}, x[[REG1]]
 ; CHECK-NOT: cbz x{{[0-9]+}}, .LBB2_3
 define void @test3(i64 %A, i64 %B) {
+; CHECK-LABEL: test3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    tbz w1, #3, .LBB2_3
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    tbz w0, #28, .LBB2_3
+; CHECK-NEXT:  // %bb.2: // %if.end3
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_3: // %if.then2
+; CHECK-NEXT:    b foo
 entry:
   %shift = shl i64 1, 28
   %and = and i64 %A, %shift
@@ -78,6 +107,15 @@ if.end3:                                          ; preds = %if.then2, %entry
 ; CHECK-NOT: and x{{[0-9]+}}, x[[REG1]]
 ; CHECK-NOT: cbz x{{[0-9]+}}, .LBB2_3
 define void @test4(i64 %A, i64 %B) {
+; CHECK-LABEL: test4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    tbz w1, #3, .LBB3_3
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    tbz x0, #35, .LBB3_3
+; CHECK-NEXT:  // %bb.2: // %if.end3
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_3: // %if.then2
+; CHECK-NEXT:    b foo
 entry:
   %shift = shl i64 1, 35
   %and = and i64 %A, %shift
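
A note on the regenerated aarch64-tbz.ll checks: the bit-test branch
picks its register form from the bit number, so test3's bit 28 uses the
w form (tbz w0, #28) while test4's bit 35 must use the x form
(tbz x0, #35). A minimal sketch of the bit-35 case (hypothetical
function name; @foo as in the test file):

  declare void @foo()

  define void @bit35(i64 %A) {
  entry:
    %and = and i64 %A, 34359738368 ; 1 << 35
    %tobool = icmp eq i64 %and, 0
    br i1 %tobool, label %if.end, label %if.then
  if.then:
    call void @foo()
    br label %if.end
  if.end:
    ret void
  }
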
diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
index a65c5d66677946..fc2877a2d63392 100644
--- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll
+++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll
@@ -457,14 +457,20 @@ define <4 x i1> @lane_mask_v4i1_i8(i8 %index, i8 %TC) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    dup v0.4h, w0
 ; CHECK-NEXT:    adrp x8, .LCPI26_0
-; CHECK-NEXT:    movi d2, #0xff00ff00ff00ff
+; CHECK-NEXT:    movi d3, #0xff00ff00ff00ff
 ; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI26_0]
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
-; CHECK-NEXT:    dup v1.4h, w1
-; CHECK-NEXT:    umin v0.4h, v0.4h, v2.4h
-; CHECK-NEXT:    bic v1.4h, #255, lsl #8
-; CHECK-NEXT:    cmhi v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    adrp x8, .LCPI26_1
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI26_1]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT:    add v0.4h, v0.4h, v2.4h
+; CHECK-NEXT:    dup v2.4h, w1
+; CHECK-NEXT:    umin v0.4h, v0.4h, v3.4h
+; CHECK-NEXT:    mov v2.d[1], v2.d[0]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v1.8b
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT:    cmhi v0.4h, v2.4h, v0.4h
 ; CHECK-NEXT:    ret
   %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i8(i8 %index, i8 %TC)
   ret <4 x i1> %active.lane.mask
@@ -473,16 +479,22 @@ define <4 x i1> @lane_mask_v4i1_i8(i8 %index, i8 %TC) {
 define <2 x i1> @lane_mask_v2i1_i8(i8 %index, i8 %TC) {
 ; CHECK-LABEL: lane_mask_v2i1_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d0, #0x0000ff000000ff
-; CHECK-NEXT:    dup v1.2s, w0
+; CHECK-NEXT:    dup v0.2s, w0
 ; CHECK-NEXT:    adrp x8, .LCPI27_0
-; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI27_0]
-; CHECK-NEXT:    dup v3.2s, w1
-; CHECK-NEXT:    and v1.8b, v1.8b, v0.8b
-; CHECK-NEXT:    add v1.2s, v1.2s, v2.2s
-; CHECK-NEXT:    umin v1.2s, v1.2s, v0.2s
-; CHECK-NEXT:    and v0.8b, v3.8b, v0.8b
-; CHECK-NEXT:    cmhi v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    movi d3, #0x0000ff000000ff
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI27_0]
+; CHECK-NEXT:    adrp x8, .LCPI27_1
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI27_1]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT:    add v0.2s, v0.2s, v2.2s
+; CHECK-NEXT:    dup v2.2s, w1
+; CHECK-NEXT:    umin v0.2s, v0.2s, v3.2s
+; CHECK-NEXT:    mov v2.d[1], v2.d[0]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v1.8b
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-NEXT:    cmhi v0.2s, v2.2s, v0.2s
 ; CHECK-NEXT:    ret
   %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i8(i8 %index, i8 %TC)
   ret <2 x i1> %active.lane.mask
diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
index 3b6c4fa875e604..ffbffbc3997f49 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
 
 ; Can't fold the increment by 1<<12 into a post-increment load
@@ -7,14 +8,28 @@
 
 define void @t() nounwind ssp {
 ; CHECK-LABEL: t:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x9, _test_data@GOTPAGE
+; CHECK-NEXT:    mov x10, #268435456 ; =0x10000000
+; CHECK-NEXT:    mov x8, xzr
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr x9, [x9, _test_data@GOTPAGEOFF]
+; CHECK-NEXT:    movk x10, #8, lsl #32
+; CHECK-NEXT:  LBB0_1: ; %for.body
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldr w11, [x8, x10]
+; CHECK-NEXT:    add x8, x8, #1, lsl #12 ; =4096
+; CHECK-NEXT:    cmp w8, #200, lsl #12 ; =819200
+; CHECK-NEXT:    str w11, [x9]
+; CHECK-NEXT:    b.ne LBB0_1
+; CHECK-NEXT:  ; %bb.2: ; %for.end
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh0, Lloh1
 entry:
   br label %for.body
 
 for.body:
-; CHECK: for.body
-; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, x{{[0-9]+}}]
-; CHECK: add x[[REG:[0-9]+]],
-; CHECK:                      x[[REG]], #1, lsl  #12
   %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
   %0 = shl nsw i64 %indvars.iv, 12
   %add = add nsw i64 %0, 34628173824
diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
index a509100292576c..2ab8678a3db5fd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-DAGCombineVectorExtract.ll
@@ -1,9 +1,11 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
 define i32 @foo(<4 x i32> %a, i32 %n) nounwind {
 ; CHECK-LABEL: foo:
-; CHECK: fmov w0, s0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %b = bitcast <4 x i32> %a to i128
   %c = trunc i128 %b to i32
   ret i32 %c
@@ -11,8 +13,9 @@ define i32 @foo(<4 x i32> %a, i32 %n) nounwind {
 
 define i64 @bar(<2 x i64> %a, i64 %n) nounwind {
 ; CHECK-LABEL: bar:
-; CHECK: fmov x0, d0
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
   %b = bitcast <2 x i64> %a to i128
   %c = trunc i128 %b to i64
   ret i64 %c
diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
index b87fe926fb32c0..afedd64704d94d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-07-11-InstrEmitterBug.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios
 ; rdar://11849816
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll b/llvm/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
index 226026faf32031..10d27ef6ec0e09 100644
--- a/llvm/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-AnInfiniteLoopInDAGCombine.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi
 
 ; This test case tests an infinite loop bug in DAG combiner.
diff --git a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
index 03393ad6aef5c8..db5a1c0a649133 100644
--- a/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-aapcs.ll
@@ -1,57 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-linux-gnu -enable-misched=false -disable-post-ra < %s | FileCheck %s
 
 @var = dso_local global i32 0, align 4
 
 ; CHECK-LABEL: @test_i128_align
 define dso_local i128 @test_i128_align(i32, i128 %arg, i32 %after) {
+; CHECK-LABEL: test_i128_align:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x1, x3
+; CHECK-NEXT:    mov x0, x2
+; CHECK-NEXT:    adrp x8, var
+; CHECK-NEXT:    str w4, [x8, :lo12:var]
+; CHECK-NEXT:    ret
   store i32 %after, ptr @var, align 4
-; CHECK-DAG: str w4, [{{x[0-9]+}}, :lo12:var]
 
   ret i128 %arg
-; CHECK-DAG: mov x0, x2
-; CHECK-DAG: mov x1, x3
 }
 
 ; CHECK-LABEL: @test_i64x2_align
 define [2 x i64] @test_i64x2_align(i32, [2 x i64] %arg, i32 %after) {
+; CHECK-LABEL: test_i64x2_align:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x0, x1
+; CHECK-NEXT:    adrp x8, var
+; CHECK-NEXT:    str w3, [x8, :lo12:var]
+; CHECK-NEXT:    mov x1, x2
+; CHECK-NEXT:    ret
   store i32 %after, ptr @var, align 4
-; CHECK-DAG: str w3, [{{x[0-9]+}}, :lo12:var]
 
   ret [2 x i64] %arg
-; CHECK-DAG: mov x0, x1
-; CHECK: mov x1, x2
 }
 
 @var64 = dso_local global i64 0, align 8
 
 ; Check stack slots are 64-bit at all times.
 define dso_local void @test_stack_slots([8 x i64], i1 %bool, i8 %char, i16 %short,
-                                i32 %int, i64 %long) {
 ; CHECK-LABEL: test_stack_slots:
-; CHECK-DAG: ldr w[[ext1:[0-9]+]], [sp, #24]
-; CHECK-DAG: ldrh w[[ext2:[0-9]+]], [sp, #16]
-; CHECK-DAG: ldrb w[[ext3:[0-9]+]], [sp, #8]
-; CHECK-DAG: ldr x[[ext4:[0-9]+]], [sp, #32]
-; CHECK-DAG: ldrb w[[ext5:[0-9]+]], [sp]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [sp, #24]
+; CHECK-NEXT:    ldrh w9, [sp, #16]
+; CHECK-NEXT:    ldrb w10, [sp, #8]
+; CHECK-NEXT:    ldr x11, [sp, #32]
+; CHECK-NEXT:    ldrb w12, [sp]
+; CHECK-NEXT:    and x12, x12, #0x1
+; CHECK-NEXT:    adrp x13, var64
+; CHECK-NEXT:    str x12, [x13, :lo12:var64]
+; CHECK-NEXT:    str x10, [x13, :lo12:var64]
+; CHECK-NEXT:    str x9, [x13, :lo12:var64]
+; CHECK-NEXT:    str x8, [x13, :lo12:var64]
+; CHECK-NEXT:    str x11, [x13, :lo12:var64]
+; CHECK-NEXT:    ret
+                                i32 %int, i64 %long) {
 
   %ext_bool = zext i1 %bool to i64
   store volatile i64 %ext_bool, ptr @var64, align 8
-; CHECK: str x[[ext5]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_char = zext i8 %char to i64
   store volatile i64 %ext_char, ptr @var64, align 8
-; CHECK: str x[[ext3]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_short = zext i16 %short to i64
   store volatile i64 %ext_short, ptr @var64, align 8
-; CHECK: str x[[ext2]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_int = zext i32 %int to i64
   store volatile i64 %ext_int, ptr @var64, align 8
-; CHECK: str x[[ext1]], [{{x[0-9]+}}, :lo12:var64]
 
   store volatile i64 %long, ptr @var64, align 8
-; CHECK: str x[[ext4]], [{{x[0-9]+}}, :lo12:var64]
 
   ret void
 }
@@ -60,25 +73,32 @@ define dso_local void @test_stack_slots([8 x i64], i1 %bool, i8 %char, i16 %shor
 ; keyword on args) while we're here.
 
 define dso_local void @test_extension(i1 %bool, i8 %char, i16 %short, i32 %int) {
+; CHECK-LABEL: test_extension:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
+; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
+; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-NEXT:    and x8, x0, #0x1
+; CHECK-NEXT:    adrp x9, var64
+; CHECK-NEXT:    str x8, [x9, :lo12:var64]
+; CHECK-NEXT:    sxtb x8, w1
+; CHECK-NEXT:    str x8, [x9, :lo12:var64]
+; CHECK-NEXT:    and x8, x2, #0xffff
+; CHECK-NEXT:    str x8, [x9, :lo12:var64]
+; CHECK-NEXT:    mov w8, w3
+; CHECK-NEXT:    str x8, [x9, :lo12:var64]
+; CHECK-NEXT:    ret
   %ext_bool = zext i1 %bool to i64
   store volatile i64 %ext_bool, ptr @var64
-; CHECK: and [[EXT:x[0-9]+]], x0, #0x1
-; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_char = sext i8 %char to i64
   store volatile i64 %ext_char, ptr @var64
-; CHECK: sxtb [[EXT:x[0-9]+]], w1
-; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_short = zext i16 %short to i64
   store volatile i64 %ext_short, ptr @var64
-; CHECK: and [[EXT:x[0-9]+]], x2, #0xffff
-; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   %ext_int = zext i32 %int to i64
   store volatile i64 %ext_int, ptr @var64
-; CHECK: mov w[[EXT:[0-9]+]], w3
-; CHECK: str x[[EXT]], [{{x[0-9]+}}, :lo12:var64]
 
   ret void
 }
@@ -88,10 +108,18 @@ declare void @variadic(i32 %a, ...)
   ; Under AAPCS variadic functions have the same calling convention as
   ; others. The extra arguments should go in registers rather than on the stack.
 define dso_local void @test_variadic() {
+; CHECK-LABEL: test_variadic:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    mov w1, #1 // =0x1
+; CHECK-NEXT:    fmov d0, #2.00000000
+; CHECK-NEXT:    bl variadic
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
   call void(i32, ...) @variadic(i32 0, i64 1, double 2.0)
-; CHECK: mov w1, #1
-; CHECK: fmov d0, #2.0
-; CHECK: bl variadic
   ret void
 }
 
@@ -100,7 +128,9 @@ define dso_local void @test_variadic() {
 ; into x7. Yuck!
 define dso_local i128 @test_i128_shadow([7 x i64] %x0_x6, i128 %sp) {
 ; CHECK-LABEL: test_i128_shadow:
-; CHECK: ldp x0, x1, [sp]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp x0, x1, [sp]
+; CHECK-NEXT:    ret
 
   ret i128 %sp
 }
@@ -108,56 +138,73 @@ define dso_local i128 @test_i128_shadow([7 x i64] %x0_x6, i128 %sp) {
 ; This test is to check if fp128 can be correctly handled on stack.
 define fp128 @test_fp128([8 x float] %arg0, fp128 %arg1) {
 ; CHECK-LABEL: test_fp128:
-; CHECK: ldr {{q[0-9]+}}, [sp]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    ret
   ret fp128 %arg1
 }
 
 ; Check if VPR arguments can be correctly passed on the stack.
 define dso_local <2 x double> @test_vreg_stack([8 x <2 x double>], <2 x double> %varg_stack) {
-entry:
 ; CHECK-LABEL: test_vreg_stack:
-; CHECK: ldr {{q[0-9]+}}, [sp]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    ret
+entry:
   ret <2 x double> %varg_stack;
 }
 
 ; Check that f16 can be passed and returned (ACLE 2.0 extension)
 define half @test_half(float, half %arg) {
 ; CHECK-LABEL: test_half:
-; CHECK: fmov s0, s1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, s1
+; CHECK-NEXT:    ret
   ret half %arg;
 }
 
 ; Check that f16 constants are materialized correctly
 define half @test_half_const() {
 ; CHECK-LABEL: test_half_const:
-; CHECK: ldr h0, [x{{[0-9]+}}, :lo12:{{.*}}]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    ldr h0, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    ret
   ret half 0xH4248
 }
 
 ; Check that v4f16 can be passed and returned in registers
 define dso_local <4 x half> @test_v4_half_register(float, <4 x half> %arg) {
 ; CHECK-LABEL: test_v4_half_register:
-; CHECK: fmov d0, d1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, d1
+; CHECK-NEXT:    ret
   ret <4 x half> %arg;
 }
 
 ; Check that v8f16 can be passed and returned in registers
 define dso_local <8 x half> @test_v8_half_register(float, <8 x half> %arg) {
 ; CHECK-LABEL: test_v8_half_register:
-; CHECK: mov v0.16b, v1.16b
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    ret
   ret <8 x half> %arg;
 }
 
 ; Check that v4f16 can be passed and returned on the stack
 define dso_local <4 x half> @test_v4_half_stack([8 x <2 x double>], <4 x half> %arg) {
 ; CHECK-LABEL: test_v4_half_stack:
-; CHECK: ldr d0, [sp]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [sp]
+; CHECK-NEXT:    ret
   ret <4 x half> %arg;
 }
 
 ; Check that v8f16 can be passed and returned on the stack
 define dso_local <8 x half> @test_v8_half_stack([8 x <2 x double>], <8 x half> %arg) {
 ; CHECK-LABEL: test_v8_half_stack:
-; CHECK: ldr q0, [sp]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    ret
   ret <8 x half> %arg;
 }
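
One convention worth spelling out from the regenerated arm64-aapcs.ll
checks: under the standard AAPCS64, anonymous (variadic) arguments are
assigned to registers exactly like named ones, which is why
test_variadic materializes w0, w1 and d0 before the call instead of
storing anything to the stack. A minimal sketch of the same call
(behaviour differs on Darwin targets, where anonymous arguments are
passed on the stack):

  declare void @variadic(i32, ...)

  define void @call_variadic() {
    call void (i32, ...) @variadic(i32 0, i64 1, double 2.0)
    ret void
  }
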
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
index b4fb9b613233d2..140480003e6246 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc-crash.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: not llc < %s -mtriple=arm64-apple-darwin 2>&1 | FileCheck %s
 ;
 ; Check that misuse of anyregcc results in a compile time error.
diff --git a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
index 225d4c602f181f..e053598c18f984 100644
--- a/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-anyregcc.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -debug-entry-values -mtriple=arm64-apple-darwin | FileCheck %s
 
 ; Stackmap Header: no constants - 6 callsites
@@ -459,3 +460,5 @@ entry:
 
 declare void @llvm.experimental.patchpoint.void(i64, i32, ptr, i32, ...)
 declare i64 @llvm.experimental.patchpoint.i64(i64, i32, ptr, i32, ...)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-arith.ll b/llvm/test/CodeGen/AArch64/arm64-arith.ll
index 3c7d43eeb1f6ba..98b2167e7a83c2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-arith.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-arith.ll
@@ -1,109 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -asm-verbose=false | FileCheck %s
 
 define i32 @t1(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t1:
-; CHECK: add w0, w1, w0
-; CHECK: ret
   %add = add i32 %b, %a
   ret i32 %add
 }
 
 define i32 @t2(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t2:
-; CHECK: udiv w0, w0, w1
-; CHECK: ret
   %udiv = udiv i32 %a, %b
   ret i32 %udiv
 }
 
 define i64 @t3(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t3:
-; CHECK: udiv x0, x0, x1
-; CHECK: ret
   %udiv = udiv i64 %a, %b
   ret i64 %udiv
 }
 
 define i32 @t4(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t4:
-; CHECK: sdiv w0, w0, w1
-; CHECK: ret
   %sdiv = sdiv i32 %a, %b
   ret i32 %sdiv
 }
 
 define i64 @t5(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t5:
-; CHECK: sdiv x0, x0, x1
-; CHECK: ret
   %sdiv = sdiv i64 %a, %b
   ret i64 %sdiv
 }
 
 define i32 @t6(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t6:
-; CHECK: lsl w0, w0, w1
-; CHECK: ret
   %shl = shl i32 %a, %b
   ret i32 %shl
 }
 
 define i64 @t7(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t7:
-; CHECK: lsl x0, x0, x1
-; CHECK: ret
   %shl = shl i64 %a, %b
   ret i64 %shl
 }
 
 define i32 @t8(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t8:
-; CHECK: lsr w0, w0, w1
-; CHECK: ret
   %lshr = lshr i32 %a, %b
   ret i32 %lshr
 }
 
 define i64 @t9(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t9:
-; CHECK: lsr x0, x0, x1
-; CHECK: ret
   %lshr = lshr i64 %a, %b
   ret i64 %lshr
 }
 
 define i32 @t10(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t10:
-; CHECK: asr w0, w0, w1
-; CHECK: ret
   %ashr = ashr i32 %a, %b
   ret i32 %ashr
 }
 
 define i64 @t11(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t11:
-; CHECK: asr x0, x0, x1
-; CHECK: ret
   %ashr = ashr i64 %a, %b
   ret i64 %ashr
 }
 
 define i32 @t12(i16 %a, i32 %x) nounwind ssp {
 entry:
-; CHECK-LABEL: t12:
-; CHECK: add	w0, w1, w0, sxth
-; CHECK: ret
   %c = sext i16 %a to i32
   %e = add i32 %x, %c
   ret i32 %e
@@ -111,9 +76,6 @@ entry:
 
 define i32 @t13(i16 %a, i32 %x) nounwind ssp {
 entry:
-; CHECK-LABEL: t13:
-; CHECK: add	w0, w1, w0, sxth #2
-; CHECK: ret
   %c = sext i16 %a to i32
   %d = shl i32 %c, 2
   %e = add i32 %x, %d
@@ -122,9 +84,6 @@ entry:
 
 define i64 @t14(i16 %a, i64 %x) nounwind ssp {
 entry:
-; CHECK-LABEL: t14:
-; CHECK: add   x0, x1, w0, uxth #3
-; CHECK: ret
   %c = zext i16 %a to i64
   %d = shl i64 %c, 3
   %e = add i64 %x, %d
@@ -134,9 +93,6 @@ entry:
 ; rdar://9160598
 define i64 @t15(i64 %a, i64 %x) nounwind ssp {
 entry:
-; CHECK-LABEL: t15:
-; CHECK: add x0, x1, w0, uxtw
-; CHECK: ret
   %b = and i64 %a, 4294967295
   %c = add i64 %x, %b
   ret i64 %c
@@ -144,9 +100,6 @@ entry:
 
 define i64 @t16(i64 %x) nounwind ssp {
 entry:
-; CHECK-LABEL: t16:
-; CHECK: lsl x0, x0, #1
-; CHECK: ret
   %a = shl i64 %x, 1
   ret i64 %a
 }
@@ -154,10 +107,6 @@ entry:
 ; rdar://9166974
 define i64 @t17(i16 %a, i64 %x) nounwind ssp {
 entry:
-; CHECK-LABEL: t17:
-; CHECK: sxth [[REG:x[0-9]+]], w0
-; CHECK: neg x0, [[REG]], lsl #32
-; CHECK: ret
   %tmp16 = sext i16 %a to i64
   %tmp17 = mul i64 %tmp16, -4294967296
   ret i64 %tmp17
@@ -165,36 +114,24 @@ entry:
 
 define i32 @t18(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t18:
-; CHECK: sdiv w0, w0, w1
-; CHECK: ret
   %sdiv = call i32 @llvm.aarch64.sdiv.i32(i32 %a, i32 %b)
   ret i32 %sdiv
 }
 
 define i64 @t19(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t19:
-; CHECK: sdiv x0, x0, x1
-; CHECK: ret
   %sdiv = call i64 @llvm.aarch64.sdiv.i64(i64 %a, i64 %b)
   ret i64 %sdiv
 }
 
 define i32 @t20(i32 %a, i32 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t20:
-; CHECK: udiv w0, w0, w1
-; CHECK: ret
   %udiv = call i32 @llvm.aarch64.udiv.i32(i32 %a, i32 %b)
   ret i32 %udiv
 }
 
 define i64 @t21(i64 %a, i64 %b) nounwind readnone ssp {
 entry:
-; CHECK-LABEL: t21:
-; CHECK: udiv x0, x0, x1
-; CHECK: ret
   %udiv = call i64 @llvm.aarch64.udiv.i64(i64 %a, i64 %b)
   ret i64 %udiv
 }
@@ -207,9 +144,6 @@ declare i64 @llvm.aarch64.udiv.i64(i64, i64) nounwind readnone
 ; 32-bit not.
 define i32 @inv_32(i32 %x) nounwind ssp {
 entry:
-; CHECK: inv_32
-; CHECK: mvn w0, w0
-; CHECK: ret
   %inv = xor i32 %x, -1
   ret i32 %inv
 }
@@ -217,9 +151,6 @@ entry:
 ; 64-bit not.
 define i64 @inv_64(i64 %x) nounwind ssp {
 entry:
-; CHECK: inv_64
-; CHECK: mvn x0, x0
-; CHECK: ret
   %inv = xor i64 %x, -1
   ret i64 %inv
 }
@@ -228,43 +159,28 @@ entry:
 ; and add/sub rather than the madd/msub instructions. The latter are 4+ cycles,
 ; and the former are two (total for the two instruction sequence for subtract).
 define i32 @f0(i32 %a) nounwind readnone ssp {
-; CHECK-LABEL: f0:
-; CHECK-NEXT: add w0, w0, w0, lsl #3
-; CHECK-NEXT: ret
   %res = mul i32 %a, 9
   ret i32 %res
 }
 
 define i64 @f1(i64 %a) nounwind readnone ssp {
-; CHECK-LABEL: f1:
-; CHECK-NEXT: lsl x8, x0, #4
-; CHECK-NEXT: sub x0, x8, x0
-; CHECK-NEXT: ret
   %res = mul i64 %a, 15
   ret i64 %res
 }
 
 define i32 @f2(i32 %a) nounwind readnone ssp {
-; CHECK-LABEL: f2:
-; CHECK-NEXT: lsl w8, w0, #3
-; CHECK-NEXT: sub w0, w8, w0
-; CHECK-NEXT: ret
   %res = mul nsw i32 %a, 7
   ret i32 %res
 }
 
 define i64 @f3(i64 %a) nounwind readnone ssp {
-; CHECK-LABEL: f3:
-; CHECK-NEXT: add x0, x0, x0, lsl #4
-; CHECK-NEXT: ret
   %res = mul nsw i64 %a, 17
   ret i64 %res
 }
 
 define i32 @f4(i32 %a) nounwind readnone ssp {
-; CHECK-LABEL: f4:
-; CHECK-NEXT: add w0, w0, w0, lsl #1
-; CHECK-NEXT: ret
   %res = mul i32 %a, 3
   ret i32 %res
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
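
The strength-reduction comment kept in arm64-arith.ll is easiest to see
as arithmetic: 9*a = a + 8*a, so f0 folds to a single add with a shifted
operand, while 15*a = 16*a - a, so f1 becomes an lsl plus a sub. A
minimal sketch of the mul-by-9 case (hypothetical function name):

  define i32 @mul9(i32 %a) {
    ; expected to lower to: add w0, w0, w0, lsl #3
    %r = mul i32 %a, 9
    ret i32 %r
  }
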
diff --git a/llvm/test/CodeGen/AArch64/arm64-bcc.ll b/llvm/test/CodeGen/AArch64/arm64-bcc.ll
index 08e7e9f57b6411..6f471f14484e04 100644
--- a/llvm/test/CodeGen/AArch64/arm64-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bcc.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-darwin  | FileCheck %s
 ; Checks for conditional branch b.vs
 
@@ -15,7 +16,6 @@ entry:
 ; <label>:6                                       ; preds = %entry
   tail call void @llvm.trap()
   unreachable
-; CHECK: b.vs
 }
 
 %S64 = type <{ i64 }>
@@ -57,3 +57,5 @@ declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
 
 ; Function Attrs: noreturn nounwind
 declare void @llvm.trap()
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
index 656dc300470b63..3ddd834f562ebb 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s | FileCheck %s
 
 ; Vararg saving must save Q registers using the equivalent of STR/STP.
@@ -11,12 +12,47 @@ declare void @llvm.va_start(ptr) nounwind
 declare void @llvm.va_end(ptr) nounwind
 
 define double @callee(i32 %a, ...) {
-; CHECK: stp
-; CHECK: stp
-; CHECK: stp
-; CHECK: stp
-; CHECK: stp
-; CHECK: stp
+; CHECK-LABEL: callee:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #224
+; CHECK-NEXT:    .cfi_def_cfa_offset 224
+; CHECK-NEXT:    mov x8, #-128 // =0xffffffffffffff80
+; CHECK-NEXT:    mov x9, sp
+; CHECK-NEXT:    add x10, sp, #136
+; CHECK-NEXT:    movk x8, #65480, lsl #32
+; CHECK-NEXT:    add x9, x9, #128
+; CHECK-NEXT:    stp x1, x2, [sp, #136]
+; CHECK-NEXT:    stp x9, x8, [sp, #208]
+; CHECK-NEXT:    add x9, x10, #56
+; CHECK-NEXT:    add x10, sp, #224
+; CHECK-NEXT:    mov x8, #-128 // =0xffffffffffffff80
+; CHECK-NEXT:    stp x3, x4, [sp, #152]
+; CHECK-NEXT:    stp x5, x6, [sp, #168]
+; CHECK-NEXT:    stp q0, q1, [sp]
+; CHECK-NEXT:    stp q2, q3, [sp, #32]
+; CHECK-NEXT:    stp q4, q5, [sp, #64]
+; CHECK-NEXT:    stp q6, q7, [sp, #96]
+; CHECK-NEXT:    str x9, [sp, #200]
+; CHECK-NEXT:    stp x7, x10, [sp, #184]
+; CHECK-NEXT:    tbz w8, #31, .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %vaarg.maybe_reg
+; CHECK-NEXT:    add w9, w8, #16
+; CHECK-NEXT:    cmp w9, #0
+; CHECK-NEXT:    str w9, [sp, #220]
+; CHECK-NEXT:    b.le .LBB0_4
+; CHECK-NEXT:  .LBB0_2: // %vaarg.on_stack
+; CHECK-NEXT:    ldr x8, [sp, #192]
+; CHECK-NEXT:    add x9, x8, #8
+; CHECK-NEXT:    str x9, [sp, #192]
+; CHECK-NEXT:  .LBB0_3: // %vaarg.end
+; CHECK-NEXT:    ldr d0, [x8]
+; CHECK-NEXT:    add sp, sp, #224
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_4: // %vaarg.in_reg
+; CHECK-NEXT:    ldr x9, [sp, #208]
+; CHECK-NEXT:    add x8, x9, x8
+; CHECK-NEXT:    add x8, x8, #8
+; CHECK-NEXT:    b .LBB0_3
 entry:
   %vl = alloca %struct.__va_list, align 8
   call void @llvm.va_start(ptr %vl)
diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
index f1dccae36b21ce..7d937ade825d45 100644
--- a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -o - | FileCheck %s
 ; RUN: llc -mtriple aarch64_be < %s -aarch64-enable-ldst-opt=false -fast-isel=true -O0 -o - | FileCheck %s
 
@@ -8,7 +9,6 @@
 ; CHECK-LABEL: test_i64_f64:
 declare i64 @test_i64_f64_helper(double %p)
 define void @test_i64_f64(ptr %p, ptr %q) {
-; CHECK-NOT: rev
     %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call i64 @test_i64_f64_helper(double %2)
@@ -22,7 +22,6 @@ return_bb:
 ; CHECK-LABEL: test_i64_v1i64:
 declare i64 @test_i64_v1i64_helper(<1 x i64> %p)
 define void @test_i64_v1i64(ptr %p, ptr %q) {
-; CHECK-NOT: rev
     %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
@@ -36,7 +35,6 @@ return_bb:
 ; CHECK-LABEL: test_i64_v2f32:
 declare i64 @test_i64_v2f32_helper(<2 x float> %p)
 define void @test_i64_v2f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2)
@@ -50,7 +48,6 @@ return_bb:
 ; CHECK-LABEL: test_i64_v2i32:
 declare i64 @test_i64_v2i32_helper(<2 x i32> %p)
 define void @test_i64_v2i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2)
@@ -64,7 +61,6 @@ return_bb:
 ; CHECK-LABEL: test_i64_v4i16:
 declare i64 @test_i64_v4i16_helper(<4 x i16> %p)
 define void @test_i64_v4i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2)
@@ -78,7 +74,6 @@ return_bb:
 ; CHECK-LABEL: test_i64_v8i8:
 declare i64 @test_i64_v8i8_helper(<8 x i8> %p)
 define void @test_i64_v8i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2)
@@ -92,7 +87,6 @@ return_bb:
 ; CHECK-LABEL: test_f64_i64:
 declare double @test_f64_i64_helper(i64 %p)
 define void @test_f64_i64(ptr %p, ptr %q) {
-; CHECK-NOT: rev
     %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call double @test_f64_i64_helper(i64 %2)
@@ -106,7 +100,6 @@ return_bb:
 ; CHECK-LABEL: test_f64_v1i64:
 declare double @test_f64_v1i64_helper(<1 x i64> %p)
 define void @test_f64_v1i64(ptr %p, ptr %q) {
-; CHECK-NOT: rev
     %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call double @test_f64_v1i64_helper(<1 x i64> %2)
@@ -120,7 +113,6 @@ return_bb:
 ; CHECK-LABEL: test_f64_v2f32:
 declare double @test_f64_v2f32_helper(<2 x float> %p)
 define void @test_f64_v2f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call double @test_f64_v2f32_helper(<2 x float> %2)
@@ -134,7 +126,6 @@ return_bb:
 ; CHECK-LABEL: test_f64_v2i32:
 declare double @test_f64_v2i32_helper(<2 x i32> %p)
 define void @test_f64_v2i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call double @test_f64_v2i32_helper(<2 x i32> %2)
@@ -148,7 +139,6 @@ return_bb:
 ; CHECK-LABEL: test_f64_v4i16:
 declare double @test_f64_v4i16_helper(<4 x i16> %p)
 define void @test_f64_v4i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call double @test_f64_v4i16_helper(<4 x i16> %2)
@@ -162,7 +152,6 @@ return_bb:
 ; CHECK-LABEL: test_f64_v8i8:
 declare double @test_f64_v8i8_helper(<8 x i8> %p)
 define void @test_f64_v8i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call double @test_f64_v8i8_helper(<8 x i8> %2)
@@ -176,7 +165,6 @@ return_bb:
 ; CHECK-LABEL: test_v1i64_i64:
 declare <1 x i64> @test_v1i64_i64_helper(i64 %p)
 define void @test_v1i64_i64(ptr %p, ptr %q) {
-; CHECK-NOT: rev
     %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2)
@@ -190,7 +178,6 @@ return_bb:
 ; CHECK-LABEL: test_v1i64_f64:
 declare <1 x i64> @test_v1i64_f64_helper(double %p)
 define void @test_v1i64_f64(ptr %p, ptr %q) {
-; CHECK-NOT: rev
     %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <1 x i64> @test_v1i64_f64_helper(double %2)
@@ -204,7 +191,6 @@ return_bb:
 ; CHECK-LABEL: test_v1i64_v2f32:
 declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p)
 define void @test_v1i64_v2f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2)
@@ -218,7 +204,6 @@ return_bb:
 ; CHECK-LABEL: test_v1i64_v2i32:
 declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p)
 define void @test_v1i64_v2i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2)
@@ -232,7 +217,6 @@ return_bb:
 ; CHECK-LABEL: test_v1i64_v4i16:
 declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p)
 define void @test_v1i64_v4i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2)
@@ -246,7 +230,6 @@ return_bb:
 ; CHECK-LABEL: test_v1i64_v8i8:
 declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p)
 define void @test_v1i64_v8i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2)
@@ -260,7 +243,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f32_i64:
 declare <2 x float> @test_v2f32_i64_helper(i64 %p)
 define void @test_v2f32_i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2)
@@ -274,7 +256,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f32_f64:
 declare <2 x float> @test_v2f32_f64_helper(double %p)
 define void @test_v2f32_f64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <2 x float> @test_v2f32_f64_helper(double %2)
@@ -288,7 +269,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f32_v1i64:
 declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p)
 define void @test_v2f32_v1i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2)
@@ -302,8 +282,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f32_v2i32:
 declare <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %p)
 define void @test_v2f32_v2i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2)
@@ -317,8 +295,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f32_v4i16:
 declare <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %p)
 define void @test_v2f32_v4i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2)
@@ -332,8 +308,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f32_v8i8:
 declare <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %p)
 define void @test_v2f32_v8i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2)
@@ -347,7 +321,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i32_i64:
 declare <2 x i32> @test_v2i32_i64_helper(i64 %p)
 define void @test_v2i32_i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2)
@@ -361,7 +334,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i32_f64:
 declare <2 x i32> @test_v2i32_f64_helper(double %p)
 define void @test_v2i32_f64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <2 x i32> @test_v2i32_f64_helper(double %2)
@@ -375,7 +347,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i32_v1i64:
 declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p)
 define void @test_v2i32_v1i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2)
@@ -389,8 +360,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i32_v2f32:
 declare <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %p)
 define void @test_v2i32_v2f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2)
@@ -404,8 +373,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i32_v4i16:
 declare <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %p)
 define void @test_v2i32_v4i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2)
@@ -419,8 +386,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i32_v8i8:
 declare <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %p)
 define void @test_v2i32_v8i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
-; CHECK: rev64 v{{[0-9]+}}.2s
     %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2)
@@ -434,7 +399,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i16_i64:
 declare <4 x i16> @test_v4i16_i64_helper(i64 %p)
 define void @test_v4i16_i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <4 x i16> @test_v4i16_i64_helper(i64 %2)
@@ -448,7 +412,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i16_f64:
 declare <4 x i16> @test_v4i16_f64_helper(double %p)
 define void @test_v4i16_f64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <4 x i16> @test_v4i16_f64_helper(double %2)
@@ -462,7 +425,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i16_v1i64:
 declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p)
 define void @test_v4i16_v1i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2)
@@ -476,8 +438,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i16_v2f32:
 declare <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %p)
 define void @test_v4i16_v2f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2)
@@ -491,8 +451,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i16_v2i32:
 declare <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %p)
 define void @test_v4i16_v2i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2)
@@ -506,8 +464,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i16_v8i8:
 declare <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %p)
 define void @test_v4i16_v8i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
-; CHECK: rev64 v{{[0-9]+}}.4h
     %1 = load <8 x i8>, ptr %p
     %2 = add <8 x i8> %1, %1
     %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2)
@@ -521,7 +477,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i8_i64:
 declare <8 x i8> @test_v8i8_i64_helper(i64 %p)
 define void @test_v8i8_i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load i64, ptr %p
     %2 = add i64 %1, %1
     %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2)
@@ -535,7 +490,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i8_f64:
 declare <8 x i8> @test_v8i8_f64_helper(double %p)
 define void @test_v8i8_f64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load double, ptr %p
     %2 = fadd double %1, %1
     %3 = call <8 x i8> @test_v8i8_f64_helper(double %2)
@@ -549,7 +503,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i8_v1i64:
 declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p)
 define void @test_v8i8_v1i64(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <1 x i64>, ptr %p
     %2 = add <1 x i64> %1, %1
     %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2)
@@ -563,8 +516,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i8_v2f32:
 declare <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %p)
 define void @test_v8i8_v2f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <2 x float>, ptr %p
     %2 = fadd <2 x float> %1, %1
     %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2)
@@ -578,8 +529,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i8_v2i32:
 declare <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %p)
 define void @test_v8i8_v2i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.2s
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <2 x i32>, ptr %p
     %2 = add <2 x i32> %1, %1
     %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2)
@@ -593,8 +542,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i8_v4i16:
 declare <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %p)
 define void @test_v8i8_v4i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4h
-; CHECK: rev64 v{{[0-9]+}}.8b
     %1 = load <4 x i16>, ptr %p
     %2 = add <4 x i16> %1, %1
     %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2)
@@ -608,7 +555,6 @@ return_bb:
 ; CHECK-LABEL: test_f128_v2f64:
 declare fp128 @test_f128_v2f64_helper(<2 x double> %p)
 define void @test_f128_v2f64(ptr %p, ptr %q) {
-; CHECK: ext
     %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2)
@@ -622,7 +568,6 @@ return_bb:
 ; CHECK-LABEL: test_f128_v2i64:
 declare fp128 @test_f128_v2i64_helper(<2 x i64> %p)
 define void @test_f128_v2i64(ptr %p, ptr %q) {
-; CHECK: ext
     %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2)
@@ -636,8 +581,6 @@ return_bb:
 ; CHECK-LABEL: test_f128_v4f32:
 declare fp128 @test_f128_v4f32_helper(<4 x float> %p)
 define void @test_f128_v4f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2)
@@ -651,8 +594,6 @@ return_bb:
 ; CHECK-LABEL: test_f128_v4i32:
 declare fp128 @test_f128_v4i32_helper(<4 x i32> %p)
 define void @test_f128_v4i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2)
@@ -666,8 +607,6 @@ return_bb:
 ; CHECK-LABEL: test_f128_v8i16:
 declare fp128 @test_f128_v8i16_helper(<8 x i16> %p)
 define void @test_f128_v8i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2)
@@ -681,8 +620,6 @@ return_bb:
 ; CHECK-LABEL: test_f128_v16i8:
 declare fp128 @test_f128_v16i8_helper(<16 x i8> %p)
 define void @test_f128_v16i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2)
@@ -696,7 +633,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f64_f128:
 declare <2 x double> @test_v2f64_f128_helper(fp128 %p)
 define void @test_v2f64_f128(ptr %p, ptr %q) {
-; CHECK: ext
     %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2)
@@ -710,8 +646,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f64_v2i64:
 declare <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %p)
 define void @test_v2f64_v2i64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: ext
     %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2)
@@ -725,9 +659,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f64_v4f32:
 declare <2 x double> @test_v2f64_v4f32_helper(<4 x float> %p)
 define void @test_v2f64_v4f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: ext
     %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2)
@@ -741,9 +672,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f64_v4i32:
 declare <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %p)
 define void @test_v2f64_v4i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: ext
     %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2)
@@ -757,9 +685,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f64_v8i16:
 declare <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %p)
 define void @test_v2f64_v8i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
-; CHECK: ext
     %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2)
@@ -773,9 +698,6 @@ return_bb:
 ; CHECK-LABEL: test_v2f64_v16i8:
 declare <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %p)
 define void @test_v2f64_v16i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
-; CHECK: ext
     %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2)
@@ -789,7 +711,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i64_f128:
 declare <2 x i64> @test_v2i64_f128_helper(fp128 %p)
 define void @test_v2i64_f128(ptr %p, ptr %q) {
-; CHECK: ext
     %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2)
@@ -803,8 +724,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i64_v2f64:
 declare <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %p)
 define void @test_v2i64_v2f64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: ext
     %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2)
@@ -818,9 +737,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i64_v4f32:
 declare <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %p)
 define void @test_v2i64_v4f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: ext
     %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2)
@@ -834,9 +750,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i64_v4i32:
 declare <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %p)
 define void @test_v2i64_v4i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: ext
     %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2)
@@ -850,9 +763,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i64_v8i16:
 declare <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %p)
 define void @test_v2i64_v8i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
-; CHECK: ext
     %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2)
@@ -866,9 +776,6 @@ return_bb:
 ; CHECK-LABEL: test_v2i64_v16i8:
 declare <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %p)
 define void @test_v2i64_v16i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
-; CHECK: ext
     %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2)
@@ -882,8 +789,6 @@ return_bb:
 ; CHECK-LABEL: test_v4f32_f128:
 declare <4 x float> @test_v4f32_f128_helper(fp128 %p)
 define void @test_v4f32_f128(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2)
@@ -897,9 +802,6 @@ return_bb:
 ; CHECK-LABEL: test_v4f32_v2f64:
 declare <4 x float> @test_v4f32_v2f64_helper(<2 x double> %p)
 define void @test_v4f32_v2f64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2)
@@ -913,9 +815,6 @@ return_bb:
 ; CHECK-LABEL: test_v4f32_v2i64:
 declare <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %p)
 define void @test_v4f32_v2i64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2)
@@ -929,10 +828,6 @@ return_bb:
 ; CHECK-LABEL: test_v4f32_v4i32:
 declare <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %p)
 define void @test_v4f32_v4i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2)
@@ -946,10 +841,6 @@ return_bb:
 ; CHECK-LABEL: test_v4f32_v8i16:
 declare <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %p)
 define void @test_v4f32_v8i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2)
@@ -963,10 +854,6 @@ return_bb:
 ; CHECK-LABEL: test_v4f32_v16i8:
 declare <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %p)
 define void @test_v4f32_v16i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2)
@@ -980,8 +867,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i32_f128:
 declare <4 x i32> @test_v4i32_f128_helper(fp128 %p)
 define void @test_v4i32_f128(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2)
@@ -995,9 +880,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i32_v2f64:
 declare <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %p)
 define void @test_v4i32_v2f64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2)
@@ -1011,9 +893,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i32_v2i64:
 declare <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %p)
 define void @test_v4i32_v2i64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2)
@@ -1027,10 +906,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i32_v4f32:
 declare <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %p)
 define void @test_v4i32_v4f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2)
@@ -1044,10 +919,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i32_v8i16:
 declare <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %p)
 define void @test_v4i32_v8i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2)
@@ -1061,10 +932,6 @@ return_bb:
 ; CHECK-LABEL: test_v4i32_v16i8:
 declare <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %p)
 define void @test_v4i32_v16i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
     %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2)
@@ -1078,8 +945,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i16_f128:
 declare <8 x i16> @test_v8i16_f128_helper(fp128 %p)
 define void @test_v8i16_f128(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2)
@@ -1093,9 +958,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i16_v2f64:
 declare <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %p)
 define void @test_v8i16_v2f64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2)
@@ -1109,9 +971,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i16_v2i64:
 declare <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %p)
 define void @test_v8i16_v2i64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2)
@@ -1125,10 +984,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i16_v4f32:
 declare <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %p)
 define void @test_v8i16_v4f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2)
@@ -1142,10 +997,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i16_v4i32:
 declare <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %p)
 define void @test_v8i16_v4i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2)
@@ -1159,10 +1010,6 @@ return_bb:
 ; CHECK-LABEL: test_v8i16_v16i8:
 declare <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %p)
 define void @test_v8i16_v16i8(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
     %1 = load <16 x i8>, ptr %p
     %2 = add <16 x i8> %1, %1
     %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2)
@@ -1176,8 +1023,6 @@ return_bb:
 ; CHECK-LABEL: test_v16i8_f128:
 declare <16 x i8> @test_v16i8_f128_helper(fp128 %p)
 define void @test_v16i8_f128(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load fp128, ptr %p
     %2 = fadd fp128 %1, %1
     %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2)
@@ -1191,9 +1036,6 @@ return_bb:
 ; CHECK-LABEL: test_v16i8_v2f64:
 declare <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %p)
 define void @test_v16i8_v2f64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load <2 x double>, ptr %p
     %2 = fadd <2 x double> %1, %1
     %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2)
@@ -1207,9 +1049,6 @@ return_bb:
 ; CHECK-LABEL: test_v16i8_v2i64:
 declare <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %p)
 define void @test_v16i8_v2i64(ptr %p, ptr %q) {
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load <2 x i64>, ptr %p
     %2 = add <2 x i64> %1, %1
     %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2)
@@ -1223,10 +1062,6 @@ return_bb:
 ; CHECK-LABEL: test_v16i8_v4f32:
 declare <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %p)
 define void @test_v16i8_v4f32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load <4 x float>, ptr %p
     %2 = fadd <4 x float> %1, %1
     %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2)
@@ -1240,10 +1075,6 @@ return_bb:
 ; CHECK-LABEL: test_v16i8_v4i32:
 declare <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %p)
 define void @test_v16i8_v4i32(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.4s
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load <4 x i32>, ptr %p
     %2 = add <4 x i32> %1, %1
     %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2)
@@ -1257,10 +1088,6 @@ return_bb:
 ; CHECK-LABEL: test_v16i8_v8i16:
 declare <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %p)
 define void @test_v16i8_v8i16(ptr %p, ptr %q) {
-; CHECK: rev64 v{{[0-9]+}}.8h
-; CHECK: ext
-; CHECK: rev64 v{{[0-9]+}}.16b
-; CHECK: ext
     %1 = load <8 x i16>, ptr %p
     %2 = add <8 x i16> %1, %1
     %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
@@ -1270,3 +1097,5 @@ return_bb:
     store <16 x i8> %4, ptr %q
     ret void
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
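
As a point of reference for the shuffles this series targets, here is a minimal standalone example (hypothetical, not part of the patch) of an irregular two-source byte shuffle. Its mask fits none of the REV/EXT/ZIP/UZP/TRN patterns, so it is the kind of <8 x i8> mask the new TBL check now reports as legal; on AArch64 a shuffle of this shape is typically lowered through a tbl byte-table lookup rather than scalarized:

; Hypothetical test, for illustration only.
define <8 x i8> @irregular_shuffle(<8 x i8> %a, <8 x i8> %b) {
  %s = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 11, i32 2, i32 9, i32 5, i32 14, i32 7, i32 8>
  ret <8 x i8> %s
}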
diff --git a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
index 7745f8dab1c2bf..fdb96e1d8a7509 100644
--- a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 ; RUN: llc -global-isel < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
@@ -6,10 +7,15 @@
 
 define void @t2() {
 ; CHECK-LABEL: t2:
-; CHECK: adrp	x[[GOTADDR:[0-9]+]], _t@GOTPAGE
-; CHECK: ldr	x[[ADDR:[0-9]+]], [x[[GOTADDR]], _t@GOTPAGEOFF]
-; CHECK: ldr	x[[DEST:[0-9]+]], [x[[ADDR]]]
-; CHECK: br	x[[DEST]]
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x8, _t@GOTPAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr x8, [x8, _t@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh2:
+; CHECK-NEXT:    ldr x0, [x8]
+; CHECK-NEXT:    br x0
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh0, Lloh1, Lloh2
   %tmp = load ptr, ptr @t
   %tmp.upgrd.2 = tail call i32 %tmp()
   ret void
@@ -17,29 +23,29 @@ define void @t2() {
 
 define void @t3() {
 ; CHECK-LABEL: t3:
-; CHECK: b	_t2
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    b _t2
   tail call void @t2()
   ret void
 }
 
 define double @t4(double %a) nounwind readonly ssp {
 ; CHECK-LABEL: t4:
-; CHECK: b	_sin
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    b _sin
   %tmp = tail call double @sin(double %a) nounwind readonly
   ret double %tmp
 }
 
 define float @t5(float %a) nounwind readonly ssp {
 ; CHECK-LABEL: t5:
-; CHECK: b	_sinf
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    b _sinf
   %tmp = tail call float @sinf(float %a) nounwind readonly
   ret float %tmp
 }
 
 define void @t7() nounwind {
-; CHECK-LABEL: t7:
-; CHECK: b	_foo
-; CHECK: b	_bar
 
   br i1 undef, label %bb, label %bb1.lr.ph
 
@@ -54,9 +60,16 @@ bb:                                               ; preds = %entry
 
 define i32 @t8(i32 %x) nounwind ssp {
 ; CHECK-LABEL: t8:
-; CHECK: b	_c
-; CHECK: b	_a
-; CHECK: b	_b
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    tbnz w0, #0, LBB5_3
+; CHECK-NEXT:  ; %bb.1: ; %if.end
+; CHECK-NEXT:    tbnz w0, #1, LBB5_4
+; CHECK-NEXT:  ; %bb.2: ; %if.end5
+; CHECK-NEXT:    b _c
+; CHECK-NEXT:  LBB5_3: ; %if.then
+; CHECK-NEXT:    b _a
+; CHECK-NEXT:  LBB5_4: ; %if.then3
+; CHECK-NEXT:    b _b
   %and = and i32 %x, 1
   %tobool = icmp eq i32 %and, 0
   br i1 %tobool, label %if.end, label %if.then
@@ -94,6 +107,7 @@ declare i32 @c(i32)
-; CHECK-LABEL: tswift:
-; CHECK: b _swiftfunc
 define swiftcc i32 @tswift(i32 %a) nounwind {
+; CHECK-LABEL: tswift:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    b _swiftfunc
   %res = tail call i32 @swiftfunc(i32 %a)
   ret i32 %res
 }
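
For anyone reading the regenerated assertions for the first time: update_llc_test_checks.py drives llc with the test's own RUN lines and then pins the whole function body, anchoring on CHECK-LABEL and chaining CHECK-NEXT so no stray instruction can slip in between. A minimal hand-written sketch of the same shape (hypothetical function names, same triple as this test):

; RUN: llc < %s -mtriple=arm64-apple-ios7.0 | FileCheck %s

declare i32 @callee(i32)

define i32 @tail(i32 %x) {
; CHECK-LABEL: tail:
; CHECK:       ; %bb.0:
; CHECK-NEXT:    b _callee
  %r = tail call i32 @callee(i32 %x)
  ret i32 %r
}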
diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
index 0b2acdf102c2cc..ad88c95e33a520 100644
--- a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -o - %s -mtriple=arm64-apple-ios -O2 | FileCheck %s
 ; RUN: llc -o - %s -mtriple=arm64_32-apple-watchos -O2 | FileCheck %s
 ; RUN: llc -o - %s -mtriple=arm64-linux-gnu -O2 | FileCheck %s --check-prefix=CHECK-ELF
@@ -12,6 +13,21 @@
 
 ; Function Attrs: noinline nounwind ssp
 define void @foo(i32 %t) {
+; CHECK-LABEL: foo:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    adrp x8, _a@PAGE
+; CHECK-NEXT:    ldr w9, [x8, _a@PAGEOFF]
+; CHECK-NEXT:    add w9, w9, w0
+; CHECK-NEXT:    str w9, [x8, _a@PAGEOFF]
+; CHECK-NEXT:    ret
+;
+; CHECK-ELF-LABEL: foo:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, a
+; CHECK-ELF-NEXT:    ldr w9, [x8, :lo12:a]
+; CHECK-ELF-NEXT:    add w9, w9, w0
+; CHECK-ELF-NEXT:    str w9, [x8, :lo12:a]
+; CHECK-ELF-NEXT:    ret
 entry:
   %tmp = load i32, i32* @a, align 4
   %add = add nsw i32 %tmp, %t
@@ -28,6 +44,73 @@ entry:
 ; CHECK: ret
 ; CHECK-NOT: .loh AdrpAdrp
 define i32 @test(i32 %t) {
+; CHECK-LABEL: test:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    .cfi_offset w19, -24
+; CHECK-NEXT:    .cfi_offset w20, -32
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    cmp w0, #6
+; CHECK-NEXT:    b.lt LBB1_3
+; CHECK-NEXT:  ; %bb.1: ; %if.then
+; CHECK-NEXT:    adrp x20, _a@PAGE
+; CHECK-NEXT:    ldr w8, [x20, _a@PAGEOFF]
+; CHECK-NEXT:    add w0, w8, w19
+; CHECK-NEXT:    cmp w0, #13
+; CHECK-NEXT:    b.lt LBB1_3
+; CHECK-NEXT:  ; %bb.2: ; %if.then2
+; CHECK-NEXT:    bl _foo
+; CHECK-NEXT:    ldr w19, [x20, _a@PAGEOFF]
+; CHECK-NEXT:  LBB1_3: ; %if.end4
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x20, _b@GOTPAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr x20, [x20, _b@GOTPAGEOFF]
+; CHECK-NEXT:    ldr w8, [x20]
+; CHECK-NEXT:    add w0, w8, w19
+; CHECK-NEXT:    bl _foo
+; CHECK-NEXT:    ldr w8, [x20]
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT:    add w0, w8, w19
+; CHECK-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh0, Lloh1
+;
+; CHECK-ELF-LABEL: test:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-ELF-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-ELF-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-ELF-NEXT:    .cfi_offset w19, -8
+; CHECK-ELF-NEXT:    .cfi_offset w20, -16
+; CHECK-ELF-NEXT:    .cfi_offset w30, -32
+; CHECK-ELF-NEXT:    mov w19, w0
+; CHECK-ELF-NEXT:    cmp w0, #6
+; CHECK-ELF-NEXT:    b.lt .LBB1_3
+; CHECK-ELF-NEXT:  // %bb.1: // %if.then
+; CHECK-ELF-NEXT:    adrp x20, a
+; CHECK-ELF-NEXT:    ldr w8, [x20, :lo12:a]
+; CHECK-ELF-NEXT:    add w0, w8, w19
+; CHECK-ELF-NEXT:    cmp w0, #13
+; CHECK-ELF-NEXT:    b.lt .LBB1_3
+; CHECK-ELF-NEXT:  // %bb.2: // %if.then2
+; CHECK-ELF-NEXT:    bl foo
+; CHECK-ELF-NEXT:    ldr w19, [x20, :lo12:a]
+; CHECK-ELF-NEXT:  .LBB1_3: // %if.end4
+; CHECK-ELF-NEXT:    adrp x20, :got:b
+; CHECK-ELF-NEXT:    ldr x20, [x20, :got_lo12:b]
+; CHECK-ELF-NEXT:    ldr w8, [x20]
+; CHECK-ELF-NEXT:    add w0, w8, w19
+; CHECK-ELF-NEXT:    bl foo
+; CHECK-ELF-NEXT:    ldr w8, [x20]
+; CHECK-ELF-NEXT:    add w0, w8, w19
+; CHECK-ELF-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-ELF-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-ELF-NEXT:    ret
 entry:
   %cmp = icmp sgt i32 %t, 5
   br i1 %cmp, label %if.then, label %if.end4
@@ -67,6 +150,23 @@ if.end4:                                          ; preds = %if.then2, %if.then,
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getC() {
+; CHECK-LABEL: getC:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh2:
+; CHECK-NEXT:    adrp x8, _C@GOTPAGE
+; CHECK-NEXT:  Lloh3:
+; CHECK-NEXT:    ldr x8, [x8, _C@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh4:
+; CHECK-NEXT:    ldr w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh2, Lloh3, Lloh4
+;
+; CHECK-ELF-LABEL: getC:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:C
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:C]
+; CHECK-ELF-NEXT:    ldr w0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i32, i32* @C, align 4
   ret i32 %res
 }
@@ -83,6 +183,23 @@ define i32 @getC() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExtC() {
+; CHECK-LABEL: getSExtC:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh5:
+; CHECK-NEXT:    adrp x8, _C@GOTPAGE
+; CHECK-NEXT:  Lloh6:
+; CHECK-NEXT:    ldr x8, [x8, _C@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh7:
+; CHECK-NEXT:    ldrsw x0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh5, Lloh6, Lloh7
+;
+; CHECK-ELF-LABEL: getSExtC:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:C
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:C]
+; CHECK-ELF-NEXT:    ldrsw x0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i32, i32* @C, align 4
   %sextres = sext i32 %res to i64
   ret i64 %sextres
@@ -102,6 +219,26 @@ define i64 @getSExtC() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define void @getSeveralC(i32 %t) {
+; CHECK-LABEL: getSeveralC:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:  Lloh8:
+; CHECK-NEXT:    adrp x8, _C@GOTPAGE
+; CHECK-NEXT:  Lloh9:
+; CHECK-NEXT:    ldr x8, [x8, _C@GOTPAGEOFF]
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    add w9, w9, w0
+; CHECK-NEXT:    str w9, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh8, Lloh9
+;
+; CHECK-ELF-LABEL: getSeveralC:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, :got:C
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:C]
+; CHECK-ELF-NEXT:    ldr w9, [x8]
+; CHECK-ELF-NEXT:    add w9, w9, w0
+; CHECK-ELF-NEXT:    str w9, [x8]
+; CHECK-ELF-NEXT:    ret
 entry:
   %tmp = load i32, i32* @C, align 4
   %add = add nsw i32 %tmp, %t
@@ -121,6 +258,23 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define void @setC(i32 %t) {
+; CHECK-LABEL: setC:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:  Lloh10:
+; CHECK-NEXT:    adrp x8, _C@GOTPAGE
+; CHECK-NEXT:  Lloh11:
+; CHECK-NEXT:    ldr x8, [x8, _C@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh12:
+; CHECK-NEXT:    str w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh10, Lloh11, Lloh12
+;
+; CHECK-ELF-LABEL: setC:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, :got:C
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:C]
+; CHECK-ELF-NEXT:    str w0, [x8]
+; CHECK-ELF-NEXT:    ret
 entry:
   store i32 %t, i32* @C, align 4
   ret void
@@ -143,6 +297,23 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpAddLdr [[ADRP_LABEL]], [[ADDGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getInternalCPlus4() {
+; CHECK-LABEL: getInternalCPlus4:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh13:
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:  Lloh14:
+; CHECK-NEXT:    add x8, x8, _InternalC@PAGEOFF
+; CHECK-NEXT:  Lloh15:
+; CHECK-NEXT:    ldr w0, [x8, #16]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpAddLdr Lloh13, Lloh14, Lloh15
+;
+; CHECK-ELF-LABEL: getInternalCPlus4:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    add x8, x8, :lo12:InternalC
+; CHECK-ELF-NEXT:    ldr w0, [x8, #16]
+; CHECK-ELF-NEXT:    ret
   %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
   %res = load i32, i32* %addr, align 4
   ret i32 %res
@@ -160,6 +331,23 @@ define i32 @getInternalCPlus4() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpAddLdr [[ADRP_LABEL]], [[ADDGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExtInternalCPlus4() {
+; CHECK-LABEL: getSExtInternalCPlus4:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh16:
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:  Lloh17:
+; CHECK-NEXT:    add x8, x8, _InternalC@PAGEOFF
+; CHECK-NEXT:  Lloh18:
+; CHECK-NEXT:    ldrsw x0, [x8, #16]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpAddLdr Lloh16, Lloh17, Lloh18
+;
+; CHECK-ELF-LABEL: getSExtInternalCPlus4:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    add x8, x8, :lo12:InternalC
+; CHECK-ELF-NEXT:    ldrsw x0, [x8, #16]
+; CHECK-ELF-NEXT:    ret
   %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
   %res = load i32, i32* %addr, align 4
   %sextres = sext i32 %res to i64
@@ -180,6 +368,26 @@ define i64 @getSExtInternalCPlus4() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpAdd [[ADRP_LABEL]], [[ADDGOT_LABEL]]
 define void @getSeveralInternalCPlus4(i32 %t) {
+; CHECK-LABEL: getSeveralInternalCPlus4:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:  Lloh19:
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:  Lloh20:
+; CHECK-NEXT:    add x8, x8, _InternalC@PAGEOFF
+; CHECK-NEXT:    ldr w9, [x8, #16]
+; CHECK-NEXT:    add w9, w9, w0
+; CHECK-NEXT:    str w9, [x8, #16]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpAdd Lloh19, Lloh20
+;
+; CHECK-ELF-LABEL: getSeveralInternalCPlus4:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    add x8, x8, :lo12:InternalC
+; CHECK-ELF-NEXT:    ldr w9, [x8, #16]
+; CHECK-ELF-NEXT:    add w9, w9, w0
+; CHECK-ELF-NEXT:    str w9, [x8, #16]
+; CHECK-ELF-NEXT:    ret
 entry:
   %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
   %tmp = load i32, i32* %addr, align 4
@@ -200,6 +408,23 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpAddStr [[ADRP_LABEL]], [[ADDGOT_LABEL]], [[LDR_LABEL]]
 define void @setInternalCPlus4(i32 %t) {
+; CHECK-LABEL: setInternalCPlus4:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:  Lloh21:
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:  Lloh22:
+; CHECK-NEXT:    add x8, x8, _InternalC@PAGEOFF
+; CHECK-NEXT:  Lloh23:
+; CHECK-NEXT:    str w0, [x8, #16]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpAddStr Lloh21, Lloh22, Lloh23
+;
+; CHECK-ELF-LABEL: setInternalCPlus4:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    add x8, x8, :lo12:InternalC
+; CHECK-ELF-NEXT:    str w0, [x8, #16]
+; CHECK-ELF-NEXT:    ret
 entry:
   %addr = getelementptr inbounds i32, i32* @InternalC, i32 4
   store i32 %t, i32* %addr, align 4
@@ -216,6 +441,20 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdr [[ADRP_LABEL]], [[LDR_LABEL]]
 define i32 @getInternalC() {
+; CHECK-LABEL: getInternalC:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh24:
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:  Lloh25:
+; CHECK-NEXT:    ldr w0, [x8, _InternalC@PAGEOFF]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh24, Lloh25
+;
+; CHECK-ELF-LABEL: getInternalC:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    ldr w0, [x8, :lo12:InternalC]
+; CHECK-ELF-NEXT:    ret
   %res = load i32, i32* @InternalC, align 4
   ret i32 %res
 }
@@ -230,6 +469,20 @@ define i32 @getInternalC() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdr [[ADRP_LABEL]], [[LDR_LABEL]]
 define i64 @getSExtInternalC() {
+; CHECK-LABEL: getSExtInternalC:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh26:
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:  Lloh27:
+; CHECK-NEXT:    ldrsw x0, [x8, _InternalC@PAGEOFF]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh26, Lloh27
+;
+; CHECK-ELF-LABEL: getSExtInternalC:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    ldrsw x0, [x8, :lo12:InternalC]
+; CHECK-ELF-NEXT:    ret
   %res = load i32, i32* @InternalC, align 4
   %sextres = sext i32 %res to i64
   ret i64 %sextres
@@ -246,6 +499,21 @@ define i64 @getSExtInternalC() {
 ; CHECK-NEXT: str [[ADD]], [[[ADRP_REG]], _InternalC@PAGEOFF]
 ; CHECK-NEXT: ret
 define void @getSeveralInternalC(i32 %t) {
+; CHECK-LABEL: getSeveralInternalC:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:    ldr w9, [x8, _InternalC@PAGEOFF]
+; CHECK-NEXT:    add w9, w9, w0
+; CHECK-NEXT:    str w9, [x8, _InternalC@PAGEOFF]
+; CHECK-NEXT:    ret
+;
+; CHECK-ELF-LABEL: getSeveralInternalC:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    ldr w9, [x8, :lo12:InternalC]
+; CHECK-ELF-NEXT:    add w9, w9, w0
+; CHECK-ELF-NEXT:    str w9, [x8, :lo12:InternalC]
+; CHECK-ELF-NEXT:    ret
 entry:
   %tmp = load i32, i32* @InternalC, align 4
   %add = add nsw i32 %tmp, %t
@@ -262,6 +530,17 @@ entry:
 ; CHECK-NEXT: str w0, [[[ADRP_REG]], _InternalC@PAGEOFF]
 ; CHECK-NEXT: ret
 define void @setInternalC(i32 %t) {
+; CHECK-LABEL: setInternalC:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    adrp x8, _InternalC@PAGE
+; CHECK-NEXT:    str w0, [x8, _InternalC@PAGEOFF]
+; CHECK-NEXT:    ret
+;
+; CHECK-ELF-LABEL: setInternalC:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    adrp x8, InternalC
+; CHECK-ELF-NEXT:    str w0, [x8, :lo12:InternalC]
+; CHECK-ELF-NEXT:    ret
 entry:
   store i32 %t, i32* @InternalC, align 4
   ret void
@@ -282,6 +561,22 @@ entry:
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define i8 @getD() {
+; CHECK-LABEL: getD:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh28:
+; CHECK-NEXT:    adrp x8, _D@GOTPAGE
+; CHECK-NEXT:  Lloh29:
+; CHECK-NEXT:    ldr x8, [x8, _D@GOTPAGEOFF]
+; CHECK-NEXT:    ldrb w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh28, Lloh29
+;
+; CHECK-ELF-LABEL: getD:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:D
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:D]
+; CHECK-ELF-NEXT:    ldrb w0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i8, i8* @D, align 4
   ret i8 %res
 }
@@ -296,6 +591,23 @@ define i8 @getD() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setD(i8 %t) {
+; CHECK-LABEL: setD:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh30:
+; CHECK-NEXT:    adrp x8, _D@GOTPAGE
+; CHECK-NEXT:  Lloh31:
+; CHECK-NEXT:    ldr x8, [x8, _D@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh32:
+; CHECK-NEXT:    strb w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh30, Lloh31, Lloh32
+;
+; CHECK-ELF-LABEL: setD:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:D
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:D]
+; CHECK-ELF-NEXT:    strb w0, [x8]
+; CHECK-ELF-NEXT:    ret
   store i8 %t, i8* @D, align 4
   ret void
 }
@@ -312,6 +624,23 @@ define void @setD(i8 %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getSExtD() {
+; CHECK-LABEL: getSExtD:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh33:
+; CHECK-NEXT:    adrp x8, _D@GOTPAGE
+; CHECK-NEXT:  Lloh34:
+; CHECK-NEXT:    ldr x8, [x8, _D@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh35:
+; CHECK-NEXT:    ldrsb w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh33, Lloh34, Lloh35
+;
+; CHECK-ELF-LABEL: getSExtD:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:D
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:D]
+; CHECK-ELF-NEXT:    ldrsb w0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i8, i8* @D, align 4
   %sextres = sext i8 %res to i32
   ret i32 %sextres
@@ -329,6 +658,23 @@ define i32 @getSExtD() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExt64D() {
+; CHECK-LABEL: getSExt64D:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh36:
+; CHECK-NEXT:    adrp x8, _D@GOTPAGE
+; CHECK-NEXT:  Lloh37:
+; CHECK-NEXT:    ldr x8, [x8, _D@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh38:
+; CHECK-NEXT:    ldrsb x0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh36, Lloh37, Lloh38
+;
+; CHECK-ELF-LABEL: getSExt64D:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:D
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:D]
+; CHECK-ELF-NEXT:    ldrsb x0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i8, i8* @D, align 4
   %sextres = sext i8 %res to i64
   ret i64 %sextres
@@ -347,6 +693,22 @@ define i64 @getSExt64D() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define i16 @getE() {
+; CHECK-LABEL: getE:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh39:
+; CHECK-NEXT:    adrp x8, _E@GOTPAGE
+; CHECK-NEXT:  Lloh40:
+; CHECK-NEXT:    ldr x8, [x8, _E@GOTPAGEOFF]
+; CHECK-NEXT:    ldrh w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh39, Lloh40
+;
+; CHECK-ELF-LABEL: getE:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:E
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:E]
+; CHECK-ELF-NEXT:    ldrh w0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i16, i16* @E, align 4
   ret i16 %res
 }
@@ -363,6 +725,23 @@ define i16 @getE() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i32 @getSExtE() {
+; CHECK-LABEL: getSExtE:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh41:
+; CHECK-NEXT:    adrp x8, _E@GOTPAGE
+; CHECK-NEXT:  Lloh42:
+; CHECK-NEXT:    ldr x8, [x8, _E@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh43:
+; CHECK-NEXT:    ldrsh w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh41, Lloh42, Lloh43
+;
+; CHECK-ELF-LABEL: getSExtE:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:E
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:E]
+; CHECK-ELF-NEXT:    ldrsh w0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i16, i16* @E, align 4
   %sextres = sext i16 %res to i32
   ret i32 %sextres
@@ -378,6 +757,23 @@ define i32 @getSExtE() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setE(i16 %t) {
+; CHECK-LABEL: setE:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh44:
+; CHECK-NEXT:    adrp x8, _E@GOTPAGE
+; CHECK-NEXT:  Lloh45:
+; CHECK-NEXT:    ldr x8, [x8, _E@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh46:
+; CHECK-NEXT:    strh w0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh44, Lloh45, Lloh46
+;
+; CHECK-ELF-LABEL: setE:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:E
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:E]
+; CHECK-ELF-NEXT:    strh w0, [x8]
+; CHECK-ELF-NEXT:    ret
   store i16 %t, i16* @E, align 4
   ret void
 }
@@ -394,6 +790,23 @@ define void @setE(i16 %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getSExt64E() {
+; CHECK-LABEL: getSExt64E:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh47:
+; CHECK-NEXT:    adrp x8, _E@GOTPAGE
+; CHECK-NEXT:  Lloh48:
+; CHECK-NEXT:    ldr x8, [x8, _E@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh49:
+; CHECK-NEXT:    ldrsh x0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh47, Lloh48, Lloh49
+;
+; CHECK-ELF-LABEL: getSExt64E:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:E
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:E]
+; CHECK-ELF-NEXT:    ldrsh x0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i16, i16* @E, align 4
   %sextres = sext i16 %res to i64
   ret i64 %sextres
@@ -413,6 +826,23 @@ define i64 @getSExt64E() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define i64 @getF() {
+; CHECK-LABEL: getF:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh50:
+; CHECK-NEXT:    adrp x8, _F@GOTPAGE
+; CHECK-NEXT:  Lloh51:
+; CHECK-NEXT:    ldr x8, [x8, _F@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh52:
+; CHECK-NEXT:    ldr x0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh50, Lloh51, Lloh52
+;
+; CHECK-ELF-LABEL: getF:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:F
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:F]
+; CHECK-ELF-NEXT:    ldr x0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load i64, i64* @F, align 4
   ret i64 %res
 }
@@ -427,6 +857,23 @@ define i64 @getF() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setF(i64 %t) {
+; CHECK-LABEL: setF:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh53:
+; CHECK-NEXT:    adrp x8, _F@GOTPAGE
+; CHECK-NEXT:  Lloh54:
+; CHECK-NEXT:    ldr x8, [x8, _F@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh55:
+; CHECK-NEXT:    str x0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh53, Lloh54, Lloh55
+;
+; CHECK-ELF-LABEL: setF:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:F
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:F]
+; CHECK-ELF-NEXT:    str x0, [x8]
+; CHECK-ELF-NEXT:    ret
   store i64 %t, i64* @F, align 4
   ret void
 }
@@ -445,6 +892,23 @@ define void @setF(i64 %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define float @getG() {
+; CHECK-LABEL: getG:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh56:
+; CHECK-NEXT:    adrp x8, _G@GOTPAGE
+; CHECK-NEXT:  Lloh57:
+; CHECK-NEXT:    ldr x8, [x8, _G@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh58:
+; CHECK-NEXT:    ldr s0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh56, Lloh57, Lloh58
+;
+; CHECK-ELF-LABEL: getG:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:G
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:G]
+; CHECK-ELF-NEXT:    ldr s0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load float, float* @G, align 4
   ret float %res
 }
@@ -459,6 +923,23 @@ define float @getG() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setG(float %t) {
+; CHECK-LABEL: setG:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh59:
+; CHECK-NEXT:    adrp x8, _G@GOTPAGE
+; CHECK-NEXT:  Lloh60:
+; CHECK-NEXT:    ldr x8, [x8, _G@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh61:
+; CHECK-NEXT:    str s0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh59, Lloh60, Lloh61
+;
+; CHECK-ELF-LABEL: setG:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:G
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:G]
+; CHECK-ELF-NEXT:    str s0, [x8]
+; CHECK-ELF-NEXT:    ret
   store float %t, float* @G, align 4
   ret void
 }
@@ -477,6 +958,23 @@ define void @setG(float %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define half @getH() {
+; CHECK-LABEL: getH:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh62:
+; CHECK-NEXT:    adrp x8, _H@GOTPAGE
+; CHECK-NEXT:  Lloh63:
+; CHECK-NEXT:    ldr x8, [x8, _H@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh64:
+; CHECK-NEXT:    ldr h0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh62, Lloh63, Lloh64
+;
+; CHECK-ELF-LABEL: getH:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:H
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:H]
+; CHECK-ELF-NEXT:    ldr h0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load half, half* @H, align 4
   ret half %res
 }
@@ -491,6 +989,23 @@ define half @getH() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setH(half %t) {
+; CHECK-LABEL: setH:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh65:
+; CHECK-NEXT:    adrp x8, _H@GOTPAGE
+; CHECK-NEXT:  Lloh66:
+; CHECK-NEXT:    ldr x8, [x8, _H@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh67:
+; CHECK-NEXT:    str h0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh65, Lloh66, Lloh67
+;
+; CHECK-ELF-LABEL: setH:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:H
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:H]
+; CHECK-ELF-NEXT:    str h0, [x8]
+; CHECK-ELF-NEXT:    ret
   store half %t, half* @H, align 4
   ret void
 }
@@ -509,6 +1024,23 @@ define void @setH(half %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define double @getI() {
+; CHECK-LABEL: getI:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh68:
+; CHECK-NEXT:    adrp x8, _I@GOTPAGE
+; CHECK-NEXT:  Lloh69:
+; CHECK-NEXT:    ldr x8, [x8, _I@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh70:
+; CHECK-NEXT:    ldr d0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh68, Lloh69, Lloh70
+;
+; CHECK-ELF-LABEL: getI:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:I
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:I]
+; CHECK-ELF-NEXT:    ldr d0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load double, double* @I, align 4
   ret double %res
 }
@@ -523,6 +1055,23 @@ define double @getI() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setI(double %t) {
+; CHECK-LABEL: setI:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh71:
+; CHECK-NEXT:    adrp x8, _I@GOTPAGE
+; CHECK-NEXT:  Lloh72:
+; CHECK-NEXT:    ldr x8, [x8, _I@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh73:
+; CHECK-NEXT:    str d0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh71, Lloh72, Lloh73
+;
+; CHECK-ELF-LABEL: setI:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:I
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:I]
+; CHECK-ELF-NEXT:    str d0, [x8]
+; CHECK-ELF-NEXT:    ret
   store double %t, double* @I, align 4
   ret void
 }
@@ -541,6 +1090,23 @@ define void @setI(double %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define <2 x i32> @getJ() {
+; CHECK-LABEL: getJ:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh74:
+; CHECK-NEXT:    adrp x8, _J@GOTPAGE
+; CHECK-NEXT:  Lloh75:
+; CHECK-NEXT:    ldr x8, [x8, _J@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh76:
+; CHECK-NEXT:    ldr d0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh74, Lloh75, Lloh76
+;
+; CHECK-ELF-LABEL: getJ:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:J
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:J]
+; CHECK-ELF-NEXT:    ldr d0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load <2 x i32>, <2 x i32>* @J, align 4
   ret <2 x i32> %res
 }
@@ -555,6 +1121,23 @@ define <2 x i32> @getJ() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setJ(<2 x i32> %t) {
+; CHECK-LABEL: setJ:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh77:
+; CHECK-NEXT:    adrp x8, _J@GOTPAGE
+; CHECK-NEXT:  Lloh78:
+; CHECK-NEXT:    ldr x8, [x8, _J@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh79:
+; CHECK-NEXT:    str d0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh77, Lloh78, Lloh79
+;
+; CHECK-ELF-LABEL: setJ:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:J
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:J]
+; CHECK-ELF-NEXT:    str d0, [x8]
+; CHECK-ELF-NEXT:    ret
   store <2 x i32> %t, <2 x i32>* @J, align 4
   ret void
 }
@@ -573,6 +1156,23 @@ define void @setJ(<2 x i32> %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define <4 x i32> @getK() {
+; CHECK-LABEL: getK:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh80:
+; CHECK-NEXT:    adrp x8, _K@GOTPAGE
+; CHECK-NEXT:  Lloh81:
+; CHECK-NEXT:    ldr x8, [x8, _K@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh82:
+; CHECK-NEXT:    ldr q0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh80, Lloh81, Lloh82
+;
+; CHECK-ELF-LABEL: getK:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:K
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:K]
+; CHECK-ELF-NEXT:    ldr q0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load <4 x i32>, <4 x i32>* @K, align 4
   ret <4 x i32> %res
 }
@@ -587,6 +1187,23 @@ define <4 x i32> @getK() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotStr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[STR_LABEL]]
 define void @setK(<4 x i32> %t) {
+; CHECK-LABEL: setK:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh83:
+; CHECK-NEXT:    adrp x8, _K@GOTPAGE
+; CHECK-NEXT:  Lloh84:
+; CHECK-NEXT:    ldr x8, [x8, _K@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh85:
+; CHECK-NEXT:    str q0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotStr Lloh83, Lloh84, Lloh85
+;
+; CHECK-ELF-LABEL: setK:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:K
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:K]
+; CHECK-ELF-NEXT:    str q0, [x8]
+; CHECK-ELF-NEXT:    ret
   store <4 x i32> %t, <4 x i32>* @K, align 4
   ret void
 }
@@ -605,6 +1222,23 @@ define void @setK(<4 x i32> %t) {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGotLdr [[ADRP_LABEL]], [[LDRGOT_LABEL]], [[LDR_LABEL]]
 define <1 x i8> @getL() {
+; CHECK-LABEL: getL:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh86:
+; CHECK-NEXT:    adrp x8, _L@GOTPAGE
+; CHECK-NEXT:  Lloh87:
+; CHECK-NEXT:    ldr x8, [x8, _L@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh88:
+; CHECK-NEXT:    ldr b0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGotLdr Lloh86, Lloh87, Lloh88
+;
+; CHECK-ELF-LABEL: getL:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:L
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:L]
+; CHECK-ELF-NEXT:    ldr b0, [x8]
+; CHECK-ELF-NEXT:    ret
   %res = load <1 x i8>, <1 x i8>* @L, align 4
   ret <1 x i8> %res
 }
@@ -621,6 +1255,24 @@ define <1 x i8> @getL() {
 ; CHECK-NEXT: ret
 ; CHECK: .loh AdrpLdrGot [[ADRP_LABEL]], [[LDRGOT_LABEL]]
 define void @setL(<1 x i8> %t) {
+; CHECK-LABEL: setL:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh89:
+; CHECK-NEXT:    adrp x8, _L@GOTPAGE
+; CHECK-NEXT:    ; kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:  Lloh90:
+; CHECK-NEXT:    ldr x8, [x8, _L@GOTPAGEOFF]
+; CHECK-NEXT:    st1.b { v0 }[0], [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh89, Lloh90
+;
+; CHECK-ELF-LABEL: setL:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, :got:L
+; CHECK-ELF-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-ELF-NEXT:    ldr x8, [x8, :got_lo12:L]
+; CHECK-ELF-NEXT:    st1 { v0.b }[0], [x8]
+; CHECK-ELF-NEXT:    ret
   store <1 x i8> %t, <1 x i8>* @L, align 4
   ret void
 }
@@ -643,6 +1295,32 @@ define void @setL(<1 x i8> %t) {
 ; CHECK: ret
 ; CHECK: .loh AdrpLdr [[LOH_LABEL0]], [[LOH_LABEL1]]
 define void @uninterestingSub(i8* nocapture %row) #0 {
+; CHECK-LABEL: uninterestingSub:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:  Lloh91:
+; CHECK-NEXT:    adrp x8, lCPI36_0@PAGE
+; CHECK-NEXT:  Lloh92:
+; CHECK-NEXT:    ldr q0, [x8, lCPI36_0@PAGEOFF]
+; CHECK-NEXT:    ldp q1, q2, [x0]
+; CHECK-NEXT:    ext.16b v0, v0, v1, #1
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ldr q0, [x8]
+; CHECK-NEXT:    ext.16b v0, v2, v0, #1
+; CHECK-NEXT:    str q0, [x8]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh91, Lloh92
+;
+; CHECK-ELF-LABEL: uninterestingSub:
+; CHECK-ELF:       // %bb.0:
+; CHECK-ELF-NEXT:    adrp x8, .LCPI36_0
+; CHECK-ELF-NEXT:    ldr q0, [x8, :lo12:.LCPI36_0]
+; CHECK-ELF-NEXT:    ldp q1, q2, [x0]
+; CHECK-ELF-NEXT:    ext v0.16b, v0.16b, v1.16b, #1
+; CHECK-ELF-NEXT:    str q0, [x0]
+; CHECK-ELF-NEXT:    ldr q0, [x8]
+; CHECK-ELF-NEXT:    ext v0.16b, v2.16b, v0.16b, #1
+; CHECK-ELF-NEXT:    str q0, [x8]
+; CHECK-ELF-NEXT:    ret
   %tmp = bitcast i8* %row to <16 x i8>*
   %tmp1 = load <16 x i8>, <16 x i8>* %tmp, align 16
   %vext43 = shufflevector <16 x i8> <i8 undef, i8 16, i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2>, <16 x i8> %tmp1, <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16>
@@ -663,21 +1341,71 @@ define void @uninterestingSub(i8* nocapture %row) #0 {
 @.str.90 = external unnamed_addr constant [5 x i8], align 1
 ; CHECK-LABEL: test_r274582
 define void @test_r274582(double %x) {
+; CHECK-LABEL: test_r274582:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    cbnz wzr, LBB37_2
+; CHECK-NEXT:  ; %bb.1: ; %if.then.i
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  LBB37_2: ; %if.end.i
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:  Lloh93:
+; CHECK-NEXT:    adrp x8, lCPI37_0@PAGE
+; CHECK-NEXT:  Lloh94:
+; CHECK-NEXT:    adrp x0, _.str.89@GOTPAGE
+; CHECK-NEXT:  Lloh95:
+; CHECK-NEXT:    ldr d1, [x8, lCPI37_0@PAGEOFF]
+; CHECK-NEXT:  Lloh96:
+; CHECK-NEXT:    adrp x8, _.str.90@GOTPAGE
+; CHECK-NEXT:  Lloh97:
+; CHECK-NEXT:    ldr x8, [x8, _.str.90@GOTPAGEOFF]
+; CHECK-NEXT:  Lloh98:
+; CHECK-NEXT:    ldr x0, [x0, _.str.89@GOTPAGEOFF]
+; CHECK-NEXT:    fmul d0, d0, d1
+; CHECK-NEXT:    str x8, [sp]
+; CHECK-NEXT:    fadd d0, d0, d0
+; CHECK-NEXT:    fsub d0, d0, d0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    bl _callee
+; CHECK-NEXT:    brk #0x1
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh96, Lloh97
+; CHECK-NEXT:    .loh AdrpLdrGot Lloh94, Lloh98
+; CHECK-NEXT:    .loh AdrpAdrp Lloh93, Lloh96
+; CHECK-NEXT:    .loh AdrpLdr Lloh93, Lloh95
+;
+; CHECK-ELF-LABEL: test_r274582:
+; CHECK-ELF:       // %bb.0: // %entry
+; CHECK-ELF-NEXT:    cbnz wzr, .LBB37_2
+; CHECK-ELF-NEXT:  // %bb.1: // %if.then.i
+; CHECK-ELF-NEXT:    ret
+; CHECK-ELF-NEXT:  .LBB37_2: // %if.end.i
+; CHECK-ELF-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-ELF-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ELF-NEXT:    .cfi_offset w30, -16
+; CHECK-ELF-NEXT:    adrp x8, .LCPI37_0
+; CHECK-ELF-NEXT:    adrp x0, :got:.str.89
+; CHECK-ELF-NEXT:    adrp x1, :got:.str.90
+; CHECK-ELF-NEXT:    ldr d1, [x8, :lo12:.LCPI37_0]
+; CHECK-ELF-NEXT:    ldr x0, [x0, :got_lo12:.str.89]
+; CHECK-ELF-NEXT:    ldr x1, [x1, :got_lo12:.str.90]
+; CHECK-ELF-NEXT:    fmul d0, d0, d1
+; CHECK-ELF-NEXT:    fadd d0, d0, d0
+; CHECK-ELF-NEXT:    fsub d0, d0, d0
+; CHECK-ELF-NEXT:    bl callee
 entry:
   br i1 undef, label %if.then.i, label %if.end.i
 if.then.i:
   ret void
 if.end.i:
-; CHECK: .loh AdrpLdrGot
-; CHECK: .loh AdrpLdrGot
-; CHECK: .loh AdrpAdrp
-; CHECK: .loh AdrpLdr
   %mul = fmul double %x, 1.000000e-06
   %add = fadd double %mul, %mul
   %sub = fsub double %add, %add
   call void (i8*, ...) @callee(i8* getelementptr inbounds ([12 x i8], [12 x i8]* @.str.89, i64 0, i64 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str.90, i64 0, i64 0), double %sub)
   unreachable
 }
-declare void @callee(i8* nocapture readonly, ...) 
+declare void @callee(i8* nocapture readonly, ...)
 
 attributes #0 = { "target-cpu"="cyclone" }
diff --git a/llvm/test/CodeGen/AArch64/arm64-crypto.ll b/llvm/test/CodeGen/AArch64/arm64-crypto.ll
index 1def7588e7bde9..20d098eed6917f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-crypto.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-crypto.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-eabi -mattr=crypto -aarch64-neon-syntax=apple -o - %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-eabi -global-isel -global-isel-abort=2 -pass-remarks-missed=gisel* -mattr=crypto -aarch64-neon-syntax=apple -o - %s 2>&1 | FileCheck %s --check-prefixes=CHECK,FALLBACK
 
@@ -8,28 +9,36 @@ declare <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %data)
 
 define <16 x i8> @test_aese(<16 x i8> %data, <16 x i8> %key) {
 ; CHECK-LABEL: test_aese:
-; CHECK: aese.16b v0, v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    aese.16b v0, v1
+; CHECK-NEXT:    ret
   %res = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)
   ret <16 x i8> %res
 }
 
 define <16 x i8> @test_aesd(<16 x i8> %data, <16 x i8> %key) {
 ; CHECK-LABEL: test_aesd:
-; CHECK: aesd.16b v0, v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    aesd.16b v0, v1
+; CHECK-NEXT:    ret
   %res = call <16 x i8> @llvm.aarch64.crypto.aesd(<16 x i8> %data, <16 x i8> %key)
   ret <16 x i8> %res
 }
 
 define <16 x i8> @test_aesmc(<16 x i8> %data) {
 ; CHECK-LABEL: test_aesmc:
-; CHECK: aesmc.16b v0, v0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    aesmc.16b v0, v0
+; CHECK-NEXT:    ret
  %res = call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %data)
   ret <16 x i8> %res
 }
 
 define <16 x i8> @test_aesimc(<16 x i8> %data) {
 ; CHECK-LABEL: test_aesimc:
-; CHECK: aesimc.16b v0, v0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    aesimc.16b v0, v0
+; CHECK-NEXT:    ret
  %res = call <16 x i8> @llvm.aarch64.crypto.aesimc(<16 x i8> %data)
   ret <16 x i8> %res
 }
@@ -43,8 +52,10 @@ declare <4 x i32> @llvm.aarch64.crypto.sha1su1(<4 x i32> %wk0_3, <4 x i32> %wk12
 
 define <4 x i32> @test_sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
 ; CHECK-LABEL: test_sha1c:
-; CHECK: fmov [[HASH_E:s[0-9]+]], w0
-; CHECK: sha1c.4s q0, [[HASH_E]], v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    sha1c.4s q0, s2, v1
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
   ret <4 x i32> %res
 }
@@ -52,10 +63,12 @@ define <4 x i32> @test_sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
 ; <rdar://problem/14742333> Incomplete removal of unnecessary FMOV instructions in intrinsic SHA1
 define <4 x i32> @test_sha1c_in_a_row(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
 ; CHECK-LABEL: test_sha1c_in_a_row:
-; CHECK: fmov [[HASH_E:s[0-9]+]], w0
-; CHECK: sha1c.4s q[[SHA1RES:[0-9]+]], [[HASH_E]], v1
-; CHECK-NOT: fmov
-; CHECK: sha1c.4s q0, s[[SHA1RES]], v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    mov.16b v3, v0
+; CHECK-NEXT:    sha1c.4s q3, s2, v1
+; CHECK-NEXT:    sha1c.4s q0, s3, v1
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
   %extract = extractelement <4 x i32> %res, i32 0
   %res2 = call <4 x i32> @llvm.aarch64.crypto.sha1c(<4 x i32> %hash_abcd, i32 %extract, <4 x i32> %wk)
@@ -64,16 +77,20 @@ define <4 x i32> @test_sha1c_in_a_row(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i3
 
 define <4 x i32> @test_sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
 ; CHECK-LABEL: test_sha1p:
-; CHECK: fmov [[HASH_E:s[0-9]+]], w0
-; CHECK: sha1p.4s q0, [[HASH_E]], v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    sha1p.4s q0, s2, v1
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha1p(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
   ret <4 x i32> %res
 }
 
 define <4 x i32> @test_sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
 ; CHECK-LABEL: test_sha1m:
-; CHECK: fmov [[HASH_E:s[0-9]+]], w0
-; CHECK: sha1m.4s q0, [[HASH_E]], v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    sha1m.4s q0, s2, v1
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk)
   ret <4 x i32> %res
 }
@@ -81,23 +98,29 @@ define <4 x i32> @test_sha1m(<4 x i32> %hash_abcd, i32 %hash_e, <4 x i32> %wk) {
 ; FALLBACK-NOT: remark{{.*}}test_sha1h
 define i32 @test_sha1h(i32 %hash_e) {
 ; CHECK-LABEL: test_sha1h:
-; CHECK: fmov [[HASH_E:s[0-9]+]], w0
-; CHECK: sha1h [[RES:s[0-9]+]], [[HASH_E]]
-; CHECK: fmov w0, [[RES]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    sha1h s0, s0
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %res = call i32 @llvm.aarch64.crypto.sha1h(i32 %hash_e)
   ret i32 %res
 }
 
 define <4 x i32> @test_sha1su0(<4 x i32> %wk0_3, <4 x i32> %wk4_7, <4 x i32> %wk8_11) {
 ; CHECK-LABEL: test_sha1su0:
-; CHECK: sha1su0.4s v0, v1, v2
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sha1su0.4s v0, v1, v2
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha1su0(<4 x i32> %wk0_3, <4 x i32> %wk4_7, <4 x i32> %wk8_11)
   ret <4 x i32> %res
 }
 
 define <4 x i32> @test_sha1su1(<4 x i32> %wk0_3, <4 x i32> %wk12_15) {
 ; CHECK-LABEL: test_sha1su1:
-; CHECK: sha1su1.4s v0, v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sha1su1.4s v0, v1
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha1su1(<4 x i32> %wk0_3, <4 x i32> %wk12_15)
   ret <4 x i32> %res
 }
@@ -109,14 +132,18 @@ declare <4 x i32> @llvm.aarch64.crypto.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_
 
 define <4 x i32> @test_sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk) {
 ; CHECK-LABEL: test_sha256h:
-; CHECK: sha256h.4s q0, q1, v2
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sha256h.4s q0, q1, v2
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha256h(<4 x i32> %hash_abcd, <4 x i32> %hash_efgh, <4 x i32> %wk)
   ret <4 x i32> %res
 }
 
 define <4 x i32> @test_sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk) {
 ; CHECK-LABEL: test_sha256h2:
-; CHECK: sha256h2.4s q0, q1, v2
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sha256h2.4s q0, q1, v2
+; CHECK-NEXT:    ret
 
   %res = call <4 x i32> @llvm.aarch64.crypto.sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x i32> %wk)
   ret <4 x i32> %res
@@ -124,14 +151,20 @@ define <4 x i32> @test_sha256h2(<4 x i32> %hash_efgh, <4 x i32> %hash_abcd, <4 x
 
 define <4 x i32> @test_sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7) {
 ; CHECK-LABEL: test_sha256su0:
-; CHECK: sha256su0.4s v0, v1
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sha256su0.4s v0, v1
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha256su0(<4 x i32> %w0_3, <4 x i32> %w4_7)
   ret <4 x i32> %res
 }
 
 define <4 x i32> @test_sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15) {
 ; CHECK-LABEL: test_sha256su1:
-; CHECK: sha256su1.4s v0, v1, v2
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sha256su1.4s v0, v1, v2
+; CHECK-NEXT:    ret
   %res = call <4 x i32> @llvm.aarch64.crypto.sha256su1(<4 x i32> %w0_3, <4 x i32> %w8_11, <4 x i32> %w12_15)
   ret <4 x i32> %res
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; FALLBACK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll
index a45e31320de89d..bc5dc7eda19f0e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-convergence.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -o /dev/null
 ; rdar://10795250
 ; DAGCombiner should converge.
diff --git a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
index 5963d98ec3240b..8e63832a899dd2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll
@@ -1,19 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; rdar://12771555
 
 define void @foo(ptr %ptr, i32 %a) nounwind {
-entry:
 ; CHECK-LABEL: foo:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cmp w1, #99
+; CHECK-NEXT:    b.hi .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %bb1
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    cmp w8, #23
+; CHECK-NEXT:    b.ls .LBB0_3
+; CHECK-NEXT:    b .LBB0_4
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    cmp w8, #23
+; CHECK-NEXT:    b.hi .LBB0_4
+; CHECK-NEXT:  .LBB0_3: // %bb3
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    bl bar
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:  .LBB0_4: // %exit
+; CHECK-NEXT:    ret
+entry:
   %tmp1 = icmp ult i32 %a, 100
   br i1 %tmp1, label %bb1, label %bb2
 bb1:
-; CHECK: %bb1
-; CHECK: ldrh [[REG:w[0-9]+]]
   %tmp2 = load i16, ptr %ptr, align 2
   br label %bb2
 bb2:
-; CHECK-NOT: and {{w[0-9]+}}, [[REG]], #0xffff
-; CHECK: cmp [[REG]], #23
   %tmp3 = phi i16 [ 0, %entry ], [ %tmp2, %bb1 ]
   %cmp = icmp ult i16 %tmp3, 24
   br i1 %cmp, label %bb3, label %exit
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
index 04617d1c89f1b1..9be2152735b62e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -mtriple=arm64-apple-darwin -mcpu=cyclone -verify-machineinstrs < %s | FileCheck %s
 
 define void @branch1() nounwind uwtable ssp {
@@ -58,12 +59,9 @@ define void @branch2() nounwind uwtable ssp {
 }
 
 define void @true_() nounwind uwtable ssp {
-; CHECK: @true_
-; CHECK: b LBB2_1
   br i1 true, label %1, label %2
 
 ; <label>:1
-; CHECK: LBB2_1
   br label %2
 
 ; <label>:2
@@ -71,15 +69,12 @@ define void @true_() nounwind uwtable ssp {
 }
 
 define void @false_() nounwind uwtable ssp {
-; CHECK: @false_
-; CHECK: b LBB3_2
   br i1 false, label %1, label %2
 
 ; <label>:1
   br label %2
 
 ; <label>:2
-; CHECK: LBB3_2
   ret void
 }
 
@@ -94,7 +89,6 @@ entry:
   store i32 %c, ptr %c.addr, align 4
   store i64 %d, ptr %d.addr, align 8
   %0 = load i16, ptr %b.addr, align 2
-; CHECK: tbz {{w[0-9]+}}, #0, LBB4_2
   %conv = trunc i16 %0 to i1
   br i1 %conv, label %if.then, label %if.end
 
@@ -104,7 +98,6 @@ if.then:                                          ; preds = %entry
 
 if.end:                                           ; preds = %if.then, %entry
   %1 = load i32, ptr %c.addr, align 4
-; CHECK: tbz w{{[0-9]+}}, #0, LBB4_4
   %conv1 = trunc i32 %1 to i1
   br i1 %conv1, label %if.then3, label %if.end4
 
@@ -114,7 +107,6 @@ if.then3:                                         ; preds = %if.end
 
 if.end4:                                          ; preds = %if.then3, %if.end
   %2 = load i64, ptr %d.addr, align 8
-; CHECK: tbz w{{[0-9]+}}, #0, LBB4_6
   %conv5 = trunc i64 %2 to i1
   br i1 %conv5, label %if.then7, label %if.end8
 
@@ -131,9 +123,6 @@ declare void @foo1()
 
 ; rdar://15174028
 define i32 @trunc64(i64 %foo) nounwind {
-; CHECK: trunc64
-; CHECK: and x[[REG1:[0-9]+]], x0, #0x1
-; CHECK: tbz w[[REG1]], #0, LBB5_2
   %a = and i64 %foo, 1
   %b = trunc i64 %a to i1
   br i1 %b, label %if.then, label %if.else
@@ -144,3 +133,5 @@ if.then:
 if.else:
   ret i32 0
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
index 2f263343ada768..442f14e70e3802 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
 
 ; Test load/store of global value from global offset table.
@@ -5,31 +6,12 @@
 
 define void @Initrand() nounwind {
 entry:
-; CHECK: @Initrand
-; CHECK: adrp [[REG:x[0-9]+]], _seed@GOTPAGE
-; CHECK: ldr  [[REG2:x[0-9]+]], [[[REG]], _seed@GOTPAGEOFF]
-; CHECK: str  {{x[0-9]+}}, [[[REG2]]]
   store i64 74755, ptr @seed, align 8
   ret void
 }
 
 define i32 @Rand() nounwind {
 entry:
-; CHECK: @Rand
-; CHECK: adrp [[REG1:x[0-9]+]], _seed@GOTPAGE
-; CHECK: ldr  [[REG2:x[0-9]+]], [[[REG1]], _seed@GOTPAGEOFF]
-; CHECK: ldr  [[REG5:x[0-9]+]], [[[REG2]]]
-; CHECK: mov  [[REG4:x[0-9]+]], #1309
-; CHECK: mul  [[REG6:x[0-9]+]], [[REG5]], [[REG4]]
-; CHECK: mov  [[REG3:x[0-9]+]], #13849
-; CHECK: add  [[REG7:x[0-9]+]], [[REG6]], [[REG3]]
-; CHECK: and  [[REG8:x[0-9]+]], [[REG7]], #0xffff
-; CHECK: adrp [[REG1:x[0-9]+]], _seed@GOTPAGE
-; CHECK: ldr  [[REG1]], [[[REG1]], _seed@GOTPAGEOFF]
-; CHECK: str  [[REG8]], [[[REG1]]]
-; CHECK: adrp [[REG1:x[0-9]+]], _seed@GOTPAGE
-; CHECK: ldr  [[REG1]], [[[REG1]], _seed@GOTPAGEOFF]
-; CHECK: ldr  {{x[0-9]+}}, [[[REG1]]]
   %0 = load i64, ptr @seed, align 8
   %mul = mul nsw i64 %0, 1309
   %add = add nsw i64 %mul, 13849
@@ -39,3 +21,5 @@ entry:
   %conv = trunc i64 %1 to i32
   ret i32 %conv
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
index 5131182b89be27..2cb945fffe9f12 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
@@ -1,10 +1,10 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin < %s | FileCheck %s
 
 @fn.table = internal global [2 x ptr] [ptr blockaddress(@fn, %ZERO), ptr blockaddress(@fn, %ONE)], align 8
 
 define i32 @fn(i32 %target) nounwind {
 entry:
-; CHECK-LABEL: fn
   %retval = alloca i32, align 4
   %target.addr = alloca i32, align 4
   store i32 %target, ptr %target.addr, align 4
@@ -15,12 +15,10 @@ entry:
   br label %indirectgoto
 
 ZERO:                                             ; preds = %indirectgoto
-; CHECK: LBB0_1
   store i32 0, ptr %retval
   br label %return
 
 ONE:                                              ; preds = %indirectgoto
-; CHECK: LBB0_2
   store i32 1, ptr %retval
   br label %return
 
@@ -29,8 +27,8 @@ return:                                           ; preds = %ONE, %ZERO
   ret i32 %2
 
 indirectgoto:                                     ; preds = %entry
-; CHECK:      ldr [[REG:x[0-9]+]], [sp]
-; CHECK-NEXT: br [[REG]]
   %indirect.goto.dest = phi ptr [ %1, %entry ]
   indirectbr ptr %indirect.goto.dest, [label %ZERO, label %ONE]
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
index 40966cb2a2cc3c..c3710abf619579 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-intrinsic.ll
@@ -1,16 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -verify-machineinstrs -relocation-model=dynamic-no-pic -mtriple=arm64-apple-ios < %s | FileCheck %s --check-prefix=ARM64
 
 @message = global [80 x i8] c"The LLVM Compiler Infrastructure\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00\00", align 16
 @temp = common global [80 x i8] zeroinitializer, align 16
 
 define void @t1() {
-; ARM64-LABEL: t1
-; ARM64: adrp x8, _message@PAGE
-; ARM64: add x0, x8, _message@PAGEOFF
-; ARM64: mov [[REG:w[0-9]+]], wzr
-; ARM64: mov x2, #80
-; ARM64: uxtb w1, [[REG]]
-; ARM64: bl _memset
+; ARM64-LABEL: t1:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; ARM64-NEXT:    .cfi_def_cfa_offset 16
+; ARM64-NEXT:    .cfi_offset w30, -8
+; ARM64-NEXT:    .cfi_offset w29, -16
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x0, x8, _message@PAGEOFF
+; ARM64-NEXT:    mov w8, wzr
+; ARM64-NEXT:    mov x2, #80 ; =0x50
+; ARM64-NEXT:    uxtb w1, w8
+; ARM64-NEXT:    bl _memset
+; ARM64-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; ARM64-NEXT:    ret
   call void @llvm.memset.p0.i64(ptr align 16 @message, i8 0, i64 80, i1 false)
   ret void
 }
@@ -18,13 +26,20 @@ define void @t1() {
 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 
 define void @t2() {
-; ARM64-LABEL: t2
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp x8, _message@PAGE
-; ARM64: add x1, x8, _message@PAGEOFF
-; ARM64: mov x2, #80
-; ARM64: bl _memcpy
+; ARM64-LABEL: t2:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; ARM64-NEXT:    .cfi_def_cfa_offset 16
+; ARM64-NEXT:    .cfi_offset w30, -8
+; ARM64-NEXT:    .cfi_offset w29, -16
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x0, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x1, x8, _message@PAGEOFF
+; ARM64-NEXT:    mov x2, #80 ; =0x50
+; ARM64-NEXT:    bl _memcpy
+; ARM64-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; ARM64-NEXT:    ret
   call void @llvm.memcpy.p0.p0.i64(ptr align 16 @temp, ptr align 16 @message, i64 80, i1 false)
   ret void
 }
@@ -32,13 +47,20 @@ define void @t2() {
 declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
 
 define void @t3() {
-; ARM64-LABEL: t3
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr x0, [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp x8, _message@PAGE
-; ARM64: add x1, x8, _message@PAGEOFF
-; ARM64: mov x2, #20
-; ARM64: bl _memmove
+; ARM64-LABEL: t3:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; ARM64-NEXT:    .cfi_def_cfa_offset 16
+; ARM64-NEXT:    .cfi_offset w30, -8
+; ARM64-NEXT:    .cfi_offset w29, -16
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x0, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x1, x8, _message@PAGEOFF
+; ARM64-NEXT:    mov x2, #20 ; =0x14
+; ARM64-NEXT:    bl _memmove
+; ARM64-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; ARM64-NEXT:    ret
   call void @llvm.memmove.p0.p0.i64(ptr align 16 @temp, ptr align 16 @message, i64 20, i1 false)
   ret void
 }
@@ -46,101 +68,115 @@ define void @t3() {
 declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1)
 
 define void @t4() {
-; ARM64-LABEL: t4
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr [[REG0:x[0-9]+]], [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp [[REG1:x[0-9]+]], _message@PAGE
-; ARM64: add [[REG2:x[0-9]+]], [[REG1]], _message@PAGEOFF
-; ARM64: ldr x10, [[[REG2]]]
-; ARM64: str x10, [[[REG0]]]
-; ARM64: ldr x10, [[[REG2]], #8]
-; ARM64: str x10, [[[REG0]], #8]
-; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #16]
-; ARM64: strb [[REG3]], [[[REG0]], #16]
-; ARM64: ret
+; ARM64-LABEL: t4:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x9, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x8, x8, _message@PAGEOFF
+; ARM64-NEXT:    ldr x10, [x8]
+; ARM64-NEXT:    str x10, [x9]
+; ARM64-NEXT:    ldr x10, [x8, #8]
+; ARM64-NEXT:    str x10, [x9, #8]
+; ARM64-NEXT:    ldrb w8, [x8, #16]
+; ARM64-NEXT:    strb w8, [x9, #16]
+; ARM64-NEXT:    ret
   call void @llvm.memcpy.p0.p0.i64(ptr align 16 @temp, ptr align 16 @message, i64 17, i1 false)
   ret void
 }
 
 define void @t5() {
-; ARM64-LABEL: t5
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr [[REG0:x[0-9]+]], [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp [[REG3:x[0-9]+]], _message@PAGE
-; ARM64: add [[REG1:x[0-9]+]], [[REG3]], _message@PAGEOFF
-; ARM64: ldr x10, [[[REG1]]]
-; ARM64: str x10, [[[REG0]]]
-; ARM64: ldr x10, [[[REG1]], #8]
-; ARM64: str x10, [[[REG0]], #8]
-; ARM64: ldrb [[REG4:w[0-9]+]], [[[REG1]], #16]
-; ARM64: strb [[REG4]], [[[REG0]], #16]
-; ARM64: ret
+; ARM64-LABEL: t5:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x9, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x8, x8, _message@PAGEOFF
+; ARM64-NEXT:    ldr x10, [x8]
+; ARM64-NEXT:    str x10, [x9]
+; ARM64-NEXT:    ldr x10, [x8, #8]
+; ARM64-NEXT:    str x10, [x9, #8]
+; ARM64-NEXT:    ldrb w8, [x8, #16]
+; ARM64-NEXT:    strb w8, [x9, #16]
+; ARM64-NEXT:    ret
   call void @llvm.memcpy.p0.p0.i64(ptr align 8 @temp, ptr align 8 @message, i64 17, i1 false)
   ret void
 }
 
 define void @t6() {
-; ARM64-LABEL: t6
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr [[REG0:x[0-9]+]], [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp [[REG1:x[0-9]+]], _message@PAGE
-; ARM64: add [[REG2:x[0-9]+]], [[REG1]], _message@PAGEOFF
-; ARM64: ldr w10, [[[REG2]]]
-; ARM64: str w10, [[[REG0]]]
-; ARM64: ldr w10, [[[REG2]], #4]
-; ARM64: str w10, [[[REG0]], #4]
-; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #8]
-; ARM64: strb [[REG3]], [[[REG0]], #8]
-; ARM64: ret
+; ARM64-LABEL: t6:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x9, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x8, x8, _message@PAGEOFF
+; ARM64-NEXT:    ldr w10, [x8]
+; ARM64-NEXT:    str w10, [x9]
+; ARM64-NEXT:    ldr w10, [x8, #4]
+; ARM64-NEXT:    str w10, [x9, #4]
+; ARM64-NEXT:    ldrb w8, [x8, #8]
+; ARM64-NEXT:    strb w8, [x9, #8]
+; ARM64-NEXT:    ret
   call void @llvm.memcpy.p0.p0.i64(ptr align 4 @temp, ptr align 4 @message, i64 9, i1 false)
   ret void
 }
 
 define void @t7() {
-; ARM64-LABEL: t7
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr [[REG0:x[0-9]+]], [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp [[REG1:x[0-9]+]], _message@PAGE
-; ARM64: add [[REG2:x[0-9]+]], [[REG1]], _message@PAGEOFF
-; ARM64: ldrh w10, [[[REG2]]]
-; ARM64: strh w10, [[[REG0]]]
-; ARM64: ldrh w10, [[[REG2]], #2]
-; ARM64: strh w10, [[[REG0]], #2]
-; ARM64: ldrh w10, [[[REG2]], #4]
-; ARM64: strh w10, [[[REG0]], #4]
-; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #6]
-; ARM64: strb [[REG3]], [[[REG0]], #6]
-; ARM64: ret
+; ARM64-LABEL: t7:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x9, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x8, x8, _message@PAGEOFF
+; ARM64-NEXT:    ldrh w10, [x8]
+; ARM64-NEXT:    strh w10, [x9]
+; ARM64-NEXT:    ldrh w10, [x8, #2]
+; ARM64-NEXT:    strh w10, [x9, #2]
+; ARM64-NEXT:    ldrh w10, [x8, #4]
+; ARM64-NEXT:    strh w10, [x9, #4]
+; ARM64-NEXT:    ldrb w8, [x8, #6]
+; ARM64-NEXT:    strb w8, [x9, #6]
+; ARM64-NEXT:    ret
   call void @llvm.memcpy.p0.p0.i64(ptr align 2 @temp, ptr align 2 @message, i64 7, i1 false)
   ret void
 }
 
 define void @t8() {
-; ARM64-LABEL: t8
-; ARM64: adrp x8, _temp@GOTPAGE
-; ARM64: ldr [[REG0:x[0-9]+]], [x8, _temp@GOTPAGEOFF]
-; ARM64: adrp [[REG1:x[0-9]+]], _message@PAGE
-; ARM64: add [[REG2:x[0-9]+]], [[REG1:x[0-9]+]], _message@PAGEOFF
-; ARM64: ldrb w10, [[[REG2]]]
-; ARM64: strb w10, [[[REG0]]]
-; ARM64: ldrb w10, [[[REG2]], #1]
-; ARM64: strb w10, [[[REG0]], #1]
-; ARM64: ldrb w10, [[[REG2]], #2]
-; ARM64: strb w10, [[[REG0]], #2]
-; ARM64: ldrb [[REG3:w[0-9]+]], [[[REG2]], #3]
-; ARM64: strb [[REG3]], [[[REG0]], #3]
-; ARM64: ret
+; ARM64-LABEL: t8:
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    adrp x8, _temp@GOTPAGE
+; ARM64-NEXT:    ldr x9, [x8, _temp@GOTPAGEOFF]
+; ARM64-NEXT:    adrp x8, _message@PAGE
+; ARM64-NEXT:    add x8, x8, _message@PAGEOFF
+; ARM64-NEXT:    ldrb w10, [x8]
+; ARM64-NEXT:    strb w10, [x9]
+; ARM64-NEXT:    ldrb w10, [x8, #1]
+; ARM64-NEXT:    strb w10, [x9, #1]
+; ARM64-NEXT:    ldrb w10, [x8, #2]
+; ARM64-NEXT:    strb w10, [x9, #2]
+; ARM64-NEXT:    ldrb w8, [x8, #3]
+; ARM64-NEXT:    strb w8, [x9, #3]
+; ARM64-NEXT:    ret
   call void @llvm.memcpy.p0.p0.i64(ptr align 1 @temp, ptr align 1 @message, i64 4, i1 false)
   ret void
 }
 
 define void @test_distant_memcpy(ptr %dst) {
 ; ARM64-LABEL: test_distant_memcpy:
-; ARM64: mov [[ARRAY:x[0-9]+]], sp
-; ARM64: mov [[OFFSET:x[0-9]+]], #8000
-; ARM64: add x[[ADDR:[0-9]+]], [[ARRAY]], [[OFFSET]]
-; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]]
-; ARM64: strb [[BYTE]], [x0]
+; ARM64:       ; %bb.0:
+; ARM64-NEXT:    stp x28, x27, [sp, #-16]! ; 16-byte Folded Spill
+; ARM64-NEXT:    sub sp, sp, #2, lsl #12 ; =8192
+; ARM64-NEXT:    .cfi_def_cfa_offset 8208
+; ARM64-NEXT:    .cfi_offset w27, -8
+; ARM64-NEXT:    .cfi_offset w28, -16
+; ARM64-NEXT:    mov x8, sp
+; ARM64-NEXT:    mov x9, #8000 ; =0x1f40
+; ARM64-NEXT:    add x8, x8, x9
+; ARM64-NEXT:    ldrb w8, [x8]
+; ARM64-NEXT:    strb w8, [x0]
+; ARM64-NEXT:    add sp, sp, #2, lsl #12 ; =8192
+; ARM64-NEXT:    ldp x28, x27, [sp], #16 ; 16-byte Folded Reload
+; ARM64-NEXT:    ret
   %array = alloca i8, i32 8192
   %elem = getelementptr i8, ptr %array, i32 8000
   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %elem, i64 1, i1 false)
diff --git a/llvm/test/CodeGen/AArch64/arm64-fcmp-opt.ll b/llvm/test/CodeGen/AArch64/arm64-fcmp-opt.ll
index 5155d49cc3fa05..9f79214b6b8eaa 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fcmp-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fcmp-opt.ll
@@ -1,49 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone -aarch64-neon-syntax=apple | FileCheck %s
 ; rdar://10263824
 
 define i1 @fcmp_float1(float %a) nounwind ssp {
+; CHECK-LABEL: fcmp_float1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp s0, #0.0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: @fcmp_float1
-; CHECK: fcmp s0, #0.0
-; CHECK: cset w0, ne
   %cmp = fcmp une float %a, 0.000000e+00
   ret i1 %cmp
 }
 
 define i1 @fcmp_float2(float %a, float %b) nounwind ssp {
+; CHECK-LABEL: fcmp_float2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: @fcmp_float2
-; CHECK: fcmp s0, s1
-; CHECK: cset w0, ne
   %cmp = fcmp une float %a, %b
   ret i1 %cmp
 }
 
 define i1 @fcmp_double1(double %a) nounwind ssp {
+; CHECK-LABEL: fcmp_double1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp d0, #0.0
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: @fcmp_double1
-; CHECK: fcmp d0, #0.0
-; CHECK: cset w0, ne
   %cmp = fcmp une double %a, 0.000000e+00
   ret i1 %cmp
 }
 
 define i1 @fcmp_double2(double %a, double %b) nounwind ssp {
+; CHECK-LABEL: fcmp_double2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fcmp d0, d1
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
 entry:
-; CHECK-LABEL: @fcmp_double2
-; CHECK: fcmp d0, d1
-; CHECK: cset w0, ne
   %cmp = fcmp une double %a, %b
   ret i1 %cmp
 }
 
 ; Check each fcmp condition
 define float @fcmp_oeq(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_oeq
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], eq
+; CHECK-LABEL: fcmp_oeq:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, eq
+; CHECK-NEXT:    ret
 
   %cmp = fcmp oeq float %a, %b
   %conv = uitofp i1 %cmp to float
@@ -51,11 +62,13 @@ define float @fcmp_oeq(float %a, float %b) nounwind ssp {
 }
 
 define float @fcmp_ogt(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ogt
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], gt
+; CHECK-LABEL: fcmp_ogt:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, gt
+; CHECK-NEXT:    ret
 
   %cmp = fcmp ogt float %a, %b
   %conv = uitofp i1 %cmp to float
@@ -63,11 +76,13 @@ define float @fcmp_ogt(float %a, float %b) nounwind ssp {
 }
 
 define float @fcmp_oge(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_oge
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], ge
+; CHECK-LABEL: fcmp_oge:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, ge
+; CHECK-NEXT:    ret
 
   %cmp = fcmp oge float %a, %b
   %conv = uitofp i1 %cmp to float
@@ -75,11 +90,13 @@ define float @fcmp_oge(float %a, float %b) nounwind ssp {
 }
 
 define float @fcmp_olt(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_olt
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], mi
+; CHECK-LABEL: fcmp_olt:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, mi
+; CHECK-NEXT:    ret
 
   %cmp = fcmp olt float %a, %b
   %conv = uitofp i1 %cmp to float
@@ -87,11 +104,13 @@ define float @fcmp_olt(float %a, float %b) nounwind ssp {
 }
 
 define float @fcmp_ole(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ole
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], ls
+; CHECK-LABEL: fcmp_ole:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, ls
+; CHECK-NEXT:    ret
 
   %cmp = fcmp ole float %a, %b
   %conv = uitofp i1 %cmp to float
@@ -99,77 +118,91 @@ define float @fcmp_ole(float %a, float %b) nounwind ssp {
 }
 
 define float @fcmp_ord(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ord
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], vc
+; CHECK-LABEL: fcmp_ord:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, vc
+; CHECK-NEXT:    ret
   %cmp = fcmp ord float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
 }
 
 define float @fcmp_uno(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_uno
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], vs
+; CHECK-LABEL: fcmp_uno:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, vs
+; CHECK-NEXT:    ret
   %cmp = fcmp uno float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
 }
 
 define float @fcmp_ugt(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ugt
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], hi
+; CHECK-LABEL: fcmp_ugt:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, hi
+; CHECK-NEXT:    ret
   %cmp = fcmp ugt float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
 }
 
 define float @fcmp_uge(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_uge
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], pl
+; CHECK-LABEL: fcmp_uge:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, pl
+; CHECK-NEXT:    ret
   %cmp = fcmp uge float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
 }
 
 define float @fcmp_ult(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ult
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], lt
+; CHECK-LABEL: fcmp_ult:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, lt
+; CHECK-NEXT:    ret
   %cmp = fcmp ult float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
 }
 
 define float @fcmp_ule(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ule
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], le
+; CHECK-LABEL: fcmp_ule:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, le
+; CHECK-NEXT:    ret
   %cmp = fcmp ule float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
 }
 
 define float @fcmp_une(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_une
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel s0, s[[ONE]], s[[ZERO]], ne
+; CHECK-LABEL: fcmp_une:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, ne
+; CHECK-NEXT:    ret
   %cmp = fcmp une float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
@@ -178,12 +211,15 @@ define float @fcmp_une(float %a, float %b) nounwind ssp {
 ; Possible opportunity for improvement.  See comment in
 ; ARM64TargetLowering::LowerSETCC()
 define float @fcmp_one(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_one
+; CHECK-LABEL: fcmp_one:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, mi
+; CHECK-NEXT:    fcsel s0, s1, s0, gt
+; CHECK-NEXT:    ret
 ;	fcmp	s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel [[TMP:s[0-9]+]], s[[ONE]], s[[ZERO]], mi
-; CHECK: fcsel s0, s[[ONE]], [[TMP]], gt
   %cmp = fcmp one float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
@@ -192,12 +228,14 @@ define float @fcmp_one(float %a, float %b) nounwind ssp {
 ; Possible opportunity for improvement.  See comment in
 ; ARM64TargetLowering::LowerSETCC()
 define float @fcmp_ueq(float %a, float %b) nounwind ssp {
-; CHECK-LABEL: @fcmp_ueq
-; CHECK: fcmp s0, s1
-; CHECK-DAG: fmov s[[ZERO:[0-9]+]], wzr
-; CHECK-DAG: fmov s[[ONE:[0-9]+]], #1.0
-; CHECK: fcsel [[TMP:s[0-9]+]], s[[ONE]], s[[ZERO]], eq
-; CHECK: fcsel s0, s[[ONE]], [[TMP]], vs
+; CHECK-LABEL: fcmp_ueq:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fmov s0, wzr
+; CHECK-NEXT:    fmov s1, #1.00000000
+; CHECK-NEXT:    fcsel s0, s1, s0, eq
+; CHECK-NEXT:    fcsel s0, s1, s0, vs
+; CHECK-NEXT:    ret
   %cmp = fcmp ueq float %a, %b
   %conv = uitofp i1 %cmp to float
   ret float %conv
diff --git a/llvm/test/CodeGen/AArch64/arm64-fmax-safe.ll b/llvm/test/CodeGen/AArch64/arm64-fmax-safe.ll
index 550e89f4a27f96..dfdd92fc13d784 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fmax-safe.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fmax-safe.ll
@@ -1,54 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
 define double @test_direct(float %in) {
 ; CHECK-LABEL: test_direct:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi d1, #0000000000000000
+; CHECK-NEXT:    fcmp s0, #0.0
+; CHECK-NEXT:    fcsel s0, s1, s0, mi
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    ret
   %cmp = fcmp olt float %in, 0.000000e+00
   %val = select i1 %cmp, float 0.000000e+00, float %in
   %longer = fpext float %val to double
   ret double %longer
 
-; CHECK: fcmp
-; CHECK: fcsel
 }
 
 define double @test_cross(float %in) {
 ; CHECK-LABEL: test_cross:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi d1, #0000000000000000
+; CHECK-NEXT:    fcmp s0, #0.0
+; CHECK-NEXT:    fcsel s0, s0, s1, lt
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    ret
   %cmp = fcmp ult float %in, 0.000000e+00
   %val = select i1 %cmp, float %in, float 0.000000e+00
   %longer = fpext float %val to double
   ret double %longer
 
-; CHECK: fcmp
-; CHECK: fcsel
 }
 
 ; Same as previous, but with ordered comparison;
 ; must become fminnm, not fmin.
 define double @test_cross_fail_nan(float %in) {
 ; CHECK-LABEL: test_cross_fail_nan:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi d1, #0000000000000000
+; CHECK-NEXT:    fminnm s0, s0, s1
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    ret
   %cmp = fcmp olt float %in, 0.000000e+00
   %val = select i1 %cmp, float %in, float 0.000000e+00
   %longer = fpext float %val to double
   ret double %longer
 
-; CHECK: fminnm s
 }
 
 ; This isn't a min or a max, but passes the first condition for swapping the
 ; results. Make sure they're put back before we resort to the normal fcsel.
 define float @test_cross_fail(float %lhs, float %rhs) {
 ; CHECK-LABEL: test_cross_fail:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fcmp s0, s1
+; CHECK-NEXT:    fcsel s0, s1, s0, ne
+; CHECK-NEXT:    ret
   %tst = fcmp une float %lhs, %rhs
   %res = select i1 %tst, float %rhs, float %lhs
   ret float %res
 
   ; The register allocator would have to decide to be deliberately obtuse before
   ; other registers were used.
-; CHECK: fcsel s0, s1, s0, ne
 }
 
 ; Make sure the transformation isn't triggered for integers
 define i64 @test_integer(i64  %in) {
+; CHECK-LABEL: test_integer:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, #0
+; CHECK-NEXT:    csel x0, xzr, x0, lt
+; CHECK-NEXT:    ret
   %cmp = icmp slt i64 %in, 0
   %val = select i1 %cmp, i64 0, i64 %in
   ret i64 %val
diff --git a/llvm/test/CodeGen/AArch64/arm64-fold-address.ll b/llvm/test/CodeGen/AArch64/arm64-fold-address.ll
index 1775f13cd5aad1..907cdbe36609fd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fold-address.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fold-address.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -O2 -mtriple=arm64-apple-darwin | FileCheck %s
 
 %0 = type opaque
@@ -9,11 +10,6 @@
 
 define hidden %struct.CGRect @nofold(ptr nocapture %self, ptr nocapture %_cmd) nounwind readonly optsize ssp {
 entry:
-; CHECK-LABEL: nofold:
-; CHECK: add x[[REG:[0-9]+]], x0, x{{[0-9]+}}
-; CHECK: ldp d0, d1, [x[[REG]]]
-; CHECK: ldp d2, d3, [x[[REG]], #16]
-; CHECK: ret
   %ivar = load i64, ptr @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
   %add.ptr = getelementptr inbounds i8, ptr %self, i64 %ivar
   %tmp11 = load double, ptr %add.ptr, align 8
@@ -37,10 +33,6 @@ entry:
 
 define hidden %struct.CGRect @fold(ptr nocapture %self, ptr nocapture %_cmd) nounwind readonly optsize ssp {
 entry:
-; CHECK-LABEL: fold:
-; CHECK: ldr d0, [x0, x{{[0-9]+}}]
-; CHECK-NOT: add x0, x0, x1
-; CHECK: ret
   %ivar = load i64, ptr @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
   %add.ptr = getelementptr inbounds i8, ptr %self, i64 %ivar
   %tmp11 = load double, ptr %add.ptr, align 8
@@ -67,3 +59,5 @@ entry:
 !2 = !{i32 1, !"Objective-C Image Info Section", !"__DATA, __objc_imageinfo, regular, no_dead_strip"}
 !3 = !{i32 4, !"Objective-C Garbage Collection", i32 0}
 !4 = !{}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll b/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
index cfb7c60f5a8b00..9c57c6ed0412f5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp-imm-size.ll
@@ -1,31 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
 ; RUN: llc < %s -mtriple=arm64-apple-darwin -global-isel | FileCheck %s
 
 ; CHECK: literal8
 ; CHECK: .quad  0x400921fb54442d18
 define double @foo() optsize {
-; CHECK: _foo:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI0_0@PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI0_0@PAGEOFF]
-; CHECK-NEXT: ret
   ret double 0x400921FB54442D18
 }
 
 ; CHECK: literal8
 ; CHECK: .quad 0x0000001fffffffc
 define double @foo2() optsize {
-; CHECK: _foo2:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI1_0@PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI1_0@PAGEOFF]
-; CHECK-NEXT: ret
   ret double 0x1FFFFFFFC1
 }
 
 define float @bar() optsize {
-; CHECK: _bar:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI2_0@PAGE
-; CHECK: ldr  s0, [x[[REG]], lCPI2_0@PAGEOFF]
-; CHECK-NEXT:  ret
   ret float 0x400921FB60000000
 }
 
@@ -33,28 +22,16 @@ define float @bar() optsize {
 ; CHECK: .quad 0
 ; CHECK: .quad 0
 define fp128 @baz() optsize {
-; CHECK: _baz:
-; CHECK:  adrp x[[REG:[0-9]+]], lCPI3_0@PAGE
-; CHECK:  ldr  q0, [x[[REG]], lCPI3_0@PAGEOFF]
-; CHECK-NEXT:  ret
   ret fp128 0xL00000000000000000000000000000000
 }
 
 ; CHECK: literal8
 ; CHECK: .quad 0x0000001fffffffd
 define double @foo2_pgso() !prof !14 {
-; CHECK: _foo2_pgso:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI4_0@PAGE
-; CHECK: ldr  d0, [x[[REG]], lCPI4_0@PAGEOFF]
-; CHECK-NEXT: ret
   ret double 0x1FFFFFFFd1
 }
 
 define float @bar_pgso() !prof !14 {
-; CHECK: _bar_pgso:
-; CHECK: adrp x[[REG:[0-9]+]], lCPI5_0@PAGE
-; CHECK: ldr  s0, [x[[REG]], lCPI5_0@PAGEOFF]
-; CHECK-NEXT:  ret
   ret float 0x400921FB80000000
 }
 
@@ -74,3 +51,5 @@ define float @bar_pgso() !prof !14 {
 !12 = !{i32 999000, i64 100, i32 1}
 !13 = !{i32 999999, i64 1, i32 2}
 !14 = !{!"function_entry_count", i64 0}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll b/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
index 2f336a1d3874c0..a38c6878bee398 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -verify-machineinstrs | FileCheck %s
 declare void @bar(ptr, ptr, ptr)
 
@@ -6,12 +7,20 @@ declare void @bar(ptr, ptr, ptr)
 
 define fp128 @test_folding() {
 ; CHECK-LABEL: test_folding:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    mov w8, #42 // =0x2a
+; CHECK-NEXT:    str w8, [sp, #12]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
   %l = alloca i32
   store i32 42, ptr %l
   %val = load i32, ptr %l
   %fpval = sitofp i32 %val to fp128
   ; If the value is loaded from a constant pool into an fp128, it's been folded
   ; successfully.
-; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}},
   ret fp128 %fpval
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
index c414c7c2464f8b..5e67fc99415fdf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-homogeneous-prolog-epilog.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -homogeneous-prolog-epilog| FileCheck %s
 ; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu  -homogeneous-prolog-epilog | FileCheck %s --check-prefixes=CHECK-LINUX
 
@@ -18,6 +19,37 @@
 ; CHECK-LINUX-NEXT: b _Z3gooi
 
 define i32 @_Z3hooii(i32 %b, i32 %a) nounwind ssp minsize {
+; CHECK-LABEL: _Z3hooii:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]!
+; CHECK-NEXT:    bl _OUTLINED_FUNCTION_PROLOG_x30x29x19x20x21x22
+; CHECK-NEXT:    mov w19, w1
+; CHECK-NEXT:    mov w20, w0
+; CHECK-NEXT:    bl __Z3gooi
+; CHECK-NEXT:    mov w21, w0
+; CHECK-NEXT:    mov w0, w19
+; CHECK-NEXT:    bl __Z3gooi
+; CHECK-NEXT:    add w8, w19, w20
+; CHECK-NEXT:    add w8, w8, w21
+; CHECK-NEXT:    add w0, w8, w0
+; CHECK-NEXT:    bl _OUTLINED_FUNCTION_EPILOG_x30x29x19x20x21x22
+; CHECK-NEXT:    b __Z3gooi
+;
+; CHECK-LINUX-LABEL: _Z3hooii:
+; CHECK-LINUX:       // %bb.0:
+; CHECK-LINUX-NEXT:    stp x29, x30, [sp, #-48]!
+; CHECK-LINUX-NEXT:    bl OUTLINED_FUNCTION_PROLOG_x19x20x21x22x30x29
+; CHECK-LINUX-NEXT:    mov w19, w1
+; CHECK-LINUX-NEXT:    mov w20, w0
+; CHECK-LINUX-NEXT:    bl _Z3gooi
+; CHECK-LINUX-NEXT:    mov w21, w0
+; CHECK-LINUX-NEXT:    mov w0, w19
+; CHECK-LINUX-NEXT:    bl _Z3gooi
+; CHECK-LINUX-NEXT:    add w8, w19, w20
+; CHECK-LINUX-NEXT:    add w8, w8, w21
+; CHECK-LINUX-NEXT:    add w0, w8, w0
+; CHECK-LINUX-NEXT:    bl OUTLINED_FUNCTION_EPILOG_x19x20x21x22x30x29
+; CHECK-LINUX-NEXT:    b _Z3gooi
   %1 = tail call i32 @_Z3gooi(i32 %b)
   %2 = tail call i32 @_Z3gooi(i32 %a)
   %3 = add i32 %a, %b
@@ -35,6 +67,29 @@ declare i32 @_Z3gooi(i32);
 ; CHECK:        add sp, sp, #16
 
 define i32 @foo(i32 %c) nounwind minsize {
+; CHECK-LABEL: foo:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    stp x20, x19, [sp, #-32]!
+; CHECK-NEXT:    stp x29, x30, [sp, #16]
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    mov w19, w0
+; CHECK-NEXT:    add x0, sp, #12
+; CHECK-NEXT:    bl _goo
+; CHECK-NEXT:    sub w0, w19, w0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    b _OUTLINED_FUNCTION_EPILOG_TAIL_x30x29x19x20
+;
+; CHECK-LINUX-LABEL: foo:
+; CHECK-LINUX:       // %bb.0: // %entry
+; CHECK-LINUX-NEXT:    stp x29, x30, [sp, #-32]!
+; CHECK-LINUX-NEXT:    stp x20, x19, [sp, #16]
+; CHECK-LINUX-NEXT:    sub sp, sp, #16
+; CHECK-LINUX-NEXT:    mov w19, w0
+; CHECK-LINUX-NEXT:    add x0, sp, #12
+; CHECK-LINUX-NEXT:    bl goo
+; CHECK-LINUX-NEXT:    sub w0, w19, w0
+; CHECK-LINUX-NEXT:    add sp, sp, #16
+; CHECK-LINUX-NEXT:    b OUTLINED_FUNCTION_EPILOG_TAIL_x19x20x30x29
 entry:
   %buffer = alloca [1 x i32], align 4
   call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %buffer)
@@ -75,5 +130,34 @@ declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
 
 ; nothing to check - hit assert if not bailing out for swiftasync
 define void @swift_async(i8* swiftasync %ctx) minsize "frame-pointer"="all" {
+; CHECK-LABEL: swift_async:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    orr x29, x29, #0x1000000000000000
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    str x22, [sp, #8]
+; CHECK-NEXT:    add x29, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT:    and x29, x29, #0xefffffffffffffff
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
+;
+; CHECK-LINUX-LABEL: swift_async:
+; CHECK-LINUX:       // %bb.0:
+; CHECK-LINUX-NEXT:    orr x29, x29, #0x1000000000000000
+; CHECK-LINUX-NEXT:    sub sp, sp, #32
+; CHECK-LINUX-NEXT:    stp x29, x30, [sp, #16] // 16-byte Folded Spill
+; CHECK-LINUX-NEXT:    str x22, [sp, #8]
+; CHECK-LINUX-NEXT:    add x29, sp, #16
+; CHECK-LINUX-NEXT:    .cfi_def_cfa w29, 16
+; CHECK-LINUX-NEXT:    .cfi_offset w30, -8
+; CHECK-LINUX-NEXT:    .cfi_offset w29, -16
+; CHECK-LINUX-NEXT:    ldp x29, x30, [sp, #16] // 16-byte Folded Reload
+; CHECK-LINUX-NEXT:    and x29, x29, #0xefffffffffffffff
+; CHECK-LINUX-NEXT:    add sp, sp, #32
+; CHECK-LINUX-NEXT:    ret
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
index 7493afd672d437..830e79fde2ef41 100644
--- a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
@@ -14751,17 +14751,11 @@ define i32 @load_single_extract_valid_const_index_v3i32(ptr %A, i32 %idx) {
 }
 
 define i32 @load_single_extract_variable_index_masked_i32(ptr %A, i32 %idx) {
-; SDAG-LABEL: load_single_extract_variable_index_masked_i32:
-; SDAG:       ; %bb.0:
-; SDAG-NEXT:    and w8, w1, #0x3
-; SDAG-NEXT:    ldr w0, [x0, w8, uxtw #2]
-; SDAG-NEXT:    ret
-;
-; CHECK-GISEL-LABEL: load_single_extract_variable_index_masked_i32:
-; CHECK-GISEL:       ; %bb.0:
-; CHECK-GISEL-NEXT:    and w8, w1, #0x3
-; CHECK-GISEL-NEXT:    ldr w0, [x0, w8, uxtw #2]
-; CHECK-GISEL-NEXT:    ret
+; CHECK-LABEL: load_single_extract_variable_index_masked_i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x3
+; CHECK-NEXT:    ldr w0, [x0, w8, uxtw #2]
+; CHECK-NEXT:    ret
   %idx.x = and i32 %idx, 3
   %lv = load <4 x i32>, ptr %A
   %e = extractelement <4 x i32> %lv, i32 %idx.x
@@ -14769,17 +14763,11 @@ define i32 @load_single_extract_variable_index_masked_i32(ptr %A, i32 %idx) {
 }
 
 define i32 @load_single_extract_variable_index_masked2_i32(ptr %A, i32 %idx) {
-; SDAG-LABEL: load_single_extract_variable_index_masked2_i32:
-; SDAG:       ; %bb.0:
-; SDAG-NEXT:    and w8, w1, #0x1
-; SDAG-NEXT:    ldr w0, [x0, w8, uxtw #2]
-; SDAG-NEXT:    ret
-;
-; CHECK-GISEL-LABEL: load_single_extract_variable_index_masked2_i32:
-; CHECK-GISEL:       ; %bb.0:
-; CHECK-GISEL-NEXT:    and w8, w1, #0x1
-; CHECK-GISEL-NEXT:    ldr w0, [x0, w8, uxtw #2]
-; CHECK-GISEL-NEXT:    ret
+; CHECK-LABEL: load_single_extract_variable_index_masked2_i32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    and w8, w1, #0x1
+; CHECK-NEXT:    ldr w0, [x0, w8, uxtw #2]
+; CHECK-NEXT:    ret
   %idx.x = and i32 %idx, 1
   %lv = load <4 x i32>, ptr %A
   %e = extractelement <4 x i32> %lv, i32 %idx.x
diff --git a/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll b/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
index c90178513d27ea..ed902168de9989 100644
--- a/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-J.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: not llc -mtriple=arm64-eabi < %s  2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll b/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
index e5baf85e954248..26b9d0583f8a01 100644
--- a/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-K.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: not llc -mtriple=arm64-eabi < %s  2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll b/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
index 38b0ab8fa29642..131d94f951c8ce 100644
--- a/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-inline-asm-error-N.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: not llc -mtriple=arm64-eabi < %s  2> %t
 ; RUN: FileCheck --check-prefix=CHECK-ERRORS < %t %s
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll b/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
index 34b927e0a1b66b..33040bee35c5cf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldp-aa.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -verify-machineinstrs | FileCheck %s
 
 ; The next set of tests makes sure we can combine the second instruction into
@@ -8,6 +9,12 @@
 ; CHECK: str w0, [x1, #8]
 ; CHECK: ret
 define i32 @ldp_int_aa(i32 %a, ptr %p) nounwind {
+; CHECK-LABEL: ldp_int_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp w8, w9, [x1]
+; CHECK-NEXT:    str w0, [x1, #8]
+; CHECK-NEXT:    add w0, w9, w8
+; CHECK-NEXT:    ret
   %tmp = load i32, ptr %p, align 4
   %str.ptr = getelementptr inbounds i32, ptr %p, i64 2
   store i32 %a, ptr %str.ptr, align 4
@@ -22,6 +29,12 @@ define i32 @ldp_int_aa(i32 %a, ptr %p) nounwind {
 ; CHECK: str x0, [x1, #16]
 ; CHECK: ret
 define i64 @ldp_long_aa(i64 %a, ptr %p) nounwind {
+; CHECK-LABEL: ldp_long_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp x8, x9, [x1]
+; CHECK-NEXT:    str x0, [x1, #16]
+; CHECK-NEXT:    add x0, x9, x8
+; CHECK-NEXT:    ret
   %tmp = load i64, ptr %p, align 8
   %str.ptr = getelementptr inbounds i64, ptr %p, i64 2
   store i64 %a, ptr %str.ptr, align 4
@@ -36,6 +49,12 @@ define i64 @ldp_long_aa(i64 %a, ptr %p) nounwind {
 ; CHECK: ldp s1, s0, [x0]
 ; CHECK: ret
 define float @ldp_float_aa(float %a, ptr %p) nounwind {
+; CHECK-LABEL: ldp_float_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str s0, [x0, #8]
+; CHECK-NEXT:    ldp s1, s0, [x0]
+; CHECK-NEXT:    fadd s0, s1, s0
+; CHECK-NEXT:    ret
   %tmp = load float, ptr %p, align 4
   %str.ptr = getelementptr inbounds float, ptr %p, i64 2
   store float %a, ptr %str.ptr, align 4
@@ -50,6 +69,12 @@ define float @ldp_float_aa(float %a, ptr %p) nounwind {
 ; CHECK: ldp d1, d0, [x0]
 ; CHECK: ret
 define double @ldp_double_aa(double %a, ptr %p) nounwind {
+; CHECK-LABEL: ldp_double_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str d0, [x0, #16]
+; CHECK-NEXT:    ldp d1, d0, [x0]
+; CHECK-NEXT:    fadd d0, d1, d0
+; CHECK-NEXT:    ret
   %tmp = load double, ptr %p, align 8
   %str.ptr = getelementptr inbounds double, ptr %p, i64 2
   store double %a, ptr %str.ptr, align 4
diff --git a/llvm/test/CodeGen/AArch64/arm64-leaf.ll b/llvm/test/CodeGen/AArch64/arm64-leaf.ll
index 2bdf0290013dd8..28ea83910981a2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-leaf.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-leaf.ll
@@ -1,13 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios | FileCheck %s
 ; rdar://12829704
 
 define void @t8() nounwind ssp {
 ; CHECK-LABEL: t8:
-; CHECK-NOT: stp	fp, lr, [sp, #-16]!
-; CHECK-NOT: mov	fp, sp
-; CHECK: nop
-; CHECK-NOT: mov	sp, fp
-; CHECK-NOT: ldp	fp, lr, [sp], #16
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    stp d9, d8, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    ; InlineAsm Start
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    ldp d9, d8, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
   tail call void asm sideeffect "nop", "~{v8}"() nounwind
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-mte.ll b/llvm/test/CodeGen/AArch64/arm64-mte.ll
index d78f4eb830e102..e84071c7af85c3 100644
--- a/llvm/test/CodeGen/AArch64/arm64-mte.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-mte.ll
@@ -1,406 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -mattr=+mte | FileCheck %s
 
 ; test create_tag
 define ptr @create_tag(ptr %ptr, i32 %m) {
-entry:
 ; CHECK-LABEL: create_tag:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, w1
+; CHECK-NEXT:    irg x0, x0, x8
+; CHECK-NEXT:    ret
+entry:
   %0 = zext i32 %m to i64
   %1 = tail call ptr @llvm.aarch64.irg(ptr %ptr, i64 %0)
   ret ptr %1
-;CHECK: irg x0, x0, {{x[0-9]+}}
 }
 
 ; *********** __arm_mte_increment_tag  *************
 ; test increment_tag1
 define ptr @increment_tag1(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addg x0, x0, #0, #7
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call ptr @llvm.aarch64.addg(ptr %ptr, i64 7)
   ret ptr %0
-; CHECK: addg x0, x0, #0, #7
 }
 
 %struct.S2K = type { [512 x i32] }
 define ptr @increment_tag1stack(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag1stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2048
+; CHECK-NEXT:    .cfi_def_cfa_offset 2064
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    addg x0, sp, #0, #7
+; CHECK-NEXT:    add sp, sp, #2048
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S2K, align 4
   call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
   %0 = call ptr @llvm.aarch64.addg(ptr nonnull %s, i64 7)
   call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
   ret ptr %0
-; CHECK: addg x0, sp, #0, #7
 }
 
 
 define ptr @increment_tag2(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addg x0, x0, #16, #7
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 4
   %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
   ret ptr %0
-; CHECK: addg x0, x0, #16, #7
 }
 
 define ptr @increment_tag2stack(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag2stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2048
+; CHECK-NEXT:    .cfi_def_cfa_offset 2064
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    addg x0, sp, #16, #7
+; CHECK-NEXT:    add sp, sp, #2048
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S2K, align 4
   call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 4
   %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
   call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
   ret ptr %0
-; CHECK: addg x0, sp, #16, #7
 }
 
 define ptr @increment_tag3(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    addg x0, x0, #1008, #7
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 252
   %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
   ret ptr %0
-; CHECK: addg x0, x0, #1008, #7
 }
 
 define ptr @increment_tag3stack(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag3stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2048
+; CHECK-NEXT:    .cfi_def_cfa_offset 2064
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    addg x0, sp, #1008, #7
+; CHECK-NEXT:    add sp, sp, #2048
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S2K, align 4
   call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 252
   %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
   call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
   ret ptr %0
-; CHECK: addg x0, sp, #1008, #7
 }
 
 
 define ptr @increment_tag4(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x0, #1024
+; CHECK-NEXT:    addg x0, x8, #0, #7
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 256
   %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
   ret ptr %0
-; CHECK: add [[T0:x[0-9]+]], x0, #1024
-; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
 define ptr @increment_tag4stack(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag4stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2048
+; CHECK-NEXT:    .cfi_def_cfa_offset 2064
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #1024
+; CHECK-NEXT:    addg x0, x8, #0, #7
+; CHECK-NEXT:    add sp, sp, #2048
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S2K, align 4
   call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 256
   %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
   call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
   ret ptr %0
-; CHECK: add [[T0:x[0-9]+]], {{.*}}, #1024
-; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
 
 define ptr @increment_tag5(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x0, #20
+; CHECK-NEXT:    addg x0, x8, #0, #7
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 5
   %0 = tail call ptr @llvm.aarch64.addg(ptr nonnull %add.ptr, i64 7)
   ret ptr %0
-; CHECK: add [[T0:x[0-9]+]], x0, #20
-; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
 define ptr @increment_tag5stack(ptr %ptr) {
-entry:
 ; CHECK-LABEL: increment_tag5stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2048
+; CHECK-NEXT:    .cfi_def_cfa_offset 2064
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #20
+; CHECK-NEXT:    addg x0, x8, #0, #7
+; CHECK-NEXT:    add sp, sp, #2048
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S2K, align 4
   call void @llvm.lifetime.start.p0(i64 2048, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S2K, ptr %s, i64 0, i32 0, i64 5
   %0 = call ptr @llvm.aarch64.addg(ptr nonnull %arrayidx, i64 7)
   call void @llvm.lifetime.end.p0(i64 2048, ptr nonnull %s)
   ret ptr %0
-; CHECK: add [[T0:x[0-9]+]], {{.*}}, #20
-; CHECK-NEXT: addg x0, [[T0]], #0, #7
 }
 
 
 ; *********** __arm_mte_exclude_tag  *************
 ; test exclude_tag
 define i32 @exclude_tag(ptr %ptr, i32 %m) local_unnamed_addr #0 {
+; CHECK-LABEL: exclude_tag:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, w1
+; CHECK-NEXT:    gmi x0, x0, x8
+; CHECK-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-NEXT:    ret
 entry:
-;CHECK-LABEL: exclude_tag:
   %0 = zext i32 %m to i64
   %1 = tail call i64 @llvm.aarch64.gmi(ptr %ptr, i64 %0)
   %conv = trunc i64 %1 to i32
   ret i32 %conv
-; CHECK: gmi	x0, x0, {{x[0-9]+}}
 }
 
 
 ; *********** __arm_mte_get_tag *************
 %struct.S8K = type { [2048 x i32] }
 define ptr @get_tag1(ptr %ptr) {
-entry:
 ; CHECK-LABEL: get_tag1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldg x0, [x0]
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call ptr @llvm.aarch64.ldg(ptr %ptr, ptr %ptr)
   ret ptr %0
-; CHECK: ldg x0, [x0]
 }
 
 define ptr @get_tag1_two_parm(ptr %ret_ptr, ptr %ptr) {
-entry:
 ; CHECK-LABEL: get_tag1_two_parm:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ldg x0, [x1]
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call ptr @llvm.aarch64.ldg(ptr %ret_ptr, ptr %ptr)
   ret ptr %0
-; CHECK: ldg x0, [x1]
 }
 
 define ptr @get_tag1stack() {
-entry:
 ; CHECK-LABEL: get_tag1stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x0, sp
+; CHECK-NEXT:    ldg x0, [sp]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %s, ptr nonnull %s)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret ptr %0
-; CHECK: mov [[T0:x[0-9]+]], sp
-; CHECK: ldg [[T0]], [sp]
 }
 
 define ptr @get_tag1stack_two_param(ptr %ret_ptr) {
-entry:
 ; CHECK-LABEL: get_tag1stack_two_param:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    ldg x0, [sp]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %ret_ptr, ptr nonnull %s)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret ptr %0
-; CHECK-NOT: mov {{.*}}, sp
-; CHECK: ldg x0, [sp]
 }
 
 
 define ptr @get_tag2(ptr %ptr) {
-entry:
 ; CHECK-LABEL: get_tag2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x0, #16
+; CHECK-NEXT:    ldg x8, [x0, #16]
+; CHECK-NEXT:    mov x0, x8
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 4
   %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
   ret ptr %0
-; CHECK: add  [[T0:x[0-9]+]], x0, #16
-; CHECK: ldg  [[T0]], [x0, #16]
 }
 
 define ptr @get_tag2stack() {
-entry:
 ; CHECK-LABEL: get_tag2stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x0, x8, #16
+; CHECK-NEXT:    ldg x0, [sp, #16]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 4
   %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret ptr %0
-; CHECK: mov [[T0:x[0-9]+]], sp
-; CHECK: add x0, [[T0]], #16
-; CHECK: ldg x0, [sp, #16]
 }
 
 
 define ptr @get_tag3(ptr %ptr) {
-entry:
 ; CHECK-LABEL: get_tag3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x0, #4080
+; CHECK-NEXT:    ldg x8, [x0, #4080]
+; CHECK-NEXT:    mov x0, x8
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1020
   %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
   ret ptr %0
-; CHECK: add [[T0:x[0-8]+]], x0, #4080
-; CHECK: ldg [[T0]], [x0, #4080]
 }
 
 define ptr @get_tag3stack() {
-entry:
 ; CHECK-LABEL: get_tag3stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x0, x8, #4080
+; CHECK-NEXT:    ldg x0, [sp, #4080]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1020
   %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret ptr %0
-; CHECK: mov [[T0:x[0-9]+]], sp
-; CHECK: add x0, [[T0]], #4080
-; CHECK: ldg x0, [sp, #4080]
 }
 
 
 define ptr @get_tag4(ptr %ptr) {
-entry:
 ; CHECK-LABEL: get_tag4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x0, x0, #1, lsl #12 // =4096
+; CHECK-NEXT:    ldg x0, [x0]
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1024
   %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
   ret ptr %0
-; CHECK: add x0, x0, #1, lsl #12
-; CHECK-NEXT: ldg x0, [x0]
 }
 
 define ptr @get_tag4stack() {
-entry:
 ; CHECK-LABEL: get_tag4stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x0, x8, #1, lsl #12 // =4096
+; CHECK-NEXT:    ldg x0, [x0]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1024
   %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret ptr %0
-; CHECK: mov [[T0:x[0-9]+]], sp
-; CHECK-NEXT: add x[[T1:[0-9]+]], [[T0]], #1, lsl #12
-; CHECK-NEXT: ldg x[[T1]], [x[[T1]]]
 }
 
 define ptr @get_tag5(ptr %ptr) {
-entry:
 ; CHECK-LABEL: get_tag5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x0, x0, #20
+; CHECK-NEXT:    ldg x0, [x0]
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 5
   %0 = tail call ptr @llvm.aarch64.ldg(ptr nonnull %add.ptr, ptr nonnull %add.ptr)
   ret ptr %0
-; CHECK: add x0, x0, #20
-; CHECK-NEXT: ldg x0, [x0]
 }
 
 define ptr @get_tag5stack() {
-entry:
 ; CHECK-LABEL: get_tag5stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x0, x8, #20
+; CHECK-NEXT:    ldg x0, [x0]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 5
   %0 = call ptr @llvm.aarch64.ldg(ptr nonnull %arrayidx, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret ptr %0
-; CHECK: mov [[T0:x[0-9]+]], sp
-; CHECK: add x[[T1:[0-9]+]], [[T0]], #20
-; CHECK-NEXT: ldg x[[T1]], [x[[T1]]]
 }
 
 
 ; *********** __arm_mte_set_tag  *************
 define void @set_tag1(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stg x0, [x1]
+; CHECK-NEXT:    ret
+entry:
   tail call void @llvm.aarch64.stg(ptr %tag, ptr %ptr)
   ret void
-; CHECK: stg x0, [x1]
 }
 
 define void @set_tag1stack(ptr %tag) {
-entry:
 ; CHECK-LABEL: set_tag1stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    stg x0, [sp]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %s)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %tag)
   ret void
-; CHECK: stg x0, [sp]
 }
 
 
 define void @set_tag2(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stg x0, [x1, #16]
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 4
   tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
-; CHECK: stg x0, [x1, #16]
 }
 
 define void @set_tag2stack(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag2stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    stg x0, [sp, #16]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 4
   call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
-; CHECK: stg x0, [sp, #16]
 }
 
 
 
 define void @set_tag3(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag3:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stg x0, [x1, #4080]
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1020
   tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
-; CHECK: stg x0, [x1, #4080]
 }
 
 define void @set_tag3stack(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag3stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    stg x0, [sp, #4080]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1020
   call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
-; CHECK: stg x0, [sp, #4080]
 }
 
 
 
 define void @set_tag4(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag4:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x1, #1, lsl #12 // =4096
+; CHECK-NEXT:    stg x0, [x8]
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 1024
   tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
-; CHECK: add x[[T0:[0-9]+]], x1, #1, lsl #12
-; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
 define void @set_tag4stack(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag4stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #1, lsl #12 // =4096
+; CHECK-NEXT:    stg x0, [x8]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 1024
   call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
-; CHECK: add x[[T0:[0-9]+]], {{.*}}, #1, lsl #12
-; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
 
 define void @set_tag5(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag5:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x1, #20
+; CHECK-NEXT:    stg x0, [x8]
+; CHECK-NEXT:    ret
+entry:
   %add.ptr = getelementptr inbounds i32, ptr %ptr, i64 5
   tail call void @llvm.aarch64.stg(ptr %tag, ptr %add.ptr)
   ret void
-; CHECK: add x[[T0:[0-9]+]], x1, #20
-; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
 define void @set_tag5stack(ptr %tag, ptr %ptr) {
-entry:
 ; CHECK-LABEL: set_tag5stack:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    .cfi_def_cfa_offset 8208
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #20
+; CHECK-NEXT:    stg x0, [x8]
+; CHECK-NEXT:    add sp, sp, #2, lsl #12 // =8192
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
   %s = alloca %struct.S8K, align 4
   call void @llvm.lifetime.start.p0(i64 8192, ptr nonnull %s)
   %arrayidx = getelementptr inbounds %struct.S8K, ptr %s, i64 0, i32 0, i64 5
   call void @llvm.aarch64.stg(ptr %tag, ptr nonnull %arrayidx)
   call void @llvm.lifetime.end.p0(i64 8192, ptr nonnull %s)
   ret void
-; CHECK: add x[[T0:[0-9]+]], {{.*}}, #20
-; CHECK-NEXT: stg x0, [x[[T0]]]
 }
 
 
 ; *********** __arm_mte_ptrdiff  *************
 define i64 @subtract_pointers(ptr %ptra, ptr %ptrb) {
-entry:
 ; CHECK-LABEL: subtract_pointers:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    subp x0, x0, x1
+; CHECK-NEXT:    ret
+entry:
   %0 = tail call i64 @llvm.aarch64.subp(ptr %ptra, ptr %ptrb)
   ret i64 %0
-; CHECK: subp x0, x0, x1
 }
 
 declare ptr @llvm.aarch64.irg(ptr, i64)
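
The arm64-mte.ll update above is mechanical: hand-written CHECK lines are
replaced by autogenerated ones with no change in the emitted code. As a quick
reference for the intrinsics being exercised, here is a minimal sketch (not
part of this patch; the function name is illustrative) of how each one maps to
an MTE instruction:

define ptr @mte_sketch(ptr %p, i64 %exclude) {
  ; irg: insert a random logical tag, avoiding tags in the exclusion mask.
  %t0 = call ptr @llvm.aarch64.irg(ptr %p, i64 %exclude)
  ; addg: add an immediate (here 7) to the pointer's logical tag.
  %t1 = call ptr @llvm.aarch64.addg(ptr %t0, i64 7)
  ; ldg: read the allocation tag stored for the address into the pointer.
  %t2 = call ptr @llvm.aarch64.ldg(ptr %t1, ptr %t1)
  ; stg: write the pointer's logical tag out as the allocation tag.
  call void @llvm.aarch64.stg(ptr %t2, ptr %t2)
  ret ptr %t2
}
declare ptr @llvm.aarch64.irg(ptr, i64)
declare ptr @llvm.aarch64.addg(ptr, i64)
declare ptr @llvm.aarch64.ldg(ptr, ptr)
declare void @llvm.aarch64.stg(ptr, ptr)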
diff --git a/llvm/test/CodeGen/AArch64/arm64-neg.ll b/llvm/test/CodeGen/AArch64/arm64-neg.ll
index 659ce988a706f8..ca02df183b73fc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neg.ll
@@ -1,71 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
 
 define i32 @test_neg_i32(i32 %in) {
 ; CHECK-LABEL: test_neg_i32:
-; CHECK: neg w0, w0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg w0, w0
+; CHECK-NEXT:    ret
   %res = sub i32 0, %in
   ret i32 %res
 }
 
 define i64 @test_neg_i64(i64 %in) {
 ; CHECK-LABEL: test_neg_i64:
-; CHECK: neg x0, x0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg x0, x0
+; CHECK-NEXT:    ret
   %res = sub i64 0, %in
   ret i64 %res
 }
 
 define <8 x i8> @test_neg_v8i8(<8 x i8> %in) {
 ; CHECK-LABEL: test_neg_v8i8:
-; CHECK: neg v0.8b, v0.8b
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.8b, v0.8b
+; CHECK-NEXT:    ret
   %res = sub <8 x i8> zeroinitializer, %in
   ret <8 x i8> %res
 }
 
 define <4 x i16> @test_neg_v4i16(<4 x i16> %in) {
 ; CHECK-LABEL: test_neg_v4i16:
-; CHECK: neg v0.4h, v0.4h
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.4h, v0.4h
+; CHECK-NEXT:    ret
   %res = sub <4 x i16> zeroinitializer, %in
   ret <4 x i16> %res
 }
 
 define <2 x i32> @test_neg_v2i32(<2 x i32> %in) {
 ; CHECK-LABEL: test_neg_v2i32:
-; CHECK: neg v0.2s, v0.2s
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.2s, v0.2s
+; CHECK-NEXT:    ret
   %res = sub <2 x i32> zeroinitializer, %in
   ret <2 x i32> %res
 }
 
 define <16 x i8> @test_neg_v16i8(<16 x i8> %in) {
 ; CHECK-LABEL: test_neg_v16i8:
-; CHECK: neg v0.16b, v0.16b
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.16b, v0.16b
+; CHECK-NEXT:    ret
   %res = sub <16 x i8> zeroinitializer, %in
   ret <16 x i8> %res
 }
 
 define <8 x i16> @test_neg_v8i16(<8 x i16> %in) {
 ; CHECK-LABEL: test_neg_v8i16:
-; CHECK: neg v0.8h, v0.8h
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.8h, v0.8h
+; CHECK-NEXT:    ret
   %res = sub <8 x i16> zeroinitializer, %in
   ret <8 x i16> %res
 }
 
 define <4 x i32> @test_neg_v4i32(<4 x i32> %in) {
 ; CHECK-LABEL: test_neg_v4i32:
-; CHECK: neg v0.4s, v0.4s
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.4s, v0.4s
+; CHECK-NEXT:    ret
   %res = sub <4 x i32> zeroinitializer, %in
   ret <4 x i32> %res
 }
 
 define <2 x i64> @test_neg_v2i64(<2 x i64> %in) {
 ; CHECK-LABEL: test_neg_v2i64:
-; CHECK: neg v0.2d, v0.2d
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg v0.2d, v0.2d
+; CHECK-NEXT:    ret
   %res = sub <2 x i64> zeroinitializer, %in
   ret <2 x i64> %res
 }
 
 define <1 x i64> @test_neg_v1i64(<1 x i64> %in) {
 ; CHECK-LABEL: test_neg_v1i64:
-; CHECK: neg d0, d0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    neg d0, d0
+; CHECK-NEXT:    ret
   %res = sub <1 x i64> zeroinitializer, %in
   ret <1 x i64> %res
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll b/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
index cb87ba9a4ed6c2..cf1b4876ef9ec5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
@@ -1969,7 +1969,6 @@ define <4 x i16> @test_vadd_lane2_i16_bitcast_bigger_aligned(<4 x i16> %a, <16 x
 ; CHECK-LABEL: test_vadd_lane2_i16_bitcast_bigger_aligned:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    dup v1.4h, v1.h[2]
-; CHECK-NEXT:    dup v1.4h, v1.h[1]
 ; CHECK-NEXT:    add v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
   %extract = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
@@ -1995,12 +1994,6 @@ define <4 x i16> @test_vadd_lane5_i16_bitcast_bigger_aligned(<4 x i16> %a, <16 x
 ; Negative test - can't dup bytes {3,4} of v8i16.
 
 define <4 x i16> @test_vadd_lane_i16_bitcast_bigger_unaligned(<4 x i16> %a, <16 x i8> %v) {
-; CHECK-LABEL: test_vadd_lane_i16_bitcast_bigger_unaligned:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.8b, v1.8b, v0.8b, #1
-; CHECK-NEXT:    dup v1.4h, v1.h[1]
-; CHECK-NEXT:    add v0.4h, v1.4h, v0.4h
-; CHECK-NEXT:    ret
   %extract = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
   %bc = bitcast <8 x i8> %extract to <4 x i16>
   %splat = shufflevector <4 x i16> %bc, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
index 79645e32074c89..b5dfde1236cd8e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -104,7 +104,9 @@ define <8 x i16> @test_vaddl_a8(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-LABEL: test_vaddl_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    uaddl v0.8h, v0.8b, v1.8b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
@@ -117,9 +119,10 @@ entry:
 define <4 x i32> @test_vaddl_a16(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vaddl_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    uaddl v0.4s, v0.4h, v1.4h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
@@ -132,9 +135,10 @@ entry:
 define <2 x i64> @test_vaddl_a32(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vaddl_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    uaddl v0.2d, v0.2s, v1.2s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
@@ -232,7 +236,9 @@ define <8 x i16> @test_vaddl_high_a8(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_vaddl_high_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    uaddl2 v0.8h, v0.16b, v1.16b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI15_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -247,9 +253,10 @@ entry:
 define <4 x i32> @test_vaddl_high_a16(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vaddl_high_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    uaddl2 v0.4s, v0.8h, v1.8h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI16_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI16_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -264,9 +271,10 @@ entry:
 define <2 x i64> @test_vaddl_high_a32(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vaddl_high_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    uaddl2 v0.2d, v0.4s, v1.4s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -348,7 +356,9 @@ define <8 x i16> @test_vaddw_a8(<8 x i16> %a, <8 x i8> %b) {
 ; CHECK-LABEL: test_vaddw_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    uaddw v0.8h, v0.8h, v1.8b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI24_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI24_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
@@ -360,9 +370,10 @@ entry:
 define <4 x i32> @test_vaddw_a16(<4 x i32> %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vaddw_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    uaddw v0.4s, v0.4s, v1.4h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI25_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI25_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
@@ -374,9 +385,10 @@ entry:
 define <2 x i64> @test_vaddw_a32(<2 x i64> %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vaddw_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    uaddw v0.2d, v0.2d, v1.2s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
@@ -461,7 +473,9 @@ define <8 x i16> @test_vaddw_high_a8(<8 x i16> %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_vaddw_high_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    uaddw2 v0.8h, v0.8h, v1.16b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI33_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI33_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -474,9 +488,10 @@ entry:
 define <4 x i32> @test_vaddw_high_a16(<4 x i32> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vaddw_high_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    uaddw2 v0.4s, v0.4s, v1.8h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI34_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI34_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -489,9 +504,10 @@ entry:
 define <2 x i64> @test_vaddw_high_a32(<2 x i64> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vaddw_high_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    uaddw2 v0.2d, v0.2d, v1.4s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -577,7 +593,9 @@ define <8 x i16> @test_vsubl_a8(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-LABEL: test_vsubl_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    usubl v0.8h, v0.8b, v1.8b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI42_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI42_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <8 x i8> %a to <8 x i16>
@@ -590,9 +608,10 @@ entry:
 define <4 x i32> @test_vsubl_a16(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vsubl_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    usubl v0.4s, v0.4h, v1.4h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI43_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI43_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <4 x i16> %a to <4 x i32>
@@ -605,9 +624,10 @@ entry:
 define <2 x i64> @test_vsubl_a32(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vsubl_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    usubl v0.2d, v0.2s, v1.2s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <2 x i32> %a to <2 x i64>
@@ -705,7 +725,9 @@ define <8 x i16> @test_vsubl_high_a8(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_vsubl_high_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    usubl2 v0.8h, v0.16b, v1.16b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI51_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI51_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -720,9 +742,10 @@ entry:
 define <4 x i32> @test_vsubl_high_a16(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vsubl_high_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    usubl2 v0.4s, v0.8h, v1.8h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI52_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI52_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -737,9 +760,10 @@ entry:
 define <2 x i64> @test_vsubl_high_a32(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vsubl_high_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    usubl2 v0.2d, v0.4s, v1.4s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
@@ -821,7 +845,9 @@ define <8 x i16> @test_vsubw_a8(<8 x i16> %a, <8 x i8> %b) {
 ; CHECK-LABEL: test_vsubw_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    usubw v0.8h, v0.8h, v1.8b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI60_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI60_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <8 x i8> %b to <8 x i16>
@@ -833,9 +859,10 @@ entry:
 define <4 x i32> @test_vsubw_a16(<4 x i32> %a, <4 x i16> %b) {
 ; CHECK-LABEL: test_vsubw_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    usubw v0.4s, v0.4s, v1.4h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI61_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI61_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <4 x i16> %b to <4 x i32>
@@ -847,9 +874,10 @@ entry:
 define <2 x i64> @test_vsubw_a32(<2 x i64> %a, <2 x i32> %b) {
 ; CHECK-LABEL: test_vsubw_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    usubw v0.2d, v0.2d, v1.2s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %vmovl.i.i = zext <2 x i32> %b to <2 x i64>
@@ -934,7 +962,9 @@ define <8 x i16> @test_vsubw_high_a8(<8 x i16> %a, <16 x i8> %b) {
 ; CHECK-LABEL: test_vsubw_high_a8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    usubw2 v0.8h, v0.8h, v1.16b
-; CHECK-NEXT:    bic v0.8h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI69_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI69_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <16 x i8> %b, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -947,9 +977,10 @@ entry:
 define <4 x i32> @test_vsubw_high_a16(<4 x i32> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_vsubw_high_a16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x00ffff0000ffff
 ; CHECK-NEXT:    usubw2 v0.4s, v0.4s, v1.8h
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    adrp x8, .LCPI70_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI70_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -962,9 +993,10 @@ entry:
 define <2 x i64> @test_vsubw_high_a32(<2 x i64> %a, <4 x i32> %b) {
 ; CHECK-LABEL: test_vsubw_high_a32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi v2.2d, #0x000000ffffffff
 ; CHECK-NEXT:    usubw2 v0.2d, v0.2d, v1.4s
-; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-NEXT:    trn2 v0.4s, v0.4s, v2.4s
 ; CHECK-NEXT:    ret
 entry:
   %shuffle.i.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
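
The arm64-neon-3vdiff.ll diffs above show the visible effect of enabling
isTBLMask in isShuffleMaskLegal: zext-style masking shuffles that used to be
folded into bic/and now stay as shuffles and are lowered through a
constant-pool tbl index vector (or a rev64/trn2 pair for the 64-bit-element
cases). As a minimal sketch of the kind of mask involved (illustrative only,
not taken from the patch): interleaving bytes with zeros is a per-byte
shuffle, and a single-register tbl can realize it because an out-of-range tbl
index produces a zero byte.

define <16 x i8> @interleave_with_zero_sketch(<16 x i8> %v) {
  ; Interleave the low eight bytes of %v with zero bytes (a byte-level
  ; zero extension). Every result lane reads either a byte of %v or a
  ; byte of the zero operand, so the mask fits one tbl lookup.
  %s = shufflevector <16 x i8> %v, <16 x i8> zeroinitializer,
       <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19,
                   i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
  ret <16 x i8> %s
}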
diff --git a/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll b/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
index c598306c2de301..8b7f282d425001 100644
--- a/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-platform-reg.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-apple-ios -mattr=+reserve-x18 -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE --check-prefix=CHECK-RESERVE-X18
 ; RUN: llc -mtriple=arm64-freebsd-gnu -mattr=+reserve-x18 -o - %s | FileCheck %s --check-prefix=CHECK-RESERVE --check-prefix=CHECK-RESERVE-X18
 ; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s
@@ -116,73 +117,130 @@
 @var = global [30 x i64] zeroinitializer
 
 define void @keep_live() {
+; CHECK-LABEL: keep_live:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-96]! // 16-byte Folded Spill
+; CHECK-NEXT:    stp x28, x27, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x26, x25, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w21, -24
+; CHECK-NEXT:    .cfi_offset w22, -32
+; CHECK-NEXT:    .cfi_offset w23, -40
+; CHECK-NEXT:    .cfi_offset w24, -48
+; CHECK-NEXT:    .cfi_offset w25, -56
+; CHECK-NEXT:    .cfi_offset w26, -64
+; CHECK-NEXT:    .cfi_offset w27, -72
+; CHECK-NEXT:    .cfi_offset w28, -80
+; CHECK-NEXT:    .cfi_offset w30, -88
+; CHECK-NEXT:    .cfi_offset w29, -96
+; CHECK-NEXT:    adrp x8, :got:var
+; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var]
+; CHECK-NEXT:    ldr x9, [x8]
+; CHECK-NEXT:    ldr x10, [x8, #8]
+; CHECK-NEXT:    ldr x11, [x8, #16]
+; CHECK-NEXT:    ldr x12, [x8, #24]
+; CHECK-NEXT:    ldr x13, [x8, #32]
+; CHECK-NEXT:    ldr x14, [x8, #40]
+; CHECK-NEXT:    ldr x15, [x8, #48]
+; CHECK-NEXT:    ldr x16, [x8, #56]
+; CHECK-NEXT:    ldr x17, [x8, #64]
+; CHECK-NEXT:    ldr x18, [x8, #72]
+; CHECK-NEXT:    ldr x0, [x8, #80]
+; CHECK-NEXT:    ldr x1, [x8, #88]
+; CHECK-NEXT:    ldr x2, [x8, #96]
+; CHECK-NEXT:    ldr x3, [x8, #104]
+; CHECK-NEXT:    ldr x4, [x8, #112]
+; CHECK-NEXT:    ldr x5, [x8, #120]
+; CHECK-NEXT:    ldr x6, [x8, #128]
+; CHECK-NEXT:    ldr x7, [x8, #136]
+; CHECK-NEXT:    ldr x19, [x8, #144]
+; CHECK-NEXT:    ldr x20, [x8, #152]
+; CHECK-NEXT:    ldr x21, [x8, #160]
+; CHECK-NEXT:    ldr x22, [x8, #168]
+; CHECK-NEXT:    ldr x23, [x8, #176]
+; CHECK-NEXT:    ldr x24, [x8, #184]
+; CHECK-NEXT:    ldr x25, [x8, #192]
+; CHECK-NEXT:    ldr x26, [x8, #200]
+; CHECK-NEXT:    ldr x27, [x8, #208]
+; CHECK-NEXT:    ldr x28, [x8, #216]
+; CHECK-NEXT:    ldr x29, [x8, #224]
+; CHECK-NEXT:    ldr x30, [x8, #232]
+; CHECK-NEXT:    str x30, [x8, #232]
+; CHECK-NEXT:    str x29, [x8, #224]
+; CHECK-NEXT:    str x28, [x8, #216]
+; CHECK-NEXT:    str x27, [x8, #208]
+; CHECK-NEXT:    str x26, [x8, #200]
+; CHECK-NEXT:    str x25, [x8, #192]
+; CHECK-NEXT:    str x24, [x8, #184]
+; CHECK-NEXT:    str x23, [x8, #176]
+; CHECK-NEXT:    str x22, [x8, #168]
+; CHECK-NEXT:    str x21, [x8, #160]
+; CHECK-NEXT:    str x20, [x8, #152]
+; CHECK-NEXT:    str x19, [x8, #144]
+; CHECK-NEXT:    str x7, [x8, #136]
+; CHECK-NEXT:    str x6, [x8, #128]
+; CHECK-NEXT:    str x5, [x8, #120]
+; CHECK-NEXT:    str x4, [x8, #112]
+; CHECK-NEXT:    str x3, [x8, #104]
+; CHECK-NEXT:    str x2, [x8, #96]
+; CHECK-NEXT:    str x1, [x8, #88]
+; CHECK-NEXT:    str x0, [x8, #80]
+; CHECK-NEXT:    str x18, [x8, #72]
+; CHECK-NEXT:    str x17, [x8, #64]
+; CHECK-NEXT:    str x16, [x8, #56]
+; CHECK-NEXT:    str x15, [x8, #48]
+; CHECK-NEXT:    str x14, [x8, #40]
+; CHECK-NEXT:    str x13, [x8, #32]
+; CHECK-NEXT:    str x12, [x8, #24]
+; CHECK-NEXT:    str x11, [x8, #16]
+; CHECK-NEXT:    str x10, [x8, #8]
+; CHECK-NEXT:    str x9, [x8]
+; CHECK-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x24, x23, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x26, x25, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x28, x27, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x29, x30, [sp], #96 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   %val = load volatile [30 x i64], ptr @var
   store volatile [30 x i64] %val, ptr @var
 
-; CHECK: ldr x18
-; CHECK: str x18
 
-; CHECK-RESERVE-NOT: ldr fp
-; CHECK-RESERVE-X8-NOT: adrp x8
-; CHECK-RESERVE-X8-NOT: ldr x8
-; CHECK-RESERVE-X1-NOT: ldr x1,
-; CHECK-RESERVE-X2-NOT: ldr x2,
-; CHECK-RESERVE-X3-NOT: ldr x3,
-; CHECK-RESERVE-X4-NOT: ldr x4,
-; CHECK-RESERVE-X5-NOT: ldr x5,
-; CHECK-RESERVE-X6-NOT: ldr x6,
-; CHECK-RESERVE-X7-NOT: ldr x7,
-; CHECK-RESERVE-X9-NOT: ldr x9,
-; CHECK-RESERVE-X10-NOT: ldr x10,
-; CHECK-RESERVE-X11-NOT: ldr x11,
-; CHECK-RESERVE-X12-NOT: ldr x12,
-; CHECK-RESERVE-X13-NOT: ldr x13,
-; CHECK-RESERVE-X14-NOT: ldr x14,
-; CHECK-RESERVE-X15-NOT: ldr x15,
-; CHECK-RESERVE-X16-NOT: ldr x16
-; CHECK-RESERVE-X17-NOT: ldr x17
-; CHECK-RESERVE-X18-NOT: ldr x18
-; CHECK-RESERVE-X19-NOT: ldr x19
-; CHECK-RESERVE-X20-NOT: ldr x20
-; CHECK-RESERVE-X21-NOT: ldr x21
-; CHECK-RESERVE-X22-NOT: ldr x22
-; CHECK-RESERVE-X23-NOT: ldr x23
-; CHECK-RESERVE-X24-NOT: ldr x24
-; CHECK-RESERVE-X25-NOT: ldr x25
-; CHECK-RESERVE-X26-NOT: ldr x26
-; CHECK-RESERVE-X27-NOT: ldr x27
-; CHECK-RESERVE-X28-NOT: ldr x28
-; CHECK-RESERVE-X30-NOT: ldr x30
-; CHECK-RESERVE: Spill
-; CHECK-RESERVE-NOT: ldr fp
-; CHECK-RESERVE-X1-NOT: ldr x1,
-; CHECK-RESERVE-X2-NOT: ldr x2,
-; CHECK-RESERVE-X3-NOT: ldr x3,
-; CHECK-RESERVE-X4-NOT: ldr x4,
-; CHECK-RESERVE-X5-NOT: ldr x5,
-; CHECK-RESERVE-X6-NOT: ldr x6,
-; CHECK-RESERVE-X7-NOT: ldr x7,
-; CHECK-RESERVE-X9-NOT: ldr x9,
-; CHECK-RESERVE-X10-NOT: ldr x10,
-; CHECK-RESERVE-X11-NOT: ldr x11,
-; CHECK-RESERVE-X12-NOT: ldr x12,
-; CHECK-RESERVE-X13-NOT: ldr x13,
-; CHECK-RESERVE-X14-NOT: ldr x14,
-; CHECK-RESERVE-X15-NOT: ldr x15,
-; CHECK-RESERVE-X16-NOT: ldr x16
-; CHECK-RESERVE-X17-NOT: ldr x17
-; CHECK-RESERVE-X18-NOT: ldr x18
-; CHECK-RESERVE-X19-NOT: ldr x19
-; CHECK-RESERVE-X20-NOT: ldr x20
-; CHECK-RESERVE-X21-NOT: ldr x21
-; CHECK-RESERVE-X22-NOT: ldr x22
-; CHECK-RESERVE-X23-NOT: ldr x23
-; CHECK-RESERVE-X24-NOT: ldr x24
-; CHECK-RESERVE-X25-NOT: ldr x25
-; CHECK-RESERVE-X26-NOT: ldr x26
-; CHECK-RESERVE-X27-NOT: ldr x27
-; CHECK-RESERVE-X28-NOT: ldr x28
-; CHECK-RESERVE-X30-NOT: ldr x30
-; CHECK-RESERVE: ret
   ret void
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-RESERVE: {{.*}}
+; CHECK-RESERVE-X1: {{.*}}
+; CHECK-RESERVE-X10: {{.*}}
+; CHECK-RESERVE-X11: {{.*}}
+; CHECK-RESERVE-X12: {{.*}}
+; CHECK-RESERVE-X13: {{.*}}
+; CHECK-RESERVE-X14: {{.*}}
+; CHECK-RESERVE-X15: {{.*}}
+; CHECK-RESERVE-X16: {{.*}}
+; CHECK-RESERVE-X17: {{.*}}
+; CHECK-RESERVE-X18: {{.*}}
+; CHECK-RESERVE-X19: {{.*}}
+; CHECK-RESERVE-X2: {{.*}}
+; CHECK-RESERVE-X20: {{.*}}
+; CHECK-RESERVE-X21: {{.*}}
+; CHECK-RESERVE-X22: {{.*}}
+; CHECK-RESERVE-X23: {{.*}}
+; CHECK-RESERVE-X24: {{.*}}
+; CHECK-RESERVE-X25: {{.*}}
+; CHECK-RESERVE-X26: {{.*}}
+; CHECK-RESERVE-X27: {{.*}}
+; CHECK-RESERVE-X28: {{.*}}
+; CHECK-RESERVE-X3: {{.*}}
+; CHECK-RESERVE-X30: {{.*}}
+; CHECK-RESERVE-X4: {{.*}}
+; CHECK-RESERVE-X5: {{.*}}
+; CHECK-RESERVE-X6: {{.*}}
+; CHECK-RESERVE-X7: {{.*}}
+; CHECK-RESERVE-X8: {{.*}}
+; CHECK-RESERVE-X9: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-prefetch-new.ll b/llvm/test/CodeGen/AArch64/arm64-prefetch-new.ll
index da7d74afe56bec..2cfc7e882aee69 100644
--- a/llvm/test/CodeGen/AArch64/arm64-prefetch-new.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-prefetch-new.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 -mattr=+v8.9a --global-isel=0 < %s | FileCheck %s
 ; RUN: llc -mtriple=aarch64 -mattr=+v8.9a --global-isel=1 --global-isel-abort=1 < %s | FileCheck %s
 
@@ -6,7 +7,6 @@
 
 define void @test(ptr %i, i32 %j) nounwind ssp {
 entry:
-  ; CHECK-LABEL: @test
   %j.addr = alloca i32, align 4
   store i32 %j, ptr %j.addr, align 4, !tbaa !0
   %tmp = bitcast ptr %j.addr to ptr
@@ -16,42 +16,30 @@ entry:
   ; Verify prefetching works for all the different kinds of pointers we might
   ; want to prefetch.
 
-  ; CHECK: prfm pldl1keep,
   call void @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 0, i32 1)
 
-  ; CHECK: prfum pldl1keep,
   call void @llvm.aarch64.prefetch(ptr %tmp, i32 0, i32 0, i32 0, i32 1)
 
-  ; CHECK: prfm pldl1keep,
   call void @llvm.aarch64.prefetch(ptr %i, i32 0, i32 0, i32 0, i32 1)
 
-  ; CHECK: prfum pldl1keep,
   call void @llvm.aarch64.prefetch(ptr %i.next, i32 0, i32 0, i32 0, i32 1)
 
-  ; CHECK: prfm pldl1keep,
   call void @llvm.aarch64.prefetch(ptr @a, i32 0, i32 0, i32 0, i32 1)
 
-  ; CHECK: prfm pldl1keep,
   call void @llvm.aarch64.prefetch(ptr @b, i32 0, i32 0, i32 0, i32 1)
 
   ; Verify that we can generate every single valid prefetch value.
 
-  ; CHECK: prfm pstl1keep,
   call void @llvm.aarch64.prefetch(ptr null, i32 1, i32 0, i32 0, i32 1)
 
-  ; CHECK: prfm pldl2keep,
   call void @llvm.aarch64.prefetch(ptr null, i32 0, i32 1, i32 0, i32 1)
 
-  ; CHECK: prfm pldl3keep,
   call void @llvm.aarch64.prefetch(ptr null, i32 0, i32 2, i32 0, i32 1)
 
-  ; CHECK: prfm pldslckeep,
   call void @llvm.aarch64.prefetch(ptr null, i32 0, i32 3, i32 0, i32 1)
 
-  ; CHECK: prfm pldl1strm,
   call void @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 1, i32 1)
 
-  ; CHECK: prfm plil1keep,
   call void @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 0, i32 0)
 
   ret void
@@ -65,3 +53,5 @@ attributes #0 = { inaccessiblemem_or_argmemonly nounwind willreturn }
 !1 = !{!"omnipotent char", !2}
 !2 = !{!"Simple C/C++ TBAA"}
 !3 = !{!"any pointer", !1}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-prefetch.ll b/llvm/test/CodeGen/AArch64/arm64-prefetch.ll
index 55652e3de5a17d..07d18879114966 100644
--- a/llvm/test/CodeGen/AArch64/arm64-prefetch.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-prefetch.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 ; RUN: llc -O0 --global-isel-abort=1 < %s -mtriple=arm64-eabi | FileCheck %s
 
@@ -5,34 +6,21 @@
 
 define void @test(i32 %i, i32 %j) nounwind ssp {
 entry:
-  ; CHECK: @test
   %j.addr = alloca i32, align 4
   store i32 %j, ptr %j.addr, align 4, !tbaa !0
-  ; CHECK: prfum pldl1strm
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 0, i32 1)
-  ; CHECK: prfum pldl3keep
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 1, i32 1)
-  ; CHECK: prfum pldl2keep
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 2, i32 1)
-  ; CHECK: prfum pldl1keep
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 3, i32 1)
 
-  ; CHECK: prfum plil1strm
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 0, i32 0)
-  ; CHECK: prfum plil3keep
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 1, i32 0)
-  ; CHECK: prfum plil2keep
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 2, i32 0)
-  ; CHECK: prfum plil1keep
   call void @llvm.prefetch(ptr %j.addr, i32 0, i32 3, i32 0)
 
-  ; CHECK: prfum pstl1strm
   call void @llvm.prefetch(ptr %j.addr, i32 1, i32 0, i32 1)
-  ; CHECK: prfum pstl3keep
   call void @llvm.prefetch(ptr %j.addr, i32 1, i32 1, i32 1)
-  ; CHECK: prfum pstl2keep
   call void @llvm.prefetch(ptr %j.addr, i32 1, i32 2, i32 1)
-  ; CHECK: prfum pstl1keep
   call void @llvm.prefetch(ptr %j.addr, i32 1, i32 3, i32 1)
 
   %tmp1 = load i32, ptr %j.addr, align 4, !tbaa !0
@@ -41,64 +29,52 @@ entry:
   %tmp2 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx = getelementptr inbounds i32, ptr %tmp2, i64 %idxprom
 
-  ; CHECK: prfm pldl1strm
   call void @llvm.prefetch(ptr %arrayidx, i32 0, i32 0, i32 1)
   %tmp4 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx3 = getelementptr inbounds i32, ptr %tmp4, i64 %idxprom
 
-  ; CHECK: prfm pldl3keep
   call void @llvm.prefetch(ptr %arrayidx3, i32 0, i32 1, i32 1)
   %tmp6 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx6 = getelementptr inbounds i32, ptr %tmp6, i64 %idxprom
 
-  ; CHECK: prfm pldl2keep
   call void @llvm.prefetch(ptr %arrayidx6, i32 0, i32 2, i32 1)
   %tmp8 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx9 = getelementptr inbounds i32, ptr %tmp8, i64 %idxprom
 
-  ; CHECK: prfm pldl1keep
   call void @llvm.prefetch(ptr %arrayidx9, i32 0, i32 3, i32 1)
   %tmp10 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx12 = getelementptr inbounds i32, ptr %tmp10, i64 %idxprom
 
 
-  ; CHECK: prfm plil1strm
   call void @llvm.prefetch(ptr %arrayidx12, i32 0, i32 0, i32 0)
   %tmp12 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx15 = getelementptr inbounds i32, ptr %tmp12, i64 %idxprom
 
-  ; CHECK: prfm plil3keep
   call void @llvm.prefetch(ptr %arrayidx3, i32 0, i32 1, i32 0)
   %tmp14 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx18 = getelementptr inbounds i32, ptr %tmp14, i64 %idxprom
 
-  ; CHECK: prfm plil2keep
   call void @llvm.prefetch(ptr %arrayidx6, i32 0, i32 2, i32 0)
   %tmp16 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx21 = getelementptr inbounds i32, ptr %tmp16, i64 %idxprom
 
-  ; CHECK: prfm plil1keep
   call void @llvm.prefetch(ptr %arrayidx9, i32 0, i32 3, i32 0)
   %tmp18 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx24 = getelementptr inbounds i32, ptr %tmp18, i64 %idxprom
 
 
-  ; CHECK: prfm pstl1strm
   call void @llvm.prefetch(ptr %arrayidx12, i32 1, i32 0, i32 1)
   %tmp20 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx27 = getelementptr inbounds i32, ptr %tmp20, i64 %idxprom
 
-  ; CHECK: prfm pstl3keep
   call void @llvm.prefetch(ptr %arrayidx15, i32 1, i32 1, i32 1)
   %tmp22 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx30 = getelementptr inbounds i32, ptr %tmp22, i64 %idxprom
 
-  ; CHECK: prfm pstl2keep
   call void @llvm.prefetch(ptr %arrayidx18, i32 1, i32 2, i32 1)
   %tmp24 = load ptr, ptr @a, align 8, !tbaa !3
   %arrayidx33 = getelementptr inbounds i32, ptr %tmp24, i64 %idxprom
 
-  ; CHECK: prfm pstl1keep
   call void @llvm.prefetch(ptr %arrayidx21, i32 1, i32 3, i32 1)
   ret void
 }
@@ -109,3 +85,5 @@ declare void @llvm.prefetch(ptr nocapture, i32, i32, i32) nounwind
 !1 = !{!"omnipotent char", !2}
 !2 = !{!"Simple C/C++ TBAA"}
 !3 = !{!"any pointer", !1}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
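[Editor's note: the generic intrinsic in this file takes one fewer operand, and its locality argument runs the opposite way to the target level (3 = keep close = l1keep, 0 = no reuse = the streaming variant). A minimal sketch of the mapping the removed checks spelled out:

  declare void @llvm.prefetch(ptr nocapture, i32, i32, i32)

  define void @generic_prefetch_sketch(ptr %p) {
    call void @llvm.prefetch(ptr %p, i32 0, i32 3, i32 1) ; read, locality 3 -> pldl1keep
    call void @llvm.prefetch(ptr %p, i32 0, i32 0, i32 1) ; read, locality 0 -> pldl1strm
    call void @llvm.prefetch(ptr %p, i32 1, i32 2, i32 1) ; write, locality 2 -> pstl2keep
    ret void
  }
]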
diff --git a/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll b/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
index cfd507c60831de..ba242fcaf6af3e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
@@ -1,9 +1,7 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
 
 define i8 @test_64bit_add(ptr %a, i64 %b) {
-; CHECK-LABEL: test_64bit_add:
-; CHECK: ldrh w0, [x0, x1, lsl #1]
-; CHECK: ret
   %tmp1 = getelementptr inbounds i16, ptr %a, i64 %b
   %tmp2 = load i16, ptr %tmp1
   %tmp3 = trunc i16 %tmp2 to i8
@@ -13,7 +11,6 @@ define i8 @test_64bit_add(ptr %a, i64 %b) {
 ; These tests are trying to form SEXT and ZEXT operations that never leave i64
 ; space, to make sure LLVM can adapt the offset register correctly.
 define void @ldst_8bit(ptr %base, i64 %offset) minsize {
-; CHECK-LABEL: ldst_8bit:
 
    %off32.sext.tmp = shl i64 %offset, 32
    %off32.sext = ashr i64 %off32.sext.tmp, 32
@@ -21,7 +18,6 @@ define void @ldst_8bit(ptr %base, i64 %offset) minsize {
    %val8_sxtw = load volatile i8, ptr %addr8_sxtw
    %val32_signed = sext i8 %val8_sxtw to i32
    store volatile i32 %val32_signed, ptr @var_32bit
-; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
@@ -30,14 +26,12 @@ define void @ldst_8bit(ptr %base, i64 %offset) minsize {
   %val8_uxtw = load volatile i8, ptr %addr_uxtw
   %newval8 = add i8 %val8_uxtw, 1
   store volatile i8 %newval8, ptr @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
    ret void
 }
 
 
 define void @ldst_16bit(ptr %base, i64 %offset) minsize {
-; CHECK-LABEL: ldst_16bit:
 
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
@@ -46,7 +40,6 @@ define void @ldst_16bit(ptr %base, i64 %offset) minsize {
   %val8_uxtw = load volatile i16, ptr %addr_uxtw
   %newval8 = add i16 %val8_uxtw, 1
   store volatile i16 %newval8, ptr @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
   %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw.tmp = shl i64 %offset, 32
@@ -56,7 +49,6 @@ define void @ldst_16bit(ptr %base, i64 %offset) minsize {
   %val16_sxtw = load volatile i16, ptr %addr_sxtw
   %val64_signed = sext i16 %val16_sxtw to i64
   store volatile i64 %val64_signed, ptr @var_64bit
-; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
 
   %base_uxtwN = ptrtoint ptr %base to i64
@@ -67,12 +59,10 @@ define void @ldst_16bit(ptr %base, i64 %offset) minsize {
   %val32 = load volatile i32, ptr @var_32bit
   %val16_trunc32 = trunc i32 %val32 to i16
   store volatile i16 %val16_trunc32, ptr %addr_uxtwN
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
    ret void
 }
 
 define void @ldst_32bit(ptr %base, i64 %offset) minsize {
-; CHECK-LABEL: ldst_32bit:
 
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
@@ -81,7 +71,6 @@ define void @ldst_32bit(ptr %base, i64 %offset) minsize {
   %val32_uxtw = load volatile i32, ptr %addr_uxtw
   %newval32 = add i32 %val32_uxtw, 1
   store volatile i32 %newval32, ptr @var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
   %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw.tmp = shl i64 %offset, 32
@@ -91,7 +80,6 @@ define void @ldst_32bit(ptr %base, i64 %offset) minsize {
   %val32_sxtw = load volatile i32, ptr %addr_sxtw
   %val64_signed = sext i32 %val32_sxtw to i64
   store volatile i64 %val64_signed, ptr @var_64bit
-; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
 
   %base_uxtwN = ptrtoint ptr %base to i64
@@ -101,12 +89,10 @@ define void @ldst_32bit(ptr %base, i64 %offset) minsize {
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
   %val32 = load volatile i32, ptr @var_32bit
   store volatile i32 %val32, ptr %addr_uxtwN
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
    ret void
 }
 
 define void @ldst_64bit(ptr %base, i64 %offset) minsize {
-; CHECK-LABEL: ldst_64bit:
 
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
@@ -115,7 +101,6 @@ define void @ldst_64bit(ptr %base, i64 %offset) minsize {
   %val64_uxtw = load volatile i64, ptr %addr_uxtw
   %newval8 = add i64 %val64_uxtw, 1
   store volatile i64 %newval8, ptr @var_64bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
 
   %base_sxtw = ptrtoint ptr %base to i64
   %offset_sxtw.tmp = shl i64 %offset, 32
@@ -124,7 +109,6 @@ define void @ldst_64bit(ptr %base, i64 %offset) minsize {
   %addr_sxtw = inttoptr i64 %addrint_sxtw to ptr
   %val64_sxtw = load volatile i64, ptr %addr_sxtw
   store volatile i64 %val64_sxtw, ptr @var_64bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
 
 
   %base_uxtwN = ptrtoint ptr %base to i64
@@ -134,7 +118,6 @@ define void @ldst_64bit(ptr %base, i64 %offset) minsize {
   %addr_uxtwN = inttoptr i64 %addrint_uxtwN to ptr
   %val64 = load volatile i64, ptr @var_64bit
   store volatile i64 %val64, ptr %addr_uxtwN
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
    ret void
 }
 
@@ -142,3 +125,5 @@ define void @ldst_64bit(ptr %base, i64 %offset) minsize {
 @var_16bit = global i16 0
 @var_32bit = global i32 0
 @var_64bit = global i64 0
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
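[Editor's note: a condensed sketch of the two idioms these tests rely on to keep the extension inside i64, pulled out of the interleaved functions above (names illustrative):

  define i8 @load_sxtw(ptr %base, i64 %off) {
    ; shl/ashr by 32 sign-extends the low word without leaving i64, so ISel
    ; can fold it into the addressing mode: ldrb w0, [x0, w1, sxtw]
    %sext.tmp = shl i64 %off, 32
    %sext = ashr i64 %sext.tmp, 32
    %addr = getelementptr i8, ptr %base, i64 %sext
    %val = load i8, ptr %addr
    ret i8 %val
  }
  ; the unsigned twin masks instead: and i64 %off, 4294967295 -> the uxtw form
]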
diff --git a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
index b4590756d04422..26c380a7a2ec85 100644
--- a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 # RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s
 # CHECK: %1:gpr32common = ANDWri {{.*}}
 # CHECK-NEXT: $wzr = SUBSWri {{.*}}
@@ -39,3 +40,5 @@ body:             |
     RET_ReallyLR implicit $w0
 
 ...
+## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+# CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
index 85b179d631e1c3..0c1e1a54d260a5 100644
--- a/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-linux-gnu -mattr=+reserve-x9 -mattr=+call-saved-x9 -o - %s | FileCheck %s --check-prefixes=CHECK-X9
 ; RUN: llc -mtriple=arm64-linux-gnu -mattr=+reserve-x9 -mattr=+call-saved-x9 -global-isel -o - %s | FileCheck %s --check-prefixes=CHECK-X9
 ; RUN: llc -mtriple=arm64-linux-gnu -mattr=+reserve-x9 -mattr=+call-saved-x9 -fast-isel -o - %s | FileCheck %s --check-prefixes=CHECK-X9
@@ -36,42 +37,19 @@
 ; If a register is specified to be both reserved and callee-saved, then it
 ; should not be allocated and should not be spilled onto the stack.
 define void @foo() {
-; CHECK-X9-NOT: str x9, [sp
-; CHECK-X10-NOT: str x10, [sp
-; CHECK-X11-NOT: str x11, [sp
-; CHECK-X12-NOT: str x12, [sp
-; CHECK-X13-NOT: str x13, [sp
-; CHECK-X14-NOT: str x14, [sp
-; CHECK-X15-NOT: str x15, [sp
-; CHECK-X18-NOT: str x18, [sp
 
   %val = load volatile [30 x i64], ptr @var
   store volatile [30 x i64] %val, ptr @var
 
-; CHECK-X9-NOT: ldr x9
-; CHECK-X10-NOT: ldr x10
-; CHECK-X11-NOT: ldr x11
-; CHECK-X12-NOT: ldr x12
-; CHECK-X13-NOT: ldr x13
-; CHECK-X14-NOT: ldr x14
-; CHECK-X15-NOT: ldr x15
-; CHECK-X18-NOT: ldr x18
-; CHECK-X9-NOT: str x9
-; CHECK-X10-NOT: str x10
-; CHECK-X11-NOT: str x11
-; CHECK-X12-NOT: str x12
-; CHECK-X13-NOT: str x13
-; CHECK-X14-NOT: str x14
-; CHECK-X15-NOT: str x15
-; CHECK-X18-NOT: str x18
 
-; CHECK-X9-NOT: ldr x9, [sp
-; CHECK-X10-NOT: ldr x10, [sp
-; CHECK-X11-NOT: ldr x11, [sp
-; CHECK-X12-NOT: ldr x12, [sp
-; CHECK-X13-NOT: ldr x13, [sp
-; CHECK-X14-NOT: ldr x14, [sp
-; CHECK-X15-NOT: ldr x15, [sp
-; CHECK-X18-NOT: ldr x18, [sp
   ret void
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-X10: {{.*}}
+; CHECK-X11: {{.*}}
+; CHECK-X12: {{.*}}
+; CHECK-X13: {{.*}}
+; CHECK-X14: {{.*}}
+; CHECK-X15: {{.*}}
+; CHECK-X18: {{.*}}
+; CHECK-X9: {{.*}}
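[Editor's note: the trailing "<PREFIX>: {{.*}}" placeholder lines the script appends here (and in the other regenerated files) exist because FileCheck rejects a --check-prefix that matches no directive; with every CHECK-X<N> body above dropped, one catch-all match per prefix keeps each RUN line valid. That is my reading of the autogenerated note, not something stated in the patch.]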
diff --git a/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll b/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
index 98160bb9300cf2..3ca12566dd0055 100644
--- a/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-reserved-arg-reg-call-error.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: not llc < %s -mtriple=arm64-linux-gnu -mattr=+reserve-x1 2>&1 | FileCheck %s
 ; RUN: not llc < %s -mtriple=arm64-linux-gnu -mattr=+reserve-x1 -fast-isel 2>&1 | FileCheck %s
 ; RUN: not llc < %s -mtriple=arm64-linux-gnu -mattr=+reserve-x1 -global-isel 2>&1 | FileCheck %s
diff --git a/llvm/test/CodeGen/AArch64/arm64-sincos.ll b/llvm/test/CodeGen/AArch64/arm64-sincos.ll
index 2a62293b521c3c..5564e922b4e96c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sincos.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sincos.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios7 | FileCheck %s --check-prefix CHECK-IOS
 ; RUN: llc < %s -mtriple=arm64-linux-gnu | FileCheck %s --check-prefix CHECK-LINUX
 ; RUN: llc < %s -mtriple=arm64-linux-android | FileCheck %s --check-prefix CHECK-LINUX
@@ -8,13 +9,26 @@
 ; rdar://12856873
 
 define float @test1(float %x) nounwind {
-entry:
 ; CHECK-IOS-LABEL: test1:
-; CHECK-IOS: bl ___sincosf_stret
-; CHECK-IOS: fadd s0, s0, s1
-
+; CHECK-IOS:       ; %bb.0: ; %entry
+; CHECK-IOS-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-IOS-NEXT:    bl ___sincosf_stret
+; CHECK-IOS-NEXT:    fadd s0, s0, s1
+; CHECK-IOS-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-IOS-NEXT:    ret
+;
 ; CHECK-LINUX-LABEL: test1:
-; CHECK-LINUX: bl sincosf
+; CHECK-LINUX:       // %bb.0: // %entry
+; CHECK-LINUX-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-LINUX-NEXT:    add x0, sp, #12
+; CHECK-LINUX-NEXT:    add x1, sp, #8
+; CHECK-LINUX-NEXT:    bl sincosf
+; CHECK-LINUX-NEXT:    ldp s0, s1, [sp, #8]
+; CHECK-LINUX-NEXT:    fadd s0, s1, s0
+; CHECK-LINUX-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-LINUX-NEXT:    ret
+entry:
+
 
   %call = tail call float @sinf(float %x) readnone
   %call1 = tail call float @cosf(float %x) readnone
@@ -23,14 +37,35 @@ entry:
 }
 
 define float @test1_errno(float %x) nounwind {
-entry:
 ; CHECK-IOS-LABEL: test1_errno:
-; CHECK-IOS: bl _sinf
-; CHECK-IOS: bl _cosf
-
+; CHECK-IOS:       ; %bb.0: ; %entry
+; CHECK-IOS-NEXT:    stp d9, d8, [sp, #-32]! ; 16-byte Folded Spill
+; CHECK-IOS-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-IOS-NEXT:    fmov s8, s0
+; CHECK-IOS-NEXT:    bl _sinf
+; CHECK-IOS-NEXT:    fmov s9, s0
+; CHECK-IOS-NEXT:    fmov s0, s8
+; CHECK-IOS-NEXT:    bl _cosf
+; CHECK-IOS-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-IOS-NEXT:    fadd s0, s9, s0
+; CHECK-IOS-NEXT:    ldp d9, d8, [sp], #32 ; 16-byte Folded Reload
+; CHECK-IOS-NEXT:    ret
+;
 ; CHECK-LINUX-LABEL: test1_errno:
-; CHECK-LINUX: bl sinf
-; CHECK-LINUX: bl cosf
+; CHECK-LINUX:       // %bb.0: // %entry
+; CHECK-LINUX-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-LINUX-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-LINUX-NEXT:    fmov s8, s0
+; CHECK-LINUX-NEXT:    bl sinf
+; CHECK-LINUX-NEXT:    fmov s9, s0
+; CHECK-LINUX-NEXT:    fmov s0, s8
+; CHECK-LINUX-NEXT:    bl cosf
+; CHECK-LINUX-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-LINUX-NEXT:    fadd s0, s9, s0
+; CHECK-LINUX-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-LINUX-NEXT:    ret
+entry:
+
 
   %call = tail call float @sinf(float %x)
   %call1 = tail call float @cosf(float %x)
@@ -39,13 +74,29 @@ entry:
 }
 
 define double @test2(double %x) nounwind {
-entry:
 ; CHECK-IOS-LABEL: test2:
-; CHECK-IOS: bl ___sincos_stret
-; CHECK-IOS: fadd d0, d0, d1
-
+; CHECK-IOS:       ; %bb.0: ; %entry
+; CHECK-IOS-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-IOS-NEXT:    bl ___sincos_stret
+; CHECK-IOS-NEXT:    fadd d0, d0, d1
+; CHECK-IOS-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-IOS-NEXT:    ret
+;
 ; CHECK-LINUX-LABEL: test2:
-; CHECK-LINUX: bl sincos
+; CHECK-LINUX:       // %bb.0: // %entry
+; CHECK-LINUX-NEXT:    sub sp, sp, #32
+; CHECK-LINUX-NEXT:    add x0, sp, #24
+; CHECK-LINUX-NEXT:    add x1, sp, #8
+; CHECK-LINUX-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-LINUX-NEXT:    bl sincos
+; CHECK-LINUX-NEXT:    ldr d0, [sp, #8]
+; CHECK-LINUX-NEXT:    ldr d1, [sp, #24]
+; CHECK-LINUX-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-LINUX-NEXT:    fadd d0, d1, d0
+; CHECK-LINUX-NEXT:    add sp, sp, #32
+; CHECK-LINUX-NEXT:    ret
+entry:
+
 
   %call = tail call double @sin(double %x) readnone
   %call1 = tail call double @cos(double %x) readnone
@@ -54,14 +105,35 @@ entry:
 }
 
 define double @test2_errno(double %x) nounwind {
-entry:
 ; CHECK-IOS-LABEL: test2_errno:
-; CHECK-IOS: bl _sin
-; CHECK-IOS: bl _cos
-
+; CHECK-IOS:       ; %bb.0: ; %entry
+; CHECK-IOS-NEXT:    stp d9, d8, [sp, #-32]! ; 16-byte Folded Spill
+; CHECK-IOS-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-IOS-NEXT:    fmov d8, d0
+; CHECK-IOS-NEXT:    bl _sin
+; CHECK-IOS-NEXT:    fmov d9, d0
+; CHECK-IOS-NEXT:    fmov d0, d8
+; CHECK-IOS-NEXT:    bl _cos
+; CHECK-IOS-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-IOS-NEXT:    fadd d0, d9, d0
+; CHECK-IOS-NEXT:    ldp d9, d8, [sp], #32 ; 16-byte Folded Reload
+; CHECK-IOS-NEXT:    ret
+;
 ; CHECK-LINUX-LABEL: test2_errno:
-; CHECK-LINUX: bl sin
-; CHECK-LINUX: bl cos
+; CHECK-LINUX:       // %bb.0: // %entry
+; CHECK-LINUX-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-LINUX-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-LINUX-NEXT:    fmov d8, d0
+; CHECK-LINUX-NEXT:    bl sin
+; CHECK-LINUX-NEXT:    fmov d9, d0
+; CHECK-LINUX-NEXT:    fmov d0, d8
+; CHECK-LINUX-NEXT:    bl cos
+; CHECK-LINUX-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-LINUX-NEXT:    fadd d0, d9, d0
+; CHECK-LINUX-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-LINUX-NEXT:    ret
+entry:
+
 
   %call = tail call double @sin(double %x)
   %call1 = tail call double @cos(double %x)
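[Editor's note: the pairing in this file is the point of the test names — when both libcalls carry readnone, the compiler may assume errno is untouched and merge them into one sincos-style call; the _errno variants omit the attribute, so sin and cos must remain separate calls. A minimal reproduction of the combinable shape, mirroring test1 above:

  define float @sincos_combined(float %x) nounwind {
    %s = tail call float @sinf(float %x) readnone
    %c = tail call float @cosf(float %x) readnone
    %r = fadd float %s, %c   ; folds to one sincosf / ___sincosf_stret call
    ret float %r
  }
  declare float @sinf(float)
  declare float @cosf(float)
]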
diff --git a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
index 475affa358bd15..49357f41edc003 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -133,7 +133,7 @@ define void @testLeftGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounw
 define void @testLeftBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad4x16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
+; CHECK-NEXT:    mov w8, #16500 // =0x4074
 ; CHECK-NEXT:    shl.4h v1, v1, #14
 ; CHECK-NEXT:    dup.4h v2, w8
 ; CHECK-NEXT:    and.8b v0, v0, v2
@@ -163,7 +163,7 @@ define void @testRightGood4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) noun
 define void @testRightBad4x16(<4 x i16> %src1, <4 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad4x16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
+; CHECK-NEXT:    mov w8, #16500 // =0x4074
 ; CHECK-NEXT:    dup.4h v2, w8
 ; CHECK-NEXT:    and.8b v0, v0, v2
 ; CHECK-NEXT:    usra.4h v0, v1, #14
@@ -192,7 +192,7 @@ define void @testLeftGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounw
 define void @testLeftBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad8x16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
+; CHECK-NEXT:    mov w8, #16500 // =0x4074
 ; CHECK-NEXT:    shl.8h v1, v1, #14
 ; CHECK-NEXT:    dup.8h v2, w8
 ; CHECK-NEXT:    and.16b v0, v0, v2
@@ -222,7 +222,7 @@ define void @testRightGood8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) noun
 define void @testRightBad8x16(<8 x i16> %src1, <8 x i16> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad8x16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #16500
+; CHECK-NEXT:    mov w8, #16500 // =0x4074
 ; CHECK-NEXT:    dup.8h v2, w8
 ; CHECK-NEXT:    and.16b v0, v0, v2
 ; CHECK-NEXT:    usra.8h v0, v1, #14
@@ -251,7 +251,7 @@ define void @testLeftGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounw
 define void @testLeftBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad2x32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
+; CHECK-NEXT:    mov w8, #4194300 // =0x3ffffc
 ; CHECK-NEXT:    shl.2s v1, v1, #22
 ; CHECK-NEXT:    dup.2s v2, w8
 ; CHECK-NEXT:    and.8b v0, v0, v2
@@ -281,7 +281,7 @@ define void @testRightGood2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) noun
 define void @testRightBad2x32(<2 x i32> %src1, <2 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad2x32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
+; CHECK-NEXT:    mov w8, #4194300 // =0x3ffffc
 ; CHECK-NEXT:    ushr.2s v1, v1, #22
 ; CHECK-NEXT:    dup.2s v2, w8
 ; CHECK-NEXT:    and.8b v0, v0, v2
@@ -311,7 +311,7 @@ define void @testLeftGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounw
 define void @testLeftBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad4x32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
+; CHECK-NEXT:    mov w8, #4194300 // =0x3ffffc
 ; CHECK-NEXT:    shl.4s v1, v1, #22
 ; CHECK-NEXT:    dup.4s v2, w8
 ; CHECK-NEXT:    and.16b v0, v0, v2
@@ -341,7 +341,7 @@ define void @testRightGood4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) noun
 define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad4x32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #4194300
+; CHECK-NEXT:    mov w8, #4194300 // =0x3ffffc
 ; CHECK-NEXT:    ushr.4s v1, v1, #22
 ; CHECK-NEXT:    dup.4s v2, w8
 ; CHECK-NEXT:    and.16b v0, v0, v2
@@ -358,7 +358,11 @@ define void @testRightBad4x32(<4 x i32> %src1, <4 x i32> %src2, ptr %dest) nounw
 define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftGood2x64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sli.2d v0, v1, #48
+; CHECK-NEXT:    adrp x8, .LCPI24_0
+; CHECK-NEXT:    shl.2d v1, v1, #48
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI24_0]
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v2
+; CHECK-NEXT:    orr.16b v0, v0, v1
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %and.i = and <2 x i64> %src1, <i64 281474976710655, i64 281474976710655>
@@ -371,7 +375,7 @@ define void @testLeftGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounw
 define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testLeftBad2x64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #10
+; CHECK-NEXT:    mov x8, #10 // =0xa
 ; CHECK-NEXT:    shl.2d v1, v1, #48
 ; CHECK-NEXT:    movk x8, #1, lsl #48
 ; CHECK-NEXT:    dup.2d v2, x8
@@ -389,7 +393,11 @@ define void @testLeftBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwi
 define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightGood2x64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sri.2d v0, v1, #48
+; CHECK-NEXT:    adrp x8, .LCPI26_0
+; CHECK-NEXT:    ushr.2d v1, v1, #48
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI26_0]
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v2
+; CHECK-NEXT:    orr.16b v0, v0, v1
 ; CHECK-NEXT:    str q0, [x0]
 ; CHECK-NEXT:    ret
   %and.i = and <2 x i64> %src1, <i64 18446744073709486080, i64 18446744073709486080>
@@ -402,7 +410,7 @@ define void @testRightGood2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) noun
 define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounwind {
 ; CHECK-LABEL: testRightBad2x64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #10
+; CHECK-NEXT:    mov x8, #10 // =0xa
 ; CHECK-NEXT:    ushr.2d v1, v1, #48
 ; CHECK-NEXT:    movk x8, #1, lsl #48
 ; CHECK-NEXT:    dup.2d v2, x8
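[Editor's note: the "Good" cases in this file all share one shape — keep the low N bits of the first source and insert the second source shifted into the high bits, which is exactly what SLI (shift left, insert) encodes; the "Bad" cases use a mask that is not the complement of the shift amount, so the constant must be materialized and the pieces combined with separate instructions. A sketch of the SLI-eligible pattern, mirroring testLeftGood2x64 (whose checks above now go through a constant-pool tbl plus orr rather than a single sli.2d):

  define void @sli_candidate(<2 x i64> %a, <2 x i64> %b, ptr %p) {
    ; 281474976710655 = 0xffffffffffff keeps the low 48 bits of %a...
    %lo = and <2 x i64> %a, <i64 281474976710655, i64 281474976710655>
    ; ...and %b lands in the top 16 bits: an "sli.2d v0, v1, #48" candidate
    %hi = shl <2 x i64> %b, <i64 48, i64 48>
    %r = or <2 x i64> %lo, %hi
    store <2 x i64> %r, ptr %p
    ret void
  }
]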
diff --git a/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll b/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
index 5f849c67b0ca31..d045b6d01c3a4e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-spill-remarks-treshold-hotness.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-missed=regalloc \
 ; RUN:       -pass-remarks-with-hotness 2>&1 | FileCheck %s
 
@@ -9,6 +10,135 @@
 ; THRESHOLD-NOT: remark
 
 define void @fpr128(ptr %p) nounwind ssp {
+; CHECK-LABEL: fpr128:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    sub sp, sp, #176
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:    stp d15, d14, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp d13, d12, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp d11, d10, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp d9, d8, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x28, x27, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x26, x25, [sp, #96] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x24, x23, [sp, #112] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x22, x21, [sp, #128] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #144] ; 16-byte Folded Spill
+; CHECK-NEXT:    stp x29, x30, [sp, #160] ; 16-byte Folded Spill
+; CHECK-NEXT:  LBB0_1: ; %loop
+; CHECK-NEXT:    ; =>This Loop Header: Depth=1
+; CHECK-NEXT:    ; Child Loop BB0_2 Depth 2
+; CHECK-NEXT:    str w8, [sp, #8] ; 4-byte Folded Spill
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:  LBB0_2: ; %loop2
+; CHECK-NEXT:    ; Parent Loop BB0_1 Depth=1
+; CHECK-NEXT:    ; => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    str w8, [sp, #12] ; 4-byte Folded Spill
+; CHECK-NEXT:    ; InlineAsm Start
+; CHECK-NEXT:    ; inlineasm
+; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    ldr w8, [sp, #12] ; 4-byte Folded Reload
+; CHECK-NEXT:    add w8, w8, #1
+; CHECK-NEXT:    cmp w8, #100
+; CHECK-NEXT:    b.lt LBB0_2
+; CHECK-NEXT:  ; %bb.3: ; %end2
+; CHECK-NEXT:    ; in Loop: Header=BB0_1 Depth=1
+; CHECK-NEXT:    ; InlineAsm Start
+; CHECK-NEXT:    ; inlineasm
+; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    ldr w8, [sp, #8] ; 4-byte Folded Reload
+; CHECK-NEXT:    add w8, w8, #1
+; CHECK-NEXT:    cmp w8, #100
+; CHECK-NEXT:    b.lt LBB0_1
+; CHECK-NEXT:  ; %bb.4: ; %loop3.preheader
+; CHECK-NEXT:    mov w8, wzr
+; CHECK-NEXT:  LBB0_5: ; %loop3
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    str w8, [sp, #12] ; 4-byte Folded Spill
+; CHECK-NEXT:    ; InlineAsm Start
+; CHECK-NEXT:    ; inlineasm
+; CHECK-NEXT:    ; InlineAsm End
+; CHECK-NEXT:    ldr w8, [sp, #12] ; 4-byte Folded Reload
+; CHECK-NEXT:    add w8, w8, #1
+; CHECK-NEXT:    cmp w8, #100
+; CHECK-NEXT:    b.lt LBB0_5
+; CHECK-NEXT:  ; %bb.6: ; %end3
+; CHECK-NEXT:    ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x24, x23, [sp, #112] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x26, x25, [sp, #96] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp x28, x27, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp d9, d8, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp d11, d10, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp d13, d12, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT:    ldp d15, d14, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #176
+; CHECK-NEXT:    ret
+;
+; THRESHOLD-LABEL: fpr128:
+; THRESHOLD:       ; %bb.0: ; %entry
+; THRESHOLD-NEXT:    sub sp, sp, #176
+; THRESHOLD-NEXT:    mov w8, wzr
+; THRESHOLD-NEXT:    stp d15, d14, [sp, #16] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp d13, d12, [sp, #32] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp d11, d10, [sp, #48] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp d9, d8, [sp, #64] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp x28, x27, [sp, #80] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp x26, x25, [sp, #96] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp x24, x23, [sp, #112] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp x22, x21, [sp, #128] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp x20, x19, [sp, #144] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:    stp x29, x30, [sp, #160] ; 16-byte Folded Spill
+; THRESHOLD-NEXT:  LBB0_1: ; %loop
+; THRESHOLD-NEXT:    ; =>This Loop Header: Depth=1
+; THRESHOLD-NEXT:    ; Child Loop BB0_2 Depth 2
+; THRESHOLD-NEXT:    str w8, [sp, #8] ; 4-byte Folded Spill
+; THRESHOLD-NEXT:    mov w8, wzr
+; THRESHOLD-NEXT:  LBB0_2: ; %loop2
+; THRESHOLD-NEXT:    ; Parent Loop BB0_1 Depth=1
+; THRESHOLD-NEXT:    ; => This Inner Loop Header: Depth=2
+; THRESHOLD-NEXT:    str w8, [sp, #12] ; 4-byte Folded Spill
+; THRESHOLD-NEXT:    ; InlineAsm Start
+; THRESHOLD-NEXT:    ; inlineasm
+; THRESHOLD-NEXT:    ; InlineAsm End
+; THRESHOLD-NEXT:    ldr w8, [sp, #12] ; 4-byte Folded Reload
+; THRESHOLD-NEXT:    add w8, w8, #1
+; THRESHOLD-NEXT:    cmp w8, #100
+; THRESHOLD-NEXT:    b.lt LBB0_2
+; THRESHOLD-NEXT:  ; %bb.3: ; %end2
+; THRESHOLD-NEXT:    ; in Loop: Header=BB0_1 Depth=1
+; THRESHOLD-NEXT:    ; InlineAsm Start
+; THRESHOLD-NEXT:    ; inlineasm
+; THRESHOLD-NEXT:    ; InlineAsm End
+; THRESHOLD-NEXT:    ldr w8, [sp, #8] ; 4-byte Folded Reload
+; THRESHOLD-NEXT:    add w8, w8, #1
+; THRESHOLD-NEXT:    cmp w8, #100
+; THRESHOLD-NEXT:    b.lt LBB0_1
+; THRESHOLD-NEXT:  ; %bb.4: ; %loop3.preheader
+; THRESHOLD-NEXT:    mov w8, wzr
+; THRESHOLD-NEXT:  LBB0_5: ; %loop3
+; THRESHOLD-NEXT:    ; =>This Inner Loop Header: Depth=1
+; THRESHOLD-NEXT:    str w8, [sp, #12] ; 4-byte Folded Spill
+; THRESHOLD-NEXT:    ; InlineAsm Start
+; THRESHOLD-NEXT:    ; inlineasm
+; THRESHOLD-NEXT:    ; InlineAsm End
+; THRESHOLD-NEXT:    ldr w8, [sp, #12] ; 4-byte Folded Reload
+; THRESHOLD-NEXT:    add w8, w8, #1
+; THRESHOLD-NEXT:    cmp w8, #100
+; THRESHOLD-NEXT:    b.lt LBB0_5
+; THRESHOLD-NEXT:  ; %bb.6: ; %end3
+; THRESHOLD-NEXT:    ldp x29, x30, [sp, #160] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp x20, x19, [sp, #144] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp x22, x21, [sp, #128] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp x24, x23, [sp, #112] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp x26, x25, [sp, #96] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp x28, x27, [sp, #80] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp d9, d8, [sp, #64] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp d11, d10, [sp, #48] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp d13, d12, [sp, #32] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    ldp d15, d14, [sp, #16] ; 16-byte Folded Reload
+; THRESHOLD-NEXT:    add sp, sp, #176
+; THRESHOLD-NEXT:    ret
 entry:
   br label %loop, !dbg !8
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll b/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll
index 2647ac44296908..c7a954db894830 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stackmap-nops.ll
@@ -1,15 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-apple-darwin | FileCheck %s
 
 define void @test_shadow_optimization() {
 entry:
 ; Expect 8 bytes worth of nops here rather than 16: With the shadow optimization
 ; in place, 8 bytes will be consumed by the frame teardown and return instr.
-; CHECK-LABEL: test_shadow_optimization:
-; CHECK:      nop
-; CHECK-NEXT: nop
-; CHECK-NOT:  nop
   tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64  0, i32  16)
   ret void
 }
 
 declare void @llvm.experimental.stackmap(i64, i32, ...)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
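[Editor's note: the arithmetic behind the comment, spelled out: the stackmap requests 16 shadow bytes; AArch64 instructions are 4 bytes each, and the epilogue (frame teardown plus ret, 8 bytes here) counts toward the shadow, so only 16 - 8 = 8 bytes — two nops — should be emitted. The removed checks (nop, nop, then CHECK-NOT: nop) verified exactly that count.]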
diff --git a/llvm/test/CodeGen/AArch64/arm64-stacksave.ll b/llvm/test/CodeGen/AArch64/arm64-stacksave.ll
index e471b1989f61b7..e749a575a18503 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stacksave.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stacksave.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-apple-macosx10.8.0 < %s -verify-coalescing
 ; RUN: llc -mtriple=arm64_32-apple-ios9.0 < %s -verify-coalescing
 ; <rdar://problem/11522048>
diff --git a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
index d1aa88e1d70417..8322bf0e5da3e9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stp-aa.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -enable-misched=false -aarch64-enable-stp-suppress=false -verify-machineinstrs | FileCheck %s
 
 ; The next set of tests makes sure we can combine the second instruction into
@@ -8,6 +9,11 @@
 ; CHECK: ldr w0, [x2, #8]
 ; CHECK: ret
 define i32 @stp_int_aa(i32 %a, i32 %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_int_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp w0, w1, [x2]
+; CHECK-NEXT:    ldr w0, [x2, #8]
+; CHECK-NEXT:    ret
   store i32 %a, ptr %p, align 4
   %ld.ptr = getelementptr inbounds i32, ptr %p, i64 2
   %tmp = load i32, ptr %ld.ptr, align 4
@@ -21,6 +27,11 @@ define i32 @stp_int_aa(i32 %a, i32 %b, ptr nocapture %p) nounwind {
 ; CHECK: ldr x0, [x2, #16]
 ; CHECK: ret
 define i64 @stp_long_aa(i64 %a, i64 %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_long_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x0, x1, [x2]
+; CHECK-NEXT:    ldr x0, [x2, #16]
+; CHECK-NEXT:    ret
   store i64 %a, ptr %p, align 8
   %ld.ptr = getelementptr inbounds i64, ptr %p, i64 2
   %tmp = load i64, ptr %ld.ptr, align 4
@@ -34,6 +45,11 @@ define i64 @stp_long_aa(i64 %a, i64 %b, ptr nocapture %p) nounwind {
 ; CHECK: ldr s0, [x0, #8]
 ; CHECK: ret
 define float @stp_float_aa(float %a, float %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_float_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp s0, s1, [x0]
+; CHECK-NEXT:    ldr s0, [x0, #8]
+; CHECK-NEXT:    ret
   store float %a, ptr %p, align 4
   %ld.ptr = getelementptr inbounds float, ptr %p, i64 2
   %tmp = load float, ptr %ld.ptr, align 4
@@ -47,6 +63,11 @@ define float @stp_float_aa(float %a, float %b, ptr nocapture %p) nounwind {
 ; CHECK: ldr d0, [x0, #16]
 ; CHECK: ret
 define double @stp_double_aa(double %a, double %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_double_aa:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp d0, d1, [x0]
+; CHECK-NEXT:    ldr d0, [x0, #16]
+; CHECK-NEXT:    ret
   store double %a, ptr %p, align 8
   %ld.ptr = getelementptr inbounds double, ptr %p, i64 2
   %tmp = load double, ptr %ld.ptr, align 4
@@ -63,6 +84,11 @@ define double @stp_double_aa(double %a, double %b, ptr nocapture %p) nounwind {
 ; CHECK: stp w1, w2, [x3]
 ; CHECK: ret
 define i32 @stp_int_aa_after(i32 %w0, i32 %a, i32 %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_int_aa_after:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w0, [x3, #4]
+; CHECK-NEXT:    stp w1, w2, [x3]
+; CHECK-NEXT:    ret
   store i32 %a, ptr %p, align 4
   %ld.ptr = getelementptr inbounds i32, ptr %p, i64 1
   %tmp = load i32, ptr %ld.ptr, align 4
@@ -76,6 +102,11 @@ define i32 @stp_int_aa_after(i32 %w0, i32 %a, i32 %b, ptr nocapture %p) nounwind
 ; CHECK: stp x1, x2, [x3]
 ; CHECK: ret
 define i64 @stp_long_aa_after(i64 %x0, i64 %a, i64 %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_long_aa_after:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x0, [x3, #8]
+; CHECK-NEXT:    stp x1, x2, [x3]
+; CHECK-NEXT:    ret
   store i64 %a, ptr %p, align 8
   %ld.ptr = getelementptr inbounds i64, ptr %p, i64 1
   %tmp = load i64, ptr %ld.ptr, align 4
@@ -89,6 +120,11 @@ define i64 @stp_long_aa_after(i64 %x0, i64 %a, i64 %b, ptr nocapture %p) nounwin
 ; CHECK: stp s1, s2, [x0]
 ; CHECK: ret
 define float @stp_float_aa_after(float %s0, float %a, float %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_float_aa_after:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0, #4]
+; CHECK-NEXT:    stp s1, s2, [x0]
+; CHECK-NEXT:    ret
   store float %a, ptr %p, align 4
   %ld.ptr = getelementptr inbounds float, ptr %p, i64 1
   %tmp = load float, ptr %ld.ptr, align 4
@@ -102,6 +138,11 @@ define float @stp_float_aa_after(float %s0, float %a, float %b, ptr nocapture %p
 ; CHECK: stp d1, d2, [x0]
 ; CHECK: ret
 define double @stp_double_aa_after(double %d0, double %a, double %b, ptr nocapture %p) nounwind {
+; CHECK-LABEL: stp_double_aa_after:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0, #8]
+; CHECK-NEXT:    stp d1, d2, [x0]
+; CHECK-NEXT:    ret
   store double %a, ptr %p, align 8
   %ld.ptr = getelementptr inbounds double, ptr %p, i64 1
   %tmp = load double, ptr %ld.ptr, align 4
@@ -119,6 +160,13 @@ define double @stp_double_aa_after(double %d0, double %a, double %b, ptr nocaptu
 ; CHECK: fadd
 ; CHECK: stp q2, q0, [x{{[0-9]+}}, #32]
 define void @st1(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, ptr %base, i64 %index) {
+; CHECK-LABEL: st1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add x8, x0, x1, lsl #2
+; CHECK-NEXT:    stp q0, q1, [x8]
+; CHECK-NEXT:    fadd v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    stp q2, q0, [x8, #32]
+; CHECK-NEXT:    ret
 entry:
   %a0 = getelementptr inbounds float, ptr %base, i64 %index
   %b0 = getelementptr float, ptr %a0, i64 4
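[Editor's note: every function in this file follows the template of stp_int_aa above; a self-contained copy with the reasoning annotated:

  define i32 @stp_shape(i32 %a, i32 %b, ptr %p) nounwind {
    store i32 %a, ptr %p, align 4
    ; alias analysis proves this load (%p + 8) overlaps neither store, so the
    ; load/store optimizer may move it past the second store and pair the two
    ; stores: stp w0, w1, [x2] followed by ldr w0, [x2, #8]
    %ld.ptr = getelementptr inbounds i32, ptr %p, i64 2
    %tmp = load i32, ptr %ld.ptr, align 4
    %add.ptr = getelementptr inbounds i32, ptr %p, i64 1
    store i32 %b, ptr %add.ptr, align 4
    ret i32 %tmp
  }
]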
diff --git a/llvm/test/CodeGen/AArch64/arm64-stur.ll b/llvm/test/CodeGen/AArch64/arm64-stur.ll
index 2a74abb10226da..8073533528729f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stur.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stur.ll
@@ -1,10 +1,12 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -mcpu=cyclone -mattr=+slow-misaligned-128store | FileCheck %s
 %struct.X = type <{ i32, i64, i64 }>
 
 define void @foo1(ptr %p, i64 %val) nounwind {
 ; CHECK-LABEL: foo1:
-; CHECK: 	stur	w1, [x0, #-4]
-; CHECK-NEXT: 	ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stur w1, [x0, #-4]
+; CHECK-NEXT:    ret
   %tmp1 = trunc i64 %val to i32
   %ptr = getelementptr inbounds i32, ptr %p, i64 -1
   store i32 %tmp1, ptr %ptr, align 4
@@ -12,8 +14,9 @@ define void @foo1(ptr %p, i64 %val) nounwind {
 }
 define void @foo2(ptr %p, i64 %val) nounwind {
 ; CHECK-LABEL: foo2:
-; CHECK: 	sturh	w1, [x0, #-2]
-; CHECK-NEXT: 	ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sturh w1, [x0, #-2]
+; CHECK-NEXT:    ret
   %tmp1 = trunc i64 %val to i16
   %ptr = getelementptr inbounds i16, ptr %p, i64 -1
   store i16 %tmp1, ptr %ptr, align 2
@@ -21,8 +24,9 @@ define void @foo2(ptr %p, i64 %val) nounwind {
 }
 define void @foo3(ptr %p, i64 %val) nounwind {
 ; CHECK-LABEL: foo3:
-; CHECK: 	sturb	w1, [x0, #-1]
-; CHECK-NEXT: 	ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sturb w1, [x0, #-1]
+; CHECK-NEXT:    ret
   %tmp1 = trunc i64 %val to i8
   %ptr = getelementptr inbounds i8, ptr %p, i64 -1
   store i8 %tmp1, ptr %ptr, align 1
@@ -30,8 +34,9 @@ define void @foo3(ptr %p, i64 %val) nounwind {
 }
 define void @foo4(ptr %p, i32 %val) nounwind {
 ; CHECK-LABEL: foo4:
-; CHECK: 	sturh	w1, [x0, #-2]
-; CHECK-NEXT: 	ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sturh w1, [x0, #-2]
+; CHECK-NEXT:    ret
   %tmp1 = trunc i32 %val to i16
   %ptr = getelementptr inbounds i16, ptr %p, i32 -1
   store i16 %tmp1, ptr %ptr, align 2
@@ -39,8 +44,9 @@ define void @foo4(ptr %p, i32 %val) nounwind {
 }
 define void @foo5(ptr %p, i32 %val) nounwind {
 ; CHECK-LABEL: foo5:
-; CHECK: 	sturb	w1, [x0, #-1]
-; CHECK-NEXT: 	ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sturb w1, [x0, #-1]
+; CHECK-NEXT:    ret
   %tmp1 = trunc i32 %val to i8
   %ptr = getelementptr inbounds i8, ptr %p, i32 -1
   store i8 %tmp1, ptr %ptr, align 1
@@ -49,10 +55,10 @@ define void @foo5(ptr %p, i32 %val) nounwind {
 
 define void @foo(ptr nocapture %p) nounwind optsize ssp {
 ; CHECK-LABEL: foo:
-; CHECK-NOT: str
-; CHECK: stur    xzr, [x0, #12]
-; CHECK-NEXT: stur    xzr, [x0, #4]
-; CHECK-NEXT: ret
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stur xzr, [x0, #12]
+; CHECK-NEXT:    stur xzr, [x0, #4]
+; CHECK-NEXT:    ret
   %B = getelementptr inbounds %struct.X, ptr %p, i64 0, i32 1
   call void @llvm.memset.p0.i64(ptr %B, i8 0, i64 16, i1 false)
   ret void
@@ -69,6 +75,12 @@ declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
 ; CHECK: ext.16b v[[REG2:[0-9]+]], v[[REG]], v[[REG]], #8
 ; CHECK: str     d[[REG2]], [x0, #8]
 define void @unaligned(ptr %p, <4 x i32> %v) nounwind {
+; CHECK-LABEL: unaligned:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    ext.16b v0, v0, v0, #8
+; CHECK-NEXT:    str d0, [x0, #8]
+; CHECK-NEXT:    ret
   store <4 x i32> %v, ptr %p, align 4
   ret void
 }
@@ -76,6 +88,10 @@ define void @unaligned(ptr %p, <4 x i32> %v) nounwind {
 ; CHECK-LABEL: aligned:
 ; CHECK: str q0
 define void @aligned(ptr %p, <4 x i32> %v) nounwind {
+; CHECK-LABEL: aligned:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   store <4 x i32> %v, ptr %p
   ret void
 }
@@ -86,12 +102,20 @@ define void @aligned(ptr %p, <4 x i32> %v) nounwind {
 ; CHECK-LABEL: twobytealign:
 ; CHECK: str q0
 define void @twobytealign(ptr %p, <4 x i32> %v) nounwind {
+; CHECK-LABEL: twobytealign:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   store <4 x i32> %v, ptr %p, align 2
   ret void
 }
 ; CHECK-LABEL: onebytealign:
 ; CHECK: str q0
 define void @onebytealign(ptr %p, <4 x i32> %v) nounwind {
+; CHECK-LABEL: onebytealign:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    ret
   store <4 x i32> %v, ptr %p, align 1
   ret void
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-subsections.ll b/llvm/test/CodeGen/AArch64/arm64-subsections.ll
index 1449b857ec6d7a..24cad381f2b216 100644
--- a/llvm/test/CodeGen/AArch64/arm64-subsections.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-subsections.ll
@@ -1,5 +1,9 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-apple-ios7.0 -o - %s | FileCheck %s --check-prefix=CHECK-MACHO
 ; RUN: llc -mtriple=arm64-linux-gnu -o - %s | FileCheck %s --check-prefix=CHECK-ELF
 
 ; CHECK-MACHO: .subsections_via_symbols
 ; CHECK-ELF-NOT: .subsections_via_symbols
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-ELF: {{.*}}
+; CHECK-MACHO: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll b/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
index 49055e3975c057..b531bae32b2b5b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-apple-ios7.0 %s -o - | FileCheck %s
 ; RUN: llc -mtriple=arm64-apple-ios7.0 -global-isel -global-isel-abort=1 -verify-machineinstrs %s -o - | FileCheck %s
 
@@ -8,11 +9,21 @@
 ; x0.
 define i8 @get_var() {
 ; CHECK-LABEL: get_var:
-; CHECK: adrp x[[TLVPDESC_SLOT_HI:[0-9]+]], _var@TLVPPAGE
- ; CHECK: ldr x[[PTR:[0-9]+]], [x[[TLVPDESC_SLOT_HI]], _var@TLVPPAGEOFF]
- ; CHECK: ldr [[TLV_GET_ADDR:x[0-9]+]], [x[[PTR]]]
-; CHECK: blr [[TLV_GET_ADDR]]
-; CHECK: ldrb w0, [x0]
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x0, _var@TLVPPAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr x0, [x0, _var@TLVPPAGEOFF]
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    blr x8
+; CHECK-NEXT:    ldrb w0, [x0]
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 
   %val = load i8, ptr @var, align 1
   ret i8 %val
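[Editor's note: read together, the new checks document the full Darwin TLV access sequence the old ones only sampled; an annotated reading (interpretation mine):

  adrp x0, _var@TLVPPAGE          ; page of the thread-local variable descriptor
  ldr  x0, [x0, _var@TLVPPAGEOFF] ; x0 = &descriptor
  ldr  x8, [x0]                   ; first descriptor field is the accessor thunk
  blr  x8                         ; thunk returns the variable's address in x0
  ldrb w0, [x0]                   ; finally load the i8 value
]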
diff --git a/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll b/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
index c5a11449649764..3fe219863a5384 100644
--- a/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-tls-initial-exec.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -show-mc-encoding < %s | FileCheck %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -filetype=obj < %s | llvm-objdump -r - | FileCheck --check-prefix=CHECK-RELOC %s
 ; RUN: llc -mtriple=arm64-none-linux-gnu -verify-machineinstrs -show-mc-encoding -code-model=tiny < %s | FileCheck %s --check-prefix=CHECK-TINY
@@ -11,19 +12,28 @@
 
 define i32 @test_initial_exec() {
 ; CHECK-LABEL: test_initial_exec:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :gottprel:initial_exec_var // encoding: [0x08'A',A,A,0x90'A']
+; CHECK-NEXT:    // fixup A - offset: 0, value: :gottprel:initial_exec_var, kind: fixup_aarch64_pcrel_adrp_imm21
+; CHECK-NEXT:    ldr x8, [x8, :gottprel_lo12:initial_exec_var] // encoding: [0x08,0bAAAAAA01,0b01AAAAAA,0xf9]
+; CHECK-NEXT:    // fixup A - offset: 0, value: :gottprel_lo12:initial_exec_var, kind: fixup_aarch64_ldst_imm12_scale8
+; CHECK-NEXT:    mrs x9, TPIDR_EL0 // encoding: [0x49,0xd0,0x3b,0xd5]
+; CHECK-NEXT:    ldr w0, [x9, x8] // encoding: [0x20,0x69,0x68,0xb8]
+; CHECK-NEXT:    ret // encoding: [0xc0,0x03,0x5f,0xd6]
+;
+; CHECK-TINY-LABEL: test_initial_exec:
+; CHECK-TINY:       // %bb.0:
+; CHECK-TINY-NEXT:    ldr x8, :gottprel:initial_exec_var // encoding: [0bAAA01000,A,A,0x58]
+; CHECK-TINY-NEXT:    // fixup A - offset: 0, value: :gottprel:initial_exec_var, kind: fixup_aarch64_ldr_pcrel_imm19
+; CHECK-TINY-NEXT:    mrs x9, TPIDR_EL0 // encoding: [0x49,0xd0,0x3b,0xd5]
+; CHECK-TINY-NEXT:    ldr w0, [x9, x8] // encoding: [0x20,0x69,0x68,0xb8]
+; CHECK-TINY-NEXT:    ret // encoding: [0xc0,0x03,0x5f,0xd6]
   %val = load i32, ptr @initial_exec_var
 
-; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
-; CHECK: ldr x[[TP_OFFSET:[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]
-; CHECK: mrs x[[TP:[0-9]+]], TPIDR_EL0
-; CHECK: ldr w0, [x[[TP]], x[[TP_OFFSET]]]
 
 ; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
 ; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
 
-; CHECK-TINY: ldr x[[TP_OFFSET:[0-9]+]], :gottprel:initial_exec_var
-; CHECK-TINY: mrs x[[TP:[0-9]+]], TPIDR_EL0
-; CHECK-TINY: ldr w0, [x[[TP]], x[[TP_OFFSET]]]
 
 ; CHECK-TINY-RELOC: R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
 
@@ -32,19 +42,28 @@ define i32 @test_initial_exec() {
 
 define ptr @test_initial_exec_addr() {
 ; CHECK-LABEL: test_initial_exec_addr:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, :gottprel:initial_exec_var // encoding: [0x08'A',A,A,0x90'A']
+; CHECK-NEXT:    // fixup A - offset: 0, value: :gottprel:initial_exec_var, kind: fixup_aarch64_pcrel_adrp_imm21
+; CHECK-NEXT:    ldr x8, [x8, :gottprel_lo12:initial_exec_var] // encoding: [0x08,0bAAAAAA01,0b01AAAAAA,0xf9]
+; CHECK-NEXT:    // fixup A - offset: 0, value: :gottprel_lo12:initial_exec_var, kind: fixup_aarch64_ldst_imm12_scale8
+; CHECK-NEXT:    mrs x9, TPIDR_EL0 // encoding: [0x49,0xd0,0x3b,0xd5]
+; CHECK-NEXT:    add x0, x9, x8 // encoding: [0x20,0x01,0x08,0x8b]
+; CHECK-NEXT:    ret // encoding: [0xc0,0x03,0x5f,0xd6]
+;
+; CHECK-TINY-LABEL: test_initial_exec_addr:
+; CHECK-TINY:       // %bb.0:
+; CHECK-TINY-NEXT:    ldr x8, :gottprel:initial_exec_var // encoding: [0bAAA01000,A,A,0x58]
+; CHECK-TINY-NEXT:    // fixup A - offset: 0, value: :gottprel:initial_exec_var, kind: fixup_aarch64_ldr_pcrel_imm19
+; CHECK-TINY-NEXT:    mrs x9, TPIDR_EL0 // encoding: [0x49,0xd0,0x3b,0xd5]
+; CHECK-TINY-NEXT:    add x0, x9, x8 // encoding: [0x20,0x01,0x08,0x8b]
+; CHECK-TINY-NEXT:    ret // encoding: [0xc0,0x03,0x5f,0xd6]
   ret ptr @initial_exec_var
 
-; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
-; CHECK: ldr [[TP_OFFSET:x[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]
-; CHECK: mrs [[TP:x[0-9]+]], TPIDR_EL0
-; CHECK: add x0, [[TP]], [[TP_OFFSET]]
 
 ; CHECK-RELOC: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
 ; CHECK-RELOC: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
 
-; CHECK-TINY: ldr x[[TP_OFFSET:[0-9]+]], :gottprel:initial_exec_var
-; CHECK-TINY: mrs [[TP:x[0-9]+]], TPIDR_EL0
-; CHECK-TINY: add x0, [[TP]], x[[TP_OFFSET]]
 
 ; CHECK-TINY-RELOC: R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-uminv.ll b/llvm/test/CodeGen/AArch64/arm64-uminv.ll
index 98b3d6de62976f..47ee750a3d3e9d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-uminv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-uminv.ll
@@ -1,12 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 ; RUN: llc < %s -global-isel=1 -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 
 define i32 @vmin_u8x8(<8 x i8> %a) nounwind ssp {
-; CHECK-LABEL: vmin_u8x8:
-; CHECK: uminv.8b        b[[REG:[0-9]+]], v0
-; CHECK: fmov    [[REG2:w[0-9]+]], s[[REG]]
-; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a) nounwind
   %tmp = trunc i32 %vminv.i to i8
@@ -25,11 +21,6 @@ return:
 declare i32 @bar(...)
 
 define i32 @vmin_u4x16(<4 x i16> %a) nounwind ssp {
-; CHECK-LABEL: vmin_u4x16:
-; CHECK: uminv.4h        h[[REG:[0-9]+]], v0
-; CHECK: fmov    [[REG2:w[0-9]+]], s[[REG]]
-; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) nounwind
   %tmp = trunc i32 %vminv.i to i16
@@ -46,11 +37,6 @@ return:
 }
 
 define i32 @vmin_u8x16(<8 x i16> %a) nounwind ssp {
-; CHECK-LABEL: vmin_u8x16:
-; CHECK: uminv.8h        h[[REG:[0-9]+]], v0
-; CHECK: fmov    [[REG2:w[0-9]+]], s[[REG]]
-; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) nounwind
   %tmp = trunc i32 %vminv.i to i16
@@ -67,11 +53,6 @@ return:
 }
 
 define i32 @vmin_u16x8(<16 x i8> %a) nounwind ssp {
-; CHECK-LABEL: vmin_u16x8:
-; CHECK: uminv.16b        b[[REG:[0-9]+]], v0
-; CHECK: fmov     [[REG2:w[0-9]+]], s[[REG]]
-; CHECK-NOT: and
-; CHECK: cbz     [[REG2]],
 entry:
   %vminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a) nounwind
   %tmp = trunc i32 %vminv.i to i8
@@ -88,10 +69,6 @@ return:
 }
 
 define <8 x i8> @test_vminv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
-; CHECK-LABEL: test_vminv_u8_used_by_laneop:
-; CHECK: uminv.8b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a2)
   %1 = trunc i32 %0 to i8
@@ -100,10 +77,6 @@ entry:
 }
 
 define <4 x i16> @test_vminv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
-; CHECK-LABEL: test_vminv_u16_used_by_laneop:
-; CHECK: uminv.4h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a2)
   %1 = trunc i32 %0 to i16
@@ -112,10 +85,6 @@ entry:
 }
 
 define <2 x i32> @test_vminv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
-; CHECK-LABEL: test_vminv_u32_used_by_laneop:
-; CHECK: uminp.2s v[[REGNUM:[0-9]+]], v1, v1
-; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
-; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> %a2)
   %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
@@ -123,10 +92,6 @@ entry:
 }
 
 define <16 x i8> @test_vminvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
-; CHECK-LABEL: test_vminvq_u8_used_by_laneop:
-; CHECK: uminv.16b b[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a2)
   %1 = trunc i32 %0 to i8
@@ -135,10 +100,6 @@ entry:
 }
 
 define <8 x i16> @test_vminvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
-; CHECK-LABEL: test_vminvq_u16_used_by_laneop:
-; CHECK: uminv.8h h[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a2)
   %1 = trunc i32 %0 to i16
@@ -147,10 +108,6 @@ entry:
 }
 
 define <4 x i32> @test_vminvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
-; CHECK-LABEL: test_vminvq_u32_used_by_laneop:
-; CHECK: uminv.4s s[[REGNUM:[0-9]+]], v1
-; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
-; CHECK-NEXT: ret
 entry:
   %0 = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a2)
   %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
@@ -162,3 +119,5 @@ declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>) nounwind readnone
 declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>) nounwind readnone
 declare i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32>) nounwind readnone
 declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>) nounwind readnone
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-vaddlv.ll b/llvm/test/CodeGen/AArch64/arm64-vaddlv.ll
index 903a9e9b5010fb..033e94c53afe3a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vaddlv.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vaddlv.ll
@@ -1,20 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 define i64 @test_vaddlv_s32(<2 x i32> %a1) nounwind readnone {
-; CHECK: test_vaddlv_s32
-; CHECK: saddlp.1d v[[REGNUM:[0-9]+]], v[[INREG:[0-9]+]]
-; CHECK-NEXT: fmov x[[OUTREG:[0-9]+]], d[[REGNUM]]
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_vaddlv_s32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    saddlp.1d v0, v0
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
 entry:
   %vaddlv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v2i32(<2 x i32> %a1) nounwind
   ret i64 %vaddlv.i
 }
 
 define i64 @test_vaddlv_u32(<2 x i32> %a1) nounwind readnone {
-; CHECK: test_vaddlv_u32
-; CHECK: uaddlp.1d v[[REGNUM:[0-9]+]], v[[INREG:[0-9]+]]
-; CHECK-NEXT: fmov x[[OUTREG:[0-9]+]], d[[REGNUM]]
-; CHECK-NEXT: ret
+; CHECK-LABEL: test_vaddlv_u32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uaddlp.1d v0, v0
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
 entry:
   %vaddlv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v2i32(<2 x i32> %a1) nounwind
   ret i64 %vaddlv.i
diff --git a/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll b/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
index e688af7fdeca38..af5513e37a126c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-variadic-aapcs.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -aarch64-load-store-renaming=true -verify-machineinstrs -mtriple=arm64-linux-gnu -pre-RA-sched=linearize -enable-misched=false -disable-post-ra < %s | FileCheck %s
 
 %va_list = type {ptr, ptr, ptr, i32, i32}
@@ -8,33 +9,41 @@ declare void @llvm.va_start(ptr)
 
 define dso_local void @test_simple(i32 %n, ...) {
 ; CHECK-LABEL: test_simple:
-; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
-; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #192
+; CHECK-NEXT:    .cfi_def_cfa_offset 192
+; CHECK-NEXT:    add x8, sp, #192
+; CHECK-NEXT:    adrp x9, var
+; CHECK-NEXT:    add x9, x9, :lo12:var
+; CHECK-NEXT:    stp q6, q7, [sp, #96]
+; CHECK-NEXT:    stp q4, q5, [sp, #64]
+; CHECK-NEXT:    stp q2, q3, [sp, #32]
+; CHECK-NEXT:    stp q0, q1, [sp]
+; CHECK-NEXT:    stp x6, x7, [sp, #176]
+; CHECK-NEXT:    stp x4, x5, [sp, #160]
+; CHECK-NEXT:    stp x2, x3, [sp, #144]
+; CHECK-NEXT:    str x1, [sp, #136]
+; CHECK-NEXT:    str x8, [x9]
+; CHECK-NEXT:    add x8, sp, #136
+; CHECK-NEXT:    add x10, x8, #56
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #128
+; CHECK-NEXT:    stp x10, x8, [x9, #8]
+; CHECK-NEXT:    mov x8, #-56 // =0xffffffffffffffc8
+; CHECK-NEXT:    movk x8, #65408, lsl #32
+; CHECK-NEXT:    str x8, [x9, #24]
+; CHECK-NEXT:    add sp, sp, #192
+; CHECK-NEXT:    ret
 
-; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
 
-; CHECK-DAG: stp x6, x7, [sp, #
 ; ... omit middle ones ...
-; CHECK-DAG: str x1, [sp, #[[GR_BASE:[0-9]+]]]
 
-; CHECK-DAG: stp q0, q1, [sp]
 ; ... omit middle ones ...
-; CHECK-DAG: stp q6, q7, [sp, #
 
-; CHECK: str [[STACK_TOP]], [x[[VA_LIST]]]
 
-; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
-; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #56
 
 
-; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
-; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #128
-; CHECK: stp [[GR_TOP]], [[VR_TOP]], [x[[VA_LIST]], #8]
 
-; CHECK: mov     [[GRVR:x[0-9]+]], #-56
-; CHECK: movk    [[GRVR]], #65408, lsl #32
-; CHECK: str     [[GRVR]], [x[[VA_LIST]], #24]
 
   call void @llvm.va_start(ptr @var)
 
@@ -43,32 +52,39 @@ define dso_local void @test_simple(i32 %n, ...) {
 
 define dso_local void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
 ; CHECK-LABEL: test_fewargs:
-; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
-; CHECK: add [[STACK_TOP:x[0-9]+]], sp, #[[STACKSIZE]]
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #160
+; CHECK-NEXT:    .cfi_def_cfa_offset 160
+; CHECK-NEXT:    add x8, sp, #160
+; CHECK-NEXT:    adrp x9, var
+; CHECK-NEXT:    add x9, x9, :lo12:var
+; CHECK-NEXT:    stp q6, q7, [sp, #80]
+; CHECK-NEXT:    stp q4, q5, [sp, #48]
+; CHECK-NEXT:    stp q2, q3, [sp, #16]
+; CHECK-NEXT:    str q1, [sp]
+; CHECK-NEXT:    stp x6, x7, [sp, #144]
+; CHECK-NEXT:    stp x4, x5, [sp, #128]
+; CHECK-NEXT:    str x3, [sp, #120]
+; CHECK-NEXT:    str x8, [x9]
+; CHECK-NEXT:    add x8, sp, #120
+; CHECK-NEXT:    add x10, x8, #40
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #112
+; CHECK-NEXT:    stp x10, x8, [x9, #8]
+; CHECK-NEXT:    mov x8, #-40 // =0xffffffffffffffd8
+; CHECK-NEXT:    movk x8, #65424, lsl #32
+; CHECK-NEXT:    str x8, [x9, #24]
+; CHECK-NEXT:    add sp, sp, #160
+; CHECK-NEXT:    ret
 
-; CHECK: adrp x[[VA_LIST_HI:[0-9]+]], var
-; CHECK: add x[[VA_LIST:[0-9]+]], {{x[0-9]+}}, :lo12:var
 
-; CHECK-DAG: stp x6, x7, [sp, #
 ; ... omit middle ones ...
-; CHECK-DAG: str x3, [sp, #[[GR_BASE:[0-9]+]]]
 
-; CHECK-DAG: stp q6, q7, [sp, #80]
 ; ... omit middle ones ...
-; CHECK-DAG: str q1, [sp]
 
-; CHECK: str [[STACK_TOP]], [x[[VA_LIST]]]
 
-; CHECK: add [[GR_TOPTMP:x[0-9]+]], sp, #[[GR_BASE]]
-; CHECK: add [[GR_TOP:x[0-9]+]], [[GR_TOPTMP]], #40
 
-; CHECK: mov [[VR_TOPTMP:x[0-9]+]], sp
-; CHECK: add [[VR_TOP:x[0-9]+]], [[VR_TOPTMP]], #112
-; CHECK: stp [[GR_TOP]], [[VR_TOP]], [x[[VA_LIST]], #8]
 
-; CHECK: mov  [[GRVR_OFFS:x[0-9]+]], #-40
-; CHECK: movk [[GRVR_OFFS]], #65424, lsl #32
-; CHECK: str  [[GRVR_OFFS]], [x[[VA_LIST]], #24]
 
   call void @llvm.va_start(ptr @var)
 
@@ -77,12 +93,15 @@ define dso_local void @test_fewargs(i32 %n, i32 %n1, i32 %n2, float %m, ...) {
 
 define dso_local void @test_nospare([8 x i64], [8 x float], ...) {
 ; CHECK-LABEL: test_nospare:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    adrp x9, var
+; CHECK-NEXT:    add x9, x9, :lo12:var
+; CHECK-NEXT:    str x8, [x9]
+; CHECK-NEXT:    str xzr, [x9, #24]
+; CHECK-NEXT:    ret
 
   call void @llvm.va_start(ptr @var)
-; CHECK-NOT: sub sp, sp
-; CHECK: mov [[STACK:x[0-9]+]], sp
-; CHECK: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
-; CHECK: str [[STACK]], [x[[VAR]]]
 
   ret void
 }
@@ -91,13 +110,25 @@ define dso_local void @test_nospare([8 x i64], [8 x float], ...) {
 ; __stack field should point just past them.
 define dso_local void @test_offsetstack([8 x i64], [2 x i64], [3 x float], ...) {
 ; CHECK-LABEL: test_offsetstack:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #80
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    mov x8, #281131379326976 // =0xffb000000000
+; CHECK-NEXT:    movk x8, #65535, lsl #48
+; CHECK-NEXT:    adrp x9, var
+; CHECK-NEXT:    add x9, x9, :lo12:var
+; CHECK-NEXT:    stp q6, q7, [sp, #48]
+; CHECK-NEXT:    stp q4, q5, [sp, #16]
+; CHECK-NEXT:    str q3, [sp]
+; CHECK-NEXT:    str x8, [x9, #24]
+; CHECK-NEXT:    mov x8, sp
+; CHECK-NEXT:    add x8, x8, #80
+; CHECK-NEXT:    str x8, [x9, #16]
+; CHECK-NEXT:    add x8, sp, #96
+; CHECK-NEXT:    str x8, [x9]
+; CHECK-NEXT:    add sp, sp, #80
+; CHECK-NEXT:    ret
 
-; CHECK-DAG: stp {{q[0-9]+}}, {{q[0-9]+}}, [sp, #48]
-; CHECK-DAG: stp {{q[0-9]+}}, {{q[0-9]+}}, [sp, #16]
-; CHECK-DAG: str {{q[0-9]+}}, [sp]
-; CHECK-DAG: add [[STACK_TOP:x[0-9]+]], sp, #96
-; CHECK-DAG: add x[[VAR:[0-9]+]], {{x[0-9]+}}, :lo12:var
-; CHECK-DAG: str [[STACK_TOP]], [x[[VAR]]]
 
   call void @llvm.va_start(ptr @var)
   ret void
@@ -107,12 +138,12 @@ declare void @llvm.va_end(ptr)
 
 define dso_local void @test_va_end() nounwind {
 ; CHECK-LABEL: test_va_end:
-; CHECK-NEXT: %bb.0
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
 
   call void @llvm.va_end(ptr @var)
 
   ret void
-; CHECK-NEXT: ret
 }
 
 declare void @llvm.va_copy(ptr %dest, ptr %src)
@@ -121,13 +152,16 @@ declare void @llvm.va_copy(ptr %dest, ptr %src)
 
 define dso_local void @test_va_copy() {
 ; CHECK-LABEL: test_va_copy:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, var
+; CHECK-NEXT:    add x8, x8, :lo12:var
+; CHECK-NEXT:    ldp q0, q1, [x8]
+; CHECK-NEXT:    adrp x9, second_list
+; CHECK-NEXT:    add x9, x9, :lo12:second_list
+; CHECK-NEXT:    stp q0, q1, [x9]
+; CHECK-NEXT:    ret
   call void @llvm.va_copy(ptr @second_list, ptr @var)
 
-; CHECK: add x[[SRC:[0-9]+]], {{x[0-9]+}}, :lo12:var
 
-; CHECK: ldp [[BLOCKA:q[0-9]+]], [[BLOCKB:q[0-9]+]], [x[[SRC]]]
-; CHECK: add x[[DST:[0-9]+]], {{x[0-9]+}}, :lo12:second_list
-; CHECK: stp [[BLOCKA]], [[BLOCKB]], [x[[DST]]]
   ret void
-; CHECK: ret
 }
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmovn.ll b/llvm/test/CodeGen/AArch64/arm64-vmovn.ll
index 8e8642f90f1333..0b021f902dc9b6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmovn.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmovn.ll
@@ -1,114 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 define <8 x i8> @xtn8b(<8 x i16> %A) nounwind {
-;CHECK-LABEL: xtn8b:
-;CHECK-NOT: ld1
-;CHECK: xtn.8b v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: xtn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    xtn.8b v0, v0
+; CHECK-NEXT:    ret
   %tmp3 = trunc <8 x i16> %A to <8 x i8>
         ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @xtn4h(<4 x i32> %A) nounwind {
-;CHECK-LABEL: xtn4h:
-;CHECK-NOT: ld1
-;CHECK: xtn.4h v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: xtn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    xtn.4h v0, v0
+; CHECK-NEXT:    ret
   %tmp3 = trunc <4 x i32> %A to <4 x i16>
         ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @xtn2s(<2 x i64> %A) nounwind {
-;CHECK-LABEL: xtn2s:
-;CHECK-NOT: ld1
-;CHECK: xtn.2s v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: xtn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    xtn.2s v0, v0
+; CHECK-NEXT:    ret
   %tmp3 = trunc <2 x i64> %A to <2 x i32>
         ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @xtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
-;CHECK-LABEL: xtn2_16b:
-;CHECK-NOT: ld1
-;CHECK: xtn2.16b v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: xtn2_16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    xtn2.16b v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = trunc <8 x i16> %A to <8 x i8>
         %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %res
 }
 
 define <8 x i16> @xtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
-;CHECK-LABEL: xtn2_8h:
-;CHECK-NOT: ld1
-;CHECK: xtn2.8h v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: xtn2_8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    xtn2.8h v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = trunc <4 x i32> %A to <4 x i16>
         %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %res
 }
 
 define <4 x i32> @xtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
-;CHECK-LABEL: xtn2_4s:
-;CHECK-NOT: ld1
-;CHECK: xtn2.4s v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: xtn2_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    xtn2.4s v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = trunc <2 x i64> %A to <2 x i32>
         %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %res
 }
 
 define <8 x i8> @sqxtn8b(<8 x i16> %A) nounwind {
-;CHECK-LABEL: sqxtn8b:
-;CHECK-NOT: ld1
-;CHECK: sqxtn.8b v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sqxtn.8b v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %A)
         ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqxtn4h(<4 x i32> %A) nounwind {
-;CHECK-LABEL: sqxtn4h:
-;CHECK-NOT: ld1
-;CHECK: sqxtn.4h v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sqxtn.4h v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %A)
         ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqxtn2s(<2 x i64> %A) nounwind {
-;CHECK-LABEL: sqxtn2s:
-;CHECK-NOT: ld1
-;CHECK: sqxtn.2s v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sqxtn.2s v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %A)
         ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
-;CHECK-LABEL: sqxtn2_16b:
-;CHECK-NOT: ld1
-;CHECK: sqxtn2.16b v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtn2_16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    sqxtn2.16b v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtn.v8i8(<8 x i16> %A)
         %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %res
 }
 
 define <8 x i16> @sqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
-;CHECK-LABEL: sqxtn2_8h:
-;CHECK-NOT: ld1
-;CHECK: sqxtn2.8h v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtn2_8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    sqxtn2.8h v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32> %A)
         %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %res
 }
 
 define <4 x i32> @sqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
-;CHECK-LABEL: sqxtn2_4s:
-;CHECK-NOT: ld1
-;CHECK: sqxtn2.4s v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtn2_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    sqxtn2.4s v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64> %A)
         %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %res
@@ -119,57 +126,60 @@ declare <4 x i16> @llvm.aarch64.neon.sqxtn.v4i16(<4 x i32>) nounwind readnone
 declare <2 x i32> @llvm.aarch64.neon.sqxtn.v2i32(<2 x i64>) nounwind readnone
 
 define <8 x i8> @uqxtn8b(<8 x i16> %A) nounwind {
-;CHECK-LABEL: uqxtn8b:
-;CHECK-NOT: ld1
-;CHECK: uqxtn.8b v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: uqxtn8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqxtn.8b v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %A)
         ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @uqxtn4h(<4 x i32> %A) nounwind {
-;CHECK-LABEL: uqxtn4h:
-;CHECK-NOT: ld1
-;CHECK: uqxtn.4h v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: uqxtn4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqxtn.4h v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %A)
         ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @uqxtn2s(<2 x i64> %A) nounwind {
-;CHECK-LABEL: uqxtn2s:
-;CHECK-NOT: ld1
-;CHECK: uqxtn.2s v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: uqxtn2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uqxtn.2s v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %A)
         ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @uqxtn2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
-;CHECK-LABEL: uqxtn2_16b:
-;CHECK-NOT: ld1
-;CHECK: uqxtn2.16b v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: uqxtn2_16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    uqxtn2.16b v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.uqxtn.v8i8(<8 x i16> %A)
         %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %res
 }
 
 define <8 x i16> @uqxtn2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
-;CHECK-LABEL: uqxtn2_8h:
-;CHECK-NOT: ld1
-;CHECK: uqxtn2.8h v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: uqxtn2_8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    uqxtn2.8h v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32> %A)
         %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %res
 }
 
 define <4 x i32> @uqxtn2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
-;CHECK-LABEL: uqxtn2_4s:
-;CHECK-NOT: ld1
-;CHECK: uqxtn2.4s v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: uqxtn2_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    uqxtn2.4s v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64> %A)
         %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %res
@@ -180,57 +190,60 @@ declare <4 x i16> @llvm.aarch64.neon.uqxtn.v4i16(<4 x i32>) nounwind readnone
 declare <2 x i32> @llvm.aarch64.neon.uqxtn.v2i32(<2 x i64>) nounwind readnone
 
 define <8 x i8> @sqxtun8b(<8 x i16> %A) nounwind {
-;CHECK-LABEL: sqxtun8b:
-;CHECK-NOT: ld1
-;CHECK: sqxtun.8b v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtun8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sqxtun.8b v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %A)
         ret <8 x i8> %tmp3
 }
 
 define <4 x i16> @sqxtun4h(<4 x i32> %A) nounwind {
-;CHECK-LABEL: sqxtun4h:
-;CHECK-NOT: ld1
-;CHECK: sqxtun.4h v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtun4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sqxtun.4h v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %A)
         ret <4 x i16> %tmp3
 }
 
 define <2 x i32> @sqxtun2s(<2 x i64> %A) nounwind {
-;CHECK-LABEL: sqxtun2s:
-;CHECK-NOT: ld1
-;CHECK: sqxtun.2s v0, v0
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtun2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sqxtun.2s v0, v0
+; CHECK-NEXT:    ret
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %A)
         ret <2 x i32> %tmp3
 }
 
 define <16 x i8> @sqxtun2_16b(<8 x i8> %ret, <8 x i16> %A) nounwind {
-;CHECK-LABEL: sqxtun2_16b:
-;CHECK-NOT: ld1
-;CHECK: sqxtun2.16b v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtun2_16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    sqxtun2.16b v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <8 x i8> @llvm.aarch64.neon.sqxtun.v8i8(<8 x i16> %A)
         %res = shufflevector <8 x i8> %ret, <8 x i8> %tmp3, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
         ret <16 x i8> %res
 }
 
 define <8 x i16> @sqxtun2_8h(<4 x i16> %ret, <4 x i32> %A) nounwind {
-;CHECK-LABEL: sqxtun2_8h:
-;CHECK-NOT: ld1
-;CHECK: sqxtun2.8h v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtun2_8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    sqxtun2.8h v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <4 x i16> @llvm.aarch64.neon.sqxtun.v4i16(<4 x i32> %A)
         %res = shufflevector <4 x i16> %ret, <4 x i16> %tmp3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
         ret <8 x i16> %res
 }
 
 define <4 x i32> @sqxtun2_4s(<2 x i32> %ret, <2 x i64> %A) nounwind {
-;CHECK-LABEL: sqxtun2_4s:
-;CHECK-NOT: ld1
-;CHECK: sqxtun2.4s v0, v1
-;CHECK-NEXT: ret
+; CHECK-LABEL: sqxtun2_4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    sqxtun2.4s v0, v1
+; CHECK-NEXT:    ret
         %tmp3 = call <2 x i32> @llvm.aarch64.neon.sqxtun.v2i32(<2 x i64> %A)
         %res = shufflevector <2 x i32> %ret, <2 x i32> %tmp3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
         ret <4 x i32> %res
diff --git a/llvm/test/CodeGen/AArch64/arm64-vqsub.ll b/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
index dee21291fa149c..95eba008198713 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vqsub.ll
@@ -1,8 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck %s
 
 define <8 x i8> @sqsub8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub8b:
-;CHECK: sqsub.8b
+; CHECK-LABEL: sqsub8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqsub.8b v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -10,8 +15,12 @@ define <8 x i8> @sqsub8b(ptr %A, ptr %B) nounwind {
 }
 
 define <4 x i16> @sqsub4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub4h:
-;CHECK: sqsub.4h
+; CHECK-LABEL: sqsub4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqsub.4h v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <4 x i16>, ptr %A
 	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -19,8 +28,12 @@ define <4 x i16> @sqsub4h(ptr %A, ptr %B) nounwind {
 }
 
 define <2 x i32> @sqsub2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub2s:
-;CHECK: sqsub.2s
+; CHECK-LABEL: sqsub2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    sqsub.2s v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <2 x i32>, ptr %A
 	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -28,8 +41,12 @@ define <2 x i32> @sqsub2s(ptr %A, ptr %B) nounwind {
 }
 
 define <8 x i8> @uqsub8b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub8b:
-;CHECK: uqsub.8b
+; CHECK-LABEL: uqsub8b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqsub.8b v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <8 x i8>, ptr %A
 	%tmp2 = load <8 x i8>, ptr %B
 	%tmp3 = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -37,8 +54,12 @@ define <8 x i8> @uqsub8b(ptr %A, ptr %B) nounwind {
 }
 
 define <4 x i16> @uqsub4h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub4h:
-;CHECK: uqsub.4h
+; CHECK-LABEL: uqsub4h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqsub.4h v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <4 x i16>, ptr %A
 	%tmp2 = load <4 x i16>, ptr %B
 	%tmp3 = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -46,8 +67,12 @@ define <4 x i16> @uqsub4h(ptr %A, ptr %B) nounwind {
 }
 
 define <2 x i32> @uqsub2s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub2s:
-;CHECK: uqsub.2s
+; CHECK-LABEL: uqsub2s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ldr d1, [x1]
+; CHECK-NEXT:    uqsub.2s v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <2 x i32>, ptr %A
 	%tmp2 = load <2 x i32>, ptr %B
 	%tmp3 = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -55,8 +80,12 @@ define <2 x i32> @uqsub2s(ptr %A, ptr %B) nounwind {
 }
 
 define <16 x i8> @sqsub16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub16b:
-;CHECK: sqsub.16b
+; CHECK-LABEL: sqsub16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.16b v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <16 x i8>, ptr %A
 	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
@@ -64,8 +93,12 @@ define <16 x i8> @sqsub16b(ptr %A, ptr %B) nounwind {
 }
 
 define <8 x i16> @sqsub8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub8h:
-;CHECK: sqsub.8h
+; CHECK-LABEL: sqsub8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.8h v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <8 x i16>, ptr %A
 	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -73,8 +106,12 @@ define <8 x i16> @sqsub8h(ptr %A, ptr %B) nounwind {
 }
 
 define <4 x i32> @sqsub4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub4s:
-;CHECK: sqsub.4s
+; CHECK-LABEL: sqsub4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.4s v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <4 x i32>, ptr %A
 	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -82,8 +119,12 @@ define <4 x i32> @sqsub4s(ptr %A, ptr %B) nounwind {
 }
 
 define <2 x i64> @sqsub2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: sqsub2d:
-;CHECK: sqsub.2d
+; CHECK-LABEL: sqsub2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    sqsub.2d v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <2 x i64>, ptr %A
 	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
@@ -91,8 +132,12 @@ define <2 x i64> @sqsub2d(ptr %A, ptr %B) nounwind {
 }
 
 define <16 x i8> @uqsub16b(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub16b:
-;CHECK: uqsub.16b
+; CHECK-LABEL: uqsub16b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.16b v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <16 x i8>, ptr %A
 	%tmp2 = load <16 x i8>, ptr %B
 	%tmp3 = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
@@ -100,8 +145,12 @@ define <16 x i8> @uqsub16b(ptr %A, ptr %B) nounwind {
 }
 
 define <8 x i16> @uqsub8h(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub8h:
-;CHECK: uqsub.8h
+; CHECK-LABEL: uqsub8h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.8h v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <8 x i16>, ptr %A
 	%tmp2 = load <8 x i16>, ptr %B
 	%tmp3 = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
@@ -109,8 +158,12 @@ define <8 x i16> @uqsub8h(ptr %A, ptr %B) nounwind {
 }
 
 define <4 x i32> @uqsub4s(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub4s:
-;CHECK: uqsub.4s
+; CHECK-LABEL: uqsub4s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.4s v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <4 x i32>, ptr %A
 	%tmp2 = load <4 x i32>, ptr %B
 	%tmp3 = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
@@ -118,8 +171,12 @@ define <4 x i32> @uqsub4s(ptr %A, ptr %B) nounwind {
 }
 
 define <2 x i64> @uqsub2d(ptr %A, ptr %B) nounwind {
-;CHECK-LABEL: uqsub2d:
-;CHECK: uqsub.2d
+; CHECK-LABEL: uqsub2d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q1, [x1]
+; CHECK-NEXT:    uqsub.2d v0, v0, v1
+; CHECK-NEXT:    ret
 	%tmp1 = load <2 x i64>, ptr %A
 	%tmp2 = load <2 x i64>, ptr %B
 	%tmp3 = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
diff --git a/llvm/test/CodeGen/AArch64/arm64-vsetcc_fp.ll b/llvm/test/CodeGen/AArch64/arm64-vsetcc_fp.ll
index 32e24832d8aa75..1c528e7cbd1435 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vsetcc_fp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vsetcc_fp.ll
@@ -1,11 +1,9 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -asm-verbose=false | FileCheck %s
 define <2 x i32> @fcmp_one(<2 x float> %x, <2 x float> %y) nounwind optsize readnone {
-; CHECK-LABEL: fcmp_one:
-; CHECK-NEXT: fcmgt.2s [[REG:v[0-9]+]], v0, v1
-; CHECK-NEXT: fcmgt.2s [[REG2:v[0-9]+]], v1, v0
-; CHECK-NEXT: orr.8b v0, [[REG2]], [[REG]]
-; CHECK-NEXT: ret
   %tmp = fcmp one <2 x float> %x, %y
   %or = sext <2 x i1> %tmp to <2 x i32>
   ret <2 x i32> %or
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
index 6c3cd4766d7998..3f7d906243cde1 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zero-cycle-zeroing.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=-zcz-gp,+no-zcz-fp      | FileCheck %s -check-prefixes=ALL,NONEGP,NONEFP
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+zcz                    | FileCheck %s -check-prefixes=ALL,ZEROGP,ZEROFP
 ; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+zcz -mattr=+fullfp16   | FileCheck %s -check-prefixes=ALL,ZEROGP,ZERO16
@@ -16,57 +17,98 @@ declare void @barl(i64, i64)
 declare void @barf(float, float)
 
 define void @t1() nounwind ssp {
+; ZEROFP-LABEL: t1:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi d0, #0000000000000000
+; ZEROFP-NEXT:    movi d1, #0000000000000000
+; ZEROFP-NEXT:    movi d2, #0000000000000000
+; ZEROFP-NEXT:    movi v3.2d, #0000000000000000
+; ZEROFP-NEXT:    b bar
+;
+; ZERO16-LABEL: t1:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi d0, #0000000000000000
+; ZERO16-NEXT:    movi d1, #0000000000000000
+; ZERO16-NEXT:    movi d2, #0000000000000000
+; ZERO16-NEXT:    movi v3.2d, #0000000000000000
+; ZERO16-NEXT:    b bar
+;
+; NONE16-LABEL: t1:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    fmov h0, wzr
+; NONE16-NEXT:    fmov s1, wzr
+; NONE16-NEXT:    fmov d2, xzr
+; NONE16-NEXT:    movi.16b v3, #0
+; NONE16-NEXT:    b _bar
 entry:
-; ALL-LABEL: t1:
-; ALL-NOT: fmov
-; NONEFP-DAG: fmov s0, wzr
-; NONEFP-DAG: fmov s1, wzr
-; NONEFP-DAG: fmov d2, xzr
-; NONEFP-DAG: movi{{(.16b)?}} v3{{(.2d)?}}, #0
-; NONE16: fmov h0, wzr
-; NONE16: fmov s1, wzr
-; NONE16: fmov d2, xzr
-; NONE16: movi{{(.16b)?}} v3{{(.2d)?}}, #0
-; ZEROFP-DAG: movi d0, #0
-; ZEROFP-DAG: movi d1, #0
-; ZEROFP-DAG: movi d2, #0
-; ZEROFP-DAG: movi v3.2d, #0
-; ZERO16: movi d0, #0
-; ZERO16: movi d1, #0
-; ZERO16: movi d2, #0
-; ZERO16: movi v3.2d, #0
   tail call void @bar(half 0.000000e+00, float 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>) nounwind
   ret void
 }
 
 define void @t2() nounwind ssp {
+; NONEGP-LABEL: t2:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov w0, wzr
+; NONEGP-NEXT:    mov w1, wzr
+; NONEGP-NEXT:    b bari
+;
+; ZERO16-LABEL: t2:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov w0, #0 // =0x0
+; ZERO16-NEXT:    mov w1, #0 // =0x0
+; ZERO16-NEXT:    b bari
+;
+; NONE16-LABEL: t2:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov w0, #0 ; =0x0
+; NONE16-NEXT:    mov w1, #0 ; =0x0
+; NONE16-NEXT:    b _bari
 entry:
-; ALL-LABEL: t2:
-; NONEGP: mov w0, wzr
-; NONEGP: mov w1, wzr
-; ZEROGP: mov w0, #0
-; ZEROGP: mov w1, #0
   tail call void @bari(i32 0, i32 0) nounwind
   ret void
 }
 
 define void @t3() nounwind ssp {
+; NONEGP-LABEL: t3:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov x0, xzr
+; NONEGP-NEXT:    mov x1, xzr
+; NONEGP-NEXT:    b barl
+;
+; ZERO16-LABEL: t3:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov x0, #0 // =0x0
+; ZERO16-NEXT:    mov x1, #0 // =0x0
+; ZERO16-NEXT:    b barl
+;
+; NONE16-LABEL: t3:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov x0, #0 ; =0x0
+; NONE16-NEXT:    mov x1, #0 ; =0x0
+; NONE16-NEXT:    b _barl
 entry:
-; ALL-LABEL: t3:
-; NONEGP: mov x0, xzr
-; NONEGP: mov x1, xzr
-; ZEROGP: mov x0, #0
-; ZEROGP: mov x1, #0
   tail call void @barl(i64 0, i64 0) nounwind
   ret void
 }
 
 define void @t4() nounwind ssp {
-; ALL-LABEL: t4:
-; NONEFP: fmov s{{[0-3]+}}, wzr
-; NONEFP: fmov s{{[0-3]+}}, wzr
-; ZEROFP: movi d0, #0
-; ZEROFP: movi d1, #0
+; ZEROFP-LABEL: t4:
+; ZEROFP:       // %bb.0:
+; ZEROFP-NEXT:    movi d0, #0000000000000000
+; ZEROFP-NEXT:    movi d1, #0000000000000000
+; ZEROFP-NEXT:    b barf
+;
+; ZERO16-LABEL: t4:
+; ZERO16:       // %bb.0:
+; ZERO16-NEXT:    movi d0, #0000000000000000
+; ZERO16-NEXT:    movi d1, #0000000000000000
+; ZERO16-NEXT:    b barf
+;
+; NONE16-LABEL: t4:
+; NONE16:       ; %bb.0:
+; NONE16-NEXT:    fmov s0, wzr
+; NONE16-NEXT:    fmov s1, wzr
+; NONE16-NEXT:    b _barf
   tail call void @barf(float 0.000000e+00, float 0.000000e+00) nounwind
   ret void
 }
@@ -79,6 +121,77 @@ declare double @sin(double)
 ; ALL-NOT: str q{{[0-9]+}}
 ; ALL-NOT: ldr q{{[0-9]+}}
 define double @foo(i32 %n) {
+; ZERO16-LABEL: foo:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    str d10, [sp, #-48]! // 8-byte Folded Spill
+; ZERO16-NEXT:    stp d9, d8, [sp, #8] // 16-byte Folded Spill
+; ZERO16-NEXT:    str x30, [sp, #24] // 8-byte Folded Spill
+; ZERO16-NEXT:    stp x20, x19, [sp, #32] // 16-byte Folded Spill
+; ZERO16-NEXT:    .cfi_def_cfa_offset 48
+; ZERO16-NEXT:    .cfi_offset w19, -8
+; ZERO16-NEXT:    .cfi_offset w20, -16
+; ZERO16-NEXT:    .cfi_offset w30, -24
+; ZERO16-NEXT:    .cfi_offset b8, -32
+; ZERO16-NEXT:    .cfi_offset b9, -40
+; ZERO16-NEXT:    .cfi_offset b10, -48
+; ZERO16-NEXT:    movi d8, #0000000000000000
+; ZERO16-NEXT:    fmov d10, #1.00000000
+; ZERO16-NEXT:    mov w19, w0
+; ZERO16-NEXT:    fmov d9, #1.00000000
+; ZERO16-NEXT:    mov w20, #0 // =0x0
+; ZERO16-NEXT:  .LBB4_1: // %for.body
+; ZERO16-NEXT:    // =>This Inner Loop Header: Depth=1
+; ZERO16-NEXT:    fmov d0, d8
+; ZERO16-NEXT:    bl sin
+; ZERO16-NEXT:    fadd d8, d8, d10
+; ZERO16-NEXT:    fmaxnm d9, d9, d0
+; ZERO16-NEXT:    add w20, w20, #1
+; ZERO16-NEXT:    cmp w20, w19
+; ZERO16-NEXT:    b.lt .LBB4_1
+; ZERO16-NEXT:  // %bb.2: // %for.end
+; ZERO16-NEXT:    fmov d0, d9
+; ZERO16-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
+; ZERO16-NEXT:    ldp d9, d8, [sp, #8] // 16-byte Folded Reload
+; ZERO16-NEXT:    ldr x30, [sp, #24] // 8-byte Folded Reload
+; ZERO16-NEXT:    ldr d10, [sp], #48 // 8-byte Folded Reload
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: foo:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    stp d11, d10, [sp, #-64]! ; 16-byte Folded Spill
+; NONE16-NEXT:    stp d9, d8, [sp, #16] ; 16-byte Folded Spill
+; NONE16-NEXT:    stp x20, x19, [sp, #32] ; 16-byte Folded Spill
+; NONE16-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; NONE16-NEXT:    .cfi_def_cfa_offset 64
+; NONE16-NEXT:    .cfi_offset w30, -8
+; NONE16-NEXT:    .cfi_offset w29, -16
+; NONE16-NEXT:    .cfi_offset w19, -24
+; NONE16-NEXT:    .cfi_offset w20, -32
+; NONE16-NEXT:    .cfi_offset b8, -40
+; NONE16-NEXT:    .cfi_offset b9, -48
+; NONE16-NEXT:    .cfi_offset b10, -56
+; NONE16-NEXT:    .cfi_offset b11, -64
+; NONE16-NEXT:    mov x19, x0
+; NONE16-NEXT:    mov w20, #0 ; =0x0
+; NONE16-NEXT:    fmov d8, xzr
+; NONE16-NEXT:    fmov d10, #1.00000000
+; NONE16-NEXT:    fmov d9, #1.00000000
+; NONE16-NEXT:  LBB4_1: ; %for.body
+; NONE16-NEXT:    ; =>This Inner Loop Header: Depth=1
+; NONE16-NEXT:    fmov d0, d8
+; NONE16-NEXT:    bl _sin
+; NONE16-NEXT:    fmaxnm d9, d9, d0
+; NONE16-NEXT:    fadd d8, d8, d10
+; NONE16-NEXT:    add w20, w20, #1
+; NONE16-NEXT:    cmp w20, w19
+; NONE16-NEXT:    b.lt LBB4_1
+; NONE16-NEXT:  ; %bb.2: ; %for.end
+; NONE16-NEXT:    fmov d0, d9
+; NONE16-NEXT:    ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
+; NONE16-NEXT:    ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
+; NONE16-NEXT:    ldp d9, d8, [sp, #16] ; 16-byte Folded Reload
+; NONE16-NEXT:    ldp d11, d10, [sp], #64 ; 16-byte Folded Reload
+; NONE16-NEXT:    ret
 entry:
   br label %for.body
 
@@ -98,134 +211,402 @@ for.end:
 }
 
 define <2 x i64> @t6() {
-; ALL-LABEL: t6:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
+; NONEGP-LABEL: t6:
+; NONEGP:       // %bb.0:
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: t6:
+; ZEROFP:       // %bb.0:
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: t6:
+; ZERO16:       // %bb.0:
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: t6:
+; NONE16:       ; %bb.0:
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
   ret <2 x i64> zeroinitializer
 }
 
 define i1 @ti1() {
+; NONEGP-LABEL: ti1:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov w0, wzr
+; NONEGP-NEXT:    ret
+;
+; ZERO16-LABEL: ti1:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov w0, #0 // =0x0
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: ti1:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov w0, #0 ; =0x0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: ti1:
-; NONEGP: mov w0, wzr
-; ZEROGP: mov w0, #0
   ret i1 false
 }
 
 define i8 @ti8() {
+; NONEGP-LABEL: ti8:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov w0, wzr
+; NONEGP-NEXT:    ret
+;
+; ZERO16-LABEL: ti8:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov w0, #0 // =0x0
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: ti8:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov w0, #0 ; =0x0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: ti8:
-; NONEGP: mov w0, wzr
-; ZEROGP: mov w0, #0
   ret i8 0
 }
 
 define i16 @ti16() {
+; NONEGP-LABEL: ti16:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov w0, wzr
+; NONEGP-NEXT:    ret
+;
+; ZERO16-LABEL: ti16:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov w0, #0 // =0x0
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: ti16:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov w0, #0 ; =0x0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: ti16:
-; NONEGP: mov w0, wzr
- ; ZEROGP: mov w0, #0
   ret i16 0
 }
 
 define i32 @ti32() {
+; NONEGP-LABEL: ti32:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov w0, wzr
+; NONEGP-NEXT:    ret
+;
+; ZERO16-LABEL: ti32:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov w0, #0 // =0x0
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: ti32:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov w0, #0 ; =0x0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: ti32:
-; NONEGP: mov w0, wzr
-; ZEROGP: mov w0, #0
   ret i32 0
 }
 
 define i64 @ti64() {
+; NONEGP-LABEL: ti64:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    mov x0, xzr
+; NONEGP-NEXT:    ret
+;
+; ZERO16-LABEL: ti64:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    mov x0, #0 // =0x0
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: ti64:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    mov x0, #0 ; =0x0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: ti64:
-; NONEGP: mov x0, xzr
-; ZEROGP: mov x0, #0
   ret i64 0
 }
 
 define float @tf32() {
+; ZEROFP-LABEL: tf32:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi d0, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tf32:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi d0, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tf32:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    fmov s0, wzr
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tf32:
-; NONEFP: mov s0, wzr
-; ZEROFP: movi d0, #0
   ret float 0.0
 }
 
 define double @td64() {
+; ZEROFP-LABEL: td64:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi d0, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: td64:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi d0, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: td64:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    fmov d0, xzr
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: td64:
-; NONEFP: mov d0, xzr
-; ZEROFP: movi d0, #0
   ret double 0.0
 }
 
 define <8 x i8> @tv8i8() {
+; NONEGP-LABEL: tv8i8:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv8i8:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv8i8:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv8i8:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv8i8:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <8 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
 }
 
 define <4 x i16> @tv4i16() {
+; NONEGP-LABEL: tv4i16:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv4i16:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv4i16:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv4i16:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv4i16:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <4 x i16> <i16 0, i16 0, i16 0, i16 0>
 }
 
 define <2 x i32> @tv2i32() {
+; NONEGP-LABEL: tv2i32:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv2i32:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv2i32:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv2i32:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv2i32:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <2 x i32> <i32 0, i32 0>
 }
 
 define <2 x float> @tv2f32() {
+; NONEGP-LABEL: tv2f32:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv2f32:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv2f32:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv2f32:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv2f32:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <2 x float> <float 0.0, float 0.0>
 }
 
 define <16 x i8> @tv16i8() {
+; NONEGP-LABEL: tv16i8:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv16i8:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv16i8:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv16i8:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv16i8:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>
 }
 
 define <8 x i16> @tv8i16() {
+; NONEGP-LABEL: tv8i16:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv8i16:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv8i16:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv8i16:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv8i16:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
 }
 
 define <4 x i32> @tv4i32() {
+; NONEGP-LABEL: tv4i32:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv4i32:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv4i32:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv4i32:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv4i32:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <4 x i32> <i32 0, i32 0, i32 0, i32 0>
 }
 
 define <2 x i64> @tv2i64() {
+; NONEGP-LABEL: tv2i64:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv2i64:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv2i64:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv2i64:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv2i64:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <2 x i64> <i64 0, i64 0>
 }
 
 define <4 x float> @tv4f32() {
+; NONEGP-LABEL: tv4f32:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv4f32:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv4f32:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv4f32:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv4f32:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>
 }
 
 define <2 x double> @tv2d64() {
+; NONEGP-LABEL: tv2d64:
+; NONEGP:       // %bb.0: // %entry
+; NONEGP-NEXT:    movi v0.2d, #0000000000000000
+; NONEGP-NEXT:    ret
+;
+; ZEROFP-LABEL: tv2d64:
+; ZEROFP:       // %bb.0: // %entry
+; ZEROFP-NEXT:    movi v0.2d, #0000000000000000
+; ZEROFP-NEXT:    ret
+;
+; ZERO16-LABEL: tv2d64:
+; ZERO16:       // %bb.0: // %entry
+; ZERO16-NEXT:    movi v0.2d, #0000000000000000
+; ZERO16-NEXT:    ret
+;
+; NONE16-LABEL: tv2d64:
+; NONE16:       ; %bb.0: ; %entry
+; NONE16-NEXT:    movi.16b v0, #0
+; NONE16-NEXT:    ret
 entry:
-; ALL-LABEL: tv2d64:
-; ALL: movi{{(.16b)?}} v0{{(.2d)?}}, #0
   ret <2 x double> <double 0.0, double 0.0>
 }
 
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ALL: {{.*}}
+; NONEFP: {{.*}}
+; ZEROGP: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll b/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
index ea2e6ede8566af..f8f95ce610b100 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -o - %s | FileCheck %s
 target triple = "aarch64--"
 
@@ -9,32 +10,123 @@ declare void @end()
 ; CHECK-LABEL: func:
 define void @func(ptr %addr) {
   ; We should not see any spills or reloads between begin and end
-  ; CHECK: bl begin
-  ; CHECK-NOT: str{{.*}}sp
-  ; CHECK-NOT: Folded Spill
-  ; CHECK-NOT: ldr{{.*}}sp
-  ; CHECK-NOT: Folded Reload
+; CHECK-LABEL: func:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    stp x29, x30, [sp, #-96]! // 16-byte Folded Spill
+; CHECK-NEXT:    stp x28, x27, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x26, x25, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x22, x21, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w21, -24
+; CHECK-NEXT:    .cfi_offset w22, -32
+; CHECK-NEXT:    .cfi_offset w23, -40
+; CHECK-NEXT:    .cfi_offset w24, -48
+; CHECK-NEXT:    .cfi_offset w25, -56
+; CHECK-NEXT:    .cfi_offset w26, -64
+; CHECK-NEXT:    .cfi_offset w27, -72
+; CHECK-NEXT:    .cfi_offset w28, -80
+; CHECK-NEXT:    .cfi_offset w30, -88
+; CHECK-NEXT:    .cfi_offset w29, -96
+; CHECK-NEXT:    mov x19, x0
+; CHECK-NEXT:    bl begin
+; CHECK-NEXT:    ldr x8, [x19]
+; CHECK-NEXT:    ldr x9, [x19]
+; CHECK-NEXT:    ldr x10, [x19]
+; CHECK-NEXT:    ldr x11, [x19]
+; CHECK-NEXT:    ldr x12, [x19]
+; CHECK-NEXT:    ldr x13, [x19]
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    ldr x14, [x19]
+; CHECK-NEXT:    ldr x15, [x19]
+; CHECK-NEXT:    ldr x16, [x19]
+; CHECK-NEXT:    ldr x17, [x19]
+; CHECK-NEXT:    ldr x18, [x19]
+; CHECK-NEXT:    ldr x0, [x19]
+; CHECK-NEXT:    ldr x1, [x19]
+; CHECK-NEXT:    ldr x2, [x19]
+; CHECK-NEXT:    ldr x3, [x19]
+; CHECK-NEXT:    ldr x4, [x19]
+; CHECK-NEXT:    ldr x5, [x19]
+; CHECK-NEXT:    ldr x6, [x19]
+; CHECK-NEXT:    ldr x7, [x19]
+; CHECK-NEXT:    ldr x20, [x19]
+; CHECK-NEXT:    ldr x21, [x19]
+; CHECK-NEXT:    ldr x22, [x19]
+; CHECK-NEXT:    ldr x23, [x19]
+; CHECK-NEXT:    ldr x24, [x19]
+; CHECK-NEXT:    ldr x25, [x19]
+; CHECK-NEXT:    ldr x26, [x19]
+; CHECK-NEXT:    ldr x27, [x19]
+; CHECK-NEXT:    ldr x28, [x19]
+; CHECK-NEXT:    ldr x29, [x19]
+; CHECK-NEXT:    ldr x30, [x19]
+; CHECK-NEXT:    b.ne .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %if.then
+; CHECK-NEXT:    str x10, [x19]
+; CHECK-NEXT:  .LBB0_2: // %if.end
+; CHECK-NEXT:    str x8, [x19]
+; CHECK-NEXT:    str x9, [x19]
+; CHECK-NEXT:    str x10, [x19]
+; CHECK-NEXT:    str x11, [x19]
+; CHECK-NEXT:    str x12, [x19]
+; CHECK-NEXT:    str x13, [x19]
+; CHECK-NEXT:    str x14, [x19]
+; CHECK-NEXT:    str x15, [x19]
+; CHECK-NEXT:    str x16, [x19]
+; CHECK-NEXT:    str x17, [x19]
+; CHECK-NEXT:    str x18, [x19]
+; CHECK-NEXT:    str x0, [x19]
+; CHECK-NEXT:    str x1, [x19]
+; CHECK-NEXT:    str x2, [x19]
+; CHECK-NEXT:    str x3, [x19]
+; CHECK-NEXT:    str x4, [x19]
+; CHECK-NEXT:    str x5, [x19]
+; CHECK-NEXT:    str x6, [x19]
+; CHECK-NEXT:    str x7, [x19]
+; CHECK-NEXT:    str x20, [x19]
+; CHECK-NEXT:    str x21, [x19]
+; CHECK-NEXT:    str x22, [x19]
+; CHECK-NEXT:    str x23, [x19]
+; CHECK-NEXT:    str x24, [x19]
+; CHECK-NEXT:    str x25, [x19]
+; CHECK-NEXT:    str x26, [x19]
+; CHECK-NEXT:    str x27, [x19]
+; CHECK-NEXT:    str x28, [x19]
+; CHECK-NEXT:    str x29, [x19]
+; CHECK-NEXT:    str x30, [x19]
+; CHECK-NEXT:    bl end
+; CHECK-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x24, x23, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x26, x25, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x28, x27, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x29, x30, [sp], #96 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
   call void @begin()
-  %v0 = load volatile i64, ptr %addr  
-  %v1 = load volatile i64, ptr %addr  
-  %v2 = load volatile i64, ptr %addr  
-  %v3 = load volatile i64, ptr %addr  
-  %v4 = load volatile i64, ptr %addr  
-  %v5 = load volatile i64, ptr %addr  
-  %v6 = load volatile i64, ptr %addr  
-  %v7 = load volatile i64, ptr %addr  
-  %v8 = load volatile i64, ptr %addr  
-  %v9 = load volatile i64, ptr %addr  
-  %v10 = load volatile i64, ptr %addr  
-  %v11 = load volatile i64, ptr %addr  
-  %v12 = load volatile i64, ptr %addr  
-  %v13 = load volatile i64, ptr %addr  
-  %v14 = load volatile i64, ptr %addr  
-  %v15 = load volatile i64, ptr %addr  
-  %v16 = load volatile i64, ptr %addr  
-  %v17 = load volatile i64, ptr %addr  
-  %v18 = load volatile i64, ptr %addr  
-  %v19 = load volatile i64, ptr %addr  
+  %v0 = load volatile i64, ptr %addr
+  %v1 = load volatile i64, ptr %addr
+  %v2 = load volatile i64, ptr %addr
+  %v3 = load volatile i64, ptr %addr
+  %v4 = load volatile i64, ptr %addr
+  %v5 = load volatile i64, ptr %addr
+  %v6 = load volatile i64, ptr %addr
+  %v7 = load volatile i64, ptr %addr
+  %v8 = load volatile i64, ptr %addr
+  %v9 = load volatile i64, ptr %addr
+  %v10 = load volatile i64, ptr %addr
+  %v11 = load volatile i64, ptr %addr
+  %v12 = load volatile i64, ptr %addr
+  %v13 = load volatile i64, ptr %addr
+  %v14 = load volatile i64, ptr %addr
+  %v15 = load volatile i64, ptr %addr
+  %v16 = load volatile i64, ptr %addr
+  %v17 = load volatile i64, ptr %addr
+  %v18 = load volatile i64, ptr %addr
+  %v19 = load volatile i64, ptr %addr
   %v20 = load volatile i64, ptr %addr
   %v21 = load volatile i64, ptr %addr
   %v22 = load volatile i64, ptr %addr
@@ -84,7 +176,6 @@ if.end:
   store volatile i64 %v27, ptr %addr
   store volatile i64 %v28, ptr %addr
   store volatile i64 %v29, ptr %addr
-  ; CHECK: bl end
   call void @end()
 
   ret void
diff --git a/llvm/test/CodeGen/AArch64/arm64-zext.ll b/llvm/test/CodeGen/AArch64/arm64-zext.ll
index 9470708ebdc074..5719f0986fe369 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zext.ll
@@ -1,10 +1,12 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
 define i64 @foo(i32 %a, i32 %b) nounwind readnone ssp {
-entry:
 ; CHECK-LABEL: foo:
-; CHECK: add w0, w1, w0
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    add w0, w1, w0
+; CHECK-NEXT:    ret
+entry:
   %add = add i32 %b, %a
   %conv = zext i32 %add to i64
   ret i64 %conv
diff --git a/llvm/test/CodeGen/AArch64/irg.ll b/llvm/test/CodeGen/AArch64/irg.ll
index a5a454ff69eaaa..39c6622a6cf3c9 100644
--- a/llvm/test/CodeGen/AArch64/irg.ll
+++ b/llvm/test/CodeGen/AArch64/irg.ll
@@ -1,39 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte | FileCheck %s
 
 define ptr @irg_imm16(ptr %p) {
-entry:
 ; CHECK-LABEL: irg_imm16:
-; CHECK: mov w[[R:[0-9]+]], #16
-; CHECK: irg x0, x0, x[[R]]
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov w8, #16 // =0x10
+; CHECK-NEXT:    irg x0, x0, x8
+; CHECK-NEXT:    ret
+entry:
   %q = call ptr @llvm.aarch64.irg(ptr %p, i64 16)
   ret ptr %q
 }
 
 define ptr @irg_imm0(ptr %p) {
-entry:
 ; CHECK-LABEL: irg_imm0:
-; CHECK: irg x0, x0{{$}}
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    irg x0, x0
+; CHECK-NEXT:    ret
+entry:
   %q = call ptr @llvm.aarch64.irg(ptr %p, i64 0)
   ret ptr %q
 }
 
 define ptr @irg_reg(ptr %p, i64 %ex) {
-entry:
 ; CHECK-LABEL: irg_reg:
-; CHECK: irg x0, x0, x1
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    irg x0, x0, x1
+; CHECK-NEXT:    ret
+entry:
   %q = call ptr @llvm.aarch64.irg(ptr %p, i64 %ex)
   ret ptr %q
 }
 
 ; undef argument in irg is treated specially
 define ptr @irg_sp() {
-entry:
 ; CHECK-LABEL: irg_sp:
-; CHECK: irg x0, sp{{$}}
-; CHECK: ret
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    irg x0, sp
+; CHECK-NEXT:    ret
+entry:
   %q = call ptr @llvm.aarch64.irg.sp(i64 0)
   ret ptr %q
 }
diff --git a/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll b/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
index ed88293fcf7e34..7e6ce0e9f0a175 100644
--- a/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
+++ b/llvm/test/CodeGen/AArch64/neon-scalar-by-elem-fma.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
 
 attributes #0 = { strictfp }
@@ -8,112 +9,143 @@ declare float @llvm.experimental.constrained.fma.f32(float, float, float, metada
 declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
 
 define float @test_fmla_ss4S_0(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss4S_0
-  ; CHECK: fmadd s0, s1, s2, s0
+; CHECK-LABEL: test_fmla_ss4S_0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd s0, s1, s2, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 0
   %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
   ret float %tmp2
 }
 
 define float @test_fmla_ss4S_0_swap(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss4S_0_swap
-  ; CHECK: fmadd s0, s2, s1, s0
+; CHECK-LABEL: test_fmla_ss4S_0_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd s0, s2, s1, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 0
   %tmp2 = call float @llvm.fma.f32(float %tmp1, float %b, float %a)
   ret float %tmp2
 }
 
 define float @test_fmla_ss4S_3(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss4S_3
-  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmla_ss4S_3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla s0, s1, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
   ret float %tmp2
 }
 
 define float @test_fmla_ss4S_3_swap(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss4S_3_swap
-  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmla_ss4S_3_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla s0, s0, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = call float @llvm.fma.f32(float %tmp1, float %a, float %a)
   ret float %tmp2
 }
 
 define float @test_fmla_ss2S_0(float %a, float %b, <2 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss2S_0
-  ; CHECK: fmadd s0, s1, s2, s0
+; CHECK-LABEL: test_fmla_ss2S_0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmadd s0, s1, s2, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 0
   %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
   ret float %tmp2
 }
 
 define float @test_fmla_ss2S_0_swap(float %a, float %b, <2 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss2S_0_swap
-  ; CHECK: fmadd s0, s2, s1, s0
+; CHECK-LABEL: test_fmla_ss2S_0_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmadd s0, s2, s1, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 0
   %tmp2 = call float @llvm.fma.f32(float %tmp1, float %b, float %a)
   ret float %tmp2
 }
 
 define float @test_fmla_ss2S_1(float %a, float %b, <2 x float> %v) {
-  ; CHECK-LABEL: test_fmla_ss2S_1
-  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_fmla_ss2S_1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmla s0, s1, v2.s[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 1
   %tmp2 = call float @llvm.fma.f32(float %b, float %tmp1, float %a)
   ret float %tmp2
 }
 
 define double @test_fmla_ddD_0(double %a, double %b, <1 x double> %v) {
-  ; CHECK-LABEL: test_fmla_ddD_0
-  ; CHECK: fmadd d0, d1, d2, d0
+; CHECK-LABEL: test_fmla_ddD_0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d1, d2, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <1 x double> %v, i32 0
   %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
   ret double %tmp2
 }
 
 define double @test_fmla_ddD_0_swap(double %a, double %b, <1 x double> %v) {
-  ; CHECK-LABEL: test_fmla_ddD_0_swap
-  ; CHECK: fmadd d0, d2, d1, d0
+; CHECK-LABEL: test_fmla_ddD_0_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d2, d1, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <1 x double> %v, i32 0
   %tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_0(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmla_dd2D_0
-  ; CHECK: fmadd d0, d1, d2, d0
+; CHECK-LABEL: test_fmla_dd2D_0:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d1, d2, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 0
   %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_0_swap(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmla_dd2D_0_swap
-  ; CHECK: fmadd d0, d2, d1, d0
+; CHECK-LABEL: test_fmla_dd2D_0_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d2, d1, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 0
   %tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_1(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmla_dd2D_1
-  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmla_dd2D_1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = call double @llvm.fma.f64(double %b, double %tmp1, double %a)
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_1_swap(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmla_dd2D_1_swap
-  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmla_dd2D_1_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = call double @llvm.fma.f64(double %tmp1, double %b, double %a)
   ret double %tmp2
 }
 
 define float @test_fmls_ss4S_0(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss4S_0
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss4S_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <4 x float> %v, i64 0
@@ -122,8 +154,10 @@ entry:
 }
 
 define float @test_fmls_ss4S_0_swap(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss4S_0_swap
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss4S_0_swap:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <4 x float> %v, i64 0
@@ -132,8 +166,11 @@ entry:
 }
 
 define float @test_fmls_ss4S_3(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss4S_3
-  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmls_ss4S_3:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov s1, v2.s[3]
+; CHECK-NEXT:    fmls s0, s1, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = fsub float -0.0, %tmp1
   %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
@@ -141,8 +178,11 @@ define float @test_fmls_ss4S_3(float %a, float %b, <4 x float> %v) {
 }
 
 define float @test_fmls_ss4S_3_swap(float %a, float %b, <4 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss4S_3_swap
-  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmls_ss4S_3_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov s1, v2.s[3]
+; CHECK-NEXT:    fmls s0, s1, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = fsub float -0.0, %tmp1
   %tmp3 = call float @llvm.fma.f32(float %tmp1, float %tmp2, float %a)
@@ -151,8 +191,11 @@ define float @test_fmls_ss4S_3_swap(float %a, float %b, <4 x float> %v) {
 
 
 define float @test_fmls_ss2S_0(float %a, float %b, <2 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss2S_0
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss2S_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <2 x float> %v, i64 0
@@ -161,8 +204,11 @@ entry:
 }
 
 define float @test_fmls_ss2S_0_swap(float %a, float %b, <2 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss2S_0_swap
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss2S_0_swap:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <2 x float> %v, i64 0
@@ -171,8 +217,12 @@ entry:
 }
 
 define float @test_fmls_ss2S_1(float %a, float %b, <2 x float> %v) {
-  ; CHECK-LABEL: test_fmls_ss2S_1
-  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_fmls_ss2S_1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    mov s1, v2.s[1]
+; CHECK-NEXT:    fmls s0, s1, v2.s[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 1
   %tmp2 = fsub float -0.0, %tmp1
   %tmp3 = call float @llvm.fma.f32(float %tmp2, float %tmp1, float %a)
@@ -180,8 +230,10 @@ define float @test_fmls_ss2S_1(float %a, float %b, <2 x float> %v) {
 }
 
 define double @test_fmls_ddD_0(double %a, double %b, <1 x double> %v) {
-  ; CHECK-LABEL: test_fmls_ddD_0
-  ; CHECK: fmsub d0, d1, d2, d0
+; CHECK-LABEL: test_fmls_ddD_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d1, d2, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <1 x double> %v, i64 0
@@ -190,8 +242,10 @@ entry:
 }
 
 define double @test_fmls_ddD_0_swap(double %a, double %b, <1 x double> %v) {
-  ; CHECK-LABEL: test_fmls_ddD_0_swap
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_ddD_0_swap:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <1 x double> %v, i64 0
@@ -200,8 +254,10 @@ entry:
 }
 
 define double @test_fmls_dd2D_0(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmls_dd2D_0
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_dd2D_0:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <2 x double> %v, i64 0
@@ -210,8 +266,10 @@ entry:
 }
 
 define double @test_fmls_dd2D_0_swap(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmls_dd2D_0_swap
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_dd2D_0_swap:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <2 x double> %v, i64 0
@@ -220,8 +278,11 @@ entry:
 }
 
 define double @test_fmls_dd2D_1(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmls_dd2D_1
-  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmls_dd2D_1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d1, v2.d[1]
+; CHECK-NEXT:    fmls d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = fsub double -0.0, %tmp1
   %tmp3 = call double @llvm.fma.f64(double %tmp2, double %tmp1, double %a)
@@ -229,8 +290,11 @@ define double @test_fmls_dd2D_1(double %a, double %b, <2 x double> %v) {
 }
 
 define double @test_fmls_dd2D_1_swap(double %a, double %b, <2 x double> %v) {
-  ; CHECK-LABEL: test_fmls_dd2D_1_swap
-  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmls_dd2D_1_swap:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d1, v2.d[1]
+; CHECK-NEXT:    fmls d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = fsub double -0.0, %tmp1
   %tmp3 = call double @llvm.fma.f64(double %tmp1, double %tmp2, double %a)
@@ -238,112 +302,143 @@ define double @test_fmls_dd2D_1_swap(double %a, double %b, <2 x double> %v) {
 }
 
 define float @test_fmla_ss4S_0_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss4S_0_strict
-  ; CHECK: fmadd s0, s1, s2, s0
+; CHECK-LABEL: test_fmla_ss4S_0_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd s0, s1, s2, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 0
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define float @test_fmla_ss4S_0_swap_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss4S_0_swap_strict
-  ; CHECK: fmadd s0, s2, s1, s0
+; CHECK-LABEL: test_fmla_ss4S_0_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd s0, s2, s1, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 0
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %b, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define float @test_fmla_ss4S_3_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss4S_3_strict
-  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmla_ss4S_3_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla s0, s1, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define float @test_fmla_ss4S_3_swap_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss4S_3_swap_strict
-  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmla_ss4S_3_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla s0, s0, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define float @test_fmla_ss2S_0_strict(float %a, float %b, <2 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss2S_0_strict
-  ; CHECK: fmadd s0, s1, s2, s0
+; CHECK-LABEL: test_fmla_ss2S_0_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmadd s0, s1, s2, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 0
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define float @test_fmla_ss2S_0_swap_strict(float %a, float %b, <2 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss2S_0_swap_strict
-  ; CHECK: fmadd s0, s2, s1, s0
+; CHECK-LABEL: test_fmla_ss2S_0_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmadd s0, s2, s1, s0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 0
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %b, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define float @test_fmla_ss2S_1_strict(float %a, float %b, <2 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ss2S_1_strict
-  ; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_fmla_ss2S_1_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmla s0, s1, v2.s[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 1
   %tmp2 = call float @llvm.experimental.constrained.fma.f32(float %b, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %tmp2
 }
 
 define double @test_fmla_ddD_0_strict(double %a, double %b, <1 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ddD_0_strict
-  ; CHECK: fmadd d0, d1, d2, d0
+; CHECK-LABEL: test_fmla_ddD_0_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d1, d2, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <1 x double> %v, i32 0
   %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
 define double @test_fmla_ddD_0_swap_strict(double %a, double %b, <1 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmla_ddD_0_swap_strict
-  ; CHECK: fmadd d0, d2, d1, d0
+; CHECK-LABEL: test_fmla_ddD_0_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d2, d1, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <1 x double> %v, i32 0
   %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %b, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_0_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmla_dd2D_0_strict
-  ; CHECK: fmadd d0, d1, d2, d0
+; CHECK-LABEL: test_fmla_dd2D_0_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d1, d2, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 0
   %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_0_swap_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmla_dd2D_0_swap_strict
-  ; CHECK: fmadd d0, d2, d1, d0
+; CHECK-LABEL: test_fmla_dd2D_0_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmadd d0, d2, d1, d0
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 0
   %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %b, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_1_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmla_dd2D_1_strict
-  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmla_dd2D_1_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %b, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
 define double @test_fmla_dd2D_1_swap_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmla_dd2D_1_swap_strict
-  ; CHECK: fmla {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmla_dd2D_1_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmla d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %b, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret double %tmp2
 }
 
 define float @test_fmls_ss4S_0_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss4S_0_strict
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss4S_0_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <4 x float> %v, i64 0
@@ -352,8 +447,10 @@ entry:
 }
 
 define float @test_fmls_ss4S_0_swap_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss4S_0_swap_strict
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss4S_0_swap_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <4 x float> %v, i64 0
@@ -362,8 +459,11 @@ entry:
 }
 
 define float @test_fmls_ss4S_3_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss4S_3_strict
-  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmls_ss4S_3_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov s1, v2.s[3]
+; CHECK-NEXT:    fmls s0, s1, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = fneg float %tmp1
   %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp2, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
@@ -371,8 +471,11 @@ define float @test_fmls_ss4S_3_strict(float %a, float %b, <4 x float> %v) #0 {
 }
 
 define float @test_fmls_ss4S_3_swap_strict(float %a, float %b, <4 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss4S_3_swap_strict
-  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+; CHECK-LABEL: test_fmls_ss4S_3_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov s1, v2.s[3]
+; CHECK-NEXT:    fmls s0, s1, v2.s[3]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <4 x float> %v, i32 3
   %tmp2 = fneg float %tmp1
   %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp1, float %tmp2, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
@@ -380,8 +483,11 @@ define float @test_fmls_ss4S_3_swap_strict(float %a, float %b, <4 x float> %v) #
 }
 
 define float @test_fmls_ss2S_0_strict(float %a, float %b, <2 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss2S_0_strict
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss2S_0_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <2 x float> %v, i64 0
@@ -390,8 +496,11 @@ entry:
 }
 
 define float @test_fmls_ss2S_0_swap_strict(float %a, float %b, <2 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss2S_0_swap_strict
-  ; CHECK: fmsub s0, s2, s1, s0
+; CHECK-LABEL: test_fmls_ss2S_0_swap_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    fmsub s0, s2, s1, s0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg float %b
   %extract = extractelement <2 x float> %v, i64 0
@@ -400,8 +509,12 @@ entry:
 }
 
 define float @test_fmls_ss2S_1_strict(float %a, float %b, <2 x float> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ss2S_1_strict
-  ; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
+; CHECK-LABEL: test_fmls_ss2S_1_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    mov s1, v2.s[1]
+; CHECK-NEXT:    fmls s0, s1, v2.s[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x float> %v, i32 1
   %tmp2 = fneg float %tmp1
   %tmp3 = call float @llvm.experimental.constrained.fma.f32(float %tmp2, float %tmp1, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
@@ -409,8 +522,10 @@ define float @test_fmls_ss2S_1_strict(float %a, float %b, <2 x float> %v) #0 {
 }
 
 define double @test_fmls_ddD_0_strict(double %a, double %b, <1 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ddD_0_strict
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_ddD_0_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <1 x double> %v, i64 0
@@ -419,8 +534,10 @@ entry:
 }
 
 define double @test_fmls_ddD_0_swap_strict(double %a, double %b, <1 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmls_ddD_0_swap_strict
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_ddD_0_swap_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <1 x double> %v, i64 0
@@ -429,8 +546,10 @@ entry:
 }
 
 define double @test_fmls_dd2D_0_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmls_dd2D_0_strict
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_dd2D_0_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <2 x double> %v, i64 0
@@ -439,8 +558,10 @@ entry:
 }
 
 define double @test_fmls_dd2D_0_swap_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmls_dd2D_0_swap_strict
-  ; CHECK: fmsub d0, d2, d1, d0
+; CHECK-LABEL: test_fmls_dd2D_0_swap_strict:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmsub d0, d2, d1, d0
+; CHECK-NEXT:    ret
 entry:
   %fneg = fneg double %b
   %extract = extractelement <2 x double> %v, i64 0
@@ -449,8 +570,11 @@ entry:
 }
 
 define double @test_fmls_dd2D_1_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmls_dd2D_1_strict
-  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmls_dd2D_1_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d1, v2.d[1]
+; CHECK-NEXT:    fmls d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = fneg double %tmp1
   %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp2, double %tmp1, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
@@ -458,8 +582,11 @@ define double @test_fmls_dd2D_1_strict(double %a, double %b, <2 x double> %v) #0
 }
 
 define double @test_fmls_dd2D_1_swap_strict(double %a, double %b, <2 x double> %v) #0 {
-  ; CHECK-LABEL: test_fmls_dd2D_1_swap_strict
-  ; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK-LABEL: test_fmls_dd2D_1_swap_strict:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov d1, v2.d[1]
+; CHECK-NEXT:    fmls d0, d1, v2.d[1]
+; CHECK-NEXT:    ret
   %tmp1 = extractelement <2 x double> %v, i32 1
   %tmp2 = fneg double %tmp1
   %tmp3 = call double @llvm.experimental.constrained.fma.f64(double %tmp1, double %tmp2, double %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-sqdmulh.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-sqdmulh.ll
index 9c5dff6c3bf6fb..cd2ffc21028ebc 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-sqdmulh.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-sqdmulh.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s
 
 ; SQDMULH (Single, x2)
diff --git a/llvm/test/CodeGen/AArch64/wineh-save-lrpair3.mir b/llvm/test/CodeGen/AArch64/wineh-save-lrpair3.mir
index e45cf9ff3e4c82..8a42182361c0c3 100644
--- a/llvm/test/CodeGen/AArch64/wineh-save-lrpair3.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-save-lrpair3.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 # RUN: llc -o - %s -mtriple=aarch64-windows -start-before=prologepilog \
 # RUN:   -stop-after=prologepilog | FileCheck %s
 
@@ -66,3 +67,5 @@ body:             |
     RET_ReallyLR
 
 ...
+## NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+# CHECK: {{.*}}

>From 4a3b1a62473217ac4f9abb38917b9213b546623e Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Wed, 24 Jan 2024 16:27:26 -0500
Subject: [PATCH 3/6] Update AArch64 codegen tests

---
 llvm/test/CodeGen/AArch64/arm64-vhadd.ll      |  94 ++++++++++++----
 llvm/test/CodeGen/AArch64/arm64-zip.ll        |  26 +++--
 .../CodeGen/AArch64/build-vector-extract.ll   |  40 ++++---
 llvm/test/CodeGen/AArch64/extbinopload.ll     | 104 +++++++++++-------
 4 files changed, 175 insertions(+), 89 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
index dda610e5dd3cb1..38a55e035680af 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vhadd.ll
@@ -826,8 +826,14 @@ define <4 x i16> @hadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
 define <4 x i16> @hadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: hadd8_zext_asr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
-; CHECK-NEXT:    bic.4h v1, #255, lsl #8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI57_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI57_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -856,8 +862,14 @@ define <4 x i16> @hadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
 define <4 x i16> @hadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: hadd8_zext_lsr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
-; CHECK-NEXT:    bic.4h v1, #255, lsl #8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI59_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI59_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    uhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -886,9 +898,14 @@ define <2 x i16> @hadd8x2_sext_asr(<2 x i8> %src1, <2 x i8> %src2) {
 define <2 x i16> @hadd8x2_zext_asr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-LABEL: hadd8x2_zext_asr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d2, #0x0000ff000000ff
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    and.8b v1, v1, v2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI61_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI61_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    uhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
@@ -903,10 +920,11 @@ define <2 x i16> @hadd8x2_sext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shl.2s v0, v0, #24
 ; CHECK-NEXT:    shl.2s v1, v1, #24
+; CHECK-NEXT:    movi.2d v2, #0000000000000000
 ; CHECK-NEXT:    sshr.2s v0, v0, #24
 ; CHECK-NEXT:    ssra.2s v0, v1, #24
-; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-NEXT:    and.8b v0, v0, v1
+; CHECK-NEXT:    rev32.4h v0, v0
+; CHECK-NEXT:    trn2.4h v0, v0, v2
 ; CHECK-NEXT:    ushr.2s v0, v0, #1
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <2 x i8> %src1 to <2 x i16>
@@ -919,9 +937,14 @@ define <2 x i16> @hadd8x2_sext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 define <2 x i16> @hadd8x2_zext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-LABEL: hadd8x2_zext_lsr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d2, #0x0000ff000000ff
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    and.8b v1, v1, v2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI63_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI63_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    uhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
@@ -951,8 +974,14 @@ define <4 x i16> @rhadd8_sext_asr(<4 x i8> %src1, <4 x i8> %src2) {
 define <4 x i16> @rhadd8_zext_asr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: rhadd8_zext_asr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
-; CHECK-NEXT:    bic.4h v1, #255, lsl #8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI65_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI65_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    urhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -985,8 +1014,14 @@ define <4 x i16> @rhadd8_sext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
 define <4 x i16> @rhadd8_zext_lsr(<4 x i8> %src1, <4 x i8> %src2) {
 ; CHECK-LABEL: rhadd8_zext_lsr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
-; CHECK-NEXT:    bic.4h v1, #255, lsl #8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI67_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI67_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    urhadd.4h v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <4 x i8> %src1 to <4 x i16>
@@ -1017,9 +1052,14 @@ define <2 x i16> @rhadd8x2_sext_asr(<2 x i8> %src1, <2 x i8> %src2) {
 define <2 x i16> @rhadd8x2_zext_asr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-LABEL: rhadd8x2_zext_asr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d2, #0x0000ff000000ff
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    and.8b v1, v1, v2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI69_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI69_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    urhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
@@ -1035,12 +1075,13 @@ define <2 x i16> @rhadd8x2_sext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    shl.2s v0, v0, #24
 ; CHECK-NEXT:    shl.2s v1, v1, #24
-; CHECK-NEXT:    movi d2, #0x00ffff0000ffff
+; CHECK-NEXT:    movi.2d v2, #0000000000000000
 ; CHECK-NEXT:    sshr.2s v0, v0, #24
 ; CHECK-NEXT:    sshr.2s v1, v1, #24
 ; CHECK-NEXT:    mvn.8b v0, v0
 ; CHECK-NEXT:    sub.2s v0, v1, v0
-; CHECK-NEXT:    and.8b v0, v0, v2
+; CHECK-NEXT:    rev32.4h v0, v0
+; CHECK-NEXT:    trn2.4h v0, v0, v2
 ; CHECK-NEXT:    ushr.2s v0, v0, #1
 ; CHECK-NEXT:    ret
   %zextsrc1 = sext <2 x i8> %src1 to <2 x i16>
@@ -1054,9 +1095,14 @@ define <2 x i16> @rhadd8x2_sext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 define <2 x i16> @rhadd8x2_zext_lsr(<2 x i8> %src1, <2 x i8> %src2) {
 ; CHECK-LABEL: rhadd8x2_zext_lsr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d2, #0x0000ff000000ff
-; CHECK-NEXT:    and.8b v0, v0, v2
-; CHECK-NEXT:    and.8b v1, v1, v2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI71_0
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    mov.d v1[1], v1[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI71_0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v2
+; CHECK-NEXT:    tbl.8b v1, { v1 }, v2
 ; CHECK-NEXT:    urhadd.2s v0, v0, v1
 ; CHECK-NEXT:    ret
   %zextsrc1 = zext <2 x i8> %src1 to <2 x i16>
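
A note on the arm64-vhadd.ll hunks above: TBL writes a zero byte for any mask index that is out of range for the table, so a single tbl.8b can shuffle and clear the stale high bytes in one step, replacing the old bic.4h/and.8b masking. A minimal C++ sketch of that semantics follows (the mask shown is only a plausible illustration of what a constant such as .LCPI57_0 would hold; the actual constant-pool contents are not part of this diff):

  // Sketch (not part of the patch): emulate TBL's out-of-range-gives-zero
  // semantics, which is what lets one tbl.8b replace the old bic.4h/and.8b
  // masking when zero-extending <4 x i8> to <4 x i16>.
  #include <array>
  #include <cassert>
  #include <cstdint>

  static std::array<uint8_t, 8> tbl8(const std::array<uint8_t, 16> &Tbl,
                                     const std::array<uint8_t, 8> &Mask) {
    std::array<uint8_t, 8> Out{};
    for (int i = 0; i < 8; ++i)
      Out[i] = Mask[i] < 16 ? Tbl[Mask[i]] : 0; // out-of-range index -> 0
    return Out;
  }

  int main() {
    // <4 x i8> widened into halfword lanes: data in the low byte of each
    // lane, garbage (0xAA here) in the high byte; the "mov.d v0[1], v0[0]"
    // above duplicates the 8 live bytes into both halves of the table.
    std::array<uint8_t, 16> V = {1, 0xAA, 2, 0xAA, 3, 0xAA, 4, 0xAA,
                                 1, 0xAA, 2, 0xAA, 3, 0xAA, 4, 0xAA};
    // Keep each low byte; zero each high byte via out-of-range 0xFF.
    std::array<uint8_t, 8> M = {0, 0xFF, 2, 0xFF, 4, 0xFF, 6, 0xFF};
    assert((tbl8(V, M) == std::array<uint8_t, 8>{1, 0, 2, 0, 3, 0, 4, 0}));
    return 0;
  }
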
diff --git a/llvm/test/CodeGen/AArch64/arm64-zip.ll b/llvm/test/CodeGen/AArch64/arm64-zip.ll
index c6e3c3540f6e91..b0631383d53d25 100644
--- a/llvm/test/CodeGen/AArch64/arm64-zip.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-zip.ll
@@ -250,11 +250,16 @@ define <8 x i16> @combine_v8i16_undef(<4 x i16> %0, <4 x i16> %1) {
 define <16 x i8> @combine_v8i16_8first(<8 x i8> %0, <8 x i8> %1) {
 ; CHECK-LABEL: combine_v8i16_8first:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1_q2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    adrp x8, .LCPI17_0
-; CHECK-NEXT:    fmov d2, d0
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI17_0]
-; CHECK-NEXT:    tbl.16b v0, { v1, v2 }, v3
+; CHECK-NEXT:    adrp x9, .LCPI17_1
+; CHECK-NEXT:    mov.d v1[1], v0[0]
+; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI17_0]
+; CHECK-NEXT:    ldr d2, [x9, :lo12:.LCPI17_1]
+; CHECK-NEXT:    tbl.8b v3, { v1 }, v0
+; CHECK-NEXT:    tbl.8b v0, { v1 }, v2
+; CHECK-NEXT:    mov.d v0[1], v3[0]
 ; CHECK-NEXT:    ret
   %3 = shufflevector <8 x i8> %1, <8 x i8> %0, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 7>
   ret <16 x i8> %3
@@ -265,11 +270,16 @@ define <16 x i8> @combine_v8i16_8first(<8 x i8> %0, <8 x i8> %1) {
 define <16 x i8> @combine_v8i16_8firstundef(<8 x i8> %0, <8 x i8> %1) {
 ; CHECK-LABEL: combine_v8i16_8firstundef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1_q2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    adrp x8, .LCPI18_0
-; CHECK-NEXT:    fmov d2, d0
-; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI18_0]
-; CHECK-NEXT:    tbl.16b v0, { v1, v2 }, v3
+; CHECK-NEXT:    adrp x9, .LCPI18_1
+; CHECK-NEXT:    mov.d v1[1], v0[0]
+; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI18_0]
+; CHECK-NEXT:    ldr d2, [x9, :lo12:.LCPI18_1]
+; CHECK-NEXT:    tbl.8b v3, { v1 }, v0
+; CHECK-NEXT:    tbl.8b v0, { v1 }, v2
+; CHECK-NEXT:    mov.d v0[1], v3[0]
 ; CHECK-NEXT:    ret
   %3 = shufflevector <8 x i8> %1, <8 x i8> %0, <16 x i32> <i32 8, i32 0, i32 9, i32 1, i32 10, i32 2, i32 11, i32 3, i32 12, i32 4, i32 13, i32 5, i32 14, i32 6, i32 15, i32 undef>
   ret <16 x i8> %3
diff --git a/llvm/test/CodeGen/AArch64/build-vector-extract.ll b/llvm/test/CodeGen/AArch64/build-vector-extract.ll
index 36b1b2cdcb4320..ce31d1afa45cec 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-extract.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-extract.ll
@@ -416,8 +416,9 @@ define <2 x i64> @extract3_i16_zext_insert1_i64_zero(<8 x i16> %x) {
 define <2 x i64> @extract0_i8_zext_insert0_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract0_i8_zext_insert0_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[0]
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    mov v1.b[0], v0.b[0]
+; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 0
   %z = zext i8 %e to i64
@@ -442,8 +443,9 @@ define <2 x i64> @extract0_i8_zext_insert0_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract1_i8_zext_insert0_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract1_i8_zext_insert0_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[1]
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    adrp x8, .LCPI34_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI34_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 1
   %z = zext i8 %e to i64
@@ -468,8 +470,9 @@ define <2 x i64> @extract1_i8_zext_insert0_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract2_i8_zext_insert0_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract2_i8_zext_insert0_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[2]
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    adrp x8, .LCPI36_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI36_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 2
   %z = zext i8 %e to i64
@@ -494,8 +497,9 @@ define <2 x i64> @extract2_i8_zext_insert0_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract3_i8_zext_insert0_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract3_i8_zext_insert0_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[3]
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    adrp x8, .LCPI38_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI38_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 3
   %z = zext i8 %e to i64
@@ -520,8 +524,9 @@ define <2 x i64> @extract3_i8_zext_insert0_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract0_i8_zext_insert1_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract0_i8_zext_insert1_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[0]
-; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    adrp x8, .LCPI40_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI40_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 0
   %z = zext i8 %e to i64
@@ -546,8 +551,9 @@ define <2 x i64> @extract0_i8_zext_insert1_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract1_i8_zext_insert1_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract1_i8_zext_insert1_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[1]
-; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    adrp x8, .LCPI42_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI42_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 1
   %z = zext i8 %e to i64
@@ -572,8 +578,9 @@ define <2 x i64> @extract1_i8_zext_insert1_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract2_i8_zext_insert1_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract2_i8_zext_insert1_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[2]
-; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    adrp x8, .LCPI44_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI44_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 2
   %z = zext i8 %e to i64
@@ -598,8 +605,9 @@ define <2 x i64> @extract2_i8_zext_insert1_i64_zero(<16 x i8> %x) {
 define <2 x i64> @extract3_i8_zext_insert1_i64_undef(<16 x i8> %x) {
 ; CHECK-LABEL: extract3_i8_zext_insert1_i64_undef:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.b[3]
-; CHECK-NEXT:    dup v0.2d, x8
+; CHECK-NEXT:    adrp x8, .LCPI46_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI46_0]
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-NEXT:    ret
   %e = extractelement <16 x i8> %x, i32 3
   %z = zext i8 %e to i64
diff --git a/llvm/test/CodeGen/AArch64/extbinopload.ll b/llvm/test/CodeGen/AArch64/extbinopload.ll
index 849fc7aa00a8e7..1191f1a0d3831d 100644
--- a/llvm/test/CodeGen/AArch64/extbinopload.ll
+++ b/llvm/test/CodeGen/AArch64/extbinopload.ll
@@ -5,8 +5,15 @@ define <4 x i16> @normal_load_v4i8(ptr %p) {
 ; CHECK-LABEL: normal_load_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s0, s1, [x0]
-; CHECK-NEXT:    uaddl v0.8h, v0.8b, v1.8b
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
+; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
   %l1 = load <4 x i8>, ptr %p
   %q = getelementptr i8, ptr %p, i32 4
@@ -36,10 +43,16 @@ define <4 x i16> @load_v4i8(ptr %p) {
 ; CHECK-LABEL: load_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp s1, s0, [x0]
+; CHECK-NEXT:    adrp x8, .LCPI2_0
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI2_0]
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
 ; CHECK-NEXT:    shl v0.4h, v0.4h, #3
-; CHECK-NEXT:    uaddw v0.8h, v0.8h, v1.8b
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    add v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    ret
   %l1 = load <4 x i8>, ptr %p
   %q = getelementptr i8, ptr %p, i32 4
@@ -263,14 +276,19 @@ define <16 x i16> @load_v16i8(ptr %p) {
 define <2 x i16> @std_v2i8_v2i16(ptr %p) {
 ; CHECK-LABEL: std_v2i8_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldrb w8, [x0, #2]
-; CHECK-NEXT:    ldrb w9, [x0, #3]
-; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    ldrb w8, [x0]
-; CHECK-NEXT:    fmov s1, w8
-; CHECK-NEXT:    mov v0.s[1], w9
-; CHECK-NEXT:    ldrb w9, [x0, #1]
-; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    ld1 { v1.b }[0], [x0]
+; CHECK-NEXT:    ld1 { v0.b }[0], [x8]
+; CHECK-NEXT:    add x8, x0, #3
+; CHECK-NEXT:    ld1 { v0.b }[4], [x8]
+; CHECK-NEXT:    add x8, x0, #1
+; CHECK-NEXT:    ld1 { v1.b }[4], [x8]
+; CHECK-NEXT:    adrp x8, .LCPI12_0
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI12_0]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #3
 ; CHECK-NEXT:    add v0.2s, v1.2s, v0.2s
 ; CHECK-NEXT:    ret
@@ -646,35 +664,30 @@ define <16 x i32> @extrause_load(ptr %p, ptr %q, ptr %r, ptr %s, ptr %z) {
 ; CHECK-LABEL: extrause_load:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s1, [x0]
-; CHECK-NEXT:    add x8, x3, #8
+; CHECK-NEXT:    adrp x8, .LCPI21_0
+; CHECK-NEXT:    add x10, x3, #8
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI21_0]
 ; CHECK-NEXT:    add x11, x3, #12
+; CHECK-NEXT:    add x8, x1, #4
 ; CHECK-NEXT:    str s1, [x4]
+; CHECK-NEXT:    add x9, x1, #12
 ; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
 ; CHECK-NEXT:    ldp s0, s5, [x2]
-; CHECK-NEXT:    ushll v2.8h, v0.8b, #0
-; CHECK-NEXT:    umov w9, v2.h[0]
-; CHECK-NEXT:    umov w10, v2.h[1]
-; CHECK-NEXT:    mov v0.b[8], w9
-; CHECK-NEXT:    umov w9, v2.h[2]
-; CHECK-NEXT:    mov v0.b[9], w10
-; CHECK-NEXT:    umov w10, v2.h[3]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v2.16b
 ; CHECK-NEXT:    ldr s2, [x1]
 ; CHECK-NEXT:    ushll v2.8h, v2.8b, #0
-; CHECK-NEXT:    mov v0.b[10], w9
-; CHECK-NEXT:    add x9, x1, #4
-; CHECK-NEXT:    uzp1 v1.8b, v1.8b, v2.8b
-; CHECK-NEXT:    mov v0.b[11], w10
-; CHECK-NEXT:    add x10, x1, #12
 ; CHECK-NEXT:    ld1 { v0.s }[3], [x3], #4
 ; CHECK-NEXT:    ldr s4, [x0, #12]
 ; CHECK-NEXT:    ldp s3, s16, [x0, #4]
 ; CHECK-NEXT:    ld1 { v5.s }[1], [x3]
 ; CHECK-NEXT:    ldp s6, s7, [x2, #8]
-; CHECK-NEXT:    ld1 { v4.s }[1], [x10]
-; CHECK-NEXT:    ld1 { v3.s }[1], [x9]
-; CHECK-NEXT:    ld1 { v6.s }[1], [x8]
-; CHECK-NEXT:    ld1 { v7.s }[1], [x11]
+; CHECK-NEXT:    ld1 { v4.s }[1], [x9]
+; CHECK-NEXT:    uzp1 v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    ld1 { v3.s }[1], [x8]
 ; CHECK-NEXT:    add x8, x1, #8
+; CHECK-NEXT:    ld1 { v6.s }[1], [x10]
+; CHECK-NEXT:    ld1 { v7.s }[1], [x11]
 ; CHECK-NEXT:    ld1 { v16.s }[1], [x8]
 ; CHECK-NEXT:    uaddl v2.8h, v3.8b, v4.8b
 ; CHECK-NEXT:    ushll v3.8h, v6.8b, #0
@@ -1362,16 +1375,19 @@ define <4 x i32> @bitcast(ptr %p) {
 define <4 x i32> @atomic(ptr %p) {
 ; CHECK-LABEL: atomic:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldar w8, [x0]
-; CHECK-NEXT:    movi v0.2d, #0x0000ff000000ff
-; CHECK-NEXT:    ldr s1, [x0, #4]
-; CHECK-NEXT:    fmov s2, w8
-; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
-; CHECK-NEXT:    zip1 v2.8b, v2.8b, v0.8b
-; CHECK-NEXT:    ushll v1.4s, v1.4h, #3
-; CHECK-NEXT:    ushll v2.4s, v2.4h, #0
-; CHECK-NEXT:    and v0.16b, v2.16b, v0.16b
-; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ldar w9, [x0]
+; CHECK-NEXT:    ldr s0, [x0, #4]
+; CHECK-NEXT:    adrp x8, .LCPI31_0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI31_0]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    fmov s1, w9
+; CHECK-NEXT:    zip1 v1.8b, v1.8b, v0.8b
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v2.16b
+; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
+; CHECK-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
+; CHECK-NEXT:    shl v0.4s, v0.4s, #3
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    ret
   %l1b = load atomic float, ptr %p acquire, align 4
   %l1 = bitcast float %l1b to <4 x i8>
@@ -1392,10 +1408,16 @@ define <4 x i32> @volatile(ptr %p) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ldr s1, [x0, #4]
+; CHECK-NEXT:    adrp x8, .LCPI32_0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI32_0]
 ; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-NEXT:    ushll v1.4s, v1.4h, #3
-; CHECK-NEXT:    uaddw v0.4s, v1.4s, v0.4h
+; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v2.16b
+; CHECK-NEXT:    shl v1.4s, v1.4s, #3
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %l1b = load volatile float, ptr %p

>From 1e4a1de30e2be6dd0e5074a30cfe6bbf7dde687f Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Wed, 24 Jan 2024 18:30:05 -0500
Subject: [PATCH 4/6] Update ARMTargetTransformInfo.h

---
 llvm/lib/Target/ARM/ARMTargetTransformInfo.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
index e83d8b830a43cd..bb4b321b530091 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h
@@ -347,7 +347,6 @@ inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
   if (EltSz != 8 && EltSz != 16 && EltSz != 32)
     return false;
 
-  unsigned NumElts = VT.getVectorNumElements();
   unsigned BlockElts = M[0] + 1;
   // If the first shuffle index is UNDEF, be optimistic.
   if (M[0] < 0)
@@ -356,7 +355,7 @@ inline bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
   if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
     return false;
 
-  for (unsigned i = 0; i < NumElts; ++i) {
+  for (unsigned i = 0, e = M.size(); i < e; ++i) {
     if (M[i] < 0)
       continue; // ignore UNDEF indices
     if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
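
For reference, the predicate above as a self-contained sketch (same logic as the patched isVREVMask, lifted out of LLVM's ArrayRef/EVT types; the test driver and names are illustrative):

  #include <cassert>
  #include <vector>

  // True when mask M reverses elements within each block of BlockSize bits,
  // for EltSz-bit elements. Iterates over the mask itself (M.size()), as in
  // the patch, rather than the vector type's element count.
  static bool isVREVMaskSketch(const std::vector<int> &M, unsigned EltSz,
                               unsigned BlockSize) {
    if (EltSz != 8 && EltSz != 16 && EltSz != 32)
      return false;

    unsigned BlockElts = M[0] + 1;
    // If the first shuffle index is UNDEF, be optimistic.
    if (M[0] < 0)
      BlockElts = BlockSize / EltSz;

    if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
      return false;

    for (unsigned i = 0, e = M.size(); i < e; ++i) {
      if (M[i] < 0)
        continue; // ignore UNDEF indices
      if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
        return false;
    }
    return true;
  }

  int main() {
    // VREV64 on 8-bit elements: bytes reversed within each 64-bit block.
    assert(isVREVMaskSketch({7, 6, 5, 4, 3, 2, 1, 0}, /*EltSz=*/8,
                            /*BlockSize=*/64));
    // VREV32 on 16-bit elements: halfwords swapped within each 32-bit block.
    assert(isVREVMaskSketch({1, 0, 3, 2}, /*EltSz=*/16, /*BlockSize=*/32));
    // The identity mask is not a VREV.
    assert(!isVREVMaskSketch({0, 1, 2, 3, 4, 5, 6, 7}, 8, 64));
    return 0;
  }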

>From 43100b220e28bb7509326acd1df61ad7f0937db1 Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Wed, 24 Jan 2024 18:35:54 -0500
Subject: [PATCH 5/6] Update shuffle.ll

---
 llvm/test/Analysis/CostModel/ARM/shuffle.ll | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/llvm/test/Analysis/CostModel/ARM/shuffle.ll b/llvm/test/Analysis/CostModel/ARM/shuffle.ll
index fdb280de175b92..14f16252581aab 100644
--- a/llvm/test/Analysis/CostModel/ARM/shuffle.ll
+++ b/llvm/test/Analysis/CostModel/ARM/shuffle.ll
@@ -103,11 +103,11 @@ define void @reverse() {
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v2i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -254,11 +254,11 @@ define void @select() {
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v2i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f16 = shufflevector <2 x half> undef, <2 x half> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 0, i32 17, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v2f32 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %v8f32 = shufflevector <8 x float> undef, <8 x float> undef, <8 x i32> <i32 8, i32 1, i32 10, i32 11, i32 4, i32 5, i32 6, i32 15>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %v2f64 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
@@ -333,7 +333,7 @@ define void @vrev2() {
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v8i32 = shufflevector <8 x i32> undef, <8 x i32> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 128 for instruction: %v4i64 = shufflevector <4 x i64> undef, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4f16 = shufflevector <4 x half> undef, <4 x half> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v8f16 = shufflevector <8 x half> undef, <8 x half> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %v16f16 = shufflevector <16 x half> undef, <16 x half> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
 ; CHECK-MVE-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %v4f32 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
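
These CHECK-MVE lines are normally regenerated with utils/update_analyze_test_checks.py. To inspect the costs by hand, an invocation of the usual CostModel shape should work (a sketch: the exact -mtriple/-mattr behind the CHECK-MVE prefix are not visible in this hunk and are assumed here):

  opt < llvm/test/Analysis/CostModel/ARM/shuffle.ll -passes="print<cost-model>" \
      -disable-output 2>&1 -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp | \
    FileCheck llvm/test/Analysis/CostModel/ARM/shuffle.ll --check-prefix=CHECK-MVE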

>From 40a6fb3f55e100ebf06add89a451a0c8cd569f4d Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Wed, 24 Jan 2024 18:49:14 -0500
Subject: [PATCH 6/6] Test edits

---
 llvm/test/CodeGen/AArch64/addsub_ext.ll       |   8 +-
 llvm/test/CodeGen/AArch64/arg_promotion.ll    |  24 +-
 .../CodeGen/AArch64/arm64-assert-zext-sext.ll |   4 +-
 llvm/test/CodeGen/AArch64/arm64-atomic-128.ll |   8 +-
 .../CodeGen/AArch64/arm64-call-tailcalls.ll   |   1 -
 .../CodeGen/AArch64/arm64-fast-isel-icmp.ll   |   8 +-
 llvm/test/CodeGen/AArch64/arm64-fp128.ll      |   4 +-
 .../CodeGen/AArch64/arm64-memset-inline.ll    |  16 +-
 llvm/test/CodeGen/AArch64/arm64-movi.ll       | 140 +++++-----
 llvm/test/CodeGen/AArch64/arm64-popcnt.ll     |  14 +-
 .../arm64-register-offset-addressing.ll       |   4 -
 .../AArch64/arm64-reserve-call-saved-reg.ll   |   1 -
 llvm/test/CodeGen/AArch64/arm64-srl-and.ll    |   4 +-
 .../CodeGen/AArch64/arm64-subvector-extend.ll |   6 +-
 llvm/test/CodeGen/AArch64/arm64-vcvt_f.ll     |   2 +
 llvm/test/CodeGen/AArch64/arm64-vshift.ll     |   6 +-
 llvm/test/CodeGen/AArch64/branch-relax-asm.ll |   2 +-
 .../CodeGen/AArch64/build-vector-two-dup.ll   |  19 +-
 llvm/test/CodeGen/AArch64/combine-mul.ll      |   4 +-
 llvm/test/CodeGen/AArch64/const-isel.ll       |   2 +-
 .../AArch64/early-ifcvt-same-value.mir        |  99 +++----
 llvm/test/CodeGen/AArch64/fabs-combine.ll     |   2 +-
 llvm/test/CodeGen/AArch64/fp-const-fold.ll    |  20 +-
 .../CodeGen/AArch64/implicit-null-check.ll    |  34 +--
 llvm/test/CodeGen/AArch64/load-insert-zero.ll |  13 +-
 llvm/test/CodeGen/AArch64/loop-sink-limit.mir | 113 ++++----
 .../machine-outliner-safe-range-in-middle.mir |   3 -
 .../machine-outliner-unsafe-range-at-end.mir  |   5 -
 .../AArch64/machine-sink-kill-flags.ll        |   2 +-
 llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll |   2 +
 llvm/test/CodeGen/AArch64/memset-inline.ll    |  16 +-
 .../CodeGen/AArch64/merge-store-dependency.ll |   4 +-
 llvm/test/CodeGen/AArch64/neon-abd.ll         |  18 +-
 llvm/test/CodeGen/AArch64/pr51476.ll          |   2 +-
 llvm/test/CodeGen/AArch64/pr58431.ll          |   2 +-
 .../AArch64/ptrauth-intrinsic-blend.ll        |   4 +-
 llvm/test/CodeGen/AArch64/qmovn.ll            |   8 +-
 llvm/test/CodeGen/AArch64/reassocmls.ll       |   2 +-
 llvm/test/CodeGen/AArch64/settag-merge.ll     |  64 +++--
 llvm/test/CodeGen/AArch64/shift-mod.ll        |   4 +-
 llvm/test/CodeGen/AArch64/signbit-test.ll     |  26 +-
 .../AArch64/signed-truncation-check.ll        |   2 +-
 llvm/test/CodeGen/AArch64/srem-lkk.ll         |  24 +-
 .../CodeGen/AArch64/strict-fp-int-promote.ll  |  12 +-
 .../AArch64/sve-fixed-length-int-extends.ll   |   7 +-
 .../AArch64/sve-fixed-length-int-to-fp.ll     |  10 +-
 .../AArch64/sve-fixed-length-masked-gather.ll |  27 +-
 ...prefetches-vect-base-invalid-imm-offset.ll |  44 +--
 ...s-scatter-stores-vector-base-imm-offset.ll |  18 +-
 .../CodeGen/AArch64/sve-intrinsics-while.ll   |  36 +--
 llvm/test/CodeGen/AArch64/sve-ldnf1.mir       |   4 -
 llvm/test/CodeGen/AArch64/sve-ldstnt1.mir     |   4 -
 .../AArch64/sve-masked-gather-vec-plus-imm.ll |  10 +-
 llvm/test/CodeGen/AArch64/sve2-int-mul.ll     |   6 +-
 llvm/test/CodeGen/AArch64/tbl-loops.ll        |  96 ++++---
 .../CodeGen/AArch64/typepromotion-cost.ll     |   8 +-
 .../AArch64/vec3-loads-ext-trunc-stores.ll    | 259 ++++++++++++------
 llvm/test/CodeGen/AArch64/vec_uaddo.ll        |  13 +-
 .../AArch64/vecreduce-umax-legalization.ll    |  11 +-
 llvm/test/CodeGen/AArch64/vector-fcvt.ll      | 236 +++++++++-------
 .../AArch64/vector-popcnt-128-ult-ugt.ll      | 244 ++++++++---------
 llvm/test/CodeGen/AArch64/zext-to-tbl.ll      | 156 ++++++-----
 llvm/test/CodeGen/AArch64/zext.ll             | 105 +++++--
 63 files changed, 1164 insertions(+), 888 deletions(-)
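
Most of the hunks in this patch are mechanical: the CHECK lines were regenerated, which appends the hex rendering of each materialized immediate as a trailing assembly comment (for example, mov w0, #1 becomes mov w0, #1 // =0x1) and, in a few files, drops stale check prefixes and stray blank lines. The remaining hunks show the codegen effect of treating v8i8 and v16i8 shuffle masks as TBL-legal. As a minimal sketch of a mask that now qualifies (the function name is illustrative, not taken from the test suite):

  define <8 x i8> @tbl_any_mask(<8 x i8> %a, <8 x i8> %b) {
    ; An irregular two-source byte mask; no ZIP/UZP/TRN/EXT pattern matches it,
    ; but a v8i8 or v16i8 mask of this shape can still be handled by TBL.
    %s = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
    ret <8 x i8> %s
  }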

diff --git a/llvm/test/CodeGen/AArch64/addsub_ext.ll b/llvm/test/CodeGen/AArch64/addsub_ext.ll
index 04a98bd5088803..9058818df712eb 100644
--- a/llvm/test/CodeGen/AArch64/addsub_ext.ll
+++ b/llvm/test/CodeGen/AArch64/addsub_ext.ll
@@ -444,7 +444,7 @@ define i32 @cmp_s_i8i32(i8 %v, i32 %lhs) minsize {
 ; CHECK-NEXT:    cmp w1, w0, uxtb
 ; CHECK-NEXT:    b.ge .LBB40_2
 ; CHECK-NEXT:  // %bb.1: // %then
-; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB40_2: // %end
 ; CHECK-NEXT:    mov w0, w1
@@ -465,7 +465,7 @@ define i64 @cmp_s_i8i64(i8 %v, i64 %lhs) minsize {
 ; CHECK-NEXT:    cmp x1, w0, sxtb
 ; CHECK-NEXT:    b.ge .LBB41_2
 ; CHECK-NEXT:  // %bb.1: // %then
-; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB41_2: // %end
 ; CHECK-NEXT:    mov x0, x1
@@ -485,7 +485,7 @@ define i32 @cmp_s_i16i32(i16 %v, i32 %lhs) minsize {
 ; CHECK-NEXT:    cmp w1, w0, uxth
 ; CHECK-NEXT:    b.ge .LBB42_2
 ; CHECK-NEXT:  // %bb.1: // %then
-; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB42_2: // %end
 ; CHECK-NEXT:    mov w0, w1
@@ -506,7 +506,7 @@ define i64 @cmp_s_i16i64(i16 %v, i64 %lhs) minsize {
 ; CHECK-NEXT:    cmp x1, w0, sxth
 ; CHECK-NEXT:    b.ge .LBB43_2
 ; CHECK-NEXT:  // %bb.1: // %then
-; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB43_2: // %end
 ; CHECK-NEXT:    mov x0, x1
diff --git a/llvm/test/CodeGen/AArch64/arg_promotion.ll b/llvm/test/CodeGen/AArch64/arg_promotion.ll
index cc37d230c6cbe4..52c8bf0996dd24 100644
--- a/llvm/test/CodeGen/AArch64/arg_promotion.ll
+++ b/llvm/test/CodeGen/AArch64/arg_promotion.ll
@@ -8,7 +8,7 @@ target triple = "aarch64-unknown-linux-gnu"
 ; than 128 bits.
 
 define dso_local void @caller_8xi32(ptr noalias %src, ptr noalias %dst) #0 {
-; CHECK-LABEL: define dso_local void @caller_8xi32(
+; CHECK-LABEL: @caller_8xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call fastcc void @callee_8xi32(ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]])
 ; CHECK-NEXT:    ret void
@@ -19,7 +19,7 @@ entry:
 }
 
 define internal fastcc void @callee_8xi32(ptr noalias %src, ptr noalias %dst) #0 {
-; CHECK-LABEL: define internal fastcc void @callee_8xi32(
+; CHECK-LABEL: @callee_8xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr [[SRC:%.*]], align 16
 ; CHECK-NEXT:    store <8 x i32> [[TMP0]], ptr [[DST:%.*]], align 16
@@ -35,7 +35,7 @@ entry:
 ; less.
 
 define dso_local void @caller_4xi32(ptr noalias %src, ptr noalias %dst) #1 {
-; CHECK-LABEL: define dso_local void @caller_4xi32(
+; CHECK-LABEL: @caller_4xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SRC_VAL:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 16
 ; CHECK-NEXT:    call fastcc void @callee_4xi32(<4 x i32> [[SRC_VAL]], ptr noalias [[DST:%.*]])
@@ -47,7 +47,7 @@ entry:
 }
 
 define internal fastcc void @callee_4xi32(ptr noalias %src, ptr noalias %dst) #1 {
-; CHECK-LABEL: define internal fastcc void @callee_4xi32(
+; CHECK-LABEL: @callee_4xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    store <4 x i32> [[SRC_0_VAL:%.*]], ptr [[DST:%.*]], align 16
 ; CHECK-NEXT:    ret void
@@ -62,7 +62,7 @@ entry:
 ; greater than 128 bits.
 
 define dso_local void @caller_i256(ptr noalias %src, ptr noalias %dst) #0 {
-; CHECK-LABEL: define dso_local void @caller_i256(
+; CHECK-LABEL: @caller_i256(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SRC_VAL:%.*]] = load i256, ptr [[SRC:%.*]], align 16
 ; CHECK-NEXT:    call fastcc void @callee_i256(i256 [[SRC_VAL]], ptr noalias [[DST:%.*]])
@@ -74,7 +74,7 @@ entry:
 }
 
 define internal fastcc void @callee_i256(ptr noalias %src, ptr noalias %dst) #0 {
-; CHECK-LABEL: define internal fastcc void @callee_i256(
+; CHECK-LABEL: @callee_i256(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    store i256 [[SRC_0_VAL:%.*]], ptr [[DST:%.*]], align 16
 ; CHECK-NEXT:    ret void
@@ -88,7 +88,7 @@ entry:
 ; A scalable vector pointer argument is not a target of ArgumentPromotionPass.
 
 define dso_local void @caller_nx4xi32(ptr noalias %src, ptr noalias %dst) #2 {
-; CHECK-LABEL: define dso_local void @caller_nx4xi32(
+; CHECK-LABEL: @caller_nx4xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call fastcc void @callee_nx4xi32(ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]])
 ; CHECK-NEXT:    ret void
@@ -99,7 +99,7 @@ entry:
 }
 
 define internal fastcc void @callee_nx4xi32(ptr noalias %src, ptr noalias %dst) #2 {
-; CHECK-LABEL: define internal fastcc void @callee_nx4xi32(
+; CHECK-LABEL: @callee_nx4xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 4 x i32>, ptr [[SRC:%.*]], align 16
 ; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP0]], ptr [[DST:%.*]], align 16
@@ -117,7 +117,7 @@ entry:
 %struct_8xi32 = type { <8 x i32>, <8 x i32> }
 
 define dso_local void @caller_struct8xi32(ptr noalias %src, ptr noalias %dst) #0 {
-; CHECK-LABEL: define dso_local void @caller_struct8xi32(
+; CHECK-LABEL: @caller_struct8xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    call fastcc void @callee_struct8xi32(ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]])
 ; CHECK-NEXT:    ret void
@@ -128,7 +128,7 @@ entry:
 }
 
 define internal fastcc void @callee_struct8xi32(ptr noalias %src, ptr noalias %dst) #0 {
-; CHECK-LABEL: define internal fastcc void @callee_struct8xi32(
+; CHECK-LABEL: @callee_struct8xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i32>, ptr [[SRC:%.*]], align 16
 ; CHECK-NEXT:    store <8 x i32> [[TMP0]], ptr [[DST:%.*]], align 16
@@ -154,7 +154,7 @@ entry:
 %struct_4xi32 = type { <4 x i32>, <4 x i32> }
 
 define dso_local void @caller_struct4xi32(ptr noalias %src, ptr noalias %dst) #1 {
-; CHECK-LABEL: define dso_local void @caller_struct4xi32(
+; CHECK-LABEL: @caller_struct4xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SRC_VAL:%.*]] = load <4 x i32>, ptr [[SRC:%.*]], align 16
 ; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i8, ptr [[SRC]], i64 16
@@ -168,7 +168,7 @@ entry:
 }
 
 define internal fastcc void @callee_struct4xi32(ptr noalias %src, ptr noalias %dst) #1 {
-; CHECK-LABEL: define internal fastcc void @callee_struct4xi32(
+; CHECK-LABEL: @callee_struct4xi32(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    store <4 x i32> [[SRC_0_VAL:%.*]], ptr [[DST:%.*]], align 16
 ; CHECK-NEXT:    [[DST2:%.*]] = getelementptr inbounds [[STRUCT_4XI32:%.*]], ptr [[DST]], i64 0, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll b/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
index 9cbbabed349364..da529eab3ca2a8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-assert-zext-sext.ll
@@ -11,7 +11,7 @@ define i32 @assertzext(i32 %n, i1 %a, ptr %b) local_unnamed_addr {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov w8, #33066
+; CHECK-NEXT:    mov w8, #33066 // =0x812a
 ; CHECK-NEXT:    tst w1, #0x1
 ; CHECK-NEXT:    movk w8, #28567, lsl #16
 ; CHECK-NEXT:    csel w19, wzr, w8, ne
@@ -53,7 +53,7 @@ define i32 @assertsext(i32 %n, i8 %a) local_unnamed_addr {
 ; CHECK-NEXT:    mov x0, xzr
 ; CHECK-NEXT:    b .LBB1_3
 ; CHECK-NEXT:  .LBB1_2: // %if.then
-; CHECK-NEXT:    mov x9, #24575
+; CHECK-NEXT:    mov x9, #24575 // =0x5fff
 ; CHECK-NEXT:    sxtb w8, w1
 ; CHECK-NEXT:    movk x9, #15873, lsl #16
 ; CHECK-NEXT:    movk x9, #474, lsl #32
diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
index 37c61d0a4a0fb6..de93a19d079d6e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -714,8 +714,8 @@ define i128 @atomic_load_seq_cst(ptr %p) {
 ;
 ; LSE-LABEL: atomic_load_seq_cst:
 ; LSE:       // %bb.0:
-; LSE-NEXT:    mov x2, #0
-; LSE-NEXT:    mov x3, #0
+; LSE-NEXT:    mov x2, #0 // =0x0
+; LSE-NEXT:    mov x3, #0 // =0x0
 ; LSE-NEXT:    caspal x2, x3, x2, x3, [x0]
 ; LSE-NEXT:    mov x0, x2
 ; LSE-NEXT:    mov x1, x3
@@ -747,8 +747,8 @@ define i128 @atomic_load_relaxed(i64, i64, ptr %p) {
 ;
 ; LSE-LABEL: atomic_load_relaxed:
 ; LSE:       // %bb.0:
-; LSE-NEXT:    mov x0, #0
-; LSE-NEXT:    mov x1, #0
+; LSE-NEXT:    mov x0, #0 // =0x0
+; LSE-NEXT:    mov x1, #0 // =0x0
 ; LSE-NEXT:    casp x0, x1, x0, x1, [x2]
 ; LSE-NEXT:    ret
     %r = load atomic i128, ptr %p monotonic, align 16
diff --git a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
index fdb96e1d8a7509..3ae5af662e69b9 100644
--- a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll
@@ -46,7 +46,6 @@ define float @t5(float %a) nounwind readonly ssp {
 }
 
 define void @t7() nounwind {
-
   br i1 undef, label %bb, label %bb1.lr.ph
 
 bb1.lr.ph:                                        ; preds = %entry
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
index ac08825c237629..c7863dd0517f9c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-icmp.ll
@@ -261,15 +261,11 @@ entry:
 
 define i32 @icmp_i8_shift_and_cmp(i8 %a, i8 %b) {
 entry:
-; CHECK-LABEL: icmp_i8_shift_and_cmp:
-; CHECK:       ubfiz [[REG1:w[0-9]+]], w0, #3, #5
-; CHECK-NEXT:  sxtb [[REG0:w[0-9]+]], w1
-; CHECK-NEXT:  cmp [[REG0]], [[REG1]], sxtb
-; CHECK-NEXT:  cset [[REG:w[0-9]+]], eq
-; CHECK-NEXT:  and w0, [[REG]], #0x1
   %op = shl i8 %a, 3
   %cmp = icmp eq i8 %b, %op
   %conv = zext i1 %cmp to i32
   ret i32 %conv
 }
 
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 61e64e219355fc..4d2b88d874a30f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -291,14 +291,14 @@ define dso_local i32 @test_br_cc() uwtable {
 ; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    b.ge .LBB11_2
 ; CHECK-NEXT:  // %bb.1: // %iftrue
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    .cfi_restore w30
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB11_2: // %iffalse
 ; CHECK-NEXT:    .cfi_restore_state
-; CHECK-NEXT:    mov w0, #29
+; CHECK-NEXT:    mov w0, #29 // =0x1d
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    .cfi_def_cfa_offset 0
 ; CHECK-NEXT:    .cfi_restore w30
diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
index 52b09885ebb1c5..ccad2a2a52c465 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -313,7 +313,7 @@ define void @memset_4_stack() {
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov w8, #-1431655766
+; CHECK-NEXT:    mov w8, #-1431655766 // =0xaaaaaaaa
 ; CHECK-NEXT:    add x0, sp, #12
 ; CHECK-NEXT:    str w8, [sp, #12]
 ; CHECK-NEXT:    bl something
@@ -330,7 +330,7 @@ define void @memset_8_stack() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    stp x30, x8, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    add x0, sp, #8
 ; CHECK-NEXT:    bl something
@@ -349,7 +349,7 @@ define void @memset_12_stack() {
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    str x8, [sp]
 ; CHECK-NEXT:    str w8, [sp, #8]
@@ -369,7 +369,7 @@ define void @memset_16_stack() {
 ; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    stp x8, x30, [sp, #8] // 8-byte Folded Spill
 ; CHECK-NEXT:    str x8, [sp]
@@ -390,7 +390,7 @@ define void @memset_20_stack() {
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    add x0, sp, #8
 ; CHECK-NEXT:    stp x8, x8, [sp, #8]
 ; CHECK-NEXT:    str w8, [sp, #24]
@@ -411,7 +411,7 @@ define void @memset_26_stack() {
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    stp x8, x8, [sp, #8]
 ; CHECK-NEXT:    str x8, [sp]
@@ -454,7 +454,7 @@ define void @memset_40_stack() {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    str x8, [sp, #32]
 ; CHECK-NEXT:    stp q0, q0, [sp]
@@ -497,7 +497,7 @@ define void @memset_72_stack() {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 96
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    movi v0.16b, #170
-; CHECK-NEXT:    mov x8, #-6148914691236517206
+; CHECK-NEXT:    mov x8, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    mov x0, sp
 ; CHECK-NEXT:    str x8, [sp, #64]
 ; CHECK-NEXT:    stp q0, q0, [sp]
diff --git a/llvm/test/CodeGen/AArch64/arm64-movi.ll b/llvm/test/CodeGen/AArch64/arm64-movi.ll
index 8ec98b74429718..463a9fffd449b6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-movi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-movi.ll
@@ -9,7 +9,7 @@
 define i64 @test64_32_rot0() nounwind {
 ; CHECK-LABEL: test64_32_rot0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #30064771079
+; CHECK-NEXT:    mov x0, #30064771079 // =0x700000007
 ; CHECK-NEXT:    ret
   ret i64 30064771079
 }
@@ -18,7 +18,7 @@ define i64 @test64_32_rot0() nounwind {
 define i64 @test64_32_rot2() nounwind {
 ; CHECK-LABEL: test64_32_rot2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-4611686002321260541
+; CHECK-NEXT:    mov x0, #-4611686002321260541 // =0xc0000003c0000003
 ; CHECK-NEXT:    ret
   ret i64 13835058071388291075
 }
@@ -27,7 +27,7 @@ define i64 @test64_32_rot2() nounwind {
 define i64 @test64_4_rot3() nounwind {
 ; CHECK-LABEL: test64_4_rot3:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-1229782938247303442
+; CHECK-NEXT:    mov x0, #-1229782938247303442 // =0xeeeeeeeeeeeeeeee
 ; CHECK-NEXT:    ret
   ret i64 17216961135462248174
 }
@@ -36,7 +36,7 @@ define i64 @test64_4_rot3() nounwind {
 define i64 @test64_64_manybits() nounwind {
 ; CHECK-LABEL: test64_64_manybits:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #4503599627304960
+; CHECK-NEXT:    mov x0, #4503599627304960 // =0xfffffffff0000
 ; CHECK-NEXT:    ret
   ret i64 4503599627304960
 }
@@ -45,7 +45,7 @@ define i64 @test64_64_manybits() nounwind {
 define i64 @test64_64_onebit() nounwind {
 ; CHECK-LABEL: test64_64_onebit:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #274877906944
+; CHECK-NEXT:    mov x0, #274877906944 // =0x4000000000
 ; CHECK-NEXT:    ret
   ret i64 274877906944
 }
@@ -54,7 +54,7 @@ define i64 @test64_64_onebit() nounwind {
 define i32 @test32_32_rot16() nounwind {
 ; CHECK-LABEL: test32_32_rot16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #16711680
+; CHECK-NEXT:    mov w0, #16711680 // =0xff0000
 ; CHECK-NEXT:    ret
   ret i32 16711680
 }
@@ -63,7 +63,7 @@ define i32 @test32_32_rot16() nounwind {
 define i32 @test32_2_rot1() nounwind {
 ; CHECK-LABEL: test32_2_rot1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #-1431655766
+; CHECK-NEXT:    mov w0, #-1431655766 // =0xaaaaaaaa
 ; CHECK-NEXT:    ret
   ret i32 2863311530
 }
@@ -75,7 +75,7 @@ define i32 @test32_2_rot1() nounwind {
 define i32 @movz() nounwind {
 ; CHECK-LABEL: movz:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #5
+; CHECK-NEXT:    mov w0, #5 // =0x5
 ; CHECK-NEXT:    ret
   ret i32 5
 }
@@ -83,7 +83,7 @@ define i32 @movz() nounwind {
 define i64 @movz_3movk() nounwind {
 ; CHECK-LABEL: movz_3movk:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #22136
+; CHECK-NEXT:    mov x0, #22136 // =0x5678
 ; CHECK-NEXT:    movk x0, #43981, lsl #16
 ; CHECK-NEXT:    movk x0, #4660, lsl #32
 ; CHECK-NEXT:    movk x0, #5, lsl #48
@@ -94,7 +94,7 @@ define i64 @movz_3movk() nounwind {
 define i64 @movz_movk_skip1() nounwind {
 ; CHECK-LABEL: movz_movk_skip1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #1126236160
+; CHECK-NEXT:    mov x0, #1126236160 // =0x43210000
 ; CHECK-NEXT:    movk x0, #5, lsl #32
 ; CHECK-NEXT:    ret
   ret i64 22601072640
@@ -103,7 +103,7 @@ define i64 @movz_movk_skip1() nounwind {
 define i64 @movz_skip1_movk() nounwind {
 ; CHECK-LABEL: movz_skip1_movk:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #4660
+; CHECK-NEXT:    mov x0, #4660 // =0x1234
 ; CHECK-NEXT:    movk x0, #34388, lsl #32
 ; CHECK-NEXT:    ret
   ret i64 147695335379508
@@ -112,7 +112,7 @@ define i64 @movz_skip1_movk() nounwind {
 define i64 @orr_lsl_pattern() nounwind {
 ; CHECK-LABEL: orr_lsl_pattern:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-6148914691236517206
+; CHECK-NEXT:    mov x0, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    and x0, x0, #0x1fffffffe0
 ; CHECK-NEXT:    ret
   ret i64 45812984480
@@ -122,7 +122,7 @@ define i64 @orr_lsl_pattern() nounwind {
 define i64 @mvn_lsl_pattern() nounwind {
 ; CHECK-LABEL: mvn_lsl_pattern:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #16777216
+; CHECK-NEXT:    mov x0, #16777216 // =0x1000000
 ; CHECK-NEXT:    movk x0, #65471, lsl #32
 ; CHECK-NEXT:    movk x0, #65535, lsl #48
 ; CHECK-NEXT:    ret
@@ -133,7 +133,7 @@ define i64 @mvn_lsl_pattern() nounwind {
 define i64 @mvn32_pattern_2() nounwind {
 ; CHECK-LABEL: mvn32_pattern_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #65473
+; CHECK-NEXT:    mov x0, #65473 // =0xffc1
 ; CHECK-NEXT:    movk x0, #65535, lsl #16
 ; CHECK-NEXT:    movk x0, #17, lsl #32
 ; CHECK-NEXT:    ret
@@ -147,7 +147,7 @@ define i64 @mvn32_pattern_2() nounwind {
 define i64 @movn() nounwind {
 ; CHECK-LABEL: movn:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-42
+; CHECK-NEXT:    mov x0, #-42 // =0xffffffffffffffd6
 ; CHECK-NEXT:    ret
   ret i64 -42
 }
@@ -155,7 +155,7 @@ define i64 @movn() nounwind {
 define i64 @movn_skip1_movk() nounwind {
 ; CHECK-LABEL: movn_skip1_movk:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-60876
+; CHECK-NEXT:    mov x0, #-60876 // =0xffffffffffff1234
 ; CHECK-NEXT:    movk x0, #65494, lsl #32
 ; CHECK-NEXT:    ret
   ret i64 -176093720012
@@ -169,7 +169,7 @@ define i64 @movn_skip1_movk() nounwind {
 define i64 @orr_movk1() nounwind {
 ; CHECK-LABEL: orr_movk1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #72056494543077120
+; CHECK-NEXT:    mov x0, #72056494543077120 // =0xffff0000ffff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #16
 ; CHECK-NEXT:    ret
   ret i64 72056498262245120
@@ -178,7 +178,7 @@ define i64 @orr_movk1() nounwind {
 define i64 @orr_movk2() nounwind {
 ; CHECK-LABEL: orr_movk2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #72056494543077120
+; CHECK-NEXT:    mov x0, #72056494543077120 // =0xffff0000ffff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #48
 ; CHECK-NEXT:    ret
   ret i64 -2400982650836746496
@@ -187,7 +187,7 @@ define i64 @orr_movk2() nounwind {
 define i64 @orr_movk3() nounwind {
 ; CHECK-LABEL: orr_movk3:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #72056494543077120
+; CHECK-NEXT:    mov x0, #72056494543077120 // =0xffff0000ffff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #32
 ; CHECK-NEXT:    ret
   ret i64 72020953688702720
@@ -196,7 +196,7 @@ define i64 @orr_movk3() nounwind {
 define i64 @orr_movk4() nounwind {
 ; CHECK-LABEL: orr_movk4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #72056494543077120
+; CHECK-NEXT:    mov x0, #72056494543077120 // =0xffff0000ffff00
 ; CHECK-NEXT:    movk x0, #57005
 ; CHECK-NEXT:    ret
   ret i64 72056494543068845
@@ -206,7 +206,7 @@ define i64 @orr_movk4() nounwind {
 define i64 @orr_movk5() nounwind {
 ; CHECK-LABEL: orr_movk5:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-71777214294589696
+; CHECK-NEXT:    mov x0, #-71777214294589696 // =0xff00ff00ff00ff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #16
 ; CHECK-NEXT:    ret
   ret i64 -71777214836900096
@@ -215,7 +215,7 @@ define i64 @orr_movk5() nounwind {
 define i64 @orr_movk6() nounwind {
 ; CHECK-LABEL: orr_movk6:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-71777214294589696
+; CHECK-NEXT:    mov x0, #-71777214294589696 // =0xff00ff00ff00ff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #16
 ; CHECK-NEXT:    movk x0, #57005, lsl #48
 ; CHECK-NEXT:    ret
@@ -225,7 +225,7 @@ define i64 @orr_movk6() nounwind {
 define i64 @orr_movk7() nounwind {
 ; CHECK-LABEL: orr_movk7:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-71777214294589696
+; CHECK-NEXT:    mov x0, #-71777214294589696 // =0xff00ff00ff00ff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #48
 ; CHECK-NEXT:    ret
   ret i64 -2400982646575268096
@@ -234,7 +234,7 @@ define i64 @orr_movk7() nounwind {
 define i64 @orr_movk8() nounwind {
 ; CHECK-LABEL: orr_movk8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-71777214294589696
+; CHECK-NEXT:    mov x0, #-71777214294589696 // =0xff00ff00ff00ff00
 ; CHECK-NEXT:    movk x0, #57005
 ; CHECK-NEXT:    movk x0, #57005, lsl #48
 ; CHECK-NEXT:    ret
@@ -245,7 +245,7 @@ define i64 @orr_movk8() nounwind {
 define i64 @orr_movk9() nounwind {
 ; CHECK-LABEL: orr_movk9:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #1152921435887370240
+; CHECK-NEXT:    mov x0, #1152921435887370240 // =0xffffff000000000
 ; CHECK-NEXT:    movk x0, #65280
 ; CHECK-NEXT:    movk x0, #57005, lsl #16
 ; CHECK-NEXT:    ret
@@ -255,7 +255,7 @@ define i64 @orr_movk9() nounwind {
 define i64 @orr_movk10() nounwind {
 ; CHECK-LABEL: orr_movk10:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #1152921504606846720
+; CHECK-NEXT:    mov x0, #1152921504606846720 // =0xfffffffffffff00
 ; CHECK-NEXT:    movk x0, #57005, lsl #16
 ; CHECK-NEXT:    ret
   ret i64 1152921504047824640
@@ -264,7 +264,7 @@ define i64 @orr_movk10() nounwind {
 define i64 @orr_movk11() nounwind {
 ; CHECK-LABEL: orr_movk11:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-65281
+; CHECK-NEXT:    mov x0, #-65281 // =0xffffffffffff00ff
 ; CHECK-NEXT:    movk x0, #57005, lsl #16
 ; CHECK-NEXT:    movk x0, #65520, lsl #48
 ; CHECK-NEXT:    ret
@@ -274,7 +274,7 @@ define i64 @orr_movk11() nounwind {
 define i64 @orr_movk12() nounwind {
 ; CHECK-LABEL: orr_movk12:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-4503599627370241
+; CHECK-NEXT:    mov x0, #-4503599627370241 // =0xfff00000000000ff
 ; CHECK-NEXT:    movk x0, #57005, lsl #32
 ; CHECK-NEXT:    ret
   ret i64 -4258765016661761
@@ -283,7 +283,7 @@ define i64 @orr_movk12() nounwind {
 define i64 @orr_movk13() nounwind {
 ; CHECK-LABEL: orr_movk13:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #17592169267200
+; CHECK-NEXT:    mov x0, #17592169267200 // =0xfffff000000
 ; CHECK-NEXT:    movk x0, #57005
 ; CHECK-NEXT:    movk x0, #57005, lsl #48
 ; CHECK-NEXT:    ret
@@ -294,7 +294,7 @@ define i64 @orr_movk13() nounwind {
 define i64 @g() nounwind {
 ; CHECK-LABEL: g:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x0, #2
+; CHECK-NEXT:    mov x0, #2 // =0x2
 ; CHECK-NEXT:    movk x0, #65535, lsl #48
 ; CHECK-NEXT:    ret
 entry:
@@ -304,7 +304,7 @@ entry:
 define i64 @orr_movk14() nounwind {
 ; CHECK-LABEL: orr_movk14:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-549755813888
+; CHECK-NEXT:    mov x0, #-549755813888 // =0xffffff8000000000
 ; CHECK-NEXT:    movk x0, #2048, lsl #16
 ; CHECK-NEXT:    ret
   ret i64 -549621596160
@@ -313,7 +313,7 @@ define i64 @orr_movk14() nounwind {
 define i64 @orr_movk15() nounwind {
 ; CHECK-LABEL: orr_movk15:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #549755813887
+; CHECK-NEXT:    mov x0, #549755813887 // =0x7fffffffff
 ; CHECK-NEXT:    movk x0, #63487, lsl #16
 ; CHECK-NEXT:    ret
   ret i64 549621596159
@@ -322,7 +322,7 @@ define i64 @orr_movk15() nounwind {
 define i64 @orr_movk16() nounwind {
 ; CHECK-LABEL: orr_movk16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #2147483646
+; CHECK-NEXT:    mov x0, #2147483646 // =0x7ffffffe
 ; CHECK-NEXT:    orr x0, x0, #0x7fffe0007fffe0
 ; CHECK-NEXT:    ret
   ret i64 36028661727494142
@@ -331,7 +331,7 @@ define i64 @orr_movk16() nounwind {
 define i64 @orr_movk17() nounwind {
 ; CHECK-LABEL: orr_movk17:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-1099511627776
+; CHECK-NEXT:    mov x0, #-1099511627776 // =0xffffff0000000000
 ; CHECK-NEXT:    movk x0, #65280, lsl #16
 ; CHECK-NEXT:    ret
   ret i64 -1095233437696
@@ -340,7 +340,7 @@ define i64 @orr_movk17() nounwind {
 define i64 @orr_movk18() nounwind {
 ; CHECK-LABEL: orr_movk18:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #137438887936
+; CHECK-NEXT:    mov x0, #137438887936 // =0x1fffff0000
 ; CHECK-NEXT:    movk x0, #65473
 ; CHECK-NEXT:    ret
   ret i64 137438953409
@@ -349,7 +349,7 @@ define i64 @orr_movk18() nounwind {
 define i64 @orr_and() nounwind {
 ; CHECK-LABEL: orr_and:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #72340172838076673
+; CHECK-NEXT:    mov x0, #72340172838076673 // =0x101010101010101
 ; CHECK-NEXT:    and x0, x0, #0xffffffffff00
 ; CHECK-NEXT:    ret
   ret i64 1103823438080
@@ -359,7 +359,7 @@ define i64 @orr_and() nounwind {
 define i64 @movn_movk() nounwind {
 ; CHECK-LABEL: movn_movk:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #43690
+; CHECK-NEXT:    mov x0, #43690 // =0xaaaa
 ; CHECK-NEXT:    movk x0, #43690, lsl #16
 ; CHECK-NEXT:    movk x0, #9, lsl #32
 ; CHECK-NEXT:    ret
@@ -370,7 +370,7 @@ define i64 @movn_movk() nounwind {
 define i64 @movn_orr() nounwind {
 ; CHECK-LABEL: movn_orr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-51847
+; CHECK-NEXT:    mov x0, #-51847 // =0xffffffffffff3579
 ; CHECK-NEXT:    movk x0, #4369, lsl #32
 ; CHECK-NEXT:    movk x0, #4369, lsl #48
 ; CHECK-NEXT:    ret
@@ -381,7 +381,7 @@ define i64 @movn_orr() nounwind {
 define i64 @movn_eor() nounwind {
 ; CHECK-LABEL: movn_eor:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #3689348814741910323
+; CHECK-NEXT:    mov x0, #3689348814741910323 // =0x3333333333333333
 ; CHECK-NEXT:    movk x0, #52428
 ; CHECK-NEXT:    movk x0, #8455, lsl #16
 ; CHECK-NEXT:    ret
@@ -391,7 +391,7 @@ define i64 @movn_eor() nounwind {
 define i64 @orr_orr_64() nounwind {
 ; CHECK-LABEL: orr_orr_64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #536866816
+; CHECK-NEXT:    mov x0, #536866816 // =0x1ffff000
 ; CHECK-NEXT:    orr x0, x0, #0x3fff800000000000
 ; CHECK-NEXT:    ret
   ret i64 4611545281475899392
@@ -400,7 +400,7 @@ define i64 @orr_orr_64() nounwind {
 define i64 @orr_orr_32() nounwind {
 ; CHECK-LABEL: orr_orr_32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #558551907040256
+; CHECK-NEXT:    mov x0, #558551907040256 // =0x1fc000001fc00
 ; CHECK-NEXT:    orr x0, x0, #0x1c001c001c001c00
 ; CHECK-NEXT:    ret
   ret i64 2018171185438784512
@@ -409,7 +409,7 @@ define i64 @orr_orr_32() nounwind {
 define i64 @orr_orr_16() nounwind {
 ; CHECK-LABEL: orr_orr_16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #1152939097061330944
+; CHECK-NEXT:    mov x0, #1152939097061330944 // =0x1000100010001000
 ; CHECK-NEXT:    orr x0, x0, #0x1000100010001
 ; CHECK-NEXT:    ret
   ret i64 1153220576333074433
@@ -418,7 +418,7 @@ define i64 @orr_orr_16() nounwind {
 define i64 @orr_orr_8() nounwind {
 ; CHECK-LABEL: orr_orr_8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #144680345676153346
+; CHECK-NEXT:    mov x0, #144680345676153346 // =0x202020202020202
 ; CHECK-NEXT:    orr x0, x0, #0x1818181818181818
 ; CHECK-NEXT:    ret
   ret i64 1880844493789993498
@@ -427,7 +427,7 @@ define i64 @orr_orr_8() nounwind {
 define i64 @orr_64_orr_8() nounwind {
 ; CHECK-LABEL: orr_64_orr_8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x0, #-6148914691236517206
+; CHECK-NEXT:    mov x0, #-6148914691236517206 // =0xaaaaaaaaaaaaaaaa
 ; CHECK-NEXT:    orr x0, x0, #0xfffff0000000000
 ; CHECK-NEXT:    ret
   ret i64 -5764607889538110806
@@ -436,8 +436,8 @@ define i64 @orr_64_orr_8() nounwind {
 define i64 @orr_2_eor_16() nounwind {
 ; CHECK-LABEL: orr_2_eor_16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #6148914691236517205
-; CHECK-NEXT:    eor  x0, x0, #0x3000300030003000
+; CHECK-NEXT:    mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT:    eor x0, x0, #0x3000300030003000
 ; CHECK-NEXT:    ret
   ret i64 7301853788297848149
 }
@@ -445,8 +445,8 @@ define i64 @orr_2_eor_16() nounwind {
 define i64 @orr_2_eor_32() nounwind {
 ; CHECK-LABEL: orr_2_eor_32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #6148914691236517205
-; CHECK-NEXT:    eor  x0, x0, #0x1fffc0001fffc0
+; CHECK-NEXT:    mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT:    eor x0, x0, #0x1fffc0001fffc0
 ; CHECK-NEXT:    ret
   ret i64 6145912199858268821
 }
@@ -454,8 +454,8 @@ define i64 @orr_2_eor_32() nounwind {
 define i64 @orr_2_eor_64() nounwind {
 ; CHECK-LABEL: orr_2_eor_64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #6148914691236517205
-; CHECK-NEXT:    eor  x0, x0, #0x1fffffffffc00
+; CHECK-NEXT:    mov x0, #6148914691236517205 // =0x5555555555555555
+; CHECK-NEXT:    eor x0, x0, #0x1fffffffffc00
 ; CHECK-NEXT:    ret
   ret i64 6148727041252043093
 }
@@ -463,8 +463,8 @@ define i64 @orr_2_eor_64() nounwind {
 define i64 @orr_4_eor_8() nounwind {
 ; CHECK-LABEL: orr_4_eor_8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #2459565876494606882
-; CHECK-NEXT:    eor  x0, x0, #0x8f8f8f8f8f8f8f8f
+; CHECK-NEXT:    mov x0, #2459565876494606882 // =0x2222222222222222
+; CHECK-NEXT:    eor x0, x0, #0x8f8f8f8f8f8f8f8f
 ; CHECK-NEXT:    ret
   ret i64 12514849900987264429
 }
@@ -472,8 +472,8 @@ define i64 @orr_4_eor_8() nounwind {
 define i64 @orr_4_eor_16() nounwind {
 ; CHECK-LABEL: orr_4_eor_16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #4919131752989213764
-; CHECK-NEXT:    eor  x0, x0, #0xf00ff00ff00ff00f
+; CHECK-NEXT:    mov x0, #4919131752989213764 // =0x4444444444444444
+; CHECK-NEXT:    eor x0, x0, #0xf00ff00ff00ff00f
 ; CHECK-NEXT:    ret
   ret i64 12991675787320734795
 }
@@ -481,8 +481,8 @@ define i64 @orr_4_eor_16() nounwind {
 define i64 @orr_4_eor_32() nounwind {
 ; CHECK-LABEL: orr_4_eor_32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #4919131752989213764
-; CHECK-NEXT:    eor  x0, x0, #0x1ff800001ff80000
+; CHECK-NEXT:    mov x0, #4919131752989213764 // =0x4444444444444444
+; CHECK-NEXT:    eor x0, x0, #0x1ff800001ff80000
 ; CHECK-NEXT:    ret
   ret i64 6610233413460575300
 }
@@ -490,8 +490,8 @@ define i64 @orr_4_eor_32() nounwind {
 define i64 @orr_4_eor_64() nounwind {
 ; CHECK-LABEL: orr_4_eor_64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #1229782938247303441
-; CHECK-NEXT:    eor  x0, x0, #0xfff80000000
+; CHECK-NEXT:    mov x0, #1229782938247303441 // =0x1111111111111111
+; CHECK-NEXT:    eor x0, x0, #0xfff80000000
 ; CHECK-NEXT:    ret
   ret i64 1229798183233720593
 }
@@ -499,8 +499,8 @@ define i64 @orr_4_eor_64() nounwind {
 define i64 @orr_8_eor_16() nounwind {
 ; CHECK-LABEL: orr_8_eor_16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #3472328296227680304
-; CHECK-NEXT:    eor  x0, x0, #0x1f801f801f801f80
+; CHECK-NEXT:    mov x0, #3472328296227680304 // =0x3030303030303030
+; CHECK-NEXT:    eor x0, x0, #0x1f801f801f801f80
 ; CHECK-NEXT:    ret
   ret i64 3436298949444513712
 }
@@ -508,8 +508,8 @@ define i64 @orr_8_eor_16() nounwind {
 define i64 @orr_8_eor_32() nounwind {
 ; CHECK-LABEL: orr_8_eor_32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #1157442765409226768
-; CHECK-NEXT:    eor  x0, x0, #0xffff8001ffff8001
+; CHECK-NEXT:    mov x0, #1157442765409226768 // =0x1010101010101010
+; CHECK-NEXT:    eor x0, x0, #0xffff8001ffff8001
 ; CHECK-NEXT:    ret
   ret i64 17289195901212921873
 }
@@ -517,8 +517,8 @@ define i64 @orr_8_eor_32() nounwind {
 define i64 @orr_8_eor_64() nounwind {
 ; CHECK-LABEL: orr_8_eor_64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #3472328296227680304
-; CHECK-NEXT:    eor  x0, x0, #0x3ffffffff00000
+; CHECK-NEXT:    mov x0, #3472328296227680304 // =0x3030303030303030
+; CHECK-NEXT:    eor x0, x0, #0x3ffffffff00000
 ; CHECK-NEXT:    ret
   ret i64 3463215129921859632
 }
@@ -526,8 +526,8 @@ define i64 @orr_8_eor_64() nounwind {
 define i64 @orr_16_eor_32() nounwind {
 ; CHECK-LABEL: orr_16_eor_32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #1143931760365539296
-; CHECK-NEXT:    eor  x0, x0, #0xffff0001ffff0001
+; CHECK-NEXT:    mov x0, #1143931760365539296 // =0xfe00fe00fe00fe0
+; CHECK-NEXT:    eor x0, x0, #0xffff0001ffff0001
 ; CHECK-NEXT:    ret
   ret i64 17302565756451360737
 }
@@ -535,8 +535,8 @@ define i64 @orr_16_eor_32() nounwind {
 define i64 @orr_16_eor_64() nounwind {
 ; CHECK-LABEL: orr_16_eor_64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #9214505439794855904
-; CHECK-NEXT:    eor  x0, x0, #0xfe000
+; CHECK-NEXT:    mov x0, #9214505439794855904 // =0x7fe07fe07fe07fe0
+; CHECK-NEXT:    eor x0, x0, #0xfe000
 ; CHECK-NEXT:    ret
   ret i64 9214505439795847136
 }
@@ -544,8 +544,8 @@ define i64 @orr_16_eor_64() nounwind {
 define i64 @orr_32_eor_64() nounwind {
 ; CHECK-LABEL: orr_32_eor_64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov  x0, #1030792151280
-; CHECK-NEXT:    eor  x0, x0, #0xffff8000003fffff
+; CHECK-NEXT:    mov x0, #1030792151280 // =0xf0000000f0
+; CHECK-NEXT:    eor x0, x0, #0xffff8000003fffff
 ; CHECK-NEXT:    ret
   ret i64 18446604367017541391
 }
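
For the negative immediates in this file, the trailing annotation is the 64-bit two's-complement rendering of the value; -42 becomes 0xffffffffffffffd6 because 42 is 0x2a and ~0x2a + 1 = 0xffffffffffffffd6 over 64 bits. A minimal reproducer in the style of these tests (illustrative, not part of the patch):

  define i64 @neg42() nounwind {
    ; Materializes as: mov x0, #-42 // =0xffffffffffffffd6
    ret i64 -42
  }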
diff --git a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
index 599fac8c80346a..155abed3aa7a5c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -15,7 +15,7 @@ define i32 @cnt32_advsimd(i32 %x) nounwind readnone {
 ; CHECK-NONEON-LABEL: cnt32_advsimd:
 ; CHECK-NONEON:       // %bb.0:
 ; CHECK-NONEON-NEXT:    lsr w9, w0, #1
-; CHECK-NONEON-NEXT:    mov w8, #16843009
+; CHECK-NONEON-NEXT:    mov w8, #16843009 // =0x1010101
 ; CHECK-NONEON-NEXT:    and w9, w9, #0x55555555
 ; CHECK-NONEON-NEXT:    sub w9, w0, w9
 ; CHECK-NONEON-NEXT:    lsr w10, w9, #2
@@ -50,7 +50,7 @@ define i32 @cnt32_advsimd_2(<2 x i32> %x) {
 ; CHECK-NONEON-LABEL: cnt32_advsimd_2:
 ; CHECK-NONEON:       // %bb.0:
 ; CHECK-NONEON-NEXT:    lsr w9, w0, #1
-; CHECK-NONEON-NEXT:    mov w8, #16843009
+; CHECK-NONEON-NEXT:    mov w8, #16843009 // =0x1010101
 ; CHECK-NONEON-NEXT:    and w9, w9, #0x55555555
 ; CHECK-NONEON-NEXT:    sub w9, w0, w9
 ; CHECK-NONEON-NEXT:    lsr w10, w9, #2
@@ -86,7 +86,7 @@ define i64 @cnt64_advsimd(i64 %x) nounwind readnone {
 ; CHECK-NONEON-LABEL: cnt64_advsimd:
 ; CHECK-NONEON:       // %bb.0:
 ; CHECK-NONEON-NEXT:    lsr x9, x0, #1
-; CHECK-NONEON-NEXT:    mov x8, #72340172838076673
+; CHECK-NONEON-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; CHECK-NONEON-NEXT:    and x9, x9, #0x5555555555555555
 ; CHECK-NONEON-NEXT:    sub x9, x0, x9
 ; CHECK-NONEON-NEXT:    lsr x10, x9, #2
@@ -114,7 +114,7 @@ define i32 @cnt32(i32 %x) nounwind readnone noimplicitfloat {
 ; CHECK-LABEL: cnt32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w9, w0, #1
-; CHECK-NEXT:    mov w8, #16843009
+; CHECK-NEXT:    mov w8, #16843009 // =0x1010101
 ; CHECK-NEXT:    and w9, w9, #0x55555555
 ; CHECK-NEXT:    sub w9, w0, w9
 ; CHECK-NEXT:    lsr w10, w9, #2
@@ -130,7 +130,7 @@ define i32 @cnt32(i32 %x) nounwind readnone noimplicitfloat {
 ; CHECK-NONEON-LABEL: cnt32:
 ; CHECK-NONEON:       // %bb.0:
 ; CHECK-NONEON-NEXT:    lsr w9, w0, #1
-; CHECK-NONEON-NEXT:    mov w8, #16843009
+; CHECK-NONEON-NEXT:    mov w8, #16843009 // =0x1010101
 ; CHECK-NONEON-NEXT:    and w9, w9, #0x55555555
 ; CHECK-NONEON-NEXT:    sub w9, w0, w9
 ; CHECK-NONEON-NEXT:    lsr w10, w9, #2
@@ -155,7 +155,7 @@ define i64 @cnt64(i64 %x) nounwind readnone noimplicitfloat {
 ; CHECK-LABEL: cnt64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr x9, x0, #1
-; CHECK-NEXT:    mov x8, #72340172838076673
+; CHECK-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; CHECK-NEXT:    and x9, x9, #0x5555555555555555
 ; CHECK-NEXT:    sub x9, x0, x9
 ; CHECK-NEXT:    lsr x10, x9, #2
@@ -171,7 +171,7 @@ define i64 @cnt64(i64 %x) nounwind readnone noimplicitfloat {
 ; CHECK-NONEON-LABEL: cnt64:
 ; CHECK-NONEON:       // %bb.0:
 ; CHECK-NONEON-NEXT:    lsr x9, x0, #1
-; CHECK-NONEON-NEXT:    mov x8, #72340172838076673
+; CHECK-NONEON-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; CHECK-NONEON-NEXT:    and x9, x9, #0x5555555555555555
 ; CHECK-NONEON-NEXT:    sub x9, x0, x9
 ; CHECK-NONEON-NEXT:    lsr x10, x9, #2
diff --git a/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll b/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
index ba242fcaf6af3e..27d6c96ee1ffdc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
@@ -11,7 +11,6 @@ define i8 @test_64bit_add(ptr %a, i64 %b) {
 ; These tests are trying to form SEXT and ZEXT operations that never leave i64
 ; space, to make sure LLVM can adapt the offset register correctly.
 define void @ldst_8bit(ptr %base, i64 %offset) minsize {
-
    %off32.sext.tmp = shl i64 %offset, 32
    %off32.sext = ashr i64 %off32.sext.tmp, 32
    %addr8_sxtw = getelementptr i8, ptr %base, i64 %off32.sext
@@ -32,7 +31,6 @@ define void @ldst_8bit(ptr %base, i64 %offset) minsize {
 
 
 define void @ldst_16bit(ptr %base, i64 %offset) minsize {
-
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
@@ -63,7 +61,6 @@ define void @ldst_16bit(ptr %base, i64 %offset) minsize {
 }
 
 define void @ldst_32bit(ptr %base, i64 %offset) minsize {
-
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
@@ -93,7 +90,6 @@ define void @ldst_32bit(ptr %base, i64 %offset) minsize {
 }
 
 define void @ldst_64bit(ptr %base, i64 %offset) minsize {
-
   %addrint_uxtw = ptrtoint ptr %base to i64
   %offset_uxtw = and i64 %offset, 4294967295
   %addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
diff --git a/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll b/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
index 0c1e1a54d260a5..f69c0567a5ae2e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-reserve-call-saved-reg.ll
@@ -37,7 +37,6 @@
 ; If a register is specified to be both reserved and callee-saved, then it
 ; should not be allocated and should not be spilled onto the stack.
 define void @foo() {
-
   %val = load volatile [30 x i64], ptr @var
   store volatile [30 x i64] %val, ptr @var
 
diff --git a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
index b58f6ba96a5b87..d77272eb62783a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-srl-and.ll
@@ -9,11 +9,11 @@ define i32 @srl_and()  {
 ; CHECK-LABEL: srl_and:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    adrp x8, :got:g
-; CHECK-NEXT:    mov w9, #50
+; CHECK-NEXT:    mov w9, #50 // =0x32
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:g]
 ; CHECK-NEXT:    ldrh w8, [x8]
 ; CHECK-NEXT:    eor w8, w8, w9
-; CHECK-NEXT:    mov w9, #65535
+; CHECK-NEXT:    mov w9, #65535 // =0xffff
 ; CHECK-NEXT:    add w8, w8, w9
 ; CHECK-NEXT:    and w0, w8, w8, lsr #16
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll b/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
index 00cc6b21ccaf8b..cadce787d7f065 100644
--- a/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-subvector-extend.ll
@@ -233,7 +233,11 @@ define <4 x i64> @cfunc4(<4 x i16> %v0) nounwind {
 define <4 x i64> @zext_v4i8_to_v4i64(<4 x i8> %v0) nounwind {
 ; CHECK-SD-LABEL: zext_v4i8_to_v4i64:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    bic.4h v0, #255, lsl #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI14_0
+; CHECK-SD-NEXT:    mov.d v0[1], v0[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI14_0]
+; CHECK-SD-NEXT:    tbl.8b v0, { v0 }, v1
 ; CHECK-SD-NEXT:    ushll.4s v0, v0, #0
 ; CHECK-SD-NEXT:    ushll2.2d v1, v0, #0
 ; CHECK-SD-NEXT:    ushll.2d v0, v0, #0
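
This hunk is one of the functional changes rather than a regeneration: the single bic.4h mask is replaced by a TBL through a constant-pool index vector before the two widening ushll steps. The IR body, reconstructed from the test name as a sketch (the actual test may differ in detail):

  define <4 x i64> @zext_v4i8_to_v4i64(<4 x i8> %v0) nounwind {
    ; Widens each byte lane to 64 bits; with this patch the initial byte
    ; rearrangement is selected as TBL instead of masking with BIC.
    %r = zext <4 x i8> %v0 to <4 x i64>
    ret <4 x i64> %r
  }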
diff --git a/llvm/test/CodeGen/AArch64/arm64-vcvt_f.ll b/llvm/test/CodeGen/AArch64/arm64-vcvt_f.ll
index 269ffed98a844e..1206db5aa6a23d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vcvt_f.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vcvt_f.ll
@@ -352,3 +352,5 @@ define float @from_half(i16 %in) {
 
 declare float @llvm.convert.from.fp16.f32(i16) #1
 declare i16 @llvm.convert.to.fp16.f32(float) #1
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; FALLBACK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 1dfd977186b0e7..85ea733ab6710f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2093,8 +2093,12 @@ define <4 x i32> @neon.ushl8h_constant_shift_extend_not_2x(ptr %A) nounwind {
 ; CHECK-LABEL: neon.ushl8h_constant_shift_extend_not_2x:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    adrp x8, .LCPI156_0
+; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI156_0]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
-; CHECK-NEXT:    ushll.4s v0, v0, #1
+; CHECK-NEXT:    ushll.4s v0, v0, #0
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
+; CHECK-NEXT:    add.4s v0, v0, v0
 ; CHECK-NEXT:    ret
   %tmp1 = load <4 x i8>, ptr %A
   %tmp2 = zext <4 x i8> %tmp1 to <4 x i32>
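
The same functional pattern appears here: the extend now goes through a TBL with a constant-pool mask, and the left shift by one is carried out as add.4s v0, v0, v0. The hunk cuts the body off after the zext; a self-contained equivalent using the NEON ushl intrinsic (an assumption based on the test name, not the file's actual body):

  define <4 x i32> @ushl_extend_sketch(ptr %A) nounwind {
    %tmp1 = load <4 x i8>, ptr %A
    %tmp2 = zext <4 x i8> %tmp1 to <4 x i32>
    ; Shift each 32-bit lane left by one; codegen folds this into a vector add.
    %tmp3 = call <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32> %tmp2, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
    ret <4 x i32> %tmp3
  }
  declare <4 x i32> @llvm.aarch64.neon.ushl.v4i32(<4 x i32>, <4 x i32>)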
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-asm.ll b/llvm/test/CodeGen/AArch64/branch-relax-asm.ll
index e1fd208cb075a1..72fda815f3a84d 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-asm.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-asm.ll
@@ -12,7 +12,7 @@ define i32 @test_asm_length(i32 %in) {
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  LBB0_2: ; %true
-; CHECK-NEXT:    mov w0, #4
+; CHECK-NEXT:    mov w0, #4 ; =0x4
 ; CHECK-NEXT:    ; InlineAsm Start
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    nop
diff --git a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
index 5cfa59a3022394..939d3d81e7e63d 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
@@ -78,11 +78,22 @@ entry:
 define <16 x i8> @test5(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
 ; CHECK-LABEL: test5:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    ldr b0, [x0]
+; CHECK-NEXT:    ldrb w9, [x0]
+; CHECK-NEXT:    ldrb w10, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI4_0
-; CHECK-NEXT:    ld1r { v1.16b }, [x1]
-; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI4_0]
-; CHECK-NEXT:    tbl v0.16b, { v0.16b, v1.16b }, v2.16b
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    fmov s1, w9
+; CHECK-NEXT:    dup v0.8b, w9
+; CHECK-NEXT:    mov v1.b[8], w10
+; CHECK-NEXT:    mov v1.b[9], w10
+; CHECK-NEXT:    mov v1.b[10], w10
+; CHECK-NEXT:    mov v1.b[11], w10
+; CHECK-NEXT:    mov v1.b[12], w10
+; CHECK-NEXT:    mov v1.b[13], w10
+; CHECK-NEXT:    mov v1.b[14], w10
+; CHECK-NEXT:    mov v1.b[15], w10
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i8, ptr %a, align 1
diff --git a/llvm/test/CodeGen/AArch64/combine-mul.ll b/llvm/test/CodeGen/AArch64/combine-mul.ll
index a2b0425308093d..a742386561f9fa 100644
--- a/llvm/test/CodeGen/AArch64/combine-mul.ll
+++ b/llvm/test/CodeGen/AArch64/combine-mul.ll
@@ -77,7 +77,7 @@ define i8 @one_demanded_bit(i8 %x) {
 define <2 x i64> @one_demanded_bit_splat(<2 x i64> %x) {
 ; CHECK-LABEL: one_demanded_bit_splat:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    shl v0.2d, v0.2d, #5
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
@@ -131,7 +131,7 @@ define i32 @squared_demanded_2_low_bits(i32 %x) {
 define <2 x i64> @squared_demanded_2_low_bits_splat(<2 x i64> %x) {
 ; CHECK-LABEL: squared_demanded_2_low_bits_splat:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-2
+; CHECK-NEXT:    mov x8, #-2 // =0xfffffffffffffffe
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/const-isel.ll b/llvm/test/CodeGen/AArch64/const-isel.ll
index adf3c5b329227c..ac985f8740679c 100644
--- a/llvm/test/CodeGen/AArch64/const-isel.ll
+++ b/llvm/test/CodeGen/AArch64/const-isel.ll
@@ -4,7 +4,7 @@
 define i53 @PR59892 () {
 ; FISEL-LABEL: PR59892:
 ; FISEL:       // %bb.0:
-; FISEL-NEXT:    mov x8, #47668
+; FISEL-NEXT:    mov x8, #47668 // =0xba34
 ; FISEL-NEXT:    movk x8, #4645, lsl #16
 ; FISEL-NEXT:    movk x8, #58741, lsl #32
 ; FISEL-NEXT:    movk x8, #1, lsl #48
diff --git a/llvm/test/CodeGen/AArch64/early-ifcvt-same-value.mir b/llvm/test/CodeGen/AArch64/early-ifcvt-same-value.mir
index b9298608e192fc..dfb7507858892e 100644
--- a/llvm/test/CodeGen/AArch64/early-ifcvt-same-value.mir
+++ b/llvm/test/CodeGen/AArch64/early-ifcvt-same-value.mir
@@ -20,15 +20,16 @@ liveins:
 body:             |
   ; CHECK-LABEL: name: fmov0
   ; CHECK: bb.0.entry:
-  ; CHECK:   liveins: $s1, $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
-  ; CHECK:   [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
-  ; CHECK:   [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-  ; CHECK:   [[FMOVS0_1:%[0-9]+]]:fpr32 = FMOVS0
-  ; CHECK:   [[COPY2:%[0-9]+]]:fpr32 = COPY [[FMOVS0_]]
-  ; CHECK:   $s0 = COPY [[COPY2]]
-  ; CHECK:   RET_ReallyLR implicit $s0
+  ; CHECK-NEXT:   liveins: $s1, $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
+  ; CHECK-NEXT:   [[FMOVS0_1:%[0-9]+]]:fpr32 = FMOVS0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:fpr32 = COPY [[FMOVS0_]]
+  ; CHECK-NEXT:   $s0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   RET_ReallyLR implicit $s0
   bb.0.entry:
     successors: %bb.1, %bb.2
     liveins: $s1, $w0
@@ -75,22 +76,26 @@ liveins:
 body:             |
   ; CHECK-LABEL: name: fmov0_extrapred
   ; CHECK: bb.0.entry:
-  ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   liveins: $s1, $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
-  ; CHECK:   [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
-  ; CHECK:   [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-  ; CHECK:   [[FMOVS0_1:%[0-9]+]]:fpr32 = FMOVS0
-  ; CHECK:   B %bb.4
-  ; CHECK: bb.1:
-  ; CHECK:   successors: %bb.4(0x80000000)
-  ; CHECK:   [[DEF:%[0-9]+]]:fpr32 = IMPLICIT_DEF
-  ; CHECK:   B %bb.4
-  ; CHECK: bb.4:
-  ; CHECK:   [[PHI:%[0-9]+]]:fpr32 = PHI [[FMOVS0_]], %bb.0, [[DEF]], %bb.1
-  ; CHECK:   $s0 = COPY [[PHI]]
-  ; CHECK:   RET_ReallyLR implicit $s0
+  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
+  ; CHECK-NEXT:   liveins: $s1, $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
+  ; CHECK-NEXT:   [[FMOVS0_1:%[0-9]+]]:fpr32 = FMOVS0
+  ; CHECK-NEXT:   B %bb.4
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1:
+  ; CHECK-NEXT:   successors: %bb.4(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:fpr32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   B %bb.4
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.4:
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:fpr32 = PHI [[FMOVS0_]], %bb.0, [[DEF]], %bb.1
+  ; CHECK-NEXT:   $s0 = COPY [[PHI]]
+  ; CHECK-NEXT:   RET_ReallyLR implicit $s0
   bb.0.entry:
     successors: %bb.1, %bb.2
     liveins: $s1, $w0
@@ -104,8 +109,6 @@ body:             |
   bb.4:
     successors: %bb.3
 
-    ; Make sure we also handle the case when there are extra predecessors on
-    ; the tail block.
     %3:fpr32 = IMPLICIT_DEF
     B %bb.3
 
@@ -147,17 +150,18 @@ liveins:
 body:             |
   ; CHECK-LABEL: name: copy_physreg
   ; CHECK: bb.0.entry:
-  ; CHECK:   liveins: $s1, $w0
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
-  ; CHECK:   [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
-  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
-  ; CHECK:   [[DEF:%[0-9]+]]:fpr32 = IMPLICIT_DEF implicit-def $s1
-  ; CHECK:   [[COPY2:%[0-9]+]]:fpr32 = COPY $s1
-  ; CHECK:   [[DEF1:%[0-9]+]]:fpr32 = IMPLICIT_DEF implicit-def $s1
-  ; CHECK:   [[COPY3:%[0-9]+]]:fpr32 = COPY $s1
-  ; CHECK:   [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY2]], [[COPY3]], 1, implicit $nzcv
-  ; CHECK:   $s0 = COPY [[FCSELSrrr]]
-  ; CHECK:   RET_ReallyLR implicit $s0
+  ; CHECK-NEXT:   liveins: $s1, $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:fpr32 = IMPLICIT_DEF implicit-def $s1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:fpr32 = COPY $s1
+  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:fpr32 = IMPLICIT_DEF implicit-def $s1
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:fpr32 = COPY $s1
+  ; CHECK-NEXT:   [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY2]], [[COPY3]], 1, implicit $nzcv
+  ; CHECK-NEXT:   $s0 = COPY [[FCSELSrrr]]
+  ; CHECK-NEXT:   RET_ReallyLR implicit $s0
   bb.0.entry:
     successors: %bb.1, %bb.2
     liveins: $s1, $w0
@@ -210,15 +214,16 @@ liveins:
 body:             |
   ; CHECK-LABEL: name: same_def_different_operand
   ; CHECK: bb.0.entry:
-  ; CHECK:   liveins: $s1, $w0, $x2
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr64common = COPY $x0
-  ; CHECK:   early-clobber %11:gpr64common, %10:gpr64 = LDRXpre [[COPY]], 16
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr32common = COPY $w0
-  ; CHECK:   [[COPY2:%[0-9]+]]:fpr32 = COPY $s1
-  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 1, 0, implicit-def $nzcv
-  ; CHECK:   [[CSELXr:%[0-9]+]]:gpr64common = CSELXr %11, %10, 1, implicit $nzcv
-  ; CHECK:   $x2 = COPY [[CSELXr]]
-  ; CHECK:   RET_ReallyLR implicit $x2
+  ; CHECK-NEXT:   liveins: $s1, $w0, $x2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr64common = COPY $x0
+  ; CHECK-NEXT:   early-clobber %11:gpr64common, %10:gpr64 = LDRXpre [[COPY]], 16
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr32common = COPY $w0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:fpr32 = COPY $s1
+  ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY1]], 1, 0, implicit-def $nzcv
+  ; CHECK-NEXT:   [[CSELXr:%[0-9]+]]:gpr64common = CSELXr %11, %10, 1, implicit $nzcv
+  ; CHECK-NEXT:   $x2 = COPY [[CSELXr]]
+  ; CHECK-NEXT:   RET_ReallyLR implicit $x2
   bb.0.entry:
     successors: %bb.1, %bb.2
     liveins: $s1, $w0, $x2
diff --git a/llvm/test/CodeGen/AArch64/fabs-combine.ll b/llvm/test/CodeGen/AArch64/fabs-combine.ll
index 23bf7a699195f7..0dcab3eb976675 100644
--- a/llvm/test/CodeGen/AArch64/fabs-combine.ll
+++ b/llvm/test/CodeGen/AArch64/fabs-combine.ll
@@ -71,7 +71,7 @@ define <4 x float> @nabsv4f32(<4 x float> %a) {
 define <2 x double> @nabsv2d64(<2 x double> %a) {
 ; CHECK-LABEL: nabsv2d64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-9223372036854775808
+; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/fp-const-fold.ll b/llvm/test/CodeGen/AArch64/fp-const-fold.ll
index c18c9a3dea1a30..907a72ce4b7d56 100644
--- a/llvm/test/CodeGen/AArch64/fp-const-fold.ll
+++ b/llvm/test/CodeGen/AArch64/fp-const-fold.ll
@@ -6,7 +6,7 @@
 define double @constant_fold_fdiv_by_zero(ptr %p) {
 ; CHECK-LABEL: constant_fold_fdiv_by_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9218868437227405312
+; CHECK-NEXT:    mov x8, #9218868437227405312 // =0x7ff0000000000000
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    ret
   %r = fdiv double 4.940660e-324, 0.0
@@ -18,7 +18,7 @@ define double @constant_fold_fdiv_by_zero(ptr %p) {
 define double @constant_fold_frem_by_zero(ptr %p) {
 ; CHECK-LABEL: constant_fold_frem_by_zero:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9221120237041090560
+; CHECK-NEXT:    mov x8, #9221120237041090560 // =0x7ff8000000000000
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    ret
   %r = frem double 4.940660e-324, 0.0
@@ -30,7 +30,7 @@ define double @constant_fold_frem_by_zero(ptr %p) {
 define double @constant_fold_fmul_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fmul_nan:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9221120237041090560
+; CHECK-NEXT:    mov x8, #9221120237041090560 // =0x7ff8000000000000
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    ret
   %r = fmul double 0x7ff0000000000000, 0.0
@@ -42,7 +42,7 @@ define double @constant_fold_fmul_nan(ptr %p) {
 define double @constant_fold_fadd_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fadd_nan:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9221120237041090560
+; CHECK-NEXT:    mov x8, #9221120237041090560 // =0x7ff8000000000000
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    ret
   %r = fadd double 0x7ff0000000000000, 0xfff0000000000000
@@ -54,7 +54,7 @@ define double @constant_fold_fadd_nan(ptr %p) {
 define double @constant_fold_fsub_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fsub_nan:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9221120237041090560
+; CHECK-NEXT:    mov x8, #9221120237041090560 // =0x7ff8000000000000
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    ret
   %r = fsub double 0x7ff0000000000000, 0x7ff0000000000000
@@ -66,7 +66,7 @@ define double @constant_fold_fsub_nan(ptr %p) {
 define double @constant_fold_fma_nan(ptr %p) {
 ; CHECK-LABEL: constant_fold_fma_nan:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9221120237041090560
+; CHECK-NEXT:    mov x8, #9221120237041090560 // =0x7ff8000000000000
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    ret
   %r =  call double @llvm.fma.f64(double 0x7ff0000000000000, double 0.0, double 42.0)
@@ -95,7 +95,7 @@ define double @fmul_nnan_nan_op1(double %x) {
 define double @fdiv_ninf_nan_op0(double %x) {
 ; CHECK-LABEL: fdiv_ninf_nan_op0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-2251799813685248
+; CHECK-NEXT:    mov x8, #-2251799813685248 // =0xfff8000000000000
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    fdiv d0, d1, d0
 ; CHECK-NEXT:    ret
@@ -109,7 +109,7 @@ define double @fdiv_ninf_nan_op0(double %x) {
 define double @fadd_ninf_nan_op1(double %x) {
 ; CHECK-LABEL: fadd_ninf_nan_op1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9221120237041090560
+; CHECK-NEXT:    mov x8, #9221120237041090560 // =0x7ff8000000000000
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    fadd d0, d0, d1
 ; CHECK-NEXT:    ret
@@ -139,7 +139,7 @@ define double @fadd_ninf_inf_op1(double %x) {
 define double @fsub_nnan_inf_op0(double %x) {
 ; CHECK-LABEL: fsub_nnan_inf_op0:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #9218868437227405312
+; CHECK-NEXT:    mov x8, #9218868437227405312 // =0x7ff0000000000000
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    fsub d0, d1, d0
 ; CHECK-NEXT:    ret
@@ -153,7 +153,7 @@ define double @fsub_nnan_inf_op0(double %x) {
 define double @fmul_nnan_inf_op1(double %x) {
 ; CHECK-LABEL: fmul_nnan_inf_op1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-4503599627370496
+; CHECK-NEXT:    mov x8, #-4503599627370496 // =0xfff0000000000000
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    fmul d0, d0, d1
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/implicit-null-check.ll b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
index 052ff7f0fe5d09..a905aaaac58076 100644
--- a/llvm/test/CodeGen/AArch64/implicit-null-check.ll
+++ b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
@@ -14,7 +14,7 @@ define i32 @imp_null_check_load_fallthrough(ptr %x) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB0_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -37,7 +37,7 @@ define i32 @imp_null_check_load_reorder(ptr %x) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -59,7 +59,7 @@ define i32 @imp_null_check_unordered_load(ptr %x) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB2_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -84,7 +84,7 @@ define i32 @imp_null_check_seq_cst_load(ptr %x) {
 ; CHECK-NEXT:    ldar w0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB3_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -107,7 +107,7 @@ define i32 @imp_null_check_volatile_load(ptr %x) {
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB4_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -130,7 +130,7 @@ define i8 @imp_null_check_load_i8(ptr %x) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB5_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -156,7 +156,7 @@ define i256 @imp_null_check_load_i256(ptr %x) {
 ; CHECK-NEXT:    mov x1, xzr
 ; CHECK-NEXT:    mov x2, xzr
 ; CHECK-NEXT:    mov x3, xzr
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -180,7 +180,7 @@ define i32 @imp_null_check_gep_load(ptr %x) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB7_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -204,7 +204,7 @@ define i32 @imp_null_check_add_result(ptr %x, i32 %p) {
 ; CHECK-NEXT:    add w0, w8, w1
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB8_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -231,7 +231,7 @@ define i32 @imp_null_check_hoist_over_udiv(ptr %x, i32 %a, i32 %b) {
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB9_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -260,7 +260,7 @@ define i32 @imp_null_check_hoist_over_unrelated_load(ptr %x, ptr %y, ptr %z) {
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -287,7 +287,7 @@ define i32 @imp_null_check_gep_load_with_use_dep(ptr %x, i32 %a) {
 ; CHECK-NEXT:    add w0, w8, #4
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
@@ -316,7 +316,7 @@ define i32 @imp_null_check_load_fence1(ptr %x) {
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB12_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp eq ptr %x, null
@@ -342,7 +342,7 @@ define i32 @imp_null_check_load_fence2(ptr %x) {
 ; CHECK-NEXT:    ldr w0, [x0]
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB13_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp eq ptr %x, null
@@ -363,7 +363,7 @@ define void @imp_null_check_store(ptr %x) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB14_2
 ; CHECK-NEXT:  // %bb.1: // %not_null
-; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:  .LBB14_2: // %common.ret
 ; CHECK-NEXT:    ret
@@ -385,7 +385,7 @@ define void @imp_null_check_unordered_store(ptr %x) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x0, .LBB15_2
 ; CHECK-NEXT:  // %bb.1: // %not_null
-; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:  .LBB15_2: // %common.ret
 ; CHECK-NEXT:    ret
@@ -409,7 +409,7 @@ define i32 @imp_null_check_neg_gep_load(ptr %x) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB16_2:
-; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov w0, #42 // =0x2a
 ; CHECK-NEXT:    ret
  entry:
   %c = icmp eq ptr %x, null
diff --git a/llvm/test/CodeGen/AArch64/load-insert-zero.ll b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
index 993af08a66ddd9..0c6fde90e2fa94 100644
--- a/llvm/test/CodeGen/AArch64/load-insert-zero.ll
+++ b/llvm/test/CodeGen/AArch64/load-insert-zero.ll
@@ -467,20 +467,23 @@ define void @predictor_4x4_neon(ptr nocapture noundef writeonly %0, i64 noundef
 ; CHECK-NEXT:    ushll v3.8h, v2.8b, #1
 ; CHECK-NEXT:    mov v0.s[0], w8
 ; CHECK-NEXT:    lsr w8, w8, #24
+; CHECK-NEXT:    dup v4.8b, w8
+; CHECK-NEXT:    adrp x8, .LCPI42_0
 ; CHECK-NEXT:    uaddl v0.8h, v0.8b, v1.8b
 ; CHECK-NEXT:    urhadd v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    mov v2.16b, v4.16b
 ; CHECK-NEXT:    str s1, [x0]
 ; CHECK-NEXT:    add v0.8h, v0.8h, v3.8h
-; CHECK-NEXT:    dup v3.8b, w8
+; CHECK-NEXT:    ldr d3, [x8, :lo12:.LCPI42_0]
 ; CHECK-NEXT:    lsl x8, x1, #1
+; CHECK-NEXT:    mov v2.d[1], v1.d[0]
+; CHECK-NEXT:    rshrn2 v4.16b, v0.8h, #2
 ; CHECK-NEXT:    rshrn v0.8b, v0.8h, #2
-; CHECK-NEXT:    zip1 v2.2s, v1.2s, v3.2s
+; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v3.8b
 ; CHECK-NEXT:    str s0, [x0, x1]
-; CHECK-NEXT:    zip1 v3.2s, v0.2s, v3.2s
-; CHECK-NEXT:    ext v2.8b, v2.8b, v0.8b, #1
+; CHECK-NEXT:    tbl v1.8b, { v4.16b }, v3.8b
 ; CHECK-NEXT:    str s2, [x0, x8]
 ; CHECK-NEXT:    add x8, x8, x1
-; CHECK-NEXT:    ext v1.8b, v3.8b, v0.8b, #1
 ; CHECK-NEXT:    str s1, [x0, x8]
 ; CHECK-NEXT:    ret
   %5 = load i32, ptr %2, align 4
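
The regenerated checks in load-insert-zero.ll above replace the zip1/ext shuffle sequences with tbl lookups driven by a constant index vector (.LCPI42_0), which is the expected fallout of reporting 8- and 16-byte shuffle masks as legal. As a rough illustration of why any such mask qualifies (a standalone sketch of the instruction's semantics, not LLVM code): a single-register TBL writes table[idx] for in-range byte indices and 0 otherwise, so every 8- or 16-entry index vector is valid.

#include <array>
#include <cstddef>
#include <cstdint>

// Sketch of AArch64 "tbl Vd.8b, { Vn.16b }, Vm.8b" semantics: each result
// byte selects table[idx] when idx < 16 and yields 0 when idx is out of
// range, so an arbitrary 8-element byte shuffle mask maps onto one lookup.
std::array<uint8_t, 8> tbl1_8b(const std::array<uint8_t, 16> &table,
                               const std::array<uint8_t, 8> &indices) {
  std::array<uint8_t, 8> result{};
  for (std::size_t i = 0; i < indices.size(); ++i)
    result[i] = indices[i] < table.size() ? table[indices[i]] : 0;
  return result;
}
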
diff --git a/llvm/test/CodeGen/AArch64/loop-sink-limit.mir b/llvm/test/CodeGen/AArch64/loop-sink-limit.mir
index 3e18f95cab8489..f97222d8f58e67 100644
--- a/llvm/test/CodeGen/AArch64/loop-sink-limit.mir
+++ b/llvm/test/CodeGen/AArch64/loop-sink-limit.mir
@@ -90,58 +90,71 @@ machineFunctionInfo: {}
 body:             |
   ; SINK1-LABEL: name: do_sink_use_is_not_a_copy
   ; SINK1: bb.0.entry:
-  ; SINK1:   successors: %bb.1(0x50000000), %bb.2(0x30000000)
-  ; SINK1:   liveins: $w0
-  ; SINK1:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
-  ; SINK1:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
-  ; SINK1:   Bcc 11, %bb.2, implicit $nzcv
-  ; SINK1:   B %bb.1
-  ; SINK1: bb.1.for.body.preheader:
-  ; SINK1:   successors: %bb.3(0x80000000)
-  ; SINK1:   [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @A
-  ; SINK1:   [[LDRWui:%[0-9]+]]:gpr32 = LDRWui killed [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @A :: (dereferenceable load (s32) from @A)
-  ; SINK1:   B %bb.3
-  ; SINK1: bb.2.for.cond.cleanup:
-  ; SINK1:   [[PHI:%[0-9]+]]:gpr32all = PHI [[COPY]], %bb.0, %4, %bb.3
-  ; SINK1:   $w0 = COPY [[PHI]]
-  ; SINK1:   RET_ReallyLR implicit $w0
-  ; SINK1: bb.3.for.body:
-  ; SINK1:   successors: %bb.2(0x04000000), %bb.3(0x7c000000)
-  ; SINK1:   [[PHI1:%[0-9]+]]:gpr32sp = PHI [[COPY]], %bb.1, %5, %bb.3
-  ; SINK1:   [[PHI2:%[0-9]+]]:gpr32 = PHI [[COPY]], %bb.1, %4, %bb.3
-  ; SINK1:   [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[PHI2]], [[LDRWui]]
-  ; SINK1:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[SDIVWr]]
-  ; SINK1:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[PHI1]], 1, 0, implicit-def $nzcv
-  ; SINK1:   [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBSWri1]]
-  ; SINK1:   Bcc 0, %bb.2, implicit $nzcv
-  ; SINK1:   B %bb.3
+  ; SINK1-NEXT:   successors: %bb.1(0x50000000), %bb.2(0x30000000)
+  ; SINK1-NEXT:   liveins: $w0
+  ; SINK1-NEXT: {{  $}}
+  ; SINK1-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; SINK1-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; SINK1-NEXT:   Bcc 11, %bb.2, implicit $nzcv
+  ; SINK1-NEXT:   B %bb.1
+  ; SINK1-NEXT: {{  $}}
+  ; SINK1-NEXT: bb.1.for.body.preheader:
+  ; SINK1-NEXT:   successors: %bb.3(0x80000000)
+  ; SINK1-NEXT: {{  $}}
+  ; SINK1-NEXT:   [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @A
+  ; SINK1-NEXT:   [[LDRWui:%[0-9]+]]:gpr32 = LDRWui killed [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @A :: (dereferenceable load (s32) from @A)
+  ; SINK1-NEXT:   B %bb.3
+  ; SINK1-NEXT: {{  $}}
+  ; SINK1-NEXT: bb.2.for.cond.cleanup:
+  ; SINK1-NEXT:   [[PHI:%[0-9]+]]:gpr32all = PHI [[COPY]], %bb.0, %4, %bb.3
+  ; SINK1-NEXT:   $w0 = COPY [[PHI]]
+  ; SINK1-NEXT:   RET_ReallyLR implicit $w0
+  ; SINK1-NEXT: {{  $}}
+  ; SINK1-NEXT: bb.3.for.body:
+  ; SINK1-NEXT:   successors: %bb.2(0x04000000), %bb.3(0x7c000000)
+  ; SINK1-NEXT: {{  $}}
+  ; SINK1-NEXT:   [[PHI1:%[0-9]+]]:gpr32sp = PHI [[COPY]], %bb.1, %5, %bb.3
+  ; SINK1-NEXT:   [[PHI2:%[0-9]+]]:gpr32 = PHI [[COPY]], %bb.1, %4, %bb.3
+  ; SINK1-NEXT:   [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[PHI2]], [[LDRWui]]
+  ; SINK1-NEXT:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[SDIVWr]]
+  ; SINK1-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[PHI1]], 1, 0, implicit-def $nzcv
+  ; SINK1-NEXT:   [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBSWri1]]
+  ; SINK1-NEXT:   Bcc 0, %bb.2, implicit $nzcv
+  ; SINK1-NEXT:   B %bb.3
+  ;
   ; SINK2-LABEL: name: do_sink_use_is_not_a_copy
   ; SINK2: bb.0.entry:
-  ; SINK2:   successors: %bb.1(0x50000000), %bb.2(0x30000000)
-  ; SINK2:   liveins: $w0
-  ; SINK2:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
-  ; SINK2:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
-  ; SINK2:   Bcc 11, %bb.2, implicit $nzcv
-  ; SINK2:   B %bb.1
-  ; SINK2: bb.1.for.body.preheader:
-  ; SINK2:   successors: %bb.3(0x80000000)
-  ; SINK2:   [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @A
-  ; SINK2:   [[LDRWui:%[0-9]+]]:gpr32 = LDRWui killed [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @A :: (dereferenceable load (s32) from @A)
-  ; SINK2:   B %bb.3
-  ; SINK2: bb.2.for.cond.cleanup:
-  ; SINK2:   [[PHI:%[0-9]+]]:gpr32all = PHI [[COPY]], %bb.0, %4, %bb.3
-  ; SINK2:   $w0 = COPY [[PHI]]
-  ; SINK2:   RET_ReallyLR implicit $w0
-  ; SINK2: bb.3.for.body:
-  ; SINK2:   successors: %bb.2(0x04000000), %bb.3(0x7c000000)
-  ; SINK2:   [[PHI1:%[0-9]+]]:gpr32sp = PHI [[COPY]], %bb.1, %5, %bb.3
-  ; SINK2:   [[PHI2:%[0-9]+]]:gpr32 = PHI [[COPY]], %bb.1, %4, %bb.3
-  ; SINK2:   [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[PHI2]], [[LDRWui]]
-  ; SINK2:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[SDIVWr]]
-  ; SINK2:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[PHI1]], 1, 0, implicit-def $nzcv
-  ; SINK2:   [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBSWri1]]
-  ; SINK2:   Bcc 0, %bb.2, implicit $nzcv
-  ; SINK2:   B %bb.3
+  ; SINK2-NEXT:   successors: %bb.1(0x50000000), %bb.2(0x30000000)
+  ; SINK2-NEXT:   liveins: $w0
+  ; SINK2-NEXT: {{  $}}
+  ; SINK2-NEXT:   [[COPY:%[0-9]+]]:gpr32common = COPY $w0
+  ; SINK2-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 0, implicit-def $nzcv
+  ; SINK2-NEXT:   Bcc 11, %bb.2, implicit $nzcv
+  ; SINK2-NEXT:   B %bb.1
+  ; SINK2-NEXT: {{  $}}
+  ; SINK2-NEXT: bb.1.for.body.preheader:
+  ; SINK2-NEXT:   successors: %bb.3(0x80000000)
+  ; SINK2-NEXT: {{  $}}
+  ; SINK2-NEXT:   [[ADRP:%[0-9]+]]:gpr64common = ADRP target-flags(aarch64-page) @A
+  ; SINK2-NEXT:   [[LDRWui:%[0-9]+]]:gpr32 = LDRWui killed [[ADRP]], target-flags(aarch64-pageoff, aarch64-nc) @A :: (dereferenceable load (s32) from @A)
+  ; SINK2-NEXT:   B %bb.3
+  ; SINK2-NEXT: {{  $}}
+  ; SINK2-NEXT: bb.2.for.cond.cleanup:
+  ; SINK2-NEXT:   [[PHI:%[0-9]+]]:gpr32all = PHI [[COPY]], %bb.0, %4, %bb.3
+  ; SINK2-NEXT:   $w0 = COPY [[PHI]]
+  ; SINK2-NEXT:   RET_ReallyLR implicit $w0
+  ; SINK2-NEXT: {{  $}}
+  ; SINK2-NEXT: bb.3.for.body:
+  ; SINK2-NEXT:   successors: %bb.2(0x04000000), %bb.3(0x7c000000)
+  ; SINK2-NEXT: {{  $}}
+  ; SINK2-NEXT:   [[PHI1:%[0-9]+]]:gpr32sp = PHI [[COPY]], %bb.1, %5, %bb.3
+  ; SINK2-NEXT:   [[PHI2:%[0-9]+]]:gpr32 = PHI [[COPY]], %bb.1, %4, %bb.3
+  ; SINK2-NEXT:   [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[PHI2]], [[LDRWui]]
+  ; SINK2-NEXT:   [[COPY1:%[0-9]+]]:gpr32all = COPY [[SDIVWr]]
+  ; SINK2-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[PHI1]], 1, 0, implicit-def $nzcv
+  ; SINK2-NEXT:   [[COPY2:%[0-9]+]]:gpr32all = COPY [[SUBSWri1]]
+  ; SINK2-NEXT:   Bcc 0, %bb.2, implicit $nzcv
+  ; SINK2-NEXT:   B %bb.3
   bb.0.entry:
     successors: %bb.1(0x50000000), %bb.2(0x30000000)
     liveins: $w0
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-safe-range-in-middle.mir b/llvm/test/CodeGen/AArch64/machine-outliner-safe-range-in-middle.mir
index 23811425101fde..5425cae700f698 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-safe-range-in-middle.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-safe-range-in-middle.mir
@@ -10,7 +10,6 @@ machineFunctionInfo:
 body:             |
   bb.0:
     liveins: $x0
-    ; Begin safe range of 3 instructions
     ; CHECK-LABEL: name: unsafe_range_in_middle
     ; CHECK: liveins: $x0
     ; CHECK-NEXT: {{  $}}
@@ -26,7 +25,6 @@ body:             |
     $x2 = ADDXri $x0, 2, 0
     $x3 = ADDXri $x0, 3, 0
 
-    ; End safe range
     $x16 = ADDXri $x0, 16, 0
     $x9 = ADDXri $x16, 16, 0
     $x16 = ADDXri killed $x16, 16, 0
@@ -35,7 +33,6 @@ body:             |
     $x1 = ADDXri $x0, 1, 0
     $x2 = ADDXri $x0, 2, 0
     $x3 = ADDXri $x0, 3, 0
-    ; End safe range
     $x16 = ADDXri $x0, 16, 0
     $x9 = ADDXri $x9, 16, 0
     $x16 = ADDXri killed $x16, 16, 0
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-unsafe-range-at-end.mir b/llvm/test/CodeGen/AArch64/machine-outliner-unsafe-range-at-end.mir
index 18c12ea3a8a346..966f243d591513 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-unsafe-range-at-end.mir
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-unsafe-range-at-end.mir
@@ -10,9 +10,7 @@ machineFunctionInfo:
 body:             |
   bb.0:
     liveins: $x0, $x9
-    ; Begin safe range of 3 instructions
 
-    ; Outline
     ; CHECK-LABEL: name: unsafe_range_at_end
     ; CHECK: liveins: $x0, $x9
     ; CHECK-NEXT: {{  $}}
@@ -33,16 +31,13 @@ body:             |
     $x2 = ADDXri $x0, 2, 0
     $x3 = ADDXri $x0, 3, 0
 
-    ; Split here
     $x8 = ADDXri $x3, 3, 0
 
-    ; Outline
     $x0 = ADDXri $x0, 0, 0
     $x1 = ADDXri $x0, 1, 0
     $x2 = ADDXri $x0, 2, 0
     $x3 = ADDXri $x0, 3, 0
 
-    ; Don't outline any of this
     $x16 = ADDXri $x0, 16, 0
     $x9 = ADDXri $x9, 16, 0
     $x9 = ADDXri $x9, 16, 0
diff --git a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
index e7e109170d6a12..0fe4683d97a232 100644
--- a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
+++ b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
@@ -22,7 +22,7 @@ define i32 @test(ptr %ptr) {
 ; CHECK-NEXT:    tbnz w10, #0, LBB0_1
 ; CHECK-NEXT:  ; %bb.2: ; %bb343
 ; CHECK-NEXT:    and w9, w10, #0x1
-; CHECK-NEXT:    mov w0, #-1
+; CHECK-NEXT:    mov w0, #-1 ; =0xffffffff
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:    ret
 bb:
diff --git a/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll b/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
index f122c94d5cffa2..d51bcf3df1f73f 100644
--- a/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
+++ b/llvm/test/CodeGen/AArch64/memcpy-scoped-aa.ll
@@ -134,3 +134,5 @@ declare i8* @mempcpy(i8*, i8*, i64)
 !2 = !{!1}
 !3 = distinct !{!3, !0, !"bax: %q"}
 !4 = !{!3}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MIR: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/memset-inline.ll b/llvm/test/CodeGen/AArch64/memset-inline.ll
index 02d852b5ce45ad..31f7cd9bcb95fc 100644
--- a/llvm/test/CodeGen/AArch64/memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/memset-inline.ll
@@ -29,7 +29,7 @@ define void @memset_2(ptr %a, i8 %value) nounwind {
 define void @memset_4(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_4:
 ; ALL:       // %bb.0:
-; ALL-NEXT:    mov w8, #16843009
+; ALL-NEXT:    mov w8, #16843009 // =0x1010101
 ; ALL-NEXT:    and w9, w1, #0xff
 ; ALL-NEXT:    mul w8, w9, w8
 ; ALL-NEXT:    str w8, [x0]
@@ -42,7 +42,7 @@ define void @memset_8(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_8:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    // kill: def $w1 killed $w1 def $x1
-; ALL-NEXT:    mov x8, #72340172838076673
+; ALL-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; ALL-NEXT:    and x9, x1, #0xff
 ; ALL-NEXT:    mul x8, x9, x8
 ; ALL-NEXT:    str x8, [x0]
@@ -55,7 +55,7 @@ define void @memset_16(ptr %a, i8 %value) nounwind {
 ; ALL-LABEL: memset_16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    // kill: def $w1 killed $w1 def $x1
-; ALL-NEXT:    mov x8, #72340172838076673
+; ALL-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; ALL-NEXT:    and x9, x1, #0xff
 ; ALL-NEXT:    mul x8, x9, x8
 ; ALL-NEXT:    stp x8, x8, [x0]
@@ -68,7 +68,7 @@ define void @memset_32(ptr %a, i8 %value) nounwind {
 ; GPR-LABEL: memset_32:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
-; GPR-NEXT:    mov x8, #72340172838076673
+; GPR-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; GPR-NEXT:    and x9, x1, #0xff
 ; GPR-NEXT:    mul x8, x9, x8
 ; GPR-NEXT:    stp x8, x8, [x0, #16]
@@ -88,7 +88,7 @@ define void @memset_64(ptr %a, i8 %value) nounwind {
 ; GPR-LABEL: memset_64:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
-; GPR-NEXT:    mov x8, #72340172838076673
+; GPR-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; GPR-NEXT:    and x9, x1, #0xff
 ; GPR-NEXT:    mul x8, x9, x8
 ; GPR-NEXT:    stp x8, x8, [x0, #48]
@@ -113,7 +113,7 @@ define void @aligned_memset_16(ptr align 16 %a, i8 %value) nounwind {
 ; ALL-LABEL: aligned_memset_16:
 ; ALL:       // %bb.0:
 ; ALL-NEXT:    // kill: def $w1 killed $w1 def $x1
-; ALL-NEXT:    mov x8, #72340172838076673
+; ALL-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; ALL-NEXT:    and x9, x1, #0xff
 ; ALL-NEXT:    mul x8, x9, x8
 ; ALL-NEXT:    stp x8, x8, [x0]
@@ -126,7 +126,7 @@ define void @aligned_memset_32(ptr align 32 %a, i8 %value) nounwind {
 ; GPR-LABEL: aligned_memset_32:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
-; GPR-NEXT:    mov x8, #72340172838076673
+; GPR-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; GPR-NEXT:    and x9, x1, #0xff
 ; GPR-NEXT:    mul x8, x9, x8
 ; GPR-NEXT:    stp x8, x8, [x0, #16]
@@ -146,7 +146,7 @@ define void @aligned_memset_64(ptr align 64 %a, i8 %value) nounwind {
 ; GPR-LABEL: aligned_memset_64:
 ; GPR:       // %bb.0:
 ; GPR-NEXT:    // kill: def $w1 killed $w1 def $x1
-; GPR-NEXT:    mov x8, #72340172838076673
+; GPR-NEXT:    mov x8, #72340172838076673 // =0x101010101010101
 ; GPR-NEXT:    and x9, x1, #0xff
 ; GPR-NEXT:    mul x8, x9, x8
 ; GPR-NEXT:    stp x8, x8, [x0, #48]
diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index fc5813b99ea552..56362ce2fbd869 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -19,12 +19,12 @@ define void @test(ptr %fde, i32 %fd, ptr %func, ptr %arg) uwtable {
 ; A53-NEXT:    mov x8, x0
 ; A53-NEXT:    mov x19, x8
 ; A53-NEXT:    mov w0, w1
-; A53-NEXT:    mov w9, #256
+; A53-NEXT:    mov w9, #256 // =0x100
 ; A53-NEXT:    stp x2, x3, [x8, #32]
 ; A53-NEXT:    mov x2, x8
 ; A53-NEXT:    str q0, [x19, #16]!
 ; A53-NEXT:    str w1, [x19]
-; A53-NEXT:    mov w1, #4
+; A53-NEXT:    mov w1, #4 // =0x4
 ; A53-NEXT:    str q0, [x8]
 ; A53-NEXT:    strh w9, [x8, #24]
 ; A53-NEXT:    str wzr, [x8, #20]
diff --git a/llvm/test/CodeGen/AArch64/neon-abd.ll b/llvm/test/CodeGen/AArch64/neon-abd.ll
index 901cb8adc23f09..9c120974204452 100644
--- a/llvm/test/CodeGen/AArch64/neon-abd.ll
+++ b/llvm/test/CodeGen/AArch64/neon-abd.ll
@@ -232,8 +232,14 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) #0 {
 define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) #0 {
 ; CHECK-LABEL: uabd_4h_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-NEXT:    bic v1.4h, #255, lsl #8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI15_0
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI15_0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
 ; CHECK-NEXT:    uabd v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ret
   %a.zext = zext <4 x i8> %a to <4 x i16>
@@ -284,9 +290,11 @@ define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) #0 {
 define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) #0 {
 ; CHECK-LABEL: uabd_2s_promoted_ops:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d2, #0x00ffff0000ffff
-; CHECK-NEXT:    and v0.8b, v0.8b, v2.8b
-; CHECK-NEXT:    and v1.8b, v1.8b, v2.8b
+; CHECK-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-NEXT:    rev32 v1.4h, v1.4h
+; CHECK-NEXT:    trn2 v0.4h, v0.4h, v2.4h
+; CHECK-NEXT:    trn2 v1.4h, v1.4h, v2.4h
 ; CHECK-NEXT:    uabd v0.2s, v0.2s, v1.2s
 ; CHECK-NEXT:    ret
   %a.zext = zext <2 x i16> %a to <2 x i32>
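
In uabd_2s_promoted_ops above, the movi/and masking is traded for a rev32 plus trn2 against a zero vector; both leave each 32-bit lane holding its original low 16 bits zero-extended. A minimal sketch of that equivalence (standalone C++ over the four 16-bit half-lanes, not LLVM code):

#include <array>
#include <cstddef>
#include <cstdint>

// Sketch of the "rev32 v.4h" + "trn2 v.4h, v.4h, vZero.4h" idiom above:
// rev32 swaps the two 16-bit halves of each 32-bit element, and trn2 then
// interleaves the odd-indexed halves of the swapped data with zeros,
// leaving each 32-bit lane zero-extended from its original low 16 bits.
std::array<uint16_t, 4> zext_in_lanes(std::array<uint16_t, 4> v) {
  std::array<uint16_t, 4> rev{}, zero{}, out{};
  for (std::size_t e = 0; e < 2; ++e) {  // rev32 across the .4h lanes
    rev[2 * e] = v[2 * e + 1];
    rev[2 * e + 1] = v[2 * e];
  }
  for (std::size_t e = 0; e < 2; ++e) {  // trn2 rev, zero
    out[2 * e] = rev[2 * e + 1];         // odd halves of operand 1
    out[2 * e + 1] = zero[2 * e + 1];    // odd halves of operand 2 (all 0)
  }
  return out;  // half-lanes: [v[0], 0, v[2], 0]
}
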
diff --git a/llvm/test/CodeGen/AArch64/pr51476.ll b/llvm/test/CodeGen/AArch64/pr51476.ll
index ce565a186ae057..939f9670d452a2 100644
--- a/llvm/test/CodeGen/AArch64/pr51476.ll
+++ b/llvm/test/CodeGen/AArch64/pr51476.ll
@@ -6,7 +6,7 @@ define void @test(i8 %arg) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    and w9, w0, #0xff
-; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    cmp w9, #1
 ; CHECK-NEXT:    cset w0, ne
 ; CHECK-NEXT:    strb w0, [sp, #12]
diff --git a/llvm/test/CodeGen/AArch64/pr58431.ll b/llvm/test/CodeGen/AArch64/pr58431.ll
index dcd97597ae4093..88bab4af95d64f 100644
--- a/llvm/test/CodeGen/AArch64/pr58431.ll
+++ b/llvm/test/CodeGen/AArch64/pr58431.ll
@@ -4,7 +4,7 @@
 define i32 @f(i64 %0) {
 ; CHECK-LABEL: f:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #10
+; CHECK-NEXT:    mov w8, #10 // =0xa
 ; CHECK-NEXT:    mov w9, w0
 ; CHECK-NEXT:    udiv x10, x9, x8
 ; CHECK-NEXT:    msub x0, x10, x8, x9
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-intrinsic-blend.ll b/llvm/test/CodeGen/AArch64/ptrauth-intrinsic-blend.ll
index e60b3dfeaf2258..c6c8b99d293fd5 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-intrinsic-blend.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-intrinsic-blend.ll
@@ -24,7 +24,7 @@ define i64 @test_blend_constant(i64 %arg) {
 define i64 @test_blend_constant_swapped(i64 %arg) {
 ; CHECK-LABEL: test_blend_constant_swapped:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    mov w8, #12345
+; CHECK-NEXT:    mov w8, #12345 ; =0x3039
 ; CHECK-NEXT:    bfi x8, x0, #48, #16
 ; CHECK-NEXT:    mov x0, x8
 ; CHECK-NEXT:    ret
@@ -36,7 +36,7 @@ define i64 @test_blend_constant_swapped(i64 %arg) {
 define i64 @test_blend_constant_wide(i64 %arg) {
 ; CHECK-LABEL: test_blend_constant_wide:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    mov w8, #65536
+; CHECK-NEXT:    mov w8, #65536 ; =0x10000
 ; CHECK-NEXT:    bfi x0, x8, #48, #16
 ; CHECK-NEXT:    ret
   %tmp = call i64 @llvm.ptrauth.blend(i64 %arg, i64 65536)
diff --git a/llvm/test/CodeGen/AArch64/qmovn.ll b/llvm/test/CodeGen/AArch64/qmovn.ll
index f4fdb1b97e2920..09a69fe7b53cfc 100644
--- a/llvm/test/CodeGen/AArch64/qmovn.ll
+++ b/llvm/test/CodeGen/AArch64/qmovn.ll
@@ -84,9 +84,9 @@ entry:
 define <2 x i32> @vqmovni64_smaxmin(<2 x i64> %s0) {
 ; CHECK-LABEL: vqmovni64_smaxmin:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2147483647
+; CHECK-NEXT:    mov w8, #2147483647 // =0x7fffffff
 ; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    mov x8, #-2147483648
+; CHECK-NEXT:    mov x8, #-2147483648 // =0xffffffff80000000
 ; CHECK-NEXT:    cmgt v2.2d, v1.2d, v0.2d
 ; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    dup v1.2d, x8
@@ -106,9 +106,9 @@ entry:
 define <2 x i32> @vqmovni64_sminmax(<2 x i64> %s0) {
 ; CHECK-LABEL: vqmovni64_sminmax:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov x8, #-2147483648
+; CHECK-NEXT:    mov x8, #-2147483648 // =0xffffffff80000000
 ; CHECK-NEXT:    dup v1.2d, x8
-; CHECK-NEXT:    mov w8, #2147483647
+; CHECK-NEXT:    mov w8, #2147483647 // =0x7fffffff
 ; CHECK-NEXT:    cmgt v2.2d, v0.2d, v1.2d
 ; CHECK-NEXT:    bif v0.16b, v1.16b, v2.16b
 ; CHECK-NEXT:    dup v1.2d, x8
diff --git a/llvm/test/CodeGen/AArch64/reassocmls.ll b/llvm/test/CodeGen/AArch64/reassocmls.ll
index 381caffba92eb0..23ca5fb2407d9c 100644
--- a/llvm/test/CodeGen/AArch64/reassocmls.ll
+++ b/llvm/test/CodeGen/AArch64/reassocmls.ll
@@ -79,7 +79,7 @@ define i64 @mls_i64_C(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e) {
 ; CHECK-LABEL: mls_i64_C:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mul x8, x2, x1
-; CHECK-NEXT:    mov w9, #10
+; CHECK-NEXT:    mov w9, #10 // =0xa
 ; CHECK-NEXT:    madd x8, x4, x3, x8
 ; CHECK-NEXT:    sub x0, x9, x8
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/settag-merge.ll b/llvm/test/CodeGen/AArch64/settag-merge.ll
index af922b91b221a8..9b9eb292aaed0e 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge.ll
+++ b/llvm/test/CodeGen/AArch64/settag-merge.ll
@@ -300,17 +300,39 @@ declare i32 @printf(ptr, ...) #0
 ; Don't merge in this case
 
 define i32 @nzcv_clobber(i32 %in) {
-entry:
 ; CHECK-LABEL: nzcv_clobber:
-; CHECK: stg sp, [sp, #528]
-; CHECK-NEXT: .LBB10_1:
-; CHECK: st2g x9, [x9], #32
-; CHECK-NEXT: subs x8, x8, #32
-; CHECK-NEXT: b.ne .LBB10_1
-; CHECK-NEXT: // %bb.2:
-; CHECK-NEXT: cmp w0, #10
-; CHECK-NEXT: stg sp, [sp]
-; CHECK-NEXT: b.ge .LBB10_4
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #544
+; CHECK-NEXT:    .cfi_def_cfa_offset 560
+; CHECK-NEXT:    .cfi_offset w30, -8
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    add x9, sp, #16
+; CHECK-NEXT:    mov x8, #512 // =0x200
+; CHECK-NEXT:    stg sp, [sp, #528]
+; CHECK-NEXT:  .LBB10_1: // %entry
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    st2g x9, [x9], #32
+; CHECK-NEXT:    subs x8, x8, #32
+; CHECK-NEXT:    b.ne .LBB10_1
+; CHECK-NEXT:  // %bb.2: // %entry
+; CHECK-NEXT:    cmp w0, #10
+; CHECK-NEXT:    stg sp, [sp]
+; CHECK-NEXT:    b.ge .LBB10_4
+; CHECK-NEXT:  // %bb.3: // %return0
+; CHECK-NEXT:    adrp x0, .L.str
+; CHECK-NEXT:    add x0, x0, :lo12:.L.str
+; CHECK-NEXT:    mov w1, #10 // =0xa
+; CHECK-NEXT:    bl printf
+; CHECK-NEXT:    mov w0, wzr
+; CHECK-NEXT:    b .LBB10_5
+; CHECK-NEXT:  .LBB10_4:
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:  .LBB10_5: // %common.ret
+; CHECK-NEXT:    add sp, sp, #544
+; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
 
   %a = alloca i8, i32 16, align 16
   %b = alloca i8, i32 512, align 16
@@ -334,13 +356,23 @@ return1:
 ; Merge in this case
 
 define i32 @nzcv_no_clobber(i32 %in) {
-entry:
 ; CHECK-LABEL: nzcv_no_clobber:
-; CHECK: mov x8, #544
-; CHECK-NEXT: .LBB11_1:
-; CHECK: st2g sp, [sp], #32
-; CHECK-NEXT: subs x8, x8, #32
-; CHECK-NEXT: b.ne .LBB11_1
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    sub sp, sp, #544
+; CHECK-NEXT:    .cfi_def_cfa_offset 560
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov w0, #1 // =0x1
+; CHECK-NEXT:    mov x8, #544 // =0x220
+; CHECK-NEXT:  .LBB11_1: // %entry
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    st2g sp, [sp], #32
+; CHECK-NEXT:    subs x8, x8, #32
+; CHECK-NEXT:    b.ne .LBB11_1
+; CHECK-NEXT:  // %bb.2: // %entry
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
 
 
   %a = alloca i8, i32 16, align 16
diff --git a/llvm/test/CodeGen/AArch64/shift-mod.ll b/llvm/test/CodeGen/AArch64/shift-mod.ll
index a90603195cf348..ac95b75168ed98 100644
--- a/llvm/test/CodeGen/AArch64/shift-mod.ll
+++ b/llvm/test/CodeGen/AArch64/shift-mod.ll
@@ -127,7 +127,7 @@ define i64 @ashr_add_shl_i36(i64 %r) {
 define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) {
 ; CHECK-LABEL: ashr_add_shl_mismatch_shifts1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #4294967296
+; CHECK-NEXT:    mov x8, #4294967296 // =0x100000000
 ; CHECK-NEXT:    add x8, x8, x0, lsl #8
 ; CHECK-NEXT:    asr x0, x8, #32
 ; CHECK-NEXT:    ret
@@ -140,7 +140,7 @@ define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) {
 define i64 @ashr_add_shl_mismatch_shifts2(i64 %r) {
 ; CHECK-LABEL: ashr_add_shl_mismatch_shifts2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #4294967296
+; CHECK-NEXT:    mov x8, #4294967296 // =0x100000000
 ; CHECK-NEXT:    add x8, x8, x0, lsr #8
 ; CHECK-NEXT:    lsr x0, x8, #8
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/signbit-test.ll b/llvm/test/CodeGen/AArch64/signbit-test.ll
index f5eaf80cf7f8d2..e01c83d513dd3a 100644
--- a/llvm/test/CodeGen/AArch64/signbit-test.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-test.ll
@@ -4,7 +4,7 @@
 define i64 @test_clear_mask_i64_i32(i64 %x) nounwind {
 ; CHECK-LABEL: test_clear_mask_i64_i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel x0, x8, x0, ge
 ; CHECK-NEXT:    ret
@@ -22,7 +22,7 @@ f:
 define i64 @test_set_mask_i64_i32(i64 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i64_i32:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst x0, #0x80000000
 ; CHECK-NEXT:    csel x0, x8, x0, ne
 ; CHECK-NEXT:    ret
@@ -40,7 +40,7 @@ f:
 define i64 @test_clear_mask_i64_i16(i64 %x) nounwind {
 ; CHECK-LABEL: test_clear_mask_i64_i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst x0, #0x8000
 ; CHECK-NEXT:    csel x0, x8, x0, eq
 ; CHECK-NEXT:    ret
@@ -58,7 +58,7 @@ f:
 define i64 @test_set_mask_i64_i16(i64 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i64_i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst x0, #0x8000
 ; CHECK-NEXT:    csel x0, x8, x0, ne
 ; CHECK-NEXT:    ret
@@ -76,7 +76,7 @@ f:
 define i64 @test_clear_mask_i64_i8(i64 %x) nounwind {
 ; CHECK-LABEL: test_clear_mask_i64_i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst x0, #0x80
 ; CHECK-NEXT:    csel x0, x8, x0, eq
 ; CHECK-NEXT:    ret
@@ -94,7 +94,7 @@ f:
 define i64 @test_set_mask_i64_i8(i64 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i64_i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst x0, #0x80
 ; CHECK-NEXT:    csel x0, x8, x0, ne
 ; CHECK-NEXT:    ret
@@ -112,7 +112,7 @@ f:
 define i32 @test_clear_mask_i32_i16(i32 %x) nounwind {
 ; CHECK-LABEL: test_clear_mask_i32_i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x8000
 ; CHECK-NEXT:    csel w0, w8, w0, eq
 ; CHECK-NEXT:    ret
@@ -130,7 +130,7 @@ f:
 define i32 @test_set_mask_i32_i16(i32 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i32_i16:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x8000
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
@@ -148,7 +148,7 @@ f:
 define i32 @test_clear_mask_i32_i8(i32 %x) nounwind {
 ; CHECK-LABEL: test_clear_mask_i32_i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x80
 ; CHECK-NEXT:    csel w0, w8, w0, eq
 ; CHECK-NEXT:    ret
@@ -166,7 +166,7 @@ f:
 define i32 @test_set_mask_i32_i8(i32 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i32_i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x80
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
@@ -184,7 +184,7 @@ f:
 define i16 @test_clear_mask_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: test_clear_mask_i16_i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x80
 ; CHECK-NEXT:    csel w0, w8, w0, eq
 ; CHECK-NEXT:    ret
@@ -202,7 +202,7 @@ f:
 define i16 @test_set_mask_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i16_i8:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x80
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
@@ -220,7 +220,7 @@ f:
 define i16 @test_set_mask_i16_i7(i16 %x) nounwind {
 ; CHECK-LABEL: test_set_mask_i16_i7:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    tst w0, #0x40
 ; CHECK-NEXT:    csel w0, w8, w0, ne
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
index ab42e6463feeed..bb4df6d8935b1b 100644
--- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -396,7 +396,7 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
 define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: add_ulecmp_bad_i16_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
diff --git a/llvm/test/CodeGen/AArch64/srem-lkk.ll b/llvm/test/CodeGen/AArch64/srem-lkk.ll
index 5ff178937ebbfb..d9f91449dffb80 100644
--- a/llvm/test/CodeGen/AArch64/srem-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/srem-lkk.ll
@@ -4,14 +4,14 @@
 define i32 @fold_srem_positive_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 95
@@ -22,13 +22,13 @@ define i32 @fold_srem_positive_odd(i32 %x) {
 define i32 @fold_srem_positive_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #36849
+; CHECK-NEXT:    mov w8, #36849 // =0x8ff1
 ; CHECK-NEXT:    movk w8, #15827, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #1060
+; CHECK-NEXT:    mov w9, #1060 // =0x424
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 1060
@@ -39,13 +39,13 @@ define i32 @fold_srem_positive_even(i32 %x) {
 define i32 @fold_srem_negative_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #65445
+; CHECK-NEXT:    mov w8, #65445 // =0xffa5
 ; CHECK-NEXT:    movk w8, #42330, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-723
+; CHECK-NEXT:    mov w9, #-723 // =0xfffffd2d
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -723
@@ -56,13 +56,13 @@ define i32 @fold_srem_negative_odd(i32 %x) {
 define i32 @fold_srem_negative_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #62439
+; CHECK-NEXT:    mov w8, #62439 // =0xf3e7
 ; CHECK-NEXT:    movk w8, #64805, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-22981
+; CHECK-NEXT:    mov w9, #-22981 // =0xffffa63b
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -22981
@@ -74,14 +74,14 @@ define i32 @fold_srem_negative_even(i32 %x) {
 define i32 @combine_srem_sdiv(i32 %x) {
 ; CHECK-LABEL: combine_srem_sdiv:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w9, w8, w9, w0
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
@@ -95,14 +95,14 @@ define i32 @combine_srem_sdiv(i32 %x) {
 define i64 @dont_fold_srem_i64(i64 %x) {
 ; CHECK-LABEL: dont_fold_srem_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #58849
+; CHECK-NEXT:    mov x8, #58849 // =0xe5e1
 ; CHECK-NEXT:    movk x8, #48148, lsl #16
 ; CHECK-NEXT:    movk x8, #33436, lsl #32
 ; CHECK-NEXT:    movk x8, #21399, lsl #48
 ; CHECK-NEXT:    smulh x8, x0, x8
 ; CHECK-NEXT:    asr x9, x8, #5
 ; CHECK-NEXT:    add x8, x9, x8, lsr #63
-; CHECK-NEXT:    mov w9, #98
+; CHECK-NEXT:    mov w9, #98 // =0x62
 ; CHECK-NEXT:    msub x0, x8, x9, x0
 ; CHECK-NEXT:    ret
   %1 = srem i64 %x, 98
diff --git a/llvm/test/CodeGen/AArch64/strict-fp-int-promote.ll b/llvm/test/CodeGen/AArch64/strict-fp-int-promote.ll
index 0f7ea36949da54..7c70f5461a2199 100644
--- a/llvm/test/CodeGen/AArch64/strict-fp-int-promote.ll
+++ b/llvm/test/CodeGen/AArch64/strict-fp-int-promote.ll
@@ -12,7 +12,7 @@ declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metad
 define i32 @test() #0 {
 ; CHECK-LABEL: test:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    scvtf s0, w8
 ; CHECK-NEXT:    fcmp s0, s0
 ; CHECK-NEXT:    cset w0, eq
@@ -20,9 +20,9 @@ define i32 @test() #0 {
 ;
 ; SUBOPTIMAL-LABEL: test:
 ; SUBOPTIMAL:       // %bb.0: // %entry
-; SUBOPTIMAL-NEXT:    mov w8, #1
+; SUBOPTIMAL-NEXT:    mov w8, #1 // =0x1
 ; SUBOPTIMAL-NEXT:    scvtf s0, w8
-; SUBOPTIMAL-NEXT:    mov w8, #1
+; SUBOPTIMAL-NEXT:    mov w8, #1 // =0x1
 ; SUBOPTIMAL-NEXT:    scvtf s1, w8
 ; SUBOPTIMAL-NEXT:    fcmp s0, s1
 ; SUBOPTIMAL-NEXT:    cset w8, eq
@@ -39,7 +39,7 @@ entry:
 define i32 @test2() #0 {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    mov w8, #1 // =0x1
 ; CHECK-NEXT:    scvtf s0, w8
 ; CHECK-NEXT:    ucvtf s1, w8
 ; CHECK-NEXT:    fcmp s0, s1
@@ -48,9 +48,9 @@ define i32 @test2() #0 {
 ;
 ; SUBOPTIMAL-LABEL: test2:
 ; SUBOPTIMAL:       // %bb.0: // %entry
-; SUBOPTIMAL-NEXT:    mov w8, #1
+; SUBOPTIMAL-NEXT:    mov w8, #1 // =0x1
 ; SUBOPTIMAL-NEXT:    scvtf s0, w8
-; SUBOPTIMAL-NEXT:    mov w8, #1
+; SUBOPTIMAL-NEXT:    mov w8, #1 // =0x1
 ; SUBOPTIMAL-NEXT:    ucvtf s1, w8
 ; SUBOPTIMAL-NEXT:    fcmp s0, s1
 ; SUBOPTIMAL-NEXT:    cset w8, eq
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-extends.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-extends.ll
index 756e5f4cddf809..6985a9b052ecb5 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-extends.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-extends.ll
@@ -722,9 +722,12 @@ define void @zext_v64i8_v64i32(ptr %in, ptr %out) vscale_range(16,0) #0 {
 define void @zext_v4i8_v4i64(<4 x i8> %a, ptr %out) vscale_range(2,0) #0 {
 ; CHECK-LABEL: zext_v4i8_v4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI34_0
 ; CHECK-NEXT:    ptrue p0.d, vl4
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI34_0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    uunpklo z0.s, z0.h
 ; CHECK-NEXT:    uunpklo z0.d, z0.s
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x0]
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
index 50040eaa61e6c5..c9c080049cf379 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
@@ -109,8 +109,9 @@ define void @ucvtf_v128i16_v128f16(ptr %a, ptr %b) vscale_range(16,0) #0 {
 define <2 x float> @ucvtf_v2i16_v2f32(<2 x i16> %op1) vscale_range(2,0) #0 {
 ; CHECK-LABEL: ucvtf_v2i16_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-NEXT:    trn2 v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ucvtf v0.2s, v0.2s
 ; CHECK-NEXT:    ret
   %res = uitofp <2 x i16> %op1 to <2 x float>
@@ -223,8 +224,9 @@ define <1 x double> @ucvtf_v1i16_v1f64(<1 x i16> %op1) vscale_range(2,0) #0 {
 define <2 x double> @ucvtf_v2i16_v2f64(<2 x i16> %op1) vscale_range(2,0) #0 {
 ; CHECK-LABEL: ucvtf_v2i16_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-NEXT:    trn2 v0.4h, v0.4h, v1.4h
 ; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
 ; CHECK-NEXT:    ucvtf v0.2d, v0.2d
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index 4ac0abcb851d44..57828ef01b907c 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -12,11 +12,14 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @masked_gather_v2i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-LABEL: masked_gather_v2i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldrb w8, [x0]
-; CHECK-NEXT:    ldrb w9, [x0, #1]
+; CHECK-NEXT:    ld1 { v0.b }[0], [x0]
+; CHECK-NEXT:    add x8, x0, #1
 ; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    mov v0.s[1], w9
+; CHECK-NEXT:    ld1 { v0.b }[4], [x8]
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    cmeq v0.2s, v0.2s, #0
 ; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
@@ -38,10 +41,14 @@ define void @masked_gather_v4i8(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-LABEL: masked_gather_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    adrp x8, .LCPI1_0
 ; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI1_0]
 ; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
-; CHECK-NEXT:    cmeq v0.4h, v0.4h, #0
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    cmeq v0.4h, v0.4h, #0
 ; CHECK-NEXT:    sunpklo z0.s, z0.h
 ; CHECK-NEXT:    sunpklo z0.d, z0.s
 ; CHECK-NEXT:    cmpne p1.d, p0/z, z0.d, #0
@@ -165,11 +172,13 @@ define void @masked_gather_v32i8(ptr %a, ptr %b) vscale_range(16,0) #0 {
 define void @masked_gather_v2i16(ptr %a, ptr %b) vscale_range(2,0) #0 {
 ; CHECK-LABEL: masked_gather_v2i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    ldrh w9, [x0, #2]
+; CHECK-NEXT:    ld1 { v1.h }[0], [x0]
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    ptrue p0.d, vl2
-; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    mov v0.s[1], w9
+; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
+; CHECK-NEXT:    rev32 v1.4h, v1.4h
+; CHECK-NEXT:    trn2 v0.4h, v1.4h, v0.4h
 ; CHECK-NEXT:    cmeq v0.2s, v0.2s, #0
 ; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
 ; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, #0
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
index b9c1d9a42a339a..bae3d9f76b0d99 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-gather-prefetches-vect-base-invalid-imm-offset.ll
@@ -14,7 +14,7 @@ define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_runtime_offset(<
 define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 32, i32 1)
@@ -24,7 +24,7 @@ define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
@@ -44,7 +44,7 @@ define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_runtime_offset(<
 define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 32, i32 1)
@@ -54,7 +54,7 @@ define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfb.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
@@ -76,7 +76,7 @@ define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_runtime_offset(<
 define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 63, i32 1)
@@ -86,7 +86,7 @@ define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
@@ -96,7 +96,7 @@ define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
@@ -116,7 +116,7 @@ define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_runtime_offset(<
 define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 63, i32 1)
@@ -126,7 +126,7 @@ define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
@@ -136,7 +136,7 @@ define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfh.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
@@ -158,7 +158,7 @@ define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_runtime_offset(<
 define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
@@ -168,7 +168,7 @@ define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
@@ -178,7 +178,7 @@ define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
@@ -198,7 +198,7 @@ define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_runtime_offset(<
 define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
@@ -208,7 +208,7 @@ define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
@@ -218,7 +218,7 @@ define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfw.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
@@ -240,7 +240,7 @@ define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_runtime_offset(<
 define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 125, i32 1)
@@ -250,7 +250,7 @@ define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 -1, i32 1)
@@ -260,7 +260,7 @@ define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediat
 define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 4 x i32> %bases, <vscale x 4 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx4vi32_invalid_immediate_offset_inbound_not_multiple_of_8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx4vi32(<vscale x 4 x i1> %Pg, <vscale x 4 x i32> %bases, i64 33, i32 1)
@@ -280,7 +280,7 @@ define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_runtime_offset(<
 define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_upper_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 125, i32 1)
@@ -290,7 +290,7 @@ define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_lower_bound:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 -1, i32 1)
@@ -300,7 +300,7 @@ define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediat
 define void @llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8(<vscale x 2 x i64> %bases, <vscale x 2 x i1> %Pg) nounwind {
 ; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scalar_offset_nx2vi64_invalid_immediate_offset_inbound_not_multiple_of_8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    prfb pldl1strm, p0, [x8, z0.d, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.prfd.gather.scalar.offset.nx2vi64(<vscale x 2 x i1> %Pg, <vscale x 2 x i64> %bases, i64 33, i32 1)
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-vector-base-imm-offset.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-vector-base-imm-offset.ll
index 797b8c18e02ea3..3109f9f0901588 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-vector-base-imm-offset.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-scatter-stores-vector-base-imm-offset.ll
@@ -132,7 +132,7 @@ define void @sst1d_d_imm_offset_double(<vscale x 2 x double> %data, <vscale x 2
 define void @sst1b_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
 ; CHECK-LABEL: sst1b_s_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    st1b { z0.s }, p0, [x8, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
@@ -146,7 +146,7 @@ define void @sst1b_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x
 define void @sst1b_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: sst1b_d_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    st1b { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
@@ -161,7 +161,7 @@ define void @sst1b_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x
 define void @sst1h_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
 ; CHECK-LABEL: sst1h_s_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    st1h { z0.s }, p0, [x8, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   %data_trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
@@ -175,7 +175,7 @@ define void @sst1h_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x
 define void @sst1h_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: sst1h_d_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    st1h { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
@@ -190,7 +190,7 @@ define void @sst1h_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x
 define void @sst1w_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
 ; CHECK-LABEL: sst1w_s_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x8, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i32> %data,
@@ -203,7 +203,7 @@ define void @sst1w_s_imm_offset_out_of_range(<vscale x 4 x i32> %data, <vscale x
 define void @sst1w_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: sst1w_d_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    st1w { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
   %data_trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
@@ -217,7 +217,7 @@ define void @sst1w_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x
 define void @sst1w_s_imm_offset_float_out_of_range(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
 ; CHECK-LABEL: sst1w_s_imm_offset_float_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #125
+; CHECK-NEXT:    mov w8, #125 // =0x7d
 ; CHECK-NEXT:    st1w { z0.s }, p0, [x8, z1.s, uxtw]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x float> %data,
@@ -231,7 +231,7 @@ define void @sst1w_s_imm_offset_float_out_of_range(<vscale x 4 x float> %data, <
 define void @sst1d_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: sst1d_d_imm_offset_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #249
+; CHECK-NEXT:    mov w8, #249 // =0xf9
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> %data,
@@ -244,7 +244,7 @@ define void @sst1d_d_imm_offset_out_of_range(<vscale x 2 x i64> %data, <vscale x
 define void @sst1d_d_imm_offset_double_out_of_range(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: sst1d_d_imm_offset_double_out_of_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #249
+; CHECK-NEXT:    mov w8, #249 // =0xf9
 ; CHECK-NEXT:    st1d { z0.d }, p0, [x8, z1.d]
 ; CHECK-NEXT:    ret
   call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> %data,
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
index 657d9bea88a241..e8e392b8da2a72 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
@@ -81,7 +81,7 @@ define <vscale x 2 x i1> @whilele_d_xx(i64 %a, i64 %b) {
 define <vscale x 2 x i1> @whilele_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
 ; CHECK-LABEL: whilele_d_ii_dont_fold_to_ptrue_larger_than_minvec:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
+; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    whilele p0.d, xzr, x8
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 0, i64 3)
@@ -101,7 +101,7 @@ entry:
 define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
 ; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    whilele p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
@@ -121,8 +121,8 @@ define <vscale x 16 x i1> @whilele_b_vl_maximum() vscale_range(16, 16) {
 define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_overflow() {
 ; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_overflow:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    mov w9, #2147483647
+; CHECK-NEXT:    mov w8, #2 // =0x2
+; CHECK-NEXT:    mov w9, #2147483647 // =0x7fffffff
 ; CHECK-NEXT:    movk w8, #32768, lsl #16
 ; CHECK-NEXT:    whilele p0.b, w9, w8
 ; CHECK-NEXT:    ret
@@ -134,7 +134,7 @@ entry:
 define <vscale x 16 x i1> @whilele_b_ii_dont_fold_to_ptrue_increment_overflow() {
 ; CHECK-LABEL: whilele_b_ii_dont_fold_to_ptrue_increment_overflow:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2147483647
+; CHECK-NEXT:    mov w8, #2147483647 // =0x7fffffff
 ; CHECK-NEXT:    whilele p0.b, wzr, w8
 ; CHECK-NEXT:    ret
 entry:
@@ -221,7 +221,7 @@ define <vscale x 2 x i1> @whilelo_d_xx(i64 %a, i64 %b) {
 define <vscale x 2 x i1> @whilelo_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
 ; CHECK-LABEL: whilelo_d_ii_dont_fold_to_ptrue_larger_than_minvec:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
+; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    whilelo p0.d, xzr, x8
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 0, i64 3)
@@ -241,7 +241,7 @@ entry:
 define <vscale x 16 x i1> @whilelo_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
 ; CHECK-LABEL: whilelo_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    whilelo p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
@@ -261,8 +261,8 @@ define <vscale x 16 x i1> @whilelo_b_vl_maximum() vscale_range(16, 16) {
 define <vscale x 16 x i1> @whilelo_b_ii_dont_fold_to_ptrue_overflow() {
 ; CHECK-LABEL: whilelo_b_ii_dont_fold_to_ptrue_overflow:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #6
-; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    mov w8, #6 // =0x6
+; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
 ; CHECK-NEXT:    whilelo p0.b, w9, w8
 ; CHECK-NEXT:    ret
 entry:
@@ -349,7 +349,7 @@ define <vscale x 2 x i1> @whilels_d_xx(i64 %a, i64 %b) {
 define <vscale x 2 x i1> @whilels_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
 ; CHECK-LABEL: whilels_d_ii_dont_fold_to_ptrue_larger_than_minvec:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
+; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    whilels p0.d, xzr, x8
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 0, i64 3)
@@ -369,7 +369,7 @@ entry:
 define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
 ; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    whilels p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
@@ -389,8 +389,8 @@ define <vscale x 16 x i1> @whilels_b_ii_vl_maximum() vscale_range(16, 16) {
 define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_overflow() {
 ; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_overflow:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #6
-; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    mov w8, #6 // =0x6
+; CHECK-NEXT:    mov w9, #-1 // =0xffffffff
 ; CHECK-NEXT:    whilels p0.b, w9, w8
 ; CHECK-NEXT:    ret
 entry:
@@ -401,7 +401,7 @@ entry:
 define <vscale x 16 x i1> @whilels_b_ii_dont_fold_to_ptrue_increment_overflow() {
 ; CHECK-LABEL: whilels_b_ii_dont_fold_to_ptrue_increment_overflow:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
 ; CHECK-NEXT:    whilels p0.b, wzr, w8
 ; CHECK-NEXT:    ret
 entry:
@@ -488,7 +488,7 @@ define <vscale x 2 x i1> @whilelt_d_xx(i64 %a, i64 %b) {
 define <vscale x 2 x i1> @whilelt_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
 ; CHECK-LABEL: whilelt_d_ii_dont_fold_to_ptrue_larger_than_minvec:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #3
+; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    whilelt p0.d, xzr, x8
 ; CHECK-NEXT:    ret
   %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 0, i64 3)
@@ -508,7 +508,7 @@ entry:
 define <vscale x 16 x i1> @whilelt_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
 ; CHECK-LABEL: whilelt_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    whilelt p0.b, xzr, x8
 ; CHECK-NEXT:    ret
 entry:
@@ -528,8 +528,8 @@ define <vscale x 16 x i1> @whilelt_b_ii_vl_maximum() vscale_range(16, 16) {
 define <vscale x 16 x i1> @whilelt_b_ii_dont_fold_to_ptrue_overflow() {
 ; CHECK-LABEL: whilelt_b_ii_dont_fold_to_ptrue_overflow:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    mov w9, #2147483647
+; CHECK-NEXT:    mov w8, #2 // =0x2
+; CHECK-NEXT:    mov w9, #2147483647 // =0x7fffffff
 ; CHECK-NEXT:    movk w8, #32768, lsl #16
 ; CHECK-NEXT:    whilelt p0.b, w9, w8
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
index 991d997f3317a7..0558883ac725b8 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldnf1.mir
@@ -7,26 +7,22 @@
   define void @testcase_positive_offset() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset 63 * readsize
     ret void
   }
   define void @testcase_negative_offset() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset 63 * readsize
     ret void
   }
 
   define void @testcase_positive_offset_out_of_range() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset 64 * readsize
     ret void
   }
   define void @testcase_negative_offset_out_of_range() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset -1 * readsize
     ret void
   }
 ...
diff --git a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
index 1352b9ddcacdf2..3d9728d5be8664 100644
--- a/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
+++ b/llvm/test/CodeGen/AArch64/sve-ldstnt1.mir
@@ -7,26 +7,22 @@
   define void @testcase_positive_offset() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset 7 * readsize
     ret void
   }
   define void @testcase_negative_offset() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset -8 * readsize
     ret void
   }
 
   define void @testcase_positive_offset_out_of_range() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset 8 * readsize
     ret void
   }
   define void @testcase_negative_offset_out_of_range() uwtable {
     %dummy = alloca <vscale x 2 x i64>, align 8
     %object = alloca <vscale x 2 x i64>, align 8
-    ; Reads from %object at offset -9 * readsize
     ret void
   }
 ...
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
index 9e34beedf5458e..f79bdd554ae284 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-vec-plus-imm.ll
@@ -122,7 +122,7 @@ define <vscale x 2 x i64> @masked_sgather_nxv2i32(<vscale x 2 x ptr> %bases, <vs
 define <vscale x 2 x i64> @masked_gather_nxv2i8_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2i8_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr i8, <vscale x 2 x ptr> %bases, i32 32
@@ -134,7 +134,7 @@ define <vscale x 2 x i64> @masked_gather_nxv2i8_range(<vscale x 2 x ptr> %bases,
 define <vscale x 2 x half> @masked_gather_nxv2f16_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f16_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #64
+; CHECK-NEXT:    mov w8, #64 // =0x40
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr half, <vscale x 2 x ptr> %bases, i32 32
@@ -145,7 +145,7 @@ define <vscale x 2 x half> @masked_gather_nxv2f16_range(<vscale x 2 x ptr> %base
 define <vscale x 2 x bfloat> @masked_gather_nxv2bf16_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) #0 {
 ; CHECK-LABEL: masked_gather_nxv2bf16_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #64
+; CHECK-NEXT:    mov w8, #64 // =0x40
 ; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr bfloat, <vscale x 2 x ptr> %bases, i32 32
@@ -156,7 +156,7 @@ define <vscale x 2 x bfloat> @masked_gather_nxv2bf16_range(<vscale x 2 x ptr> %b
 define <vscale x 2 x float> @masked_gather_nxv2f32_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f32_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #128
+; CHECK-NEXT:    mov w8, #128 // =0x80
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr float, <vscale x 2 x ptr> %bases, i32 32
@@ -167,7 +167,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32_range(<vscale x 2 x ptr> %bas
 define <vscale x 2 x double> @masked_gather_nxv2f64_range(<vscale x 2 x ptr> %bases, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: masked_gather_nxv2f64_range:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #256
+; CHECK-NEXT:    mov w8, #256 // =0x100
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d]
 ; CHECK-NEXT:    ret
   %ptrs = getelementptr double, <vscale x 2 x ptr> %bases, i32 32
diff --git a/llvm/test/CodeGen/AArch64/sve2-int-mul.ll b/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
index 800888b7e6cb97..26986fae554eb1 100644
--- a/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-int-mul.ll
@@ -19,7 +19,7 @@ define <vscale x 8 x i16> @mul_i16_imm(<vscale x 8 x i16> %a) {
 define <vscale x 8 x i16> @mul_i16_imm_neg(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: mul_i16_imm_neg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-200
+; CHECK-NEXT:    mov w8, #-200 // =0xffffff38
 ; CHECK-NEXT:    mov z1.h, w8
 ; CHECK-NEXT:    mul z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
@@ -44,7 +44,7 @@ define <vscale x 4 x i32> @mul_i32_imm(<vscale x 4 x i32> %a) {
 define <vscale x 4 x i32> @mul_i32_imm_neg(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: mul_i32_imm_neg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-200
+; CHECK-NEXT:    mov w8, #-200 // =0xffffff38
 ; CHECK-NEXT:    mov z1.s, w8
 ; CHECK-NEXT:    mul z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
@@ -69,7 +69,7 @@ define <vscale x 2 x i64> @mul_i64_imm(<vscale x 2 x i64> %a) {
 define <vscale x 2 x i64> @mul_i64_imm_neg(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: mul_i64_imm_neg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #-200
+; CHECK-NEXT:    mov x8, #-200 // =0xffffffffffffff38
 ; CHECK-NEXT:    mov z1.d, x8
 ; CHECK-NEXT:    mul z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 365fe03ab0b084..e195d6cd0a22ff 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -337,38 +337,45 @@ define void @loop3(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    mov w8, #1132396544 // =0x437f0000
 ; CHECK-NEXT:    adrp x12, .LCPI2_0
 ; CHECK-NEXT:    and x10, x11, #0x1fffffffc
+; CHECK-NEXT:    adrp x13, .LCPI2_1
+; CHECK-NEXT:    adrp x14, .LCPI2_2
+; CHECK-NEXT:    add x9, x10, x10, lsl #1
 ; CHECK-NEXT:    dup v0.4s, w8
 ; CHECK-NEXT:    ldr q1, [x12, :lo12:.LCPI2_0]
-; CHECK-NEXT:    add x9, x10, x10, lsl #1
+; CHECK-NEXT:    ldr d2, [x13, :lo12:.LCPI2_1]
+; CHECK-NEXT:    ldr d3, [x14, :lo12:.LCPI2_2]
 ; CHECK-NEXT:    mov x12, x10
 ; CHECK-NEXT:    add x8, x1, x9, lsl #2
 ; CHECK-NEXT:    add x9, x0, x9
 ; CHECK-NEXT:  .LBB2_4: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ld3 { v2.4s, v3.4s, v4.4s }, [x1], #48
+; CHECK-NEXT:    ld3 { v4.4s, v5.4s, v6.4s }, [x1], #48
 ; CHECK-NEXT:    add x13, x0, #8
 ; CHECK-NEXT:    subs x12, x12, #4
-; CHECK-NEXT:    fcmgt v5.4s, v2.4s, v0.4s
-; CHECK-NEXT:    fcmgt v6.4s, v3.4s, v0.4s
 ; CHECK-NEXT:    fcmgt v7.4s, v4.4s, v0.4s
-; CHECK-NEXT:    fcmlt v16.4s, v2.4s, #0.0
-; CHECK-NEXT:    fcmlt v17.4s, v3.4s, #0.0
-; CHECK-NEXT:    bsl v5.16b, v0.16b, v2.16b
-; CHECK-NEXT:    bsl v6.16b, v0.16b, v3.16b
+; CHECK-NEXT:    fcmgt v16.4s, v5.4s, v0.4s
+; CHECK-NEXT:    fcmgt v17.4s, v6.4s, v0.4s
+; CHECK-NEXT:    fcmlt v18.4s, v4.4s, #0.0
+; CHECK-NEXT:    fcmlt v19.4s, v5.4s, #0.0
 ; CHECK-NEXT:    bsl v7.16b, v0.16b, v4.16b
-; CHECK-NEXT:    fcmlt v2.4s, v4.4s, #0.0
-; CHECK-NEXT:    bic v3.16b, v5.16b, v16.16b
-; CHECK-NEXT:    bic v4.16b, v6.16b, v17.16b
-; CHECK-NEXT:    bic v2.16b, v7.16b, v2.16b
-; CHECK-NEXT:    fcvtzs v3.4s, v3.4s
+; CHECK-NEXT:    bsl v16.16b, v0.16b, v5.16b
+; CHECK-NEXT:    bsl v17.16b, v0.16b, v6.16b
+; CHECK-NEXT:    fcmlt v4.4s, v6.4s, #0.0
+; CHECK-NEXT:    bic v5.16b, v7.16b, v18.16b
+; CHECK-NEXT:    bic v6.16b, v16.16b, v19.16b
+; CHECK-NEXT:    bic v4.16b, v17.16b, v4.16b
+; CHECK-NEXT:    fcvtzs v5.4s, v5.4s
+; CHECK-NEXT:    fcvtzs v6.4s, v6.4s
 ; CHECK-NEXT:    fcvtzs v4.4s, v4.4s
-; CHECK-NEXT:    fcvtzs v2.4s, v2.4s
-; CHECK-NEXT:    xtn v5.4h, v3.4s
-; CHECK-NEXT:    xtn v6.4h, v4.4s
-; CHECK-NEXT:    xtn v7.4h, v2.4s
-; CHECK-NEXT:    tbl v2.16b, { v5.16b, v6.16b, v7.16b }, v1.16b
-; CHECK-NEXT:    st1 { v2.s }[2], [x13]
-; CHECK-NEXT:    str d2, [x0], #12
+; CHECK-NEXT:    xtn v16.4h, v5.4s
+; CHECK-NEXT:    xtn v17.4h, v6.4s
+; CHECK-NEXT:    xtn v18.4h, v4.4s
+; CHECK-NEXT:    tbl v4.16b, { v16.16b, v17.16b, v18.16b }, v1.16b
+; CHECK-NEXT:    tbl v5.8b, { v4.16b }, v2.8b
+; CHECK-NEXT:    tbl v4.8b, { v4.16b }, v3.8b
+; CHECK-NEXT:    mov v4.d[1], v5.d[0]
+; CHECK-NEXT:    st1 { v4.s }[2], [x13]
+; CHECK-NEXT:    str d4, [x0], #12
 ; CHECK-NEXT:    b.ne .LBB2_4
 ; CHECK-NEXT:  // %bb.5: // %middle.block
 ; CHECK-NEXT:    cmp x11, x10
@@ -590,42 +597,49 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    add x11, x8, #1
 ; CHECK-NEXT:    mov w8, #1132396544 // =0x437f0000
 ; CHECK-NEXT:    adrp x12, .LCPI3_0
+; CHECK-NEXT:    adrp x13, .LCPI3_1
+; CHECK-NEXT:    adrp x14, .LCPI3_2
 ; CHECK-NEXT:    and x10, x11, #0x1fffffffc
 ; CHECK-NEXT:    dup v0.4s, w8
-; CHECK-NEXT:    ldr q1, [x12, :lo12:.LCPI3_0]
 ; CHECK-NEXT:    add x8, x1, x10, lsl #4
 ; CHECK-NEXT:    add x9, x0, x10, lsl #2
+; CHECK-NEXT:    ldr q1, [x12, :lo12:.LCPI3_0]
+; CHECK-NEXT:    ldr d2, [x13, :lo12:.LCPI3_1]
+; CHECK-NEXT:    ldr d3, [x14, :lo12:.LCPI3_2]
 ; CHECK-NEXT:    mov x12, x10
 ; CHECK-NEXT:  .LBB3_9: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ld4 { v2.4s, v3.4s, v4.4s, v5.4s }, [x1], #64
+; CHECK-NEXT:    ld4 { v4.4s, v5.4s, v6.4s, v7.4s }, [x1], #64
 ; CHECK-NEXT:    subs x12, x12, #4
-; CHECK-NEXT:    fcmgt v6.4s, v2.4s, v0.4s
-; CHECK-NEXT:    fcmgt v7.4s, v3.4s, v0.4s
 ; CHECK-NEXT:    fcmgt v16.4s, v4.4s, v0.4s
 ; CHECK-NEXT:    fcmgt v17.4s, v5.4s, v0.4s
-; CHECK-NEXT:    fcmlt v18.4s, v2.4s, #0.0
-; CHECK-NEXT:    fcmlt v19.4s, v3.4s, #0.0
+; CHECK-NEXT:    fcmgt v18.4s, v6.4s, v0.4s
+; CHECK-NEXT:    fcmgt v19.4s, v7.4s, v0.4s
 ; CHECK-NEXT:    fcmlt v20.4s, v4.4s, #0.0
-; CHECK-NEXT:    bsl v6.16b, v0.16b, v2.16b
-; CHECK-NEXT:    bsl v7.16b, v0.16b, v3.16b
+; CHECK-NEXT:    fcmlt v21.4s, v5.4s, #0.0
+; CHECK-NEXT:    fcmlt v22.4s, v6.4s, #0.0
 ; CHECK-NEXT:    bsl v16.16b, v0.16b, v4.16b
 ; CHECK-NEXT:    bsl v17.16b, v0.16b, v5.16b
-; CHECK-NEXT:    fcmlt v2.4s, v5.4s, #0.0
-; CHECK-NEXT:    bic v3.16b, v6.16b, v18.16b
-; CHECK-NEXT:    bic v4.16b, v7.16b, v19.16b
+; CHECK-NEXT:    bsl v18.16b, v0.16b, v6.16b
+; CHECK-NEXT:    bsl v19.16b, v0.16b, v7.16b
+; CHECK-NEXT:    fcmlt v4.4s, v7.4s, #0.0
 ; CHECK-NEXT:    bic v5.16b, v16.16b, v20.16b
-; CHECK-NEXT:    bic v2.16b, v17.16b, v2.16b
-; CHECK-NEXT:    fcvtzs v3.4s, v3.4s
-; CHECK-NEXT:    fcvtzs v4.4s, v4.4s
+; CHECK-NEXT:    bic v6.16b, v17.16b, v21.16b
+; CHECK-NEXT:    bic v7.16b, v18.16b, v22.16b
+; CHECK-NEXT:    bic v4.16b, v19.16b, v4.16b
 ; CHECK-NEXT:    fcvtzs v5.4s, v5.4s
-; CHECK-NEXT:    fcvtzs v2.4s, v2.4s
-; CHECK-NEXT:    xtn v16.4h, v3.4s
-; CHECK-NEXT:    xtn v17.4h, v4.4s
-; CHECK-NEXT:    xtn v18.4h, v5.4s
-; CHECK-NEXT:    xtn v19.4h, v2.4s
-; CHECK-NEXT:    tbl v2.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v1.16b
-; CHECK-NEXT:    str q2, [x0], #16
+; CHECK-NEXT:    fcvtzs v6.4s, v6.4s
+; CHECK-NEXT:    fcvtzs v7.4s, v7.4s
+; CHECK-NEXT:    fcvtzs v4.4s, v4.4s
+; CHECK-NEXT:    xtn v16.4h, v5.4s
+; CHECK-NEXT:    xtn v17.4h, v6.4s
+; CHECK-NEXT:    xtn v18.4h, v7.4s
+; CHECK-NEXT:    xtn v19.4h, v4.4s
+; CHECK-NEXT:    tbl v4.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v1.16b
+; CHECK-NEXT:    tbl v5.8b, { v4.16b }, v2.8b
+; CHECK-NEXT:    tbl v4.8b, { v4.16b }, v3.8b
+; CHECK-NEXT:    mov v4.d[1], v5.d[0]
+; CHECK-NEXT:    str q4, [x0], #16
 ; CHECK-NEXT:    b.ne .LBB3_9
 ; CHECK-NEXT:  // %bb.10: // %middle.block
 ; CHECK-NEXT:    cmp x11, x10
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-cost.ll b/llvm/test/CodeGen/AArch64/typepromotion-cost.ll
index 3aed4cb671c028..18e53311282e91 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-cost.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-cost.ll
@@ -11,10 +11,10 @@ define i32 @needless_promotion(ptr nocapture noundef readonly %S, i64 noundef %r
 ; CHECK-O2-NEXT:  // %bb.1: // %lor.rhs
 ; CHECK-O2-NEXT:    cbz x1, .LBB0_5
 ; CHECK-O2-NEXT:  // %bb.2:
-; CHECK-O2-NEXT:    mov w9, #2
+; CHECK-O2-NEXT:    mov w9, #2 // =0x2
 ; CHECK-O2-NEXT:    b .LBB0_4
 ; CHECK-O2-NEXT:  .LBB0_3:
-; CHECK-O2-NEXT:    mov w9, #1
+; CHECK-O2-NEXT:    mov w9, #1 // =0x1
 ; CHECK-O2-NEXT:  .LBB0_4: // %lor.end.sink.split
 ; CHECK-O2-NEXT:    cmp w8, w9
 ; CHECK-O2-NEXT:    cset w0, eq
@@ -30,12 +30,12 @@ define i32 @needless_promotion(ptr nocapture noundef readonly %S, i64 noundef %r
 ; CHECK-O3-NEXT:  // %bb.1: // %lor.rhs
 ; CHECK-O3-NEXT:    cbz x1, .LBB0_4
 ; CHECK-O3-NEXT:  // %bb.2:
-; CHECK-O3-NEXT:    mov w9, #2
+; CHECK-O3-NEXT:    mov w9, #2 // =0x2
 ; CHECK-O3-NEXT:    cmp w8, w9
 ; CHECK-O3-NEXT:    cset w0, eq
 ; CHECK-O3-NEXT:    ret
 ; CHECK-O3-NEXT:  .LBB0_3:
-; CHECK-O3-NEXT:    mov w9, #1
+; CHECK-O3-NEXT:    mov w9, #1 // =0x1
 ; CHECK-O3-NEXT:    cmp w8, w9
 ; CHECK-O3-NEXT:    cset w0, eq
 ; CHECK-O3-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
index 0ef87c32930555..8a94d82c76b046 100644
--- a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -49,25 +49,28 @@ define <4 x i32> @load_v3i8_to_4xi32(ptr %src) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    ldrh w9, [x0]
+; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:    adrp x8, lCPI1_0@PAGE
+; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:    ldr q1, [x8, lCPI1_0@PAGEOFF]
+; CHECK-NEXT:    strh w9, [sp, #12]
 ; CHECK-NEXT:    ldr s0, [sp, #12]
-; CHECK-NEXT:    ldrsb w8, [x0, #2]
+; CHECK-NEXT:    ldrsb w9, [x0, #2]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
 ; CHECK-NEXT:    mov.h v0[1], v0[1]
-; CHECK-NEXT:    mov.h v0[2], w8
+; CHECK-NEXT:    mov.h v0[2], w9
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
 ;
 ; BE-LABEL: load_v3i8_to_4xi32:
 ; BE:       // %bb.0:
 ; BE-NEXT:    sub sp, sp, #16
 ; BE-NEXT:    .cfi_def_cfa_offset 16
 ; BE-NEXT:    ldrh w8, [x0]
-; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
 ; BE-NEXT:    strh w8, [sp, #12]
 ; BE-NEXT:    ldr s0, [sp, #12]
 ; BE-NEXT:    ldrsb w8, [x0, #2]
@@ -75,9 +78,13 @@ define <4 x i32> @load_v3i8_to_4xi32(ptr %src) {
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    mov v0.h[1], v0.h[1]
 ; BE-NEXT:    mov v0.h[2], w8
+; BE-NEXT:    adrp x8, .LCPI1_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI1_0
+; BE-NEXT:    ld1 { v1.16b }, [x8]
 ; BE-NEXT:    ushll v0.4s, v0.4h, #0
-; BE-NEXT:    and v0.16b, v0.16b, v1.16b
-; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; BE-NEXT:    rev64 v0.16b, v0.16b
 ; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; BE-NEXT:    add sp, sp, #16
 ; BE-NEXT:    ret
@@ -92,25 +99,28 @@ define <4 x i32> @load_v3i8_to_4xi32_align_2(ptr %src) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    ldrh w9, [x0]
+; CHECK-NEXT:  Lloh2:
+; CHECK-NEXT:    adrp x8, lCPI2_0@PAGE
+; CHECK-NEXT:  Lloh3:
+; CHECK-NEXT:    ldr q1, [x8, lCPI2_0@PAGEOFF]
+; CHECK-NEXT:    strh w9, [sp, #12]
 ; CHECK-NEXT:    ldr s0, [sp, #12]
-; CHECK-NEXT:    ldrsb w8, [x0, #2]
+; CHECK-NEXT:    ldrsb w9, [x0, #2]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
 ; CHECK-NEXT:    mov.h v0[1], v0[1]
-; CHECK-NEXT:    mov.h v0[2], w8
+; CHECK-NEXT:    mov.h v0[2], w9
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh2, Lloh3
 ;
 ; BE-LABEL: load_v3i8_to_4xi32_align_2:
 ; BE:       // %bb.0:
 ; BE-NEXT:    sub sp, sp, #16
 ; BE-NEXT:    .cfi_def_cfa_offset 16
 ; BE-NEXT:    ldrh w8, [x0]
-; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
 ; BE-NEXT:    strh w8, [sp, #12]
 ; BE-NEXT:    ldr s0, [sp, #12]
 ; BE-NEXT:    ldrsb w8, [x0, #2]
@@ -118,9 +128,13 @@ define <4 x i32> @load_v3i8_to_4xi32_align_2(ptr %src) {
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    mov v0.h[1], v0.h[1]
 ; BE-NEXT:    mov v0.h[2], w8
+; BE-NEXT:    adrp x8, .LCPI2_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI2_0
+; BE-NEXT:    ld1 { v1.16b }, [x8]
 ; BE-NEXT:    ushll v0.4s, v0.4h, #0
-; BE-NEXT:    and v0.16b, v0.16b, v1.16b
-; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; BE-NEXT:    rev64 v0.16b, v0.16b
 ; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; BE-NEXT:    add sp, sp, #16
 ; BE-NEXT:    ret
@@ -134,21 +148,28 @@ define <4 x i32> @load_v3i8_to_4xi32_align_4(ptr %src) {
 ; CHECK-LABEL: load_v3i8_to_4xi32_align_4:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    ldr s0, [x0]
-; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
+; CHECK-NEXT:  Lloh4:
+; CHECK-NEXT:    adrp x8, lCPI3_0@PAGE
+; CHECK-NEXT:  Lloh5:
+; CHECK-NEXT:    ldr q1, [x8, lCPI3_0@PAGEOFF]
 ; CHECK-NEXT:    zip1.8b v0, v0, v0
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh4, Lloh5
 ;
 ; BE-LABEL: load_v3i8_to_4xi32_align_4:
 ; BE:       // %bb.0:
 ; BE-NEXT:    ldr s0, [x0]
-; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
+; BE-NEXT:    adrp x8, .LCPI3_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI3_0
+; BE-NEXT:    ld1 { v1.16b }, [x8]
 ; BE-NEXT:    rev32 v0.8b, v0.8b
 ; BE-NEXT:    zip1 v0.8b, v0.8b, v0.8b
 ; BE-NEXT:    ushll v0.4s, v0.4h, #0
-; BE-NEXT:    and v0.16b, v0.16b, v1.16b
-; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; BE-NEXT:    rev64 v0.16b, v0.16b
 ; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; BE-NEXT:    ret
   %l = load <3 x i8>, ptr %src, align 4
@@ -162,25 +183,28 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_1(ptr %src) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldurh w8, [x0, #1]
-; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    ldurh w9, [x0, #1]
+; CHECK-NEXT:  Lloh6:
+; CHECK-NEXT:    adrp x8, lCPI4_0@PAGE
+; CHECK-NEXT:  Lloh7:
+; CHECK-NEXT:    ldr q1, [x8, lCPI4_0@PAGEOFF]
+; CHECK-NEXT:    strh w9, [sp, #12]
 ; CHECK-NEXT:    ldr s0, [sp, #12]
-; CHECK-NEXT:    ldrsb w8, [x0, #3]
+; CHECK-NEXT:    ldrsb w9, [x0, #3]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
 ; CHECK-NEXT:    mov.h v0[1], v0[1]
-; CHECK-NEXT:    mov.h v0[2], w8
+; CHECK-NEXT:    mov.h v0[2], w9
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh6, Lloh7
 ;
 ; BE-LABEL: load_v3i8_to_4xi32_const_offset_1:
 ; BE:       // %bb.0:
 ; BE-NEXT:    sub sp, sp, #16
 ; BE-NEXT:    .cfi_def_cfa_offset 16
 ; BE-NEXT:    ldurh w8, [x0, #1]
-; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
 ; BE-NEXT:    strh w8, [sp, #12]
 ; BE-NEXT:    ldr s0, [sp, #12]
 ; BE-NEXT:    ldrsb w8, [x0, #3]
@@ -188,9 +212,13 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_1(ptr %src) {
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    mov v0.h[1], v0.h[1]
 ; BE-NEXT:    mov v0.h[2], w8
+; BE-NEXT:    adrp x8, .LCPI4_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI4_0
+; BE-NEXT:    ld1 { v1.16b }, [x8]
 ; BE-NEXT:    ushll v0.4s, v0.4h, #0
-; BE-NEXT:    and v0.16b, v0.16b, v1.16b
-; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; BE-NEXT:    rev64 v0.16b, v0.16b
 ; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; BE-NEXT:    add sp, sp, #16
 ; BE-NEXT:    ret
@@ -206,25 +234,28 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_3(ptr %src) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldurh w8, [x0, #3]
-; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    ldurh w9, [x0, #3]
+; CHECK-NEXT:  Lloh8:
+; CHECK-NEXT:    adrp x8, lCPI5_0@PAGE
+; CHECK-NEXT:  Lloh9:
+; CHECK-NEXT:    ldr q1, [x8, lCPI5_0@PAGEOFF]
+; CHECK-NEXT:    strh w9, [sp, #12]
 ; CHECK-NEXT:    ldr s0, [sp, #12]
-; CHECK-NEXT:    ldrsb w8, [x0, #5]
+; CHECK-NEXT:    ldrsb w9, [x0, #5]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
 ; CHECK-NEXT:    mov.h v0[1], v0[1]
-; CHECK-NEXT:    mov.h v0[2], w8
+; CHECK-NEXT:    mov.h v0[2], w9
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh8, Lloh9
 ;
 ; BE-LABEL: load_v3i8_to_4xi32_const_offset_3:
 ; BE:       // %bb.0:
 ; BE-NEXT:    sub sp, sp, #16
 ; BE-NEXT:    .cfi_def_cfa_offset 16
 ; BE-NEXT:    ldurh w8, [x0, #3]
-; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
 ; BE-NEXT:    strh w8, [sp, #12]
 ; BE-NEXT:    ldr s0, [sp, #12]
 ; BE-NEXT:    ldrsb w8, [x0, #5]
@@ -232,9 +263,13 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_3(ptr %src) {
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    mov v0.h[1], v0.h[1]
 ; BE-NEXT:    mov v0.h[2], w8
+; BE-NEXT:    adrp x8, .LCPI5_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI5_0
+; BE-NEXT:    ld1 { v1.16b }, [x8]
 ; BE-NEXT:    ushll v0.4s, v0.4h, #0
-; BE-NEXT:    and v0.16b, v0.16b, v1.16b
-; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; BE-NEXT:    rev64 v0.16b, v0.16b
 ; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; BE-NEXT:    add sp, sp, #16
 ; BE-NEXT:    ret
@@ -286,25 +321,28 @@ define <4 x i32> @volatile_load_v3i8_to_4xi32(ptr %src) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    ldrh w9, [x0]
+; CHECK-NEXT:  Lloh10:
+; CHECK-NEXT:    adrp x8, lCPI6_0@PAGE
+; CHECK-NEXT:  Lloh11:
+; CHECK-NEXT:    ldr q1, [x8, lCPI6_0@PAGEOFF]
+; CHECK-NEXT:    strh w9, [sp, #12]
 ; CHECK-NEXT:    ldr s0, [sp, #12]
-; CHECK-NEXT:    ldrsb w8, [x0, #2]
+; CHECK-NEXT:    ldrsb w9, [x0, #2]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
 ; CHECK-NEXT:    mov.h v0[1], v0[1]
-; CHECK-NEXT:    mov.h v0[2], w8
+; CHECK-NEXT:    mov.h v0[2], w9
 ; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    tbl.16b v0, { v0 }, v1
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh10, Lloh11
 ;
 ; BE-LABEL: volatile_load_v3i8_to_4xi32:
 ; BE:       // %bb.0:
 ; BE-NEXT:    sub sp, sp, #16
 ; BE-NEXT:    .cfi_def_cfa_offset 16
 ; BE-NEXT:    ldrh w8, [x0]
-; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
 ; BE-NEXT:    strh w8, [sp, #12]
 ; BE-NEXT:    ldr s0, [sp, #12]
 ; BE-NEXT:    ldrsb w8, [x0, #2]
@@ -312,9 +350,13 @@ define <4 x i32> @volatile_load_v3i8_to_4xi32(ptr %src) {
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    mov v0.h[1], v0.h[1]
 ; BE-NEXT:    mov v0.h[2], w8
+; BE-NEXT:    adrp x8, .LCPI6_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI6_0
+; BE-NEXT:    ld1 { v1.16b }, [x8]
 ; BE-NEXT:    ushll v0.4s, v0.4h, #0
-; BE-NEXT:    and v0.16b, v0.16b, v1.16b
-; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; BE-NEXT:    rev64 v0.16b, v0.16b
 ; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; BE-NEXT:    add sp, sp, #16
 ; BE-NEXT:    ret
@@ -391,9 +433,9 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    add x9, x0, #4
-; CHECK-NEXT:  Lloh0:
+; CHECK-NEXT:  Lloh12:
 ; CHECK-NEXT:    adrp x8, lCPI9_0@PAGE
-; CHECK-NEXT:  Lloh1:
+; CHECK-NEXT:  Lloh13:
 ; CHECK-NEXT:    ldr d1, [x8, lCPI9_0@PAGEOFF]
 ; CHECK-NEXT:    ld1.h { v0 }[2], [x9]
 ; CHECK-NEXT:    add.4h v0, v0, v1
@@ -405,7 +447,7 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    strh w9, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:    .loh AdrpLdr Lloh0, Lloh1
+; CHECK-NEXT:    .loh AdrpLdr Lloh12, Lloh13
 ;
 ; BE-LABEL: store_trunc_add_from_64bits:
 ; BE:       // %bb.0: // %entry
@@ -441,18 +483,24 @@ define void @load_ext_to_64bits(ptr %src, ptr %dst) {
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldrh w8, [x0]
-; CHECK-NEXT:    strh w8, [sp, #12]
-; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    ldrh w9, [x0]
+; CHECK-NEXT:  Lloh14:
+; CHECK-NEXT:    adrp x8, lCPI10_0@PAGE
+; CHECK-NEXT:  Lloh15:
+; CHECK-NEXT:    ldr d1, [x8, lCPI10_0@PAGEOFF]
+; CHECK-NEXT:    add x8, x1, #4
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    add x9, x0, #2
 ; CHECK-NEXT:    ldr s0, [sp, #12]
 ; CHECK-NEXT:    ushll.8h v0, v0, #0
-; CHECK-NEXT:    ld1.b { v0 }[4], [x8]
-; CHECK-NEXT:    add x8, x1, #4
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
+; CHECK-NEXT:    ld1.b { v0 }[4], [x9]
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v1
 ; CHECK-NEXT:    st1.h { v0 }[2], [x8]
 ; CHECK-NEXT:    str s0, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh14, Lloh15
 ;
 ; BE-LABEL: load_ext_to_64bits:
 ; BE:       // %bb.0: // %entry
@@ -465,11 +513,17 @@ define void @load_ext_to_64bits(ptr %src, ptr %dst) {
 ; BE-NEXT:    rev32 v0.8b, v0.8b
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    ld1 { v0.b }[4], [x8]
+; BE-NEXT:    adrp x8, .LCPI10_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI10_0
+; BE-NEXT:    ld1 { v1.8b }, [x8]
 ; BE-NEXT:    add x8, x1, #4
-; BE-NEXT:    bic v0.4h, #255, lsl #8
-; BE-NEXT:    rev32 v1.8h, v0.8h
-; BE-NEXT:    st1 { v0.h }[2], [x8]
-; BE-NEXT:    str s1, [x1]
+; BE-NEXT:    mov v0.d[1], v0.d[0]
+; BE-NEXT:    rev16 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; BE-NEXT:    rev16 v1.16b, v0.16b
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    st1 { v1.h }[2], [x8]
+; BE-NEXT:    str s0, [x1]
 ; BE-NEXT:    add sp, sp, #16
 ; BE-NEXT:    ret
 entry:
@@ -483,23 +537,35 @@ define void @load_ext_to_64bits_default_align(ptr %src, ptr %dst) {
 ; CHECK-LABEL: load_ext_to_64bits_default_align:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:  Lloh16:
+; CHECK-NEXT:    adrp x8, lCPI11_0@PAGE
+; CHECK-NEXT:  Lloh17:
+; CHECK-NEXT:    ldr d1, [x8, lCPI11_0@PAGEOFF]
 ; CHECK-NEXT:    add x8, x1, #4
 ; CHECK-NEXT:    zip1.8b v0, v0, v0
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v1
 ; CHECK-NEXT:    st1.h { v0 }[2], [x8]
 ; CHECK-NEXT:    str s0, [x1]
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh16, Lloh17
 ;
 ; BE-LABEL: load_ext_to_64bits_default_align:
 ; BE:       // %bb.0: // %entry
 ; BE-NEXT:    ldr s0, [x0]
+; BE-NEXT:    adrp x8, .LCPI11_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI11_0
+; BE-NEXT:    ld1 { v1.8b }, [x8]
 ; BE-NEXT:    add x8, x1, #4
 ; BE-NEXT:    rev32 v0.8b, v0.8b
 ; BE-NEXT:    zip1 v0.8b, v0.8b, v0.8b
-; BE-NEXT:    bic v0.4h, #255, lsl #8
-; BE-NEXT:    rev32 v1.8h, v0.8h
-; BE-NEXT:    st1 { v0.h }[2], [x8]
-; BE-NEXT:    str s1, [x1]
+; BE-NEXT:    mov v0.d[1], v0.d[0]
+; BE-NEXT:    rev16 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; BE-NEXT:    rev16 v1.16b, v0.16b
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    st1 { v1.h }[2], [x8]
+; BE-NEXT:    str s0, [x1]
 ; BE-NEXT:    ret
 entry:
   %l = load <3 x i8>, ptr %src
@@ -512,23 +578,35 @@ define void @load_ext_to_64bits_align_4(ptr %src, ptr %dst) {
 ; CHECK-LABEL: load_ext_to_64bits_align_4:
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:  Lloh18:
+; CHECK-NEXT:    adrp x8, lCPI12_0@PAGE
+; CHECK-NEXT:  Lloh19:
+; CHECK-NEXT:    ldr d1, [x8, lCPI12_0@PAGEOFF]
 ; CHECK-NEXT:    add x8, x1, #4
 ; CHECK-NEXT:    zip1.8b v0, v0, v0
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v1
 ; CHECK-NEXT:    st1.h { v0 }[2], [x8]
 ; CHECK-NEXT:    str s0, [x1]
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh18, Lloh19
 ;
 ; BE-LABEL: load_ext_to_64bits_align_4:
 ; BE:       // %bb.0: // %entry
 ; BE-NEXT:    ldr s0, [x0]
+; BE-NEXT:    adrp x8, .LCPI12_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI12_0
+; BE-NEXT:    ld1 { v1.8b }, [x8]
 ; BE-NEXT:    add x8, x1, #4
 ; BE-NEXT:    rev32 v0.8b, v0.8b
 ; BE-NEXT:    zip1 v0.8b, v0.8b, v0.8b
-; BE-NEXT:    bic v0.4h, #255, lsl #8
-; BE-NEXT:    rev32 v1.8h, v0.8h
-; BE-NEXT:    st1 { v0.h }[2], [x8]
-; BE-NEXT:    str s1, [x1]
+; BE-NEXT:    mov v0.d[1], v0.d[0]
+; BE-NEXT:    rev16 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; BE-NEXT:    rev16 v1.16b, v0.16b
+; BE-NEXT:    rev32 v0.16b, v0.16b
+; BE-NEXT:    st1 { v1.h }[2], [x8]
+; BE-NEXT:    str s0, [x1]
 ; BE-NEXT:    ret
 entry:
   %l = load <3 x i8>, ptr %src, align 4
@@ -542,24 +620,31 @@ define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
 ; CHECK:       ; %bb.0: ; %entry
 ; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldrh w9, [x0]
-; CHECK-NEXT:  Lloh2:
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ld1.b { v0 }[4], [x8]
+; CHECK-NEXT:  Lloh20:
 ; CHECK-NEXT:    adrp x8, lCPI13_0@PAGE
-; CHECK-NEXT:  Lloh3:
+; CHECK-NEXT:  Lloh21:
 ; CHECK-NEXT:    ldr d1, [x8, lCPI13_0@PAGEOFF]
+; CHECK-NEXT:  Lloh22:
+; CHECK-NEXT:    adrp x8, lCPI13_1@PAGE
+; CHECK-NEXT:    mov.d v0[1], v0[0]
+; CHECK-NEXT:    tbl.8b v0, { v0 }, v1
+; CHECK-NEXT:  Lloh23:
+; CHECK-NEXT:    ldr d1, [x8, lCPI13_1@PAGEOFF]
 ; CHECK-NEXT:    add x8, x1, #4
-; CHECK-NEXT:    strh w9, [sp, #12]
-; CHECK-NEXT:    add x9, x0, #2
-; CHECK-NEXT:    ldr s0, [sp, #12]
-; CHECK-NEXT:    ushll.8h v0, v0, #0
-; CHECK-NEXT:    ld1.b { v0 }[4], [x9]
-; CHECK-NEXT:    bic.4h v0, #255, lsl #8
 ; CHECK-NEXT:    add.4h v0, v0, v1
 ; CHECK-NEXT:    st1.h { v0 }[2], [x8]
 ; CHECK-NEXT:    str s0, [x1]
 ; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:    .loh AdrpLdr Lloh2, Lloh3
+; CHECK-NEXT:    .loh AdrpLdr Lloh22, Lloh23
+; CHECK-NEXT:    .loh AdrpAdrp Lloh20, Lloh22
+; CHECK-NEXT:    .loh AdrpLdr Lloh20, Lloh21
 ;
 ; BE-LABEL: load_ext_add_to_64bits:
 ; BE:       // %bb.0: // %entry
@@ -574,9 +659,15 @@ define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
 ; BE-NEXT:    ld1 { v0.b }[4], [x8]
 ; BE-NEXT:    adrp x8, .LCPI13_0
 ; BE-NEXT:    add x8, x8, :lo12:.LCPI13_0
+; BE-NEXT:    ld1 { v1.8b }, [x8]
+; BE-NEXT:    adrp x8, .LCPI13_1
+; BE-NEXT:    add x8, x8, :lo12:.LCPI13_1
+; BE-NEXT:    mov v0.d[1], v0.d[0]
+; BE-NEXT:    rev16 v0.16b, v0.16b
+; BE-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; BE-NEXT:    ld1 { v1.4h }, [x8]
 ; BE-NEXT:    add x8, x1, #4
-; BE-NEXT:    bic v0.4h, #255, lsl #8
+; BE-NEXT:    rev16 v0.8b, v0.8b
 ; BE-NEXT:    add v0.4h, v0.4h, v1.4h
 ; BE-NEXT:    rev32 v1.8h, v0.8h
 ; BE-NEXT:    st1 { v0.h }[2], [x8]
diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
index 00609b0df9b4e1..5172433342f762 100644
--- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
@@ -210,26 +210,27 @@ define <2 x i32> @uaddo_v2i64(<2 x i64> %a0, <2 x i64> %a1, ptr %p2) nounwind {
 define <4 x i32> @uaddo_v4i24(<4 x i24> %a0, <4 x i24> %a1, ptr %p2) nounwind {
 ; CHECK-LABEL: uaddo_v4i24:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic v1.4s, #255, lsl #24
-; CHECK-NEXT:    bic v0.4s, #255, lsl #24
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    ldr q2, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    tbl v1.16b, { v1.16b }, v2.16b
+; CHECK-NEXT:    tbl v0.16b, { v0.16b }, v2.16b
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    tbl v1.16b, { v0.16b }, v2.16b
 ; CHECK-NEXT:    mov w8, v0.s[3]
 ; CHECK-NEXT:    mov w9, v0.s[2]
 ; CHECK-NEXT:    mov w10, v0.s[1]
 ; CHECK-NEXT:    fmov w11, s0
-; CHECK-NEXT:    bic v1.4s, #1, lsl #24
+; CHECK-NEXT:    cmeq v1.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    sturh w8, [x0, #9]
 ; CHECK-NEXT:    lsr w8, w8, #16
 ; CHECK-NEXT:    strh w9, [x0, #6]
 ; CHECK-NEXT:    lsr w9, w9, #16
-; CHECK-NEXT:    cmeq v1.4s, v1.4s, v0.4s
 ; CHECK-NEXT:    strb w8, [x0, #11]
 ; CHECK-NEXT:    lsr w8, w10, #16
+; CHECK-NEXT:    mvn v0.16b, v1.16b
 ; CHECK-NEXT:    strb w9, [x0, #8]
 ; CHECK-NEXT:    lsr w9, w11, #16
 ; CHECK-NEXT:    sturh w10, [x0, #3]
-; CHECK-NEXT:    mvn v0.16b, v1.16b
 ; CHECK-NEXT:    strh w11, [x0]
 ; CHECK-NEXT:    strb w8, [x0, #5]
 ; CHECK-NEXT:    strb w9, [x0, #2]
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
index 7f804fe48fd854..1be9aadd2a8265 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
@@ -134,10 +134,13 @@ define i8 @test_v3i8(<3 x i8> %a) nounwind {
 ; CHECK-SD-LABEL: test_v3i8:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT:    adrp x8, .LCPI8_0
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI8_0]
 ; CHECK-SD-NEXT:    mov v0.h[0], w0
 ; CHECK-SD-NEXT:    mov v0.h[1], w1
 ; CHECK-SD-NEXT:    mov v0.h[2], w2
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    umaxv h0, v0.4h
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -160,7 +163,7 @@ define i8 @test_v9i8(<9 x i8> %a) nounwind {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    adrp x8, .LCPI9_0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI9_0]
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SD-NEXT:    umaxv b0, v0.16b
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
@@ -263,7 +266,9 @@ define i1 @test_v4i1(<4 x i1> %a) nounwind {
 define i24 @test_v4i24(<4 x i24> %a) nounwind {
 ; CHECK-SD-LABEL: test_v4i24:
 ; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    bic v0.4s, #255, lsl #24
+; CHECK-SD-NEXT:    adrp x8, .LCPI12_0
+; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI12_0]
+; CHECK-SD-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SD-NEXT:    umaxv s0, v0.4s
 ; CHECK-SD-NEXT:    fmov w0, s0
 ; CHECK-SD-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/vector-fcvt.ll b/llvm/test/CodeGen/AArch64/vector-fcvt.ll
index 8f38bdbedc629b..ef8d2a9b83fcd1 100644
--- a/llvm/test/CodeGen/AArch64/vector-fcvt.ll
+++ b/llvm/test/CodeGen/AArch64/vector-fcvt.ll
@@ -103,7 +103,11 @@ define <8 x float> @sitofp_i64_float(<8 x i64> %a) {
 define <4 x float> @uitofp_v4i8_float(<4 x i8> %a) {
 ; CHECK-LABEL: uitofp_v4i8_float:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-NEXT:    ucvtf v0.4s, v0.4s
 ; CHECK-NEXT:    ret
@@ -116,8 +120,12 @@ define <8 x float> @uitofp_v8i8_float(<8 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    zip1 v1.8b, v0.8b, v0.8b
 ; CHECK-NEXT:    zip2 v0.8b, v0.8b, v0.8b
-; CHECK-NEXT:    bic v1.4h, #255, lsl #8
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
 ; CHECK-NEXT:    ushll v1.4s, v1.4h, #0
 ; CHECK-NEXT:    ushll v2.4s, v0.4h, #0
 ; CHECK-NEXT:    ucvtf v0.4s, v1.4s
@@ -132,14 +140,20 @@ define <16 x float> @uitofp_v16i8_float(<16 x i8> %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT:    zip1 v2.8b, v0.8b, v0.8b
+; CHECK-NEXT:    adrp x8, .LCPI8_0
 ; CHECK-NEXT:    zip2 v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ldr d4, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    mov v2.d[1], v2.d[0]
 ; CHECK-NEXT:    zip1 v3.8b, v1.8b, v0.8b
 ; CHECK-NEXT:    zip2 v1.8b, v1.8b, v0.8b
-; CHECK-NEXT:    bic v2.4h, #255, lsl #8
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v4.8b
+; CHECK-NEXT:    mov v3.d[1], v3.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v4.8b
 ; CHECK-NEXT:    ushll v2.4s, v2.4h, #0
-; CHECK-NEXT:    bic v3.4h, #255, lsl #8
-; CHECK-NEXT:    bic v1.4h, #255, lsl #8
+; CHECK-NEXT:    tbl v3.8b, { v3.16b }, v4.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v4.8b
 ; CHECK-NEXT:    ushll v4.4s, v0.4h, #0
 ; CHECK-NEXT:    ucvtf v0.4s, v2.4s
 ; CHECK-NEXT:    ushll v3.4s, v3.4h, #0
@@ -373,13 +387,16 @@ define <4 x double> @uitofp_v4i8_double(<4 x i8> %a) {
 ; CHECK-LABEL: uitofp_v4i8_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
-; CHECK-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    and v1.8b, v2.8b, v1.8b
+; CHECK-NEXT:    adrp x8, .LCPI18_0
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI18_0]
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v2.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v2.8b
 ; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
-; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
 ; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
 ; CHECK-NEXT:    ucvtf v1.2d, v1.2d
 ; CHECK-NEXT:    ret
   %1 = uitofp <4 x i8> %a to <4 x double>
@@ -390,35 +407,40 @@ define <8 x double> @uitofp_v8i8_double(<8 x i8> %a) {
 ; CHECK-LABEL: uitofp_v8i8_double:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.b[0]
-; CHECK-NEXT:    umov w9, v0.b[2]
-; CHECK-NEXT:    umov w11, v0.b[4]
-; CHECK-NEXT:    umov w12, v0.b[6]
-; CHECK-NEXT:    umov w10, v0.b[1]
-; CHECK-NEXT:    umov w13, v0.b[3]
-; CHECK-NEXT:    umov w14, v0.b[5]
-; CHECK-NEXT:    umov w15, v0.b[7]
-; CHECK-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    fmov s2, w9
-; CHECK-NEXT:    fmov s3, w11
-; CHECK-NEXT:    fmov s4, w12
-; CHECK-NEXT:    mov v0.s[1], w10
-; CHECK-NEXT:    mov v2.s[1], w13
-; CHECK-NEXT:    mov v3.s[1], w14
-; CHECK-NEXT:    mov v4.s[1], w15
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    and v2.8b, v2.8b, v1.8b
-; CHECK-NEXT:    and v3.8b, v3.8b, v1.8b
-; CHECK-NEXT:    and v1.8b, v4.8b, v1.8b
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    umov w10, v0.b[2]
+; CHECK-NEXT:    adrp x8, .LCPI19_0
+; CHECK-NEXT:    umov w12, v0.b[4]
+; CHECK-NEXT:    umov w13, v0.b[6]
+; CHECK-NEXT:    ldr d4, [x8, :lo12:.LCPI19_0]
+; CHECK-NEXT:    umov w11, v0.b[1]
+; CHECK-NEXT:    umov w14, v0.b[3]
+; CHECK-NEXT:    umov w15, v0.b[5]
+; CHECK-NEXT:    umov w16, v0.b[7]
+; CHECK-NEXT:    fmov s0, w9
+; CHECK-NEXT:    fmov s1, w10
+; CHECK-NEXT:    fmov s2, w12
+; CHECK-NEXT:    fmov s3, w13
+; CHECK-NEXT:    mov v0.s[1], w11
+; CHECK-NEXT:    mov v1.s[1], w14
+; CHECK-NEXT:    mov v2.s[1], w15
+; CHECK-NEXT:    mov v3.s[1], w16
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    mov v2.d[1], v2.d[0]
+; CHECK-NEXT:    mov v3.d[1], v3.d[0]
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v4.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v4.8b
+; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v4.8b
+; CHECK-NEXT:    tbl v3.8b, { v3.16b }, v4.8b
 ; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
 ; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
 ; CHECK-NEXT:    ushll v3.2d, v3.2s, #0
-; CHECK-NEXT:    ushll v4.2d, v1.2s, #0
 ; CHECK-NEXT:    ucvtf v0.2d, v0.2d
-; CHECK-NEXT:    ucvtf v1.2d, v2.2d
-; CHECK-NEXT:    ucvtf v2.2d, v3.2d
-; CHECK-NEXT:    ucvtf v3.2d, v4.2d
+; CHECK-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-NEXT:    ucvtf v2.2d, v2.2d
+; CHECK-NEXT:    ucvtf v3.2d, v3.2d
 ; CHECK-NEXT:    ret
   %1 = uitofp <8 x i8> %a to <8 x double>
   ret <8 x double> %1
@@ -427,64 +449,73 @@ define <8 x double> @uitofp_v8i8_double(<8 x i8> %a) {
 define <16 x double> @uitofp_v16i8_double(<16 x i8> %a) {
 ; CHECK-LABEL: uitofp_v16i8_double:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v2.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    umov w8, v0.b[0]
-; CHECK-NEXT:    umov w10, v0.b[2]
-; CHECK-NEXT:    umov w9, v0.b[1]
-; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w11, v0.b[3]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    umov w18, v0.b[6]
-; CHECK-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-NEXT:    umov w14, v2.b[0]
-; CHECK-NEXT:    umov w16, v2.b[2]
-; CHECK-NEXT:    umov w0, v2.b[4]
-; CHECK-NEXT:    fmov s3, w8
-; CHECK-NEXT:    umov w8, v0.b[7]
-; CHECK-NEXT:    fmov s0, w10
-; CHECK-NEXT:    umov w10, v2.b[6]
-; CHECK-NEXT:    umov w15, v2.b[1]
-; CHECK-NEXT:    umov w17, v2.b[3]
-; CHECK-NEXT:    fmov s4, w12
-; CHECK-NEXT:    umov w12, v2.b[5]
-; CHECK-NEXT:    fmov s7, w18
-; CHECK-NEXT:    mov v3.s[1], w9
-; CHECK-NEXT:    umov w9, v2.b[7]
-; CHECK-NEXT:    fmov s2, w14
-; CHECK-NEXT:    fmov s5, w16
-; CHECK-NEXT:    fmov s6, w0
-; CHECK-NEXT:    mov v0.s[1], w11
-; CHECK-NEXT:    fmov s16, w10
-; CHECK-NEXT:    mov v4.s[1], w13
-; CHECK-NEXT:    mov v7.s[1], w8
-; CHECK-NEXT:    mov v2.s[1], w15
-; CHECK-NEXT:    mov v5.s[1], w17
-; CHECK-NEXT:    mov v6.s[1], w12
-; CHECK-NEXT:    and v3.8b, v3.8b, v1.8b
-; CHECK-NEXT:    mov v16.s[1], w9
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    and v4.8b, v4.8b, v1.8b
-; CHECK-NEXT:    and v7.8b, v7.8b, v1.8b
-; CHECK-NEXT:    and v2.8b, v2.8b, v1.8b
-; CHECK-NEXT:    ushll v3.2d, v3.2s, #0
-; CHECK-NEXT:    and v5.8b, v5.8b, v1.8b
-; CHECK-NEXT:    and v6.8b, v6.8b, v1.8b
-; CHECK-NEXT:    and v1.8b, v16.8b, v1.8b
-; CHECK-NEXT:    ushll v16.2d, v0.2s, #0
-; CHECK-NEXT:    ushll v17.2d, v4.2s, #0
+; CHECK-NEXT:    umov w9, v0.b[0]
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    adrp x8, .LCPI20_0
+; CHECK-NEXT:    umov w10, v0.b[1]
+; CHECK-NEXT:    umov w11, v0.b[2]
+; CHECK-NEXT:    ldr d16, [x8, :lo12:.LCPI20_0]
+; CHECK-NEXT:    umov w13, v0.b[4]
+; CHECK-NEXT:    umov w15, v0.b[6]
+; CHECK-NEXT:    umov w12, v0.b[3]
+; CHECK-NEXT:    umov w14, v0.b[5]
+; CHECK-NEXT:    fmov s2, w9
+; CHECK-NEXT:    umov w16, v1.b[0]
+; CHECK-NEXT:    umov w18, v1.b[2]
+; CHECK-NEXT:    umov w1, v1.b[4]
+; CHECK-NEXT:    umov w17, v1.b[1]
+; CHECK-NEXT:    umov w0, v1.b[3]
+; CHECK-NEXT:    umov w9, v0.b[7]
+; CHECK-NEXT:    fmov s0, w11
+; CHECK-NEXT:    umov w11, v1.b[5]
+; CHECK-NEXT:    mov v2.s[1], w10
+; CHECK-NEXT:    umov w10, v1.b[6]
+; CHECK-NEXT:    fmov s3, w13
+; CHECK-NEXT:    umov w13, v1.b[7]
+; CHECK-NEXT:    fmov s1, w15
+; CHECK-NEXT:    fmov s4, w16
+; CHECK-NEXT:    fmov s5, w18
+; CHECK-NEXT:    fmov s6, w1
+; CHECK-NEXT:    mov v0.s[1], w12
+; CHECK-NEXT:    mov v3.s[1], w14
+; CHECK-NEXT:    fmov s7, w10
+; CHECK-NEXT:    mov v1.s[1], w9
+; CHECK-NEXT:    mov v4.s[1], w17
+; CHECK-NEXT:    mov v5.s[1], w0
+; CHECK-NEXT:    mov v6.s[1], w11
+; CHECK-NEXT:    mov v2.d[1], v2.d[0]
+; CHECK-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-NEXT:    mov v7.s[1], w13
+; CHECK-NEXT:    mov v3.d[1], v3.d[0]
+; CHECK-NEXT:    mov v1.d[1], v1.d[0]
+; CHECK-NEXT:    mov v4.d[1], v4.d[0]
+; CHECK-NEXT:    mov v5.d[1], v5.d[0]
+; CHECK-NEXT:    mov v6.d[1], v6.d[0]
+; CHECK-NEXT:    tbl v2.8b, { v2.16b }, v16.8b
+; CHECK-NEXT:    tbl v0.8b, { v0.16b }, v16.8b
+; CHECK-NEXT:    mov v7.d[1], v7.d[0]
+; CHECK-NEXT:    tbl v3.8b, { v3.16b }, v16.8b
+; CHECK-NEXT:    tbl v1.8b, { v1.16b }, v16.8b
+; CHECK-NEXT:    tbl v4.8b, { v4.16b }, v16.8b
+; CHECK-NEXT:    tbl v5.8b, { v5.16b }, v16.8b
+; CHECK-NEXT:    tbl v6.8b, { v6.16b }, v16.8b
 ; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
-; CHECK-NEXT:    ushll v7.2d, v7.2s, #0
-; CHECK-NEXT:    ucvtf v0.2d, v3.2d
+; CHECK-NEXT:    tbl v7.8b, { v7.16b }, v16.8b
+; CHECK-NEXT:    ushll v16.2d, v0.2s, #0
+; CHECK-NEXT:    ushll v3.2d, v3.2s, #0
+; CHECK-NEXT:    ushll v17.2d, v1.2s, #0
+; CHECK-NEXT:    ushll v4.2d, v4.2s, #0
+; CHECK-NEXT:    ucvtf v0.2d, v2.2d
 ; CHECK-NEXT:    ushll v5.2d, v5.2s, #0
 ; CHECK-NEXT:    ushll v6.2d, v6.2s, #0
-; CHECK-NEXT:    ushll v18.2d, v1.2s, #0
 ; CHECK-NEXT:    ucvtf v1.2d, v16.2d
-; CHECK-NEXT:    ucvtf v4.2d, v2.2d
-; CHECK-NEXT:    ucvtf v2.2d, v17.2d
-; CHECK-NEXT:    ucvtf v3.2d, v7.2d
+; CHECK-NEXT:    ucvtf v2.2d, v3.2d
+; CHECK-NEXT:    ushll v7.2d, v7.2s, #0
+; CHECK-NEXT:    ucvtf v4.2d, v4.2d
+; CHECK-NEXT:    ucvtf v3.2d, v17.2d
 ; CHECK-NEXT:    ucvtf v5.2d, v5.2d
 ; CHECK-NEXT:    ucvtf v6.2d, v6.2d
-; CHECK-NEXT:    ucvtf v7.2d, v18.2d
+; CHECK-NEXT:    ucvtf v7.2d, v7.2d
 ; CHECK-NEXT:    ret
   %1 = uitofp <16 x i8> %a to <16 x double>
   ret <16 x double> %1
@@ -493,16 +524,27 @@ define <16 x double> @uitofp_v16i8_double(<16 x i8> %a) {
 define <8 x double> @uitofp_i16_double(<8 x i16> %a) {
 ; CHECK-LABEL: uitofp_i16_double:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ushll v1.4s, v0.4h, #0
-; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
-; CHECK-NEXT:    ushll v2.2d, v1.2s, #0
-; CHECK-NEXT:    ushll2 v3.2d, v0.4s, #0
-; CHECK-NEXT:    ushll2 v1.2d, v1.4s, #0
-; CHECK-NEXT:    ushll v4.2d, v0.2s, #0
-; CHECK-NEXT:    ucvtf v0.2d, v2.2d
+; CHECK-NEXT:    ushll2 v2.4s, v0.8h, #0
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-NEXT:    ext v3.16b, v2.16b, v2.16b, #8
+; CHECK-NEXT:    ext v4.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-NEXT:    rev32 v2.4h, v2.4h
+; CHECK-NEXT:    rev32 v3.4h, v3.4h
+; CHECK-NEXT:    rev32 v4.4h, v4.4h
+; CHECK-NEXT:    trn2 v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    trn2 v2.4h, v2.4h, v1.4h
+; CHECK-NEXT:    trn2 v3.4h, v3.4h, v1.4h
+; CHECK-NEXT:    trn2 v1.4h, v4.4h, v1.4h
+; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
+; CHECK-NEXT:    ushll v2.2d, v2.2s, #0
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ushll v3.2d, v3.2s, #0
+; CHECK-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-NEXT:    ucvtf v2.2d, v2.2d
 ; CHECK-NEXT:    ucvtf v3.2d, v3.2d
 ; CHECK-NEXT:    ucvtf v1.2d, v1.2d
-; CHECK-NEXT:    ucvtf v2.2d, v4.2d
 ; CHECK-NEXT:    ret
   %1 = uitofp <8 x i16> %a to <8 x double>
   ret <8 x double> %1
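
A note on the pattern in the regenerated checks above: everywhere the old output masked bytes with bic/and against a 0x00ff-style constant, the new output routes the vector through a tbl lookup whose index mask is loaded from the constant pool, since arbitrary <8 x i8> and <16 x i8> shuffle masks are now reported as legal. As a minimal, hypothetical illustration (the function name and index values below are made up for exposition and are not taken from the patch's test files), any fixed byte permutation of this shape falls under the new legality rule; TBL's architecturally defined behavior of writing 0 for out-of-range indices is what makes every such mask admissible:

define <16 x i8> @byte_shuffle(<16 x i8> %v) {
  ; A single-source v16i8 shuffle with an arbitrary constant mask. With such
  ; masks reported legal, DAG combines keep this as one shuffle (selected to a
  ; tbl with a constant-pool mask) instead of breaking it apart.
  %s = shufflevector <16 x i8> %v, <16 x i8> poison,
         <16 x i32> <i32 3, i32 7, i32 1, i32 0, i32 15, i32 2, i32 9, i32 4,
                     i32 12, i32 6, i32 11, i32 5, i32 14, i32 8, i32 13, i32 10>
  ret <16 x i8> %s
}
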
diff --git a/llvm/test/CodeGen/AArch64/vector-popcnt-128-ult-ugt.ll b/llvm/test/CodeGen/AArch64/vector-popcnt-128-ult-ugt.ll
index 3003f15dd549a9..df7e6ce1df7a03 100644
--- a/llvm/test/CodeGen/AArch64/vector-popcnt-128-ult-ugt.ll
+++ b/llvm/test/CodeGen/AArch64/vector-popcnt-128-ult-ugt.ll
@@ -1476,7 +1476,7 @@ define <2 x i64> @ugt_2_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_2_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #2
+; CHECK-NEXT:    mov w8, #2 // =0x2
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1493,7 +1493,7 @@ define <2 x i64> @ult_3_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_3_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #3
+; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1510,7 +1510,7 @@ define <2 x i64> @ugt_3_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_3_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #3
+; CHECK-NEXT:    mov w8, #3 // =0x3
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1527,7 +1527,7 @@ define <2 x i64> @ult_4_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_4_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #4
+; CHECK-NEXT:    mov w8, #4 // =0x4
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1544,7 +1544,7 @@ define <2 x i64> @ugt_4_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_4_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #4
+; CHECK-NEXT:    mov w8, #4 // =0x4
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1561,7 +1561,7 @@ define <2 x i64> @ult_5_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_5_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #5
+; CHECK-NEXT:    mov w8, #5 // =0x5
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1578,7 +1578,7 @@ define <2 x i64> @ugt_5_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_5_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #5
+; CHECK-NEXT:    mov w8, #5 // =0x5
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1595,7 +1595,7 @@ define <2 x i64> @ult_6_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_6_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #6
+; CHECK-NEXT:    mov w8, #6 // =0x6
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1612,7 +1612,7 @@ define <2 x i64> @ugt_6_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_6_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #6
+; CHECK-NEXT:    mov w8, #6 // =0x6
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1629,7 +1629,7 @@ define <2 x i64> @ult_7_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_7_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #7
+; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1646,7 +1646,7 @@ define <2 x i64> @ugt_7_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_7_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #7
+; CHECK-NEXT:    mov w8, #7 // =0x7
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1663,7 +1663,7 @@ define <2 x i64> @ult_8_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_8_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #8
+; CHECK-NEXT:    mov w8, #8 // =0x8
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1680,7 +1680,7 @@ define <2 x i64> @ugt_8_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_8_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #8
+; CHECK-NEXT:    mov w8, #8 // =0x8
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1697,7 +1697,7 @@ define <2 x i64> @ult_9_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_9_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1714,7 +1714,7 @@ define <2 x i64> @ugt_9_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_9_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #9
+; CHECK-NEXT:    mov w8, #9 // =0x9
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1731,7 +1731,7 @@ define <2 x i64> @ult_10_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_10_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #10
+; CHECK-NEXT:    mov w8, #10 // =0xa
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1748,7 +1748,7 @@ define <2 x i64> @ugt_10_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_10_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #10
+; CHECK-NEXT:    mov w8, #10 // =0xa
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1765,7 +1765,7 @@ define <2 x i64> @ult_11_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_11_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #11
+; CHECK-NEXT:    mov w8, #11 // =0xb
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1782,7 +1782,7 @@ define <2 x i64> @ugt_11_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_11_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #11
+; CHECK-NEXT:    mov w8, #11 // =0xb
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1799,7 +1799,7 @@ define <2 x i64> @ult_12_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_12_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #12
+; CHECK-NEXT:    mov w8, #12 // =0xc
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1816,7 +1816,7 @@ define <2 x i64> @ugt_12_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_12_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #12
+; CHECK-NEXT:    mov w8, #12 // =0xc
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1833,7 +1833,7 @@ define <2 x i64> @ult_13_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_13_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #13
+; CHECK-NEXT:    mov w8, #13 // =0xd
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1850,7 +1850,7 @@ define <2 x i64> @ugt_13_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_13_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #13
+; CHECK-NEXT:    mov w8, #13 // =0xd
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1867,7 +1867,7 @@ define <2 x i64> @ult_14_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_14_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #14
+; CHECK-NEXT:    mov w8, #14 // =0xe
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1884,7 +1884,7 @@ define <2 x i64> @ugt_14_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_14_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #14
+; CHECK-NEXT:    mov w8, #14 // =0xe
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1901,7 +1901,7 @@ define <2 x i64> @ult_15_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_15_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #15
+; CHECK-NEXT:    mov w8, #15 // =0xf
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1918,7 +1918,7 @@ define <2 x i64> @ugt_15_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_15_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #15
+; CHECK-NEXT:    mov w8, #15 // =0xf
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1935,7 +1935,7 @@ define <2 x i64> @ult_16_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_16_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #16
+; CHECK-NEXT:    mov w8, #16 // =0x10
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1952,7 +1952,7 @@ define <2 x i64> @ugt_16_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_16_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #16
+; CHECK-NEXT:    mov w8, #16 // =0x10
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1969,7 +1969,7 @@ define <2 x i64> @ult_17_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_17_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #17
+; CHECK-NEXT:    mov w8, #17 // =0x11
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -1986,7 +1986,7 @@ define <2 x i64> @ugt_17_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_17_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #17
+; CHECK-NEXT:    mov w8, #17 // =0x11
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2003,7 +2003,7 @@ define <2 x i64> @ult_18_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_18_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #18
+; CHECK-NEXT:    mov w8, #18 // =0x12
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2020,7 +2020,7 @@ define <2 x i64> @ugt_18_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_18_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #18
+; CHECK-NEXT:    mov w8, #18 // =0x12
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2037,7 +2037,7 @@ define <2 x i64> @ult_19_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_19_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #19
+; CHECK-NEXT:    mov w8, #19 // =0x13
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2054,7 +2054,7 @@ define <2 x i64> @ugt_19_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_19_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #19
+; CHECK-NEXT:    mov w8, #19 // =0x13
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2071,7 +2071,7 @@ define <2 x i64> @ult_20_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_20_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #20
+; CHECK-NEXT:    mov w8, #20 // =0x14
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2088,7 +2088,7 @@ define <2 x i64> @ugt_20_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_20_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #20
+; CHECK-NEXT:    mov w8, #20 // =0x14
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2105,7 +2105,7 @@ define <2 x i64> @ult_21_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_21_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #21
+; CHECK-NEXT:    mov w8, #21 // =0x15
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2122,7 +2122,7 @@ define <2 x i64> @ugt_21_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_21_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #21
+; CHECK-NEXT:    mov w8, #21 // =0x15
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2139,7 +2139,7 @@ define <2 x i64> @ult_22_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_22_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #22
+; CHECK-NEXT:    mov w8, #22 // =0x16
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2156,7 +2156,7 @@ define <2 x i64> @ugt_22_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_22_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #22
+; CHECK-NEXT:    mov w8, #22 // =0x16
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2173,7 +2173,7 @@ define <2 x i64> @ult_23_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_23_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #23
+; CHECK-NEXT:    mov w8, #23 // =0x17
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2190,7 +2190,7 @@ define <2 x i64> @ugt_23_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_23_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #23
+; CHECK-NEXT:    mov w8, #23 // =0x17
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2207,7 +2207,7 @@ define <2 x i64> @ult_24_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_24_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #24
+; CHECK-NEXT:    mov w8, #24 // =0x18
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2224,7 +2224,7 @@ define <2 x i64> @ugt_24_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_24_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #24
+; CHECK-NEXT:    mov w8, #24 // =0x18
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2241,7 +2241,7 @@ define <2 x i64> @ult_25_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_25_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #25
+; CHECK-NEXT:    mov w8, #25 // =0x19
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2258,7 +2258,7 @@ define <2 x i64> @ugt_25_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_25_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #25
+; CHECK-NEXT:    mov w8, #25 // =0x19
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2275,7 +2275,7 @@ define <2 x i64> @ult_26_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_26_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #26
+; CHECK-NEXT:    mov w8, #26 // =0x1a
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2292,7 +2292,7 @@ define <2 x i64> @ugt_26_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_26_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #26
+; CHECK-NEXT:    mov w8, #26 // =0x1a
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2309,7 +2309,7 @@ define <2 x i64> @ult_27_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_27_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #27
+; CHECK-NEXT:    mov w8, #27 // =0x1b
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2326,7 +2326,7 @@ define <2 x i64> @ugt_27_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_27_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #27
+; CHECK-NEXT:    mov w8, #27 // =0x1b
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2343,7 +2343,7 @@ define <2 x i64> @ult_28_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_28_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #28
+; CHECK-NEXT:    mov w8, #28 // =0x1c
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2360,7 +2360,7 @@ define <2 x i64> @ugt_28_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_28_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #28
+; CHECK-NEXT:    mov w8, #28 // =0x1c
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2377,7 +2377,7 @@ define <2 x i64> @ult_29_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_29_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #29
+; CHECK-NEXT:    mov w8, #29 // =0x1d
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2394,7 +2394,7 @@ define <2 x i64> @ugt_29_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_29_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #29
+; CHECK-NEXT:    mov w8, #29 // =0x1d
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2411,7 +2411,7 @@ define <2 x i64> @ult_30_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_30_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #30
+; CHECK-NEXT:    mov w8, #30 // =0x1e
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2428,7 +2428,7 @@ define <2 x i64> @ugt_30_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_30_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #30
+; CHECK-NEXT:    mov w8, #30 // =0x1e
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2445,7 +2445,7 @@ define <2 x i64> @ult_31_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_31_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    mov w8, #31 // =0x1f
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2462,7 +2462,7 @@ define <2 x i64> @ugt_31_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_31_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #31
+; CHECK-NEXT:    mov w8, #31 // =0x1f
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2479,7 +2479,7 @@ define <2 x i64> @ult_32_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2496,7 +2496,7 @@ define <2 x i64> @ugt_32_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_32_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #32
+; CHECK-NEXT:    mov w8, #32 // =0x20
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2513,7 +2513,7 @@ define <2 x i64> @ult_33_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_33_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2530,7 +2530,7 @@ define <2 x i64> @ugt_33_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_33_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #33
+; CHECK-NEXT:    mov w8, #33 // =0x21
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2547,7 +2547,7 @@ define <2 x i64> @ult_34_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_34_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #34
+; CHECK-NEXT:    mov w8, #34 // =0x22
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2564,7 +2564,7 @@ define <2 x i64> @ugt_34_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_34_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #34
+; CHECK-NEXT:    mov w8, #34 // =0x22
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2581,7 +2581,7 @@ define <2 x i64> @ult_35_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_35_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #35
+; CHECK-NEXT:    mov w8, #35 // =0x23
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2598,7 +2598,7 @@ define <2 x i64> @ugt_35_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_35_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #35
+; CHECK-NEXT:    mov w8, #35 // =0x23
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2615,7 +2615,7 @@ define <2 x i64> @ult_36_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_36_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #36
+; CHECK-NEXT:    mov w8, #36 // =0x24
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2632,7 +2632,7 @@ define <2 x i64> @ugt_36_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_36_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #36
+; CHECK-NEXT:    mov w8, #36 // =0x24
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2649,7 +2649,7 @@ define <2 x i64> @ult_37_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_37_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #37
+; CHECK-NEXT:    mov w8, #37 // =0x25
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2666,7 +2666,7 @@ define <2 x i64> @ugt_37_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_37_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #37
+; CHECK-NEXT:    mov w8, #37 // =0x25
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2683,7 +2683,7 @@ define <2 x i64> @ult_38_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_38_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #38
+; CHECK-NEXT:    mov w8, #38 // =0x26
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2700,7 +2700,7 @@ define <2 x i64> @ugt_38_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_38_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #38
+; CHECK-NEXT:    mov w8, #38 // =0x26
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2717,7 +2717,7 @@ define <2 x i64> @ult_39_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_39_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #39
+; CHECK-NEXT:    mov w8, #39 // =0x27
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2734,7 +2734,7 @@ define <2 x i64> @ugt_39_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_39_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #39
+; CHECK-NEXT:    mov w8, #39 // =0x27
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2751,7 +2751,7 @@ define <2 x i64> @ult_40_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_40_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #40
+; CHECK-NEXT:    mov w8, #40 // =0x28
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2768,7 +2768,7 @@ define <2 x i64> @ugt_40_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_40_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #40
+; CHECK-NEXT:    mov w8, #40 // =0x28
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2785,7 +2785,7 @@ define <2 x i64> @ult_41_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_41_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #41
+; CHECK-NEXT:    mov w8, #41 // =0x29
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2802,7 +2802,7 @@ define <2 x i64> @ugt_41_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_41_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #41
+; CHECK-NEXT:    mov w8, #41 // =0x29
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2819,7 +2819,7 @@ define <2 x i64> @ult_42_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_42_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2836,7 +2836,7 @@ define <2 x i64> @ugt_42_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_42_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #42
+; CHECK-NEXT:    mov w8, #42 // =0x2a
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2853,7 +2853,7 @@ define <2 x i64> @ult_43_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_43_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #43
+; CHECK-NEXT:    mov w8, #43 // =0x2b
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2870,7 +2870,7 @@ define <2 x i64> @ugt_43_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_43_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #43
+; CHECK-NEXT:    mov w8, #43 // =0x2b
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2887,7 +2887,7 @@ define <2 x i64> @ult_44_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_44_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #44
+; CHECK-NEXT:    mov w8, #44 // =0x2c
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2904,7 +2904,7 @@ define <2 x i64> @ugt_44_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_44_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #44
+; CHECK-NEXT:    mov w8, #44 // =0x2c
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2921,7 +2921,7 @@ define <2 x i64> @ult_45_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_45_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #45
+; CHECK-NEXT:    mov w8, #45 // =0x2d
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2938,7 +2938,7 @@ define <2 x i64> @ugt_45_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_45_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #45
+; CHECK-NEXT:    mov w8, #45 // =0x2d
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2955,7 +2955,7 @@ define <2 x i64> @ult_46_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_46_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #46
+; CHECK-NEXT:    mov w8, #46 // =0x2e
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2972,7 +2972,7 @@ define <2 x i64> @ugt_46_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_46_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #46
+; CHECK-NEXT:    mov w8, #46 // =0x2e
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -2989,7 +2989,7 @@ define <2 x i64> @ult_47_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_47_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #47
+; CHECK-NEXT:    mov w8, #47 // =0x2f
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3006,7 +3006,7 @@ define <2 x i64> @ugt_47_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_47_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #47
+; CHECK-NEXT:    mov w8, #47 // =0x2f
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3023,7 +3023,7 @@ define <2 x i64> @ult_48_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_48_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #48
+; CHECK-NEXT:    mov w8, #48 // =0x30
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3040,7 +3040,7 @@ define <2 x i64> @ugt_48_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_48_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #48
+; CHECK-NEXT:    mov w8, #48 // =0x30
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3057,7 +3057,7 @@ define <2 x i64> @ult_49_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_49_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #49
+; CHECK-NEXT:    mov w8, #49 // =0x31
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3074,7 +3074,7 @@ define <2 x i64> @ugt_49_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_49_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #49
+; CHECK-NEXT:    mov w8, #49 // =0x31
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3091,7 +3091,7 @@ define <2 x i64> @ult_50_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_50_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #50
+; CHECK-NEXT:    mov w8, #50 // =0x32
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3108,7 +3108,7 @@ define <2 x i64> @ugt_50_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_50_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #50
+; CHECK-NEXT:    mov w8, #50 // =0x32
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3125,7 +3125,7 @@ define <2 x i64> @ult_51_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_51_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #51
+; CHECK-NEXT:    mov w8, #51 // =0x33
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3142,7 +3142,7 @@ define <2 x i64> @ugt_51_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_51_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #51
+; CHECK-NEXT:    mov w8, #51 // =0x33
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3159,7 +3159,7 @@ define <2 x i64> @ult_52_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_52_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #52
+; CHECK-NEXT:    mov w8, #52 // =0x34
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3176,7 +3176,7 @@ define <2 x i64> @ugt_52_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_52_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #52
+; CHECK-NEXT:    mov w8, #52 // =0x34
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3193,7 +3193,7 @@ define <2 x i64> @ult_53_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_53_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #53
+; CHECK-NEXT:    mov w8, #53 // =0x35
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3210,7 +3210,7 @@ define <2 x i64> @ugt_53_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_53_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #53
+; CHECK-NEXT:    mov w8, #53 // =0x35
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3227,7 +3227,7 @@ define <2 x i64> @ult_54_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_54_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #54
+; CHECK-NEXT:    mov w8, #54 // =0x36
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3244,7 +3244,7 @@ define <2 x i64> @ugt_54_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_54_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #54
+; CHECK-NEXT:    mov w8, #54 // =0x36
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3261,7 +3261,7 @@ define <2 x i64> @ult_55_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_55_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #55
+; CHECK-NEXT:    mov w8, #55 // =0x37
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3278,7 +3278,7 @@ define <2 x i64> @ugt_55_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_55_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #55
+; CHECK-NEXT:    mov w8, #55 // =0x37
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3295,7 +3295,7 @@ define <2 x i64> @ult_56_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_56_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #56
+; CHECK-NEXT:    mov w8, #56 // =0x38
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3312,7 +3312,7 @@ define <2 x i64> @ugt_56_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_56_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #56
+; CHECK-NEXT:    mov w8, #56 // =0x38
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3329,7 +3329,7 @@ define <2 x i64> @ult_57_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_57_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #57
+; CHECK-NEXT:    mov w8, #57 // =0x39
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3346,7 +3346,7 @@ define <2 x i64> @ugt_57_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_57_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #57
+; CHECK-NEXT:    mov w8, #57 // =0x39
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3363,7 +3363,7 @@ define <2 x i64> @ult_58_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_58_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #58
+; CHECK-NEXT:    mov w8, #58 // =0x3a
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3380,7 +3380,7 @@ define <2 x i64> @ugt_58_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_58_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #58
+; CHECK-NEXT:    mov w8, #58 // =0x3a
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3397,7 +3397,7 @@ define <2 x i64> @ult_59_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_59_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #59
+; CHECK-NEXT:    mov w8, #59 // =0x3b
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3414,7 +3414,7 @@ define <2 x i64> @ugt_59_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_59_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #59
+; CHECK-NEXT:    mov w8, #59 // =0x3b
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3431,7 +3431,7 @@ define <2 x i64> @ult_60_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_60_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #60
+; CHECK-NEXT:    mov w8, #60 // =0x3c
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3448,7 +3448,7 @@ define <2 x i64> @ugt_60_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_60_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #60
+; CHECK-NEXT:    mov w8, #60 // =0x3c
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3465,7 +3465,7 @@ define <2 x i64> @ult_61_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_61_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #61
+; CHECK-NEXT:    mov w8, #61 // =0x3d
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3482,7 +3482,7 @@ define <2 x i64> @ugt_61_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_61_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #61
+; CHECK-NEXT:    mov w8, #61 // =0x3d
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3499,7 +3499,7 @@ define <2 x i64> @ult_62_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_62_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #62
+; CHECK-NEXT:    mov w8, #62 // =0x3e
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3516,7 +3516,7 @@ define <2 x i64> @ugt_62_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ugt_62_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #62
+; CHECK-NEXT:    mov w8, #62 // =0x3e
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
@@ -3533,7 +3533,7 @@ define <2 x i64> @ult_63_v2i64(<2 x i64> %0) {
 ; CHECK-LABEL: ult_63_v2i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cnt v0.16b, v0.16b
-; CHECK-NEXT:    mov w8, #63
+; CHECK-NEXT:    mov w8, #63 // =0x3f
 ; CHECK-NEXT:    dup v1.2d, x8
 ; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    uaddlp v0.4s, v0.8h
diff --git a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
index 0a3476e5f4cef6..f2449e61f87892 100644
--- a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
@@ -997,18 +997,24 @@ exit:
 define void @zext_v4i8_to_v4i32_in_loop(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v4i8_to_v4i32_in_loop:
 ; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:  Lloh12:
+; CHECK-NEXT:    adrp x8, lCPI11_0@PAGE
+; CHECK-NEXT:  Lloh13:
+; CHECK-NEXT:    ldr q0, [x8, lCPI11_0@PAGEOFF]
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:  LBB11_1: ; %loop
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ldr s0, [x0, x8]
+; CHECK-NEXT:    ldr s1, [x0, x8]
 ; CHECK-NEXT:    add x8, x8, #16
 ; CHECK-NEXT:    cmp x8, #128
-; CHECK-NEXT:    ushll.8h v0, v0, #0
-; CHECK-NEXT:    ushll.4s v0, v0, #0
-; CHECK-NEXT:    str q0, [x1], #64
+; CHECK-NEXT:    ushll.8h v1, v1, #0
+; CHECK-NEXT:    ushll.4s v1, v1, #0
+; CHECK-NEXT:    tbl.16b v1, { v1 }, v0
+; CHECK-NEXT:    str q1, [x1], #64
 ; CHECK-NEXT:    b.ne LBB11_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh12, Lloh13
 ;
 ; CHECK-BE-LABEL: zext_v4i8_to_v4i32_in_loop:
 ; CHECK-BE:       // %bb.0: // %entry
@@ -1155,17 +1161,17 @@ exit:
 define void @zext_v12i8_to_v12i32_in_loop(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v12i8_to_v12i32_in_loop:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:  Lloh12:
+; CHECK-NEXT:  Lloh14:
 ; CHECK-NEXT:    adrp x8, lCPI12_0@PAGE
-; CHECK-NEXT:  Lloh13:
+; CHECK-NEXT:  Lloh15:
 ; CHECK-NEXT:    adrp x9, lCPI12_1@PAGE
-; CHECK-NEXT:  Lloh14:
+; CHECK-NEXT:  Lloh16:
 ; CHECK-NEXT:    adrp x10, lCPI12_2@PAGE
-; CHECK-NEXT:  Lloh15:
+; CHECK-NEXT:  Lloh17:
 ; CHECK-NEXT:    ldr q0, [x8, lCPI12_0@PAGEOFF]
-; CHECK-NEXT:  Lloh16:
+; CHECK-NEXT:  Lloh18:
 ; CHECK-NEXT:    ldr q1, [x9, lCPI12_1@PAGEOFF]
-; CHECK-NEXT:  Lloh17:
+; CHECK-NEXT:  Lloh19:
 ; CHECK-NEXT:    ldr q2, [x10, lCPI12_2@PAGEOFF]
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:  LBB12_1: ; %loop
@@ -1181,9 +1187,9 @@ define void @zext_v12i8_to_v12i32_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    b.ne LBB12_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh16, Lloh19
+; CHECK-NEXT:    .loh AdrpLdr Lloh15, Lloh18
 ; CHECK-NEXT:    .loh AdrpLdr Lloh14, Lloh17
-; CHECK-NEXT:    .loh AdrpLdr Lloh13, Lloh16
-; CHECK-NEXT:    .loh AdrpLdr Lloh12, Lloh15
 ;
 ; CHECK-BE-LABEL: zext_v12i8_to_v12i32_in_loop:
 ; CHECK-BE:       // %bb.0: // %entry
@@ -1668,14 +1674,14 @@ exit:
 define void @zext_v8i8_to_v8i64_with_add_in_sequence_in_loop(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v8i8_to_v8i64_with_add_in_sequence_in_loop:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:  Lloh18:
+; CHECK-NEXT:  Lloh20:
 ; CHECK-NEXT:    adrp x9, lCPI17_0@PAGE
-; CHECK-NEXT:  Lloh19:
+; CHECK-NEXT:  Lloh21:
 ; CHECK-NEXT:    adrp x10, lCPI17_1@PAGE
 ; CHECK-NEXT:    mov x8, xzr
-; CHECK-NEXT:  Lloh20:
+; CHECK-NEXT:  Lloh22:
 ; CHECK-NEXT:    ldr q0, [x9, lCPI17_0@PAGEOFF]
-; CHECK-NEXT:  Lloh21:
+; CHECK-NEXT:  Lloh23:
 ; CHECK-NEXT:    ldr q1, [x10, lCPI17_1@PAGEOFF]
 ; CHECK-NEXT:    add x9, x0, #8
 ; CHECK-NEXT:  LBB17_1: ; %loop
@@ -1708,8 +1714,8 @@ define void @zext_v8i8_to_v8i64_with_add_in_sequence_in_loop(ptr %src, ptr %dst)
 ; CHECK-NEXT:    b.ne LBB17_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:    .loh AdrpLdr Lloh19, Lloh21
-; CHECK-NEXT:    .loh AdrpLdr Lloh18, Lloh20
+; CHECK-NEXT:    .loh AdrpLdr Lloh21, Lloh23
+; CHECK-NEXT:    .loh AdrpLdr Lloh20, Lloh22
 ;
 ; CHECK-BE-LABEL: zext_v8i8_to_v8i64_with_add_in_sequence_in_loop:
 ; CHECK-BE:       // %bb.0: // %entry
@@ -2173,21 +2179,21 @@ exit:
 define void @zext_v20i8_to_v20i24_in_loop(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v20i8_to_v20i24_in_loop:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:  Lloh22:
+; CHECK-NEXT:  Lloh24:
 ; CHECK-NEXT:    adrp x8, lCPI20_0@PAGE
-; CHECK-NEXT:  Lloh23:
+; CHECK-NEXT:  Lloh25:
 ; CHECK-NEXT:    adrp x9, lCPI20_1@PAGE
-; CHECK-NEXT:  Lloh24:
+; CHECK-NEXT:  Lloh26:
 ; CHECK-NEXT:    adrp x10, lCPI20_2@PAGE
-; CHECK-NEXT:  Lloh25:
+; CHECK-NEXT:  Lloh27:
 ; CHECK-NEXT:    ldr q0, [x8, lCPI20_0@PAGEOFF]
-; CHECK-NEXT:  Lloh26:
+; CHECK-NEXT:  Lloh28:
 ; CHECK-NEXT:    adrp x8, lCPI20_3@PAGE
-; CHECK-NEXT:  Lloh27:
+; CHECK-NEXT:  Lloh29:
 ; CHECK-NEXT:    ldr q1, [x9, lCPI20_1@PAGEOFF]
-; CHECK-NEXT:  Lloh28:
+; CHECK-NEXT:  Lloh30:
 ; CHECK-NEXT:    ldr q2, [x10, lCPI20_2@PAGEOFF]
-; CHECK-NEXT:  Lloh29:
+; CHECK-NEXT:  Lloh31:
 ; CHECK-NEXT:    ldr q3, [x8, lCPI20_3@PAGEOFF]
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:  LBB20_1: ; %loop
@@ -2209,11 +2215,11 @@ define void @zext_v20i8_to_v20i24_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    b.ne LBB20_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:    .loh AdrpLdr Lloh26, Lloh29
-; CHECK-NEXT:    .loh AdrpLdr Lloh24, Lloh28
-; CHECK-NEXT:    .loh AdrpLdr Lloh23, Lloh27
-; CHECK-NEXT:    .loh AdrpAdrp Lloh22, Lloh26
-; CHECK-NEXT:    .loh AdrpLdr Lloh22, Lloh25
+; CHECK-NEXT:    .loh AdrpLdr Lloh28, Lloh31
+; CHECK-NEXT:    .loh AdrpLdr Lloh26, Lloh30
+; CHECK-NEXT:    .loh AdrpLdr Lloh25, Lloh29
+; CHECK-NEXT:    .loh AdrpAdrp Lloh24, Lloh28
+; CHECK-NEXT:    .loh AdrpLdr Lloh24, Lloh27
 ;
 ; CHECK-BE-LABEL: zext_v20i8_to_v20i24_in_loop:
 ; CHECK-BE:       // %bb.0: // %entry
@@ -2501,29 +2507,29 @@ exit:
 define void @zext_v23i8_to_v23i48_in_loop(ptr %src, ptr %dst) {
 ; CHECK-LABEL: zext_v23i8_to_v23i48_in_loop:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:  Lloh30:
+; CHECK-NEXT:  Lloh32:
 ; CHECK-NEXT:    adrp x8, lCPI21_0@PAGE
-; CHECK-NEXT:  Lloh31:
+; CHECK-NEXT:  Lloh33:
 ; CHECK-NEXT:    adrp x9, lCPI21_1@PAGE
-; CHECK-NEXT:  Lloh32:
+; CHECK-NEXT:  Lloh34:
 ; CHECK-NEXT:    adrp x10, lCPI21_2@PAGE
-; CHECK-NEXT:  Lloh33:
+; CHECK-NEXT:  Lloh35:
 ; CHECK-NEXT:    ldr q0, [x8, lCPI21_0@PAGEOFF]
-; CHECK-NEXT:  Lloh34:
+; CHECK-NEXT:  Lloh36:
 ; CHECK-NEXT:    ldr q1, [x9, lCPI21_1@PAGEOFF]
-; CHECK-NEXT:  Lloh35:
+; CHECK-NEXT:  Lloh37:
 ; CHECK-NEXT:    ldr q2, [x10, lCPI21_2@PAGEOFF]
-; CHECK-NEXT:  Lloh36:
+; CHECK-NEXT:  Lloh38:
 ; CHECK-NEXT:    adrp x8, lCPI21_3@PAGE
-; CHECK-NEXT:  Lloh37:
+; CHECK-NEXT:  Lloh39:
 ; CHECK-NEXT:    adrp x9, lCPI21_4@PAGE
-; CHECK-NEXT:  Lloh38:
+; CHECK-NEXT:  Lloh40:
 ; CHECK-NEXT:    adrp x10, lCPI21_5@PAGE
-; CHECK-NEXT:  Lloh39:
+; CHECK-NEXT:  Lloh41:
 ; CHECK-NEXT:    ldr q3, [x8, lCPI21_3@PAGEOFF]
-; CHECK-NEXT:  Lloh40:
+; CHECK-NEXT:  Lloh42:
 ; CHECK-NEXT:    ldr q4, [x9, lCPI21_4@PAGEOFF]
-; CHECK-NEXT:  Lloh41:
+; CHECK-NEXT:  Lloh43:
 ; CHECK-NEXT:    ldr q5, [x10, lCPI21_5@PAGEOFF]
 ; CHECK-NEXT:    mov x8, xzr
 ; CHECK-NEXT:  LBB21_1: ; %loop
@@ -2552,15 +2558,15 @@ define void @zext_v23i8_to_v23i48_in_loop(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    b.ne LBB21_1
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh40, Lloh43
+; CHECK-NEXT:    .loh AdrpLdr Lloh39, Lloh42
 ; CHECK-NEXT:    .loh AdrpLdr Lloh38, Lloh41
-; CHECK-NEXT:    .loh AdrpLdr Lloh37, Lloh40
-; CHECK-NEXT:    .loh AdrpLdr Lloh36, Lloh39
+; CHECK-NEXT:    .loh AdrpAdrp Lloh34, Lloh40
+; CHECK-NEXT:    .loh AdrpLdr Lloh34, Lloh37
+; CHECK-NEXT:    .loh AdrpAdrp Lloh33, Lloh39
+; CHECK-NEXT:    .loh AdrpLdr Lloh33, Lloh36
 ; CHECK-NEXT:    .loh AdrpAdrp Lloh32, Lloh38
 ; CHECK-NEXT:    .loh AdrpLdr Lloh32, Lloh35
-; CHECK-NEXT:    .loh AdrpAdrp Lloh31, Lloh37
-; CHECK-NEXT:    .loh AdrpLdr Lloh31, Lloh34
-; CHECK-NEXT:    .loh AdrpAdrp Lloh30, Lloh36
-; CHECK-NEXT:    .loh AdrpLdr Lloh30, Lloh33
 ;
 ; CHECK-BE-LABEL: zext_v23i8_to_v23i48_in_loop:
 ; CHECK-BE:       // %bb.0: // %entry
@@ -2894,21 +2900,21 @@ exit:
 define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
 ; CHECK-LABEL: test_widening_instr_mull_64:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:  Lloh42:
+; CHECK-NEXT:  Lloh44:
 ; CHECK-NEXT:    adrp x8, lCPI25_0@PAGE
-; CHECK-NEXT:  Lloh43:
+; CHECK-NEXT:  Lloh45:
 ; CHECK-NEXT:    adrp x9, lCPI25_1@PAGE
-; CHECK-NEXT:  Lloh44:
+; CHECK-NEXT:  Lloh46:
 ; CHECK-NEXT:    adrp x10, lCPI25_3@PAGE
-; CHECK-NEXT:  Lloh45:
+; CHECK-NEXT:  Lloh47:
 ; CHECK-NEXT:    ldr q0, [x8, lCPI25_0@PAGEOFF]
-; CHECK-NEXT:  Lloh46:
+; CHECK-NEXT:  Lloh48:
 ; CHECK-NEXT:    adrp x8, lCPI25_2@PAGE
-; CHECK-NEXT:  Lloh47:
+; CHECK-NEXT:  Lloh49:
 ; CHECK-NEXT:    ldr q1, [x9, lCPI25_1@PAGEOFF]
-; CHECK-NEXT:  Lloh48:
+; CHECK-NEXT:  Lloh50:
 ; CHECK-NEXT:    ldr q2, [x8, lCPI25_2@PAGEOFF]
-; CHECK-NEXT:  Lloh49:
+; CHECK-NEXT:  Lloh51:
 ; CHECK-NEXT:    ldr q3, [x10, lCPI25_3@PAGEOFF]
 ; CHECK-NEXT:    mov x8, x1
 ; CHECK-NEXT:  LBB25_1: ; %loop
@@ -2940,11 +2946,11 @@ define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:    .loh AdrpLdr Lloh46, Lloh48
-; CHECK-NEXT:    .loh AdrpLdr Lloh44, Lloh49
-; CHECK-NEXT:    .loh AdrpLdr Lloh43, Lloh47
-; CHECK-NEXT:    .loh AdrpAdrp Lloh42, Lloh46
-; CHECK-NEXT:    .loh AdrpLdr Lloh42, Lloh45
+; CHECK-NEXT:    .loh AdrpLdr Lloh48, Lloh50
+; CHECK-NEXT:    .loh AdrpLdr Lloh46, Lloh51
+; CHECK-NEXT:    .loh AdrpLdr Lloh45, Lloh49
+; CHECK-NEXT:    .loh AdrpAdrp Lloh44, Lloh48
+; CHECK-NEXT:    .loh AdrpLdr Lloh44, Lloh47
 ;
 ; CHECK-BE-LABEL: test_widening_instr_mull_64:
 ; CHECK-BE:       // %bb.0: // %entry
@@ -3043,21 +3049,21 @@ exit:
 define i32 @test_widening_instr_mull_2(ptr %p1, ptr %p2, i32 %h) {
 ; CHECK-LABEL: test_widening_instr_mull_2:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:  Lloh50:
+; CHECK-NEXT:  Lloh52:
 ; CHECK-NEXT:    adrp x8, lCPI26_0@PAGE
-; CHECK-NEXT:  Lloh51:
+; CHECK-NEXT:  Lloh53:
 ; CHECK-NEXT:    adrp x9, lCPI26_1@PAGE
-; CHECK-NEXT:  Lloh52:
+; CHECK-NEXT:  Lloh54:
 ; CHECK-NEXT:    adrp x10, lCPI26_3@PAGE
-; CHECK-NEXT:  Lloh53:
+; CHECK-NEXT:  Lloh55:
 ; CHECK-NEXT:    ldr q0, [x8, lCPI26_0@PAGEOFF]
-; CHECK-NEXT:  Lloh54:
+; CHECK-NEXT:  Lloh56:
 ; CHECK-NEXT:    adrp x8, lCPI26_2@PAGE
-; CHECK-NEXT:  Lloh55:
+; CHECK-NEXT:  Lloh57:
 ; CHECK-NEXT:    ldr q1, [x9, lCPI26_1@PAGEOFF]
-; CHECK-NEXT:  Lloh56:
+; CHECK-NEXT:  Lloh58:
 ; CHECK-NEXT:    ldr q2, [x8, lCPI26_2@PAGEOFF]
-; CHECK-NEXT:  Lloh57:
+; CHECK-NEXT:  Lloh59:
 ; CHECK-NEXT:    ldr q3, [x10, lCPI26_3@PAGEOFF]
 ; CHECK-NEXT:    mov x8, x0
 ; CHECK-NEXT:  LBB26_1: ; %loop
@@ -3083,11 +3089,11 @@ define i32 @test_widening_instr_mull_2(ptr %p1, ptr %p2, i32 %h) {
 ; CHECK-NEXT:  ; %bb.2: ; %exit
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:    .loh AdrpLdr Lloh54, Lloh56
-; CHECK-NEXT:    .loh AdrpLdr Lloh52, Lloh57
-; CHECK-NEXT:    .loh AdrpLdr Lloh51, Lloh55
-; CHECK-NEXT:    .loh AdrpAdrp Lloh50, Lloh54
-; CHECK-NEXT:    .loh AdrpLdr Lloh50, Lloh53
+; CHECK-NEXT:    .loh AdrpLdr Lloh56, Lloh58
+; CHECK-NEXT:    .loh AdrpLdr Lloh54, Lloh59
+; CHECK-NEXT:    .loh AdrpLdr Lloh53, Lloh57
+; CHECK-NEXT:    .loh AdrpAdrp Lloh52, Lloh56
+; CHECK-NEXT:    .loh AdrpLdr Lloh52, Lloh55
 ;
 ; CHECK-BE-LABEL: test_widening_instr_mull_2:
 ; CHECK-BE:       // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/zext.ll b/llvm/test/CodeGen/AArch64/zext.ll
index e513340f5b18ad..7c7335027cf1fa 100644
--- a/llvm/test/CodeGen/AArch64/zext.ll
+++ b/llvm/test/CodeGen/AArch64/zext.ll
@@ -108,22 +108,40 @@ entry:
 }
 
 define <2 x i16> @zext_v2i8_v2i16(<2 x i8> %a) {
-; CHECK-LABEL: zext_v2i8_v2i16:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: zext_v2i8_v2i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI10_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI10_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: zext_v2i8_v2i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    movi d1, #0x0000ff000000ff
+; CHECK-GI-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT:    ret
 entry:
   %c = zext <2 x i8> %a to <2 x i16>
   ret <2 x i16> %c
 }
 
 define <2 x i32> @zext_v2i8_v2i32(<2 x i8> %a) {
-; CHECK-LABEL: zext_v2i8_v2i32:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: zext_v2i8_v2i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI11_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI11_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: zext_v2i8_v2i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    movi d1, #0x0000ff000000ff
+; CHECK-GI-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT:    ret
 entry:
   %c = zext <2 x i8> %a to <2 x i32>
   ret <2 x i32> %c
@@ -132,8 +150,11 @@ entry:
 define <2 x i64> @zext_v2i8_v2i64(<2 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v2i8_v2i64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi d1, #0x0000ff000000ff
-; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI12_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI12_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -149,11 +170,18 @@ entry:
 }
 
 define <2 x i32> @zext_v2i16_v2i32(<2 x i16> %a) {
-; CHECK-LABEL: zext_v2i16_v2i32:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: zext_v2i16_v2i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SD-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-SD-NEXT:    trn2 v0.4h, v0.4h, v1.4h
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: zext_v2i16_v2i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-GI-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-GI-NEXT:    ret
 entry:
   %c = zext <2 x i16> %a to <2 x i32>
   ret <2 x i32> %c
@@ -162,8 +190,9 @@ entry:
 define <2 x i64> @zext_v2i16_v2i64(<2 x i16> %a) {
 ; CHECK-SD-LABEL: zext_v2i16_v2i64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    movi d1, #0x00ffff0000ffff
-; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    movi v1.2d, #0000000000000000
+; CHECK-SD-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-SD-NEXT:    trn2 v0.4h, v0.4h, v1.4h
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -234,10 +263,12 @@ define <3 x i16> @zext_v3i8_v3i16(<3 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v3i8_v3i16:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    fmov s0, w0
+; CHECK-SD-NEXT:    adrp x8, .LCPI19_0
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI19_0]
 ; CHECK-SD-NEXT:    mov v0.h[1], w1
 ; CHECK-SD-NEXT:    mov v0.h[2], w2
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: zext_v3i8_v3i16:
@@ -264,11 +295,12 @@ define <3 x i32> @zext_v3i8_v3i32(<3 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v3i8_v3i32:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    fmov s0, w0
-; CHECK-SD-NEXT:    movi v1.2d, #0x0000ff000000ff
+; CHECK-SD-NEXT:    adrp x8, .LCPI20_0
+; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI20_0]
 ; CHECK-SD-NEXT:    mov v0.h[1], w1
 ; CHECK-SD-NEXT:    mov v0.h[2], w2
 ; CHECK-SD-NEXT:    ushll v0.4s, v0.4h, #0
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: zext_v3i8_v3i32:
@@ -293,13 +325,14 @@ define <3 x i64> @zext_v3i8_v3i64(<3 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v3i8_v3i64:
 ; CHECK-SD:       // %bb.0: // %entry
 ; CHECK-SD-NEXT:    fmov s0, w0
-; CHECK-SD-NEXT:    movi v1.2d, #0x000000000000ff
-; CHECK-SD-NEXT:    fmov s3, w2
+; CHECK-SD-NEXT:    adrp x8, .LCPI21_0
 ; CHECK-SD-NEXT:    movi v2.2d, #0000000000000000
+; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI21_0]
 ; CHECK-SD-NEXT:    mov v0.s[1], w1
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
-; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
-; CHECK-SD-NEXT:    ushll v1.2d, v3.2s, #0
+; CHECK-SD-NEXT:    tbl v0.16b, { v0.16b }, v1.16b
+; CHECK-SD-NEXT:    fmov s1, w2
+; CHECK-SD-NEXT:    ushll v1.2d, v1.2s, #0
 ; CHECK-SD-NEXT:    mov v2.b[0], v1.b[0]
 ; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
@@ -500,7 +533,11 @@ entry:
 define <4 x i16> @zext_v4i8_v4i16(<4 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v4i8_v4i16:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI28_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI28_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: zext_v4i8_v4i16:
@@ -516,7 +553,11 @@ entry:
 define <4 x i32> @zext_v4i8_v4i32(<4 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v4i8_v4i32:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI29_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI29_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-SD-NEXT:    ret
 ;
@@ -534,7 +575,11 @@ entry:
 define <4 x i64> @zext_v4i8_v4i64(<4 x i8> %a) {
 ; CHECK-SD-LABEL: zext_v4i8_v4i64:
 ; CHECK-SD:       // %bb.0: // %entry
-; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    adrp x8, .LCPI30_0
+; CHECK-SD-NEXT:    mov v0.d[1], v0.d[0]
+; CHECK-SD-NEXT:    ldr d1, [x8, :lo12:.LCPI30_0]
+; CHECK-SD-NEXT:    tbl v0.8b, { v0.16b }, v1.8b
 ; CHECK-SD-NEXT:    ushll v0.4s, v0.4h, #0
 ; CHECK-SD-NEXT:    ushll2 v1.2d, v0.4s, #0
 ; CHECK-SD-NEXT:    ushll v0.2d, v0.2s, #0
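
A note on the pattern the zext.ll diffs above exercise, with a minimal hypothetical sketch (the function name and mask values below are illustrative, not taken from the patch). Interleaving a byte vector with zeroinitializer is the bit pattern of a little-endian zero extension; once isTBLMask is accepted by isShuffleMaskLegal, such an 8- or 16-element byte shuffle is reported legal and can be selected as the single `tbl v0.8b, { v0.16b }, v1.8b` now visible in the CHECK-SD lines, because TBL writes a zero byte for any index that reaches past the table register.

; Hypothetical IR, for illustration only: index 8 selects element 0 of the
; zeroinitializer operand, so the result is a0, 0, a1, 0, a2, 0, a3, 0,
; i.e. the low four bytes of %a zero-extended to i16 lanes.
define <8 x i8> @zext_like_shuffle(<8 x i8> %a) {
entry:
  %s = shufflevector <8 x i8> %a, <8 x i8> zeroinitializer, <8 x i32> <i32 0, i32 8, i32 1, i32 8, i32 2, i32 8, i32 3, i32 8>
  ret <8 x i8> %s
}

The tradeoff, visible throughout the diffs, is that the TBL form needs a constant-pool load for the index vector (the adrp/ldr pairs), which is why so much of the test churn above is renumbered Lloh labels and .loh annotations rather than changes to the loop bodies themselves.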


