[llvm] 0d8cd4e - [AArch64InstPrinter] Change printAddSubImm to comment imm value when shifted

Jason Molenda via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 3 02:28:55 PDT 2021


Author: Jason Molenda
Date: 2021-08-03T02:28:46-07:00
New Revision: 0d8cd4e2d5d4abb804d40984522e0413c66a3cbd

URL: https://github.com/llvm/llvm-project/commit/0d8cd4e2d5d4abb804d40984522e0413c66a3cbd
DIFF: https://github.com/llvm/llvm-project/commit/0d8cd4e2d5d4abb804d40984522e0413c66a3cbd.diff

LOG: [AArch64InstPrinter] Change printAddSubImm to comment imm value when shifted

Emit a comment showing the computed immediate value when the value is shifted,
    add x9, x0, #291, lsl #12 ; =1191936
but not when the immediate value is unshifted,
    subs x9, x0, #256 ; =256
since in the unshifted case the comment tells the reader nothing new.
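
(For reference, the commented value is the immediate shifted left by the shift amount, i.e. Val << Shift: in the example above, 291 << 12 = 291 * 4096 = 1191936.)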

Differential Revision: https://reviews.llvm.org/D107196

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
    llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
    llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
    llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
    llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
    llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
    llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
    llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
    llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
    llvm/test/CodeGen/AArch64/addsub.ll
    llvm/test/CodeGen/AArch64/align-down.ll
    llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
    llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
    llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
    llvm/test/CodeGen/AArch64/arm64-ccmp.ll
    llvm/test/CodeGen/AArch64/arm64-fp128.ll
    llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
    llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
    llvm/test/CodeGen/AArch64/arm64-nvcast.ll
    llvm/test/CodeGen/AArch64/arm64-popcnt.ll
    llvm/test/CodeGen/AArch64/arm64-rev.ll
    llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
    llvm/test/CodeGen/AArch64/arm64-vabs.ll
    llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
    llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
    llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
    llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
    llvm/test/CodeGen/AArch64/cgp-usubo.ll
    llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
    llvm/test/CodeGen/AArch64/cmp-select-sign.ll
    llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
    llvm/test/CodeGen/AArch64/extract-bits.ll
    llvm/test/CodeGen/AArch64/extract-lowbits.ll
    llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
    llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
    llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
    llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
    llvm/test/CodeGen/AArch64/funnel-shift.ll
    llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
    llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
    llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
    llvm/test/CodeGen/AArch64/implicit-null-check.ll
    llvm/test/CodeGen/AArch64/inc-of-add.ll
    llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
    llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
    llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
    llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
    llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
    llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
    llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
    llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll
    llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
    llvm/test/CodeGen/AArch64/neg-abs.ll
    llvm/test/CodeGen/AArch64/pow.ll
    llvm/test/CodeGen/AArch64/pr48188.ll
    llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
    llvm/test/CodeGen/AArch64/sadd_sat.ll
    llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
    llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/sat-add.ll
    llvm/test/CodeGen/AArch64/sdivpow2.ll
    llvm/test/CodeGen/AArch64/select_const.ll
    llvm/test/CodeGen/AArch64/shift-mod.ll
    llvm/test/CodeGen/AArch64/signbit-shift.ll
    llvm/test/CodeGen/AArch64/signed-truncation-check.ll
    llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll
    llvm/test/CodeGen/AArch64/split-vector-insert.ll
    llvm/test/CodeGen/AArch64/srem-lkk.ll
    llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll
    llvm/test/CodeGen/AArch64/srem-seteq.ll
    llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
    llvm/test/CodeGen/AArch64/ssub_sat.ll
    llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
    llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
    llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
    llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
    llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
    llvm/test/CodeGen/AArch64/sub-of-not.ll
    llvm/test/CodeGen/AArch64/sub1.ll
    llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
    llvm/test/CodeGen/AArch64/sve-extract-vector.ll
    llvm/test/CodeGen/AArch64/sve-insert-element.ll
    llvm/test/CodeGen/AArch64/sve-insert-vector.ll
    llvm/test/CodeGen/AArch64/sve-ld1r.ll
    llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
    llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
    llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
    llvm/test/CodeGen/AArch64/uadd_sat.ll
    llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
    llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
    llvm/test/CodeGen/AArch64/uaddo.ll
    llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
    llvm/test/CodeGen/AArch64/unwind-preserved.ll
    llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll
    llvm/test/CodeGen/AArch64/urem-seteq.ll
    llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
    llvm/test/CodeGen/AArch64/usub_sat_vec.ll
    llvm/test/CodeGen/AArch64/vec-libcalls.ll
    llvm/test/CodeGen/AArch64/vec_uaddo.ll
    llvm/test/CodeGen/AArch64/vec_umulo.ll
    llvm/test/CodeGen/AArch64/vecreduce-bool.ll
    llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
    llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
    llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
    llvm/test/CodeGen/AArch64/vldn_shuffle.ll
    llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
    llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll
    llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll
    llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
    llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
index cd1bfed9d40d0..bebff71ddbf20 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64InstPrinter.cpp
@@ -1026,11 +1026,11 @@ void AArch64InstPrinter::printAddSubImm(const MCInst *MI, unsigned OpNum,
     unsigned Shift =
         AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
     O << '#' << formatImm(Val);
-    if (Shift != 0)
+    if (Shift != 0) {
       printShifter(MI, OpNum + 1, STI, O);
-
-    if (CommentStream)
-      *CommentStream << '=' << formatImm(Val << Shift) << '\n';
+      if (CommentStream)
+        *CommentStream << '=' << formatImm(Val << Shift) << '\n';
+    }
   } else {
     assert(MO.isExpr() && "Unexpected operand type!");
     MO.getExpr()->print(O, &MAI);
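
For anyone skimming the patch: after this change, the immediate-printing branch of printAddSubImm has the shape sketched below. The comments are added here for exposition only and are not part of the committed code.

    unsigned Shift =
        AArch64_AM::getShiftValue(MI->getOperand(OpNum + 1).getImm());
    O << '#' << formatImm(Val);           // always print the raw immediate
    if (Shift != 0) {                     // shifted form, e.g. "#291, lsl #12"
      printShifter(MI, OpNum + 1, STI, O);
      if (CommentStream)                  // comment emitted only for shifted values
        *CommentStream << '=' << formatImm(Val << Shift) << '\n'; // e.g. "=1191936"
    }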

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
index db1b011c06ea4..c0f09428c3799 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
@@ -65,7 +65,7 @@ define void @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
 ;
 ; CHECK-CAS-O0-LABEL: val_compare_and_swap:
 ; CHECK-CAS-O0:       // %bb.0:
-; CHECK-CAS-O0-NEXT:    sub sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    sub sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-CAS-O0-NEXT:    str x3, [sp, #8] // 8-byte Folded Spill
 ; CHECK-CAS-O0-NEXT:    mov x1, x5
@@ -81,7 +81,7 @@ define void @val_compare_and_swap(i128* %p, i128 %oldval, i128 %newval) {
 ; CHECK-CAS-O0-NEXT:    mov v0.d[0], x9
 ; CHECK-CAS-O0-NEXT:    mov v0.d[1], x8
 ; CHECK-CAS-O0-NEXT:    str q0, [x0]
-; CHECK-CAS-O0-NEXT:    add sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
 
 %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval acquire acquire
@@ -150,7 +150,7 @@ define void @val_compare_and_swap_monotonic_seqcst(i128* %p, i128 %oldval, i128
 ;
 ; CHECK-CAS-O0-LABEL: val_compare_and_swap_monotonic_seqcst:
 ; CHECK-CAS-O0:       // %bb.0:
-; CHECK-CAS-O0-NEXT:    sub sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    sub sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-CAS-O0-NEXT:    str x3, [sp, #8] // 8-byte Folded Spill
 ; CHECK-CAS-O0-NEXT:    mov x1, x5
@@ -166,7 +166,7 @@ define void @val_compare_and_swap_monotonic_seqcst(i128* %p, i128 %oldval, i128
 ; CHECK-CAS-O0-NEXT:    mov v0.d[0], x9
 ; CHECK-CAS-O0-NEXT:    mov v0.d[1], x8
 ; CHECK-CAS-O0-NEXT:    str q0, [x0]
-; CHECK-CAS-O0-NEXT:    add sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
 
   %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval monotonic seq_cst
@@ -235,7 +235,7 @@ define void @val_compare_and_swap_release_acquire(i128* %p, i128 %oldval, i128 %
 ;
 ; CHECK-CAS-O0-LABEL: val_compare_and_swap_release_acquire:
 ; CHECK-CAS-O0:       // %bb.0:
-; CHECK-CAS-O0-NEXT:    sub sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    sub sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-CAS-O0-NEXT:    str x3, [sp, #8] // 8-byte Folded Spill
 ; CHECK-CAS-O0-NEXT:    mov x1, x5
@@ -251,7 +251,7 @@ define void @val_compare_and_swap_release_acquire(i128* %p, i128 %oldval, i128 %
 ; CHECK-CAS-O0-NEXT:    mov v0.d[0], x9
 ; CHECK-CAS-O0-NEXT:    mov v0.d[1], x8
 ; CHECK-CAS-O0-NEXT:    str q0, [x0]
-; CHECK-CAS-O0-NEXT:    add sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
 
   %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release acquire
@@ -320,7 +320,7 @@ define void @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval
 ;
 ; CHECK-CAS-O0-LABEL: val_compare_and_swap_monotonic:
 ; CHECK-CAS-O0:       // %bb.0:
-; CHECK-CAS-O0-NEXT:    sub sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    sub sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-CAS-O0-NEXT:    str x3, [sp, #8] // 8-byte Folded Spill
 ; CHECK-CAS-O0-NEXT:    mov x1, x5
@@ -336,7 +336,7 @@ define void @val_compare_and_swap_monotonic(i128* %p, i128 %oldval, i128 %newval
 ; CHECK-CAS-O0-NEXT:    mov v0.d[0], x9
 ; CHECK-CAS-O0-NEXT:    mov v0.d[1], x8
 ; CHECK-CAS-O0-NEXT:    str q0, [x0]
-; CHECK-CAS-O0-NEXT:    add sp, sp, #16 // =16
+; CHECK-CAS-O0-NEXT:    add sp, sp, #16
 ; CHECK-CAS-O0-NEXT:    ret
   %pair = cmpxchg i128* %p, i128 %oldval, i128 %newval release acquire
   %val = extractvalue { i128, i1 } %pair, 0
@@ -373,7 +373,7 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ;
 ; CHECK-LLSC-O0-LABEL: atomic_load_relaxed:
 ; CHECK-LLSC-O0:       // %bb.0:
-; CHECK-LLSC-O0-NEXT:    sub sp, sp, #64 // =64
+; CHECK-LLSC-O0-NEXT:    sub sp, sp, #64
 ; CHECK-LLSC-O0-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-LLSC-O0-NEXT:    str x2, [sp, #48] // 8-byte Folded Spill
 ; CHECK-LLSC-O0-NEXT:    str x3, [sp, #56] // 8-byte Folded Spill
@@ -388,17 +388,17 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-LLSC-O0-NEXT:    mov w10, #64
 ; CHECK-LLSC-O0-NEXT:    // kill: def $x10 killed $w10
 ; CHECK-LLSC-O0-NEXT:    str x10, [sp, #8] // 8-byte Folded Spill
-; CHECK-LLSC-O0-NEXT:    subs x16, x10, #64 // =64
-; CHECK-LLSC-O0-NEXT:    subs x13, x8, #64 // =64
+; CHECK-LLSC-O0-NEXT:    subs x16, x10, #64
+; CHECK-LLSC-O0-NEXT:    subs x13, x8, #64
 ; CHECK-LLSC-O0-NEXT:    lsl x14, x15, x10
 ; CHECK-LLSC-O0-NEXT:    lsr x13, x15, x13
 ; CHECK-LLSC-O0-NEXT:    orr x13, x13, x12
 ; CHECK-LLSC-O0-NEXT:    lsl x15, x15, x16
-; CHECK-LLSC-O0-NEXT:    subs x16, x10, #64 // =64
+; CHECK-LLSC-O0-NEXT:    subs x16, x10, #64
 ; CHECK-LLSC-O0-NEXT:    csel x14, x14, x12, lo
-; CHECK-LLSC-O0-NEXT:    subs x16, x10, #64 // =64
+; CHECK-LLSC-O0-NEXT:    subs x16, x10, #64
 ; CHECK-LLSC-O0-NEXT:    csel x13, x13, x15, lo
-; CHECK-LLSC-O0-NEXT:    subs x15, x10, #0 // =0
+; CHECK-LLSC-O0-NEXT:    subs x15, x10, #0
 ; CHECK-LLSC-O0-NEXT:    csel x13, x12, x13, eq
 ; CHECK-LLSC-O0-NEXT:    orr x9, x9, x14
 ; CHECK-LLSC-O0-NEXT:    orr x12, x12, x13
@@ -407,14 +407,14 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-LLSC-O0-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-LLSC-O0-NEXT:    mov v0.d[1], x12
 ; CHECK-LLSC-O0-NEXT:    str q0, [sp, #32] // 16-byte Folded Spill
-; CHECK-LLSC-O0-NEXT:    subs x13, x10, #64 // =64
-; CHECK-LLSC-O0-NEXT:    subs x8, x8, #64 // =64
+; CHECK-LLSC-O0-NEXT:    subs x13, x10, #64
+; CHECK-LLSC-O0-NEXT:    subs x8, x8, #64
 ; CHECK-LLSC-O0-NEXT:    lsl x8, x12, x8
 ; CHECK-LLSC-O0-NEXT:    orr x8, x8, x9, lsr #0
 ; CHECK-LLSC-O0-NEXT:    lsr x12, x12, x13
-; CHECK-LLSC-O0-NEXT:    subs x13, x10, #64 // =64
+; CHECK-LLSC-O0-NEXT:    subs x13, x10, #64
 ; CHECK-LLSC-O0-NEXT:    csel x8, x8, x12, lo
-; CHECK-LLSC-O0-NEXT:    subs x10, x10, #0 // =0
+; CHECK-LLSC-O0-NEXT:    subs x10, x10, #0
 ; CHECK-LLSC-O0-NEXT:    csel x10, x9, x8, eq
 ; CHECK-LLSC-O0-NEXT:    stxp w8, x9, x10, [x11]
 ; CHECK-LLSC-O0-NEXT:    cbnz w8, .LBB4_1
@@ -423,12 +423,12 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-LLSC-O0-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
 ; CHECK-LLSC-O0-NEXT:    ldr x8, [sp, #56] // 8-byte Folded Reload
 ; CHECK-LLSC-O0-NEXT:    str q0, [x8]
-; CHECK-LLSC-O0-NEXT:    add sp, sp, #64 // =64
+; CHECK-LLSC-O0-NEXT:    add sp, sp, #64
 ; CHECK-LLSC-O0-NEXT:    ret
 ;
 ; CHECK-CAS-O0-LABEL: atomic_load_relaxed:
 ; CHECK-CAS-O0:       // %bb.0:
-; CHECK-CAS-O0-NEXT:    sub sp, sp, #64 // =64
+; CHECK-CAS-O0-NEXT:    sub sp, sp, #64
 ; CHECK-CAS-O0-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-CAS-O0-NEXT:    str x2, [sp, #48] // 8-byte Folded Spill
 ; CHECK-CAS-O0-NEXT:    str x3, [sp, #56] // 8-byte Folded Spill
@@ -443,17 +443,17 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-CAS-O0-NEXT:    mov w10, #64
 ; CHECK-CAS-O0-NEXT:    // kill: def $x10 killed $w10
 ; CHECK-CAS-O0-NEXT:    str x10, [sp, #8] // 8-byte Folded Spill
-; CHECK-CAS-O0-NEXT:    subs x16, x10, #64 // =64
-; CHECK-CAS-O0-NEXT:    subs x13, x8, #64 // =64
+; CHECK-CAS-O0-NEXT:    subs x16, x10, #64
+; CHECK-CAS-O0-NEXT:    subs x13, x8, #64
 ; CHECK-CAS-O0-NEXT:    lsl x14, x15, x10
 ; CHECK-CAS-O0-NEXT:    lsr x13, x15, x13
 ; CHECK-CAS-O0-NEXT:    orr x13, x13, x12
 ; CHECK-CAS-O0-NEXT:    lsl x15, x15, x16
-; CHECK-CAS-O0-NEXT:    subs x16, x10, #64 // =64
+; CHECK-CAS-O0-NEXT:    subs x16, x10, #64
 ; CHECK-CAS-O0-NEXT:    csel x14, x14, x12, lo
-; CHECK-CAS-O0-NEXT:    subs x16, x10, #64 // =64
+; CHECK-CAS-O0-NEXT:    subs x16, x10, #64
 ; CHECK-CAS-O0-NEXT:    csel x13, x13, x15, lo
-; CHECK-CAS-O0-NEXT:    subs x15, x10, #0 // =0
+; CHECK-CAS-O0-NEXT:    subs x15, x10, #0
 ; CHECK-CAS-O0-NEXT:    csel x13, x12, x13, eq
 ; CHECK-CAS-O0-NEXT:    orr x9, x9, x14
 ; CHECK-CAS-O0-NEXT:    orr x12, x12, x13
@@ -462,14 +462,14 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-CAS-O0-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-CAS-O0-NEXT:    mov v0.d[1], x12
 ; CHECK-CAS-O0-NEXT:    str q0, [sp, #32] // 16-byte Folded Spill
-; CHECK-CAS-O0-NEXT:    subs x13, x10, #64 // =64
-; CHECK-CAS-O0-NEXT:    subs x8, x8, #64 // =64
+; CHECK-CAS-O0-NEXT:    subs x13, x10, #64
+; CHECK-CAS-O0-NEXT:    subs x8, x8, #64
 ; CHECK-CAS-O0-NEXT:    lsl x8, x12, x8
 ; CHECK-CAS-O0-NEXT:    orr x8, x8, x9, lsr #0
 ; CHECK-CAS-O0-NEXT:    lsr x12, x12, x13
-; CHECK-CAS-O0-NEXT:    subs x13, x10, #64 // =64
+; CHECK-CAS-O0-NEXT:    subs x13, x10, #64
 ; CHECK-CAS-O0-NEXT:    csel x8, x8, x12, lo
-; CHECK-CAS-O0-NEXT:    subs x10, x10, #0 // =0
+; CHECK-CAS-O0-NEXT:    subs x10, x10, #0
 ; CHECK-CAS-O0-NEXT:    csel x10, x9, x8, eq
 ; CHECK-CAS-O0-NEXT:    stxp w8, x9, x10, [x11]
 ; CHECK-CAS-O0-NEXT:    cbnz w8, .LBB4_1
@@ -478,7 +478,7 @@ define void @atomic_load_relaxed(i64, i64, i128* %p, i128* %p2) {
 ; CHECK-CAS-O0-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
 ; CHECK-CAS-O0-NEXT:    ldr x8, [sp, #56] // 8-byte Folded Reload
 ; CHECK-CAS-O0-NEXT:    str q0, [x8]
-; CHECK-CAS-O0-NEXT:    add sp, sp, #64 // =64
+; CHECK-CAS-O0-NEXT:    add sp, sp, #64
 ; CHECK-CAS-O0-NEXT:    ret
 
     %r = load atomic i128, i128* %p monotonic, align 16

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index 80e77e9ec842a..13cb5191a690f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -325,7 +325,7 @@ define i32 @fetch_and_nand(i32* %p) #0 {
 ;
 ; CHECK-NOLSE-O0-LABEL: fetch_and_nand:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    ldr w8, [x0]
 ; CHECK-NOLSE-O0-NEXT:    str w8, [sp, #28] ; 4-byte Folded Spill
@@ -357,7 +357,7 @@ define i32 @fetch_and_nand(i32* %p) #0 {
 ; CHECK-NOLSE-O0-NEXT:    b LBB6_5
 ; CHECK-NOLSE-O0-NEXT:  LBB6_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: fetch_and_nand:
@@ -375,7 +375,7 @@ define i32 @fetch_and_nand(i32* %p) #0 {
 ;
 ; CHECK-LSE-O0-LABEL: fetch_and_nand:
 ; CHECK-LSE-O0:       ; %bb.0:
-; CHECK-LSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-LSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-LSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-LSE-O0-NEXT:    ldr w8, [x0]
 ; CHECK-LSE-O0-NEXT:    str w8, [sp, #28] ; 4-byte Folded Spill
@@ -396,7 +396,7 @@ define i32 @fetch_and_nand(i32* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    b LBB6_2
 ; CHECK-LSE-O0-NEXT:  LBB6_2: ; %atomicrmw.end
 ; CHECK-LSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-LSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-LSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-LSE-O0-NEXT:    ret
   %val = atomicrmw nand i32* %p, i32 7 release
   ret i32 %val
@@ -418,7 +418,7 @@ define i64 @fetch_and_nand_64(i64* %p) #0 {
 ;
 ; CHECK-NOLSE-O0-LABEL: fetch_and_nand_64:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    ldr x8, [x0]
 ; CHECK-NOLSE-O0-NEXT:    str x8, [sp, #24] ; 8-byte Folded Spill
@@ -450,7 +450,7 @@ define i64 @fetch_and_nand_64(i64* %p) #0 {
 ; CHECK-NOLSE-O0-NEXT:    b LBB7_5
 ; CHECK-NOLSE-O0-NEXT:  LBB7_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr x0, [sp, #8] ; 8-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: fetch_and_nand_64:
@@ -468,7 +468,7 @@ define i64 @fetch_and_nand_64(i64* %p) #0 {
 ;
 ; CHECK-LSE-O0-LABEL: fetch_and_nand_64:
 ; CHECK-LSE-O0:       ; %bb.0:
-; CHECK-LSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-LSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-LSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-LSE-O0-NEXT:    ldr x8, [x0]
 ; CHECK-LSE-O0-NEXT:    str x8, [sp, #24] ; 8-byte Folded Spill
@@ -489,7 +489,7 @@ define i64 @fetch_and_nand_64(i64* %p) #0 {
 ; CHECK-LSE-O0-NEXT:    b LBB7_2
 ; CHECK-LSE-O0-NEXT:  LBB7_2: ; %atomicrmw.end
 ; CHECK-LSE-O0-NEXT:    ldr x0, [sp, #8] ; 8-byte Folded Reload
-; CHECK-LSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-LSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-LSE-O0-NEXT:    ret
   %val = atomicrmw nand i64* %p, i64 7 acq_rel
   ret i64 %val
@@ -511,7 +511,7 @@ define i32 @fetch_and_or(i32* %p) #0 {
 ;
 ; CHECK-NOLSE-O0-LABEL: fetch_and_or:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    ldr w8, [x0]
 ; CHECK-NOLSE-O0-NEXT:    str w8, [sp, #28] ; 4-byte Folded Spill
@@ -543,7 +543,7 @@ define i32 @fetch_and_or(i32* %p) #0 {
 ; CHECK-NOLSE-O0-NEXT:    b LBB8_5
 ; CHECK-NOLSE-O0-NEXT:  LBB8_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: fetch_and_or:
@@ -576,7 +576,7 @@ define i64 @fetch_and_or_64(i64* %p) #0 {
 ;
 ; CHECK-NOLSE-O0-LABEL: fetch_and_or_64:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    ldr x8, [x0]
 ; CHECK-NOLSE-O0-NEXT:    str x8, [sp, #24] ; 8-byte Folded Spill
@@ -607,7 +607,7 @@ define i64 @fetch_and_or_64(i64* %p) #0 {
 ; CHECK-NOLSE-O0-NEXT:    b LBB9_5
 ; CHECK-NOLSE-O0-NEXT:  LBB9_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr x0, [sp, #8] ; 8-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: fetch_and_or_64:
@@ -721,7 +721,7 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O0-NEXT:    add x8, x0, w1, sxtw
 ; CHECK-NOLSE-O0-NEXT:    ldrb w8, [x8]
 ; CHECK-NOLSE-O0-NEXT:    add w8, w8, w9, uxtb
-; CHECK-NOLSE-O0-NEXT:    subs x9, x0, #256 ; =256
+; CHECK-NOLSE-O0-NEXT:    subs x9, x0, #256
 ; CHECK-NOLSE-O0-NEXT:    ldrb w9, [x9]
 ; CHECK-NOLSE-O0-NEXT:    add w8, w8, w9, uxtb
 ; CHECK-NOLSE-O0-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
@@ -747,7 +747,7 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
 ; CHECK-LSE-O0-NEXT:    add x8, x0, w1, sxtw
 ; CHECK-LSE-O0-NEXT:    ldrb w8, [x8]
 ; CHECK-LSE-O0-NEXT:    add w8, w8, w9, uxtb
-; CHECK-LSE-O0-NEXT:    subs x9, x0, #256 ; =256
+; CHECK-LSE-O0-NEXT:    subs x9, x0, #256
 ; CHECK-LSE-O0-NEXT:    ldrb w9, [x9]
 ; CHECK-LSE-O0-NEXT:    add w8, w8, w9, uxtb
 ; CHECK-LSE-O0-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
@@ -791,7 +791,7 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
 ; CHECK-NOLSE-O0-NEXT:    add x8, x0, w1, sxtw #1
 ; CHECK-NOLSE-O0-NEXT:    ldrh w8, [x8]
 ; CHECK-NOLSE-O0-NEXT:    add w8, w8, w9, uxth
-; CHECK-NOLSE-O0-NEXT:    subs x9, x0, #256 ; =256
+; CHECK-NOLSE-O0-NEXT:    subs x9, x0, #256
 ; CHECK-NOLSE-O0-NEXT:    ldrh w9, [x9]
 ; CHECK-NOLSE-O0-NEXT:    add w8, w8, w9, uxth
 ; CHECK-NOLSE-O0-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
@@ -817,7 +817,7 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
 ; CHECK-LSE-O0-NEXT:    add x8, x0, w1, sxtw #1
 ; CHECK-LSE-O0-NEXT:    ldrh w8, [x8]
 ; CHECK-LSE-O0-NEXT:    add w8, w8, w9, uxth
-; CHECK-LSE-O0-NEXT:    subs x9, x0, #256 ; =256
+; CHECK-LSE-O0-NEXT:    subs x9, x0, #256
 ; CHECK-LSE-O0-NEXT:    ldrh w9, [x9]
 ; CHECK-LSE-O0-NEXT:    add w8, w8, w9, uxth
 ; CHECK-LSE-O0-NEXT:    add x9, x0, #291, lsl #12 ; =1191936
@@ -1350,7 +1350,7 @@ define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_add_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1385,7 +1385,7 @@ define i8 @atomicrmw_add_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB27_5
 ; CHECK-NOLSE-O0-NEXT:  LBB27_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_add_i8:
@@ -1416,7 +1416,7 @@ define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_xchg_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1450,7 +1450,7 @@ define i8 @atomicrmw_xchg_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB28_5
 ; CHECK-NOLSE-O0-NEXT:  LBB28_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_xchg_i8:
@@ -1481,7 +1481,7 @@ define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_sub_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1516,7 +1516,7 @@ define i8 @atomicrmw_sub_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB29_5
 ; CHECK-NOLSE-O0-NEXT:  LBB29_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_sub_i8:
@@ -1549,7 +1549,7 @@ define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_and_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1584,7 +1584,7 @@ define i8 @atomicrmw_and_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB30_5
 ; CHECK-NOLSE-O0-NEXT:  LBB30_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_and_i8:
@@ -1617,7 +1617,7 @@ define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_or_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1652,7 +1652,7 @@ define i8 @atomicrmw_or_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB31_5
 ; CHECK-NOLSE-O0-NEXT:  LBB31_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_or_i8:
@@ -1683,7 +1683,7 @@ define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_xor_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1718,7 +1718,7 @@ define i8 @atomicrmw_xor_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB32_5
 ; CHECK-NOLSE-O0-NEXT:  LBB32_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_xor_i8:
@@ -1751,7 +1751,7 @@ define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_min_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1789,7 +1789,7 @@ define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB33_5
 ; CHECK-NOLSE-O0-NEXT:  LBB33_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_min_i8:
@@ -1822,7 +1822,7 @@ define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_max_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1860,7 +1860,7 @@ define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB34_5
 ; CHECK-NOLSE-O0-NEXT:  LBB34_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_max_i8:
@@ -1893,7 +1893,7 @@ define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_umin_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -1931,7 +1931,7 @@ define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB35_5
 ; CHECK-NOLSE-O0-NEXT:  LBB35_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_umin_i8:
@@ -1964,7 +1964,7 @@ define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_umax_i8:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2002,7 +2002,7 @@ define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB36_5
 ; CHECK-NOLSE-O0-NEXT:  LBB36_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_umax_i8:
@@ -2033,7 +2033,7 @@ define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_add_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2068,7 +2068,7 @@ define i16 @atomicrmw_add_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB37_5
 ; CHECK-NOLSE-O0-NEXT:  LBB37_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_add_i16:
@@ -2099,7 +2099,7 @@ define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_xchg_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2133,7 +2133,7 @@ define i16 @atomicrmw_xchg_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB38_5
 ; CHECK-NOLSE-O0-NEXT:  LBB38_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_xchg_i16:
@@ -2164,7 +2164,7 @@ define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_sub_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2199,7 +2199,7 @@ define i16 @atomicrmw_sub_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB39_5
 ; CHECK-NOLSE-O0-NEXT:  LBB39_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_sub_i16:
@@ -2232,7 +2232,7 @@ define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_and_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2267,7 +2267,7 @@ define i16 @atomicrmw_and_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB40_5
 ; CHECK-NOLSE-O0-NEXT:  LBB40_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_and_i16:
@@ -2300,7 +2300,7 @@ define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_or_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2335,7 +2335,7 @@ define i16 @atomicrmw_or_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB41_5
 ; CHECK-NOLSE-O0-NEXT:  LBB41_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_or_i16:
@@ -2366,7 +2366,7 @@ define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_xor_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2401,7 +2401,7 @@ define i16 @atomicrmw_xor_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB42_5
 ; CHECK-NOLSE-O0-NEXT:  LBB42_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_xor_i16:
@@ -2434,7 +2434,7 @@ define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_min_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2472,7 +2472,7 @@ define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB43_5
 ; CHECK-NOLSE-O0-NEXT:  LBB43_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_min_i16:
@@ -2505,7 +2505,7 @@ define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_max_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2543,7 +2543,7 @@ define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB44_5
 ; CHECK-NOLSE-O0-NEXT:  LBB44_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_max_i16:
@@ -2576,7 +2576,7 @@ define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_umin_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2614,7 +2614,7 @@ define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB45_5
 ; CHECK-NOLSE-O0-NEXT:  LBB45_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_umin_i16:
@@ -2647,7 +2647,7 @@ define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
 ;
 ; CHECK-NOLSE-O0-LABEL: atomicrmw_umax_i16:
 ; CHECK-NOLSE-O0:       ; %bb.0:
-; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    sub sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NOLSE-O0-NEXT:    str x0, [sp, #16] ; 8-byte Folded Spill
 ; CHECK-NOLSE-O0-NEXT:    str w1, [sp, #24] ; 4-byte Folded Spill
@@ -2685,7 +2685,7 @@ define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    b LBB46_5
 ; CHECK-NOLSE-O0-NEXT:  LBB46_5: ; %atomicrmw.end
 ; CHECK-NOLSE-O0-NEXT:    ldr w0, [sp, #12] ; 4-byte Folded Reload
-; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32 ; =32
+; CHECK-NOLSE-O0-NEXT:    add sp, sp, #32
 ; CHECK-NOLSE-O0-NEXT:    ret
 ;
 ; CHECK-LSE-O1-LABEL: atomicrmw_umax_i16:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
index 778c823552a1f..f8d4731d32496 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/byval-call.ll
@@ -6,7 +6,7 @@ declare void @byval_i32(i32* byval(i32) %ptr)
 define void @call_byval_i32(i32* %incoming) {
 ; CHECK-LABEL: call_byval_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -14,7 +14,7 @@ define void @call_byval_i32(i32* %incoming) {
 ; CHECK-NEXT:    str w8, [sp]
 ; CHECK-NEXT:    bl byval_i32
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   call void @byval_i32(i32* byval(i32) %incoming)
   ret void
@@ -25,10 +25,10 @@ declare void @byval_a64i32([64 x i32]* byval([64 x i32]) %ptr)
 define void @call_byval_a64i32([64 x i32]* %incoming) {
 ; CHECK-LABEL: call_byval_a64i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #288 // =288
+; CHECK-NEXT:    sub sp, sp, #288
 ; CHECK-NEXT:    stp x29, x30, [sp, #256] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x28, [sp, #272] // 8-byte Folded Spill
-; CHECK-NEXT:    add x29, sp, #256 // =256
+; CHECK-NEXT:    add x29, sp, #256
 ; CHECK-NEXT:    .cfi_def_cfa w29, 32
 ; CHECK-NEXT:    .cfi_offset w28, -16
 ; CHECK-NEXT:    .cfi_offset w30, -24
@@ -68,7 +68,7 @@ define void @call_byval_a64i32([64 x i32]* %incoming) {
 ; CHECK-NEXT:    bl byval_a64i32
 ; CHECK-NEXT:    ldr x28, [sp, #272] // 8-byte Folded Reload
 ; CHECK-NEXT:    ldp x29, x30, [sp, #256] // 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #288 // =288
+; CHECK-NEXT:    add sp, sp, #288
 ; CHECK-NEXT:    ret
   call void @byval_a64i32([64 x i32]* byval([64 x i32]) %incoming)
   ret void

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
index 0a0f94aa0a348..42e91f6318225 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-variadic-musttail.ll
@@ -30,7 +30,7 @@ declare void @puts(i8*)
 define i32 @test_musttail_variadic_spill(i32 %arg0, ...) {
 ; CHECK-LABEL: test_musttail_variadic_spill:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    sub sp, sp, #224 ; =224
+; CHECK-NEXT:    sub sp, sp, #224
 ; CHECK-NEXT:    stp x28, x27, [sp, #128] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x26, x25, [sp, #144] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #160] ; 16-byte Folded Spill
@@ -87,7 +87,7 @@ define i32 @test_musttail_variadic_spill(i32 %arg0, ...) {
 ; CHECK-NEXT:    ldp x24, x23, [sp, #160] ; 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x26, x25, [sp, #144] ; 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x28, x27, [sp, #128] ; 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #224 ; =224
+; CHECK-NEXT:    add sp, sp, #224
 ; CHECK-NEXT:    b _musttail_variadic_callee
 ; CHECK-NEXT:    .loh AdrpAdd Lloh0, Lloh1
   call void @puts(i8* getelementptr ([4 x i8], [4 x i8]* @asdf, i32 0, i32 0))
@@ -102,7 +102,7 @@ declare void(i8*, ...)* @get_f(i8* %this)
 define void @f_thunk(i8* %this, ...) {
 ; CHECK-LABEL: f_thunk:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    sub sp, sp, #256 ; =256
+; CHECK-NEXT:    sub sp, sp, #256
 ; CHECK-NEXT:    stp x28, x27, [sp, #160] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x26, x25, [sp, #176] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #192] ; 16-byte Folded Spill
@@ -123,8 +123,8 @@ define void @f_thunk(i8* %this, ...) {
 ; CHECK-NEXT:    .cfi_offset w27, -88
 ; CHECK-NEXT:    .cfi_offset w28, -96
 ; CHECK-NEXT:    mov x27, x8
-; CHECK-NEXT:    add x8, sp, #128 ; =128
-; CHECK-NEXT:    add x9, sp, #256 ; =256
+; CHECK-NEXT:    add x8, sp, #128
+; CHECK-NEXT:    add x9, sp, #256
 ; CHECK-NEXT:    mov x19, x0
 ; CHECK-NEXT:    mov x20, x1
 ; CHECK-NEXT:    mov x21, x2
@@ -159,7 +159,7 @@ define void @f_thunk(i8* %this, ...) {
 ; CHECK-NEXT:    ldp x24, x23, [sp, #192] ; 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x26, x25, [sp, #176] ; 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x28, x27, [sp, #160] ; 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #256 ; =256
+; CHECK-NEXT:    add sp, sp, #256
 ; CHECK-NEXT:    br x9
   %ap = alloca [4 x i8*], align 16
   %ap_i8 = bitcast [4 x i8*]* %ap to i8*

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll b/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
index 42ea5a5ef514b..67af9e7574bad 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/freeze.ll
@@ -67,12 +67,12 @@ define <2 x i32> @freeze_ivec() {
 define i8* @freeze_ptr() {
 ; CHECK-LABEL: freeze_ptr:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x0, x8, #4 // =4
+; CHECK-NEXT:    add x0, x8, #4
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: freeze_ptr:
 ; GISEL:       // %bb.0:
-; GISEL-NEXT:    add x0, x8, #4 // =4
+; GISEL-NEXT:    add x0, x8, #4
 ; GISEL-NEXT:    ret
   %y1 = freeze i8* undef
   %t1 = getelementptr i8, i8* %y1, i64 4

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
index d980bc09548c5..cdb55d9c15c77 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
@@ -154,14 +154,14 @@ entry:
 }
 ; CHECK-LABEL: novla_nodynamicrealign_nocall
 ;   Check that space is reserved for one local variable on the stack.
-; CHECK:	sub	sp, sp, #16             // =16
+; CHECK:	sub	sp, sp, #16
 ;   Check correct access to arguments passed on the stack, through stack pointer
 ; CHECK: ldr	d[[DARG:[0-9]+]], [sp, #40]
 ; CHECK: ldr	w[[IARG:[0-9]+]], [sp, #24]
 ;   Check correct access to local variable on the stack, through stack pointer
 ; CHECK: ldr	w[[ILOC:[0-9]+]], [sp, #12]
 ;   Check epilogue:
-; CHECK: add	sp, sp, #16             // =16
+; CHECK: add	sp, sp, #16
 ; CHECK: ret
 
 
@@ -394,7 +394,7 @@ entry:
 ;   bytes & the base pointer (x19) gets initialized to
 ;   this 128-byte aligned area for local variables &
 ;   spill slots
-; CHECK: sub	x9, sp, #80            // =80
+; CHECK: sub	x9, sp, #80
 ; CHECK: and	sp, x9, #0xffffffffffffff80
 ; CHECK: mov    x19, sp
 ;   Check correctness of cfi pseudo-instructions
@@ -688,7 +688,7 @@ bb1:
 ; CHECK-LABEL: realign_conditional2
 ; Extra realignment in the prologue (performance issue).
 ; CHECK:  tbz  {{.*}} .[[LABEL:.*]]
-; CHECK:  sub  x9, sp, #32            // =32
+; CHECK:  sub  x9, sp, #32
 ; CHECK:  and  sp, x9, #0xffffffffffffffe0
 ; CHECK:  mov   x19, sp
 ; Stack is realigned in a non-entry BB.

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
index ec58526468810..e4bbdbf88bcc6 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-load-ext.ll
@@ -23,7 +23,7 @@ define <2 x i16> @test1(<2 x i16>* %v2i16_ptr) {
 ; CHECK-LE-LABEL: test1:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ld1 { v0.h }[0], [x0]
-; CHECK-LE-NEXT:    add x8, x0, #2 // =2
+; CHECK-LE-NEXT:    add x8, x0, #2
 ; CHECK-LE-NEXT:    ld1 { v0.h }[2], [x8]
 ; CHECK-LE-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-LE-NEXT:    ret
@@ -31,7 +31,7 @@ define <2 x i16> @test1(<2 x i16>* %v2i16_ptr) {
 ; CHECK-BE-LABEL: test1:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ld1 { v0.h }[0], [x0]
-; CHECK-BE-NEXT:    add x8, x0, #2 // =2
+; CHECK-BE-NEXT:    add x8, x0, #2
 ; CHECK-BE-NEXT:    ld1 { v0.h }[2], [x8]
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
@@ -67,7 +67,7 @@ define <2 x i8> @test3(<2 x i8>* %v2i8_ptr) {
 ; CHECK-LE-LABEL: test3:
 ; CHECK-LE:       // %bb.0:
 ; CHECK-LE-NEXT:    ld1 { v0.b }[0], [x0]
-; CHECK-LE-NEXT:    add x8, x0, #1 // =1
+; CHECK-LE-NEXT:    add x8, x0, #1
 ; CHECK-LE-NEXT:    ld1 { v0.b }[4], [x8]
 ; CHECK-LE-NEXT:    // kill: def $d0 killed $d0 killed $q0
 ; CHECK-LE-NEXT:    ret
@@ -75,7 +75,7 @@ define <2 x i8> @test3(<2 x i8>* %v2i8_ptr) {
 ; CHECK-BE-LABEL: test3:
 ; CHECK-BE:       // %bb.0:
 ; CHECK-BE-NEXT:    ld1 { v0.b }[0], [x0]
-; CHECK-BE-NEXT:    add x8, x0, #1 // =1
+; CHECK-BE-NEXT:    add x8, x0, #1
 ; CHECK-BE-NEXT:    ld1 { v0.b }[4], [x8]
 ; CHECK-BE-NEXT:    rev64 v0.2s, v0.2s
 ; CHECK-BE-NEXT:    ret
@@ -446,25 +446,25 @@ define <4 x i32> @anyext_v4i32(<4 x i8> *%a, <4 x i8> *%b) {
 define <4 x i8> @bitcast(i32 %0) {
 ; CHECK-LE-LABEL: bitcast:
 ; CHECK-LE:       // %bb.0:
-; CHECK-LE-NEXT:    sub sp, sp, #16 // =16
+; CHECK-LE-NEXT:    sub sp, sp, #16
 ; CHECK-LE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-LE-NEXT:    str w0, [sp, #12]
 ; CHECK-LE-NEXT:    ldr s0, [sp, #12]
 ; CHECK-LE-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-LE-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-LE-NEXT:    add sp, sp, #16 // =16
+; CHECK-LE-NEXT:    add sp, sp, #16
 ; CHECK-LE-NEXT:    ret
 ;
 ; CHECK-BE-LABEL: bitcast:
 ; CHECK-BE:       // %bb.0:
-; CHECK-BE-NEXT:    sub sp, sp, #16 // =16
+; CHECK-BE-NEXT:    sub sp, sp, #16
 ; CHECK-BE-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-BE-NEXT:    str w0, [sp, #12]
 ; CHECK-BE-NEXT:    ldr s0, [sp, #12]
 ; CHECK-BE-NEXT:    rev32 v0.8b, v0.8b
 ; CHECK-BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; CHECK-BE-NEXT:    rev64 v0.4h, v0.4h
-; CHECK-BE-NEXT:    add sp, sp, #16 // =16
+; CHECK-BE-NEXT:    add sp, sp, #16
 ; CHECK-BE-NEXT:    ret
   %2 = bitcast i32 %0 to <4 x i8>
   ret <4 x i8> %2

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index 966cf7b46daa5..3a692d576c6c7 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -13,8 +13,8 @@ define void @matrix_mul_unsigned(i32 %N, i32* nocapture %C, i16* nocapture reado
 ; CHECK-NEXT:    add x9, x2, w0, uxtw #1
 ; CHECK-NEXT:    ldp d1, d2, [x9]
 ; CHECK-NEXT:    add x9, x1, w0, uxtw #2
-; CHECK-NEXT:    subs x8, x8, #8 // =8
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    subs x8, x8, #8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
 ; CHECK-NEXT:    umull v2.4s, v0.4h, v2.4h
 ; CHECK-NEXT:    stp q1, q2, [x9]
@@ -77,8 +77,8 @@ define void @matrix_mul_signed(i32 %N, i32* nocapture %C, i16* nocapture readonl
 ; CHECK-NEXT:    add x9, x2, w0, sxtw #1
 ; CHECK-NEXT:    ldp d1, d2, [x9]
 ; CHECK-NEXT:    add x9, x1, w0, sxtw #2
-; CHECK-NEXT:    subs x8, x8, #8 // =8
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    subs x8, x8, #8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    smull v1.4s, v0.4h, v1.4h
 ; CHECK-NEXT:    smull v2.4s, v0.4h, v2.4h
 ; CHECK-NEXT:    stp q1, q2, [x9]
@@ -141,11 +141,11 @@ define void @matrix_mul_double_shuffle(i32 %N, i32* nocapture %C, i16* nocapture
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldrh w9, [x2], #16
 ; CHECK-NEXT:    mov w10, w0
-; CHECK-NEXT:    subs x8, x8, #8 // =8
+; CHECK-NEXT:    subs x8, x8, #8
 ; CHECK-NEXT:    lsl x10, x10, #2
 ; CHECK-NEXT:    dup v1.4h, w9
 ; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    str q1, [x1, x10]
 ; CHECK-NEXT:    b.ne .LBB2_1
 ; CHECK-NEXT:  // %bb.2: // %for.end12

diff  --git a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
index 256ea1d0cf6a7..d3b89099f41ae 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-tail-dup-size.ll
@@ -29,7 +29,7 @@ define dso_local void @testcase(%a** nocapture %arg){
 ; CHECK-O2-NEXT:  .LBB0_3: // %if.end
 ; CHECK-O2-NEXT:    adrp x9, global_int
 ; CHECK-O2-NEXT:    ldr w1, [x9, :lo12:global_int]
-; CHECK-O2-NEXT:    add x2, x8, #16 // =16
+; CHECK-O2-NEXT:    add x2, x8, #16
 ; CHECK-O2-NEXT:    mov w0, #10
 ; CHECK-O2-NEXT:    b externalfunc
 ;
@@ -44,14 +44,14 @@ define dso_local void @testcase(%a** nocapture %arg){
 ; CHECK-O3-NEXT:    ldr x8, [x8, :lo12:global_ptr]
 ; CHECK-O3-NEXT:    adrp x9, global_int
 ; CHECK-O3-NEXT:    ldr w1, [x9, :lo12:global_int]
-; CHECK-O3-NEXT:    add x2, x8, #16 // =16
+; CHECK-O3-NEXT:    add x2, x8, #16
 ; CHECK-O3-NEXT:    mov w0, #10
 ; CHECK-O3-NEXT:    b externalfunc
 ; CHECK-O3-NEXT:  .LBB0_2:
 ; CHECK-O3-NEXT:    mov x8, xzr
 ; CHECK-O3-NEXT:    adrp x9, global_int
 ; CHECK-O3-NEXT:    ldr w1, [x9, :lo12:global_int]
-; CHECK-O3-NEXT:    add x2, x8, #16 // =16
+; CHECK-O3-NEXT:    add x2, x8, #16
 ; CHECK-O3-NEXT:    mov w0, #10
 ; CHECK-O3-NEXT:    b externalfunc
 entry:

diff  --git a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
index fe1a1f0e58136..4972c9e18a7c6 100644
--- a/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64_win64cc_vararg.ll
@@ -4,9 +4,9 @@
 define win64cc void @pass_va(i32 %count, ...) nounwind {
 ; CHECK-LABEL: pass_va:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub sp, sp, #96 // =96
-; CHECK-NEXT:    add x8, sp, #40 // =40
-; CHECK-NEXT:    add x0, sp, #40 // =40
+; CHECK-NEXT:    sub sp, sp, #96
+; CHECK-NEXT:    add x8, sp, #40
+; CHECK-NEXT:    add x0, sp, #40
 ; CHECK-NEXT:    stp x30, x18, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x1, x2, [sp, #40]
 ; CHECK-NEXT:    stp x3, x4, [sp, #56]
@@ -15,7 +15,7 @@ define win64cc void @pass_va(i32 %count, ...) nounwind {
 ; CHECK-NEXT:    str x8, [sp, #8]
 ; CHECK-NEXT:    bl other_func
 ; CHECK-NEXT:    ldp x30, x18, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #96 // =96
+; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
 entry:
   %ap = alloca i8*, align 8
@@ -35,8 +35,8 @@ define win64cc i8* @f9(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64
 ; CHECK-LABEL: f9:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x18, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    add x8, sp, #24 // =24
-; CHECK-NEXT:    add x0, sp, #24 // =24
+; CHECK-NEXT:    add x8, sp, #24
+; CHECK-NEXT:    add x0, sp, #24
 ; CHECK-NEXT:    str x8, [sp, #8]
 ; CHECK-NEXT:    ldr x18, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -52,8 +52,8 @@ define win64cc i8* @f8(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64
 ; CHECK-LABEL: f8:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x18, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    add x8, sp, #16 // =16
-; CHECK-NEXT:    add x0, sp, #16 // =16
+; CHECK-NEXT:    add x8, sp, #16
+; CHECK-NEXT:    add x0, sp, #16
 ; CHECK-NEXT:    str x8, [sp, #8]
 ; CHECK-NEXT:    ldr x18, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -69,9 +69,9 @@ define win64cc i8* @f7(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, i64 %a5, i64
 ; CHECK-LABEL: f7:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x18, [sp, #-32]! // 8-byte Folded Spill
-; CHECK-NEXT:    add x8, sp, #24 // =24
+; CHECK-NEXT:    add x8, sp, #24
 ; CHECK-NEXT:    str x7, [sp, #24]
-; CHECK-NEXT:    add x0, sp, #24 // =24
+; CHECK-NEXT:    add x0, sp, #24
 ; CHECK-NEXT:    str x8, [sp, #8]
 ; CHECK-NEXT:    ldr x18, [sp], #32 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
index 43c98e818750e..aee78ba09a2f0 100644
--- a/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-constant-folding.ll
@@ -9,7 +9,7 @@ declare void @vec_use(<4 x i32> %arg)
 define i32 @add_const_add_const(i32 %arg) {
 ; CHECK-LABEL: add_const_add_const:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w0, w0, #10 // =10
+; CHECK-NEXT:    add w0, w0, #10
 ; CHECK-NEXT:    ret
   %t0 = add i32 %arg, 8
   %t1 = add i32 %t0, 2
@@ -24,9 +24,9 @@ define i32 @add_const_add_const_extrause(i32 %arg) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    bl use
-; CHECK-NEXT:    add w0, w19, #10 // =10
+; CHECK-NEXT:    add w0, w19, #10
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
   %t0 = add i32 %arg, 8
@@ -49,7 +49,7 @@ define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) {
 define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_add_const_add_const_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -61,7 +61,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #10
 ; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
   call void @vec_use(<4 x i32> %t0)
@@ -86,7 +86,7 @@ define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) {
 define i32 @add_const_sub_const(i32 %arg) {
 ; CHECK-LABEL: add_const_sub_const:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w0, w0, #6 // =6
+; CHECK-NEXT:    add w0, w0, #6
 ; CHECK-NEXT:    ret
   %t0 = add i32 %arg, 8
   %t1 = sub i32 %t0, 2
@@ -101,9 +101,9 @@ define i32 @add_const_sub_const_extrause(i32 %arg) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    bl use
-; CHECK-NEXT:    add w0, w19, #6 // =6
+; CHECK-NEXT:    add w0, w19, #6
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
   %t0 = add i32 %arg, 8
@@ -126,7 +126,7 @@ define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) {
 define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_add_const_sub_const_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -138,7 +138,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #6
 ; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
   call void @vec_use(<4 x i32> %t0)
@@ -179,7 +179,7 @@ define i32 @add_const_const_sub_extrause(i32 %arg) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    bl use
 ; CHECK-NEXT:    mov w8, #-6
 ; CHECK-NEXT:    sub w0, w8, w19
@@ -205,7 +205,7 @@ define <4 x i32> @vec_add_const_const_sub(<4 x i32> %arg) {
 define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_add_const_const_sub_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -217,7 +217,7 @@ define <4 x i32> @vec_add_const_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    mvni v0.4s, #5
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = add <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
   call void @vec_use(<4 x i32> %t0)
@@ -242,7 +242,7 @@ define <4 x i32> @vec_add_const_const_sub_nonsplat(<4 x i32> %arg) {
 define i32 @sub_const_add_const(i32 %arg) {
 ; CHECK-LABEL: sub_const_add_const:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w0, w0, #6 // =6
+; CHECK-NEXT:    sub w0, w0, #6
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %arg, 8
   %t1 = add i32 %t0, 2
@@ -257,9 +257,9 @@ define i32 @sub_const_add_const_extrause(i32 %arg) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    sub w0, w0, #8 // =8
+; CHECK-NEXT:    sub w0, w0, #8
 ; CHECK-NEXT:    bl use
-; CHECK-NEXT:    sub w0, w19, #6 // =6
+; CHECK-NEXT:    sub w0, w19, #6
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %arg, 8
@@ -282,7 +282,7 @@ define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) {
 define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_sub_const_add_const_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -294,7 +294,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    mvni v0.4s, #5
 ; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
   call void @vec_use(<4 x i32> %t0)
@@ -319,7 +319,7 @@ define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) {
 define i32 @sub_const_sub_const(i32 %arg) {
 ; CHECK-LABEL: sub_const_sub_const:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w0, w0, #10 // =10
+; CHECK-NEXT:    sub w0, w0, #10
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %arg, 8
   %t1 = sub i32 %t0, 2
@@ -334,9 +334,9 @@ define i32 @sub_const_sub_const_extrause(i32 %arg) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    sub w0, w0, #8 // =8
+; CHECK-NEXT:    sub w0, w0, #8
 ; CHECK-NEXT:    bl use
-; CHECK-NEXT:    sub w0, w19, #10 // =10
+; CHECK-NEXT:    sub w0, w19, #10
 ; CHECK-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %arg, 8
@@ -359,7 +359,7 @@ define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) {
 define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_sub_const_sub_const_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -371,7 +371,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #10
 ; CHECK-NEXT:    sub v0.4s, v1.4s, v0.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
   call void @vec_use(<4 x i32> %t0)
@@ -412,7 +412,7 @@ define i32 @sub_const_const_sub_extrause(i32 %arg) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    sub w0, w0, #8 // =8
+; CHECK-NEXT:    sub w0, w0, #8
 ; CHECK-NEXT:    bl use
 ; CHECK-NEXT:    mov w8, #10
 ; CHECK-NEXT:    sub w0, w8, w19
@@ -438,7 +438,7 @@ define <4 x i32> @vec_sub_const_const_sub(<4 x i32> %arg) {
 define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_sub_const_const_sub_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -450,7 +450,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #2
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
   call void @vec_use(<4 x i32> %t0)
@@ -518,7 +518,7 @@ define <4 x i32> @vec_const_sub_add_const(<4 x i32> %arg) {
 define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_const_sub_add_const_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -530,7 +530,7 @@ define <4 x i32> @vec_const_sub_add_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #10
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
   call void @vec_use(<4 x i32> %t0)
@@ -598,7 +598,7 @@ define <4 x i32> @vec_const_sub_sub_const(<4 x i32> %arg) {
 define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_const_sub_sub_const_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -610,7 +610,7 @@ define <4 x i32> @vec_const_sub_sub_const_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #6
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
   call void @vec_use(<4 x i32> %t0)
@@ -635,7 +635,7 @@ define <4 x i32> @vec_const_sub_sub_const_nonsplat(<4 x i32> %arg) {
 define i32 @const_sub_const_sub(i32 %arg) {
 ; CHECK-LABEL: const_sub_const_sub:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w0, w0, #6 // =6
+; CHECK-NEXT:    sub w0, w0, #6
 ; CHECK-NEXT:    ret
   %t0 = sub i32 8, %arg
   %t1 = sub i32 2, %t0
@@ -677,7 +677,7 @@ define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) {
 define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-LABEL: vec_const_sub_const_sub_extrause:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -689,7 +689,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
 ; CHECK-NEXT:    movi v0.4s, #2
 ; CHECK-NEXT:    sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %t0 = sub <4 x i32> <i32 8, i32 8, i32 8, i32 8>, %arg
   call void @vec_use(<4 x i32> %t0)

diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index 5800676d012e8..53a9feaf59f98 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -19,8 +19,8 @@ define void @add_small() {
 ; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var_i64]
 ; CHECK-NEXT:    ldr w10, [x8]
 ; CHECK-NEXT:    ldr x11, [x9]
-; CHECK-NEXT:    add w10, w10, #4095 // =4095
-; CHECK-NEXT:    add x11, x11, #52 // =52
+; CHECK-NEXT:    add w10, w10, #4095
+; CHECK-NEXT:    add x11, x11, #52
 ; CHECK-NEXT:    str w10, [x8]
 ; CHECK-NEXT:    str x11, [x9]
 ; CHECK-NEXT:    ret
@@ -50,7 +50,7 @@ define void @add_small_imm(i8* %p, i64* %q, i32 %b, i32* %addr) {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
 ; CHECK-NEXT:    add w9, w8, w2
-; CHECK-NEXT:    add x8, x8, #12 // =12
+; CHECK-NEXT:    add x8, x8, #12
 ; CHECK-NEXT:    str w9, [x3]
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
@@ -105,8 +105,8 @@ define void @sub_small() {
 ; CHECK-NEXT:    ldr x9, [x9, :got_lo12:var_i64]
 ; CHECK-NEXT:    ldr w10, [x8]
 ; CHECK-NEXT:    ldr x11, [x9]
-; CHECK-NEXT:    sub w10, w10, #4095 // =4095
-; CHECK-NEXT:    sub x11, x11, #52 // =52
+; CHECK-NEXT:    sub w10, w10, #4095
+; CHECK-NEXT:    sub x11, x11, #52
 ; CHECK-NEXT:    str w10, [x8]
 ; CHECK-NEXT:    str x11, [x9]
 ; CHECK-NEXT:    ret
@@ -155,33 +155,33 @@ define void @testing() {
 ; CHECK-NEXT:    adrp x8, :got:var_i32
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:var_i32]
 ; CHECK-NEXT:    ldr w9, [x8]
-; CHECK-NEXT:    cmp w9, #4095 // =4095
+; CHECK-NEXT:    cmp w9, #4095
 ; CHECK-NEXT:    b.ne .LBB5_6
 ; CHECK-NEXT:  // %bb.1: // %test2
 ; CHECK-NEXT:    adrp x10, :got:var2_i32
 ; CHECK-NEXT:    ldr x10, [x10, :got_lo12:var2_i32]
-; CHECK-NEXT:    add w11, w9, #1 // =1
+; CHECK-NEXT:    add w11, w9, #1
 ; CHECK-NEXT:    str w11, [x8]
 ; CHECK-NEXT:    ldr w10, [x10]
 ; CHECK-NEXT:    cmp w10, #3567, lsl #12 // =14610432
 ; CHECK-NEXT:    b.lo .LBB5_6
 ; CHECK-NEXT:  // %bb.2: // %test3
-; CHECK-NEXT:    add w11, w9, #2 // =2
-; CHECK-NEXT:    cmp w9, #123 // =123
+; CHECK-NEXT:    add w11, w9, #2
+; CHECK-NEXT:    cmp w9, #123
 ; CHECK-NEXT:    str w11, [x8]
 ; CHECK-NEXT:    b.lt .LBB5_6
 ; CHECK-NEXT:  // %bb.3: // %test4
-; CHECK-NEXT:    add w11, w9, #3 // =3
-; CHECK-NEXT:    cmp w10, #321 // =321
+; CHECK-NEXT:    add w11, w9, #3
+; CHECK-NEXT:    cmp w10, #321
 ; CHECK-NEXT:    str w11, [x8]
 ; CHECK-NEXT:    b.gt .LBB5_6
 ; CHECK-NEXT:  // %bb.4: // %test5
-; CHECK-NEXT:    add w11, w9, #4 // =4
-; CHECK-NEXT:    cmn w10, #443 // =443
+; CHECK-NEXT:    add w11, w9, #4
+; CHECK-NEXT:    cmn w10, #443
 ; CHECK-NEXT:    str w11, [x8]
 ; CHECK-NEXT:    b.ge .LBB5_6
 ; CHECK-NEXT:  // %bb.5: // %test6
-; CHECK-NEXT:    add w9, w9, #5 // =5
+; CHECK-NEXT:    add w9, w9, #5
 ; CHECK-NEXT:    str w9, [x8]
 ; CHECK-NEXT:  .LBB5_6: // %common.ret
 ; CHECK-NEXT:    ret
@@ -232,7 +232,7 @@ define i1 @sadd_add(i32 %a, i32 %b, i32* %p) {
 ; CHECK-NEXT:    mvn w8, w0
 ; CHECK-NEXT:    adds w8, w8, w1
 ; CHECK-NEXT:    cset w0, vs
-; CHECK-NEXT:    add w8, w8, #1 // =1
+; CHECK-NEXT:    add w8, w8, #1
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    ret
   %nota = xor i32 %a, -1
@@ -253,7 +253,7 @@ define i1 @uadd_add(i8 %a, i8 %b, i8* %p) {
 ; CHECK-NEXT:    and w8, w8, #0xff
 ; CHECK-NEXT:    add w8, w8, w1, uxtb
 ; CHECK-NEXT:    lsr w0, w8, #8
-; CHECK-NEXT:    add w8, w8, #1 // =1
+; CHECK-NEXT:    add w8, w8, #1
 ; CHECK-NEXT:    strb w8, [x2]
 ; CHECK-NEXT:    ret
   %nota = xor i8 %a, -1

diff --git a/llvm/test/CodeGen/AArch64/align-down.ll b/llvm/test/CodeGen/AArch64/align-down.ll
index 659b69ca27bdc..9c04d19517b33 100644
--- a/llvm/test/CodeGen/AArch64/align-down.ll
+++ b/llvm/test/CodeGen/AArch64/align-down.ll
@@ -55,7 +55,7 @@ define i32 @t3_extrause0(i32 %ptr, i32 %alignment, i32* %mask_storage) nounwind
 ; CHECK-LABEL: t3_extrause0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w9, w1
-; CHECK-NEXT:    sub w8, w1, #1 // =1
+; CHECK-NEXT:    sub w8, w1, #1
 ; CHECK-NEXT:    and w0, w0, w9
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    ret
@@ -68,7 +68,7 @@ define i32 @t3_extrause0(i32 %ptr, i32 %alignment, i32* %mask_storage) nounwind
 define i32 @n4_extrause1(i32 %ptr, i32 %alignment, i32* %bias_storage) nounwind {
 ; CHECK-LABEL: n4_extrause1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w8, w1, #1 // =1
+; CHECK-NEXT:    sub w8, w1, #1
 ; CHECK-NEXT:    and w8, w0, w8
 ; CHECK-NEXT:    sub w0, w0, w8
 ; CHECK-NEXT:    str w8, [x2]
@@ -82,7 +82,7 @@ define i32 @n4_extrause1(i32 %ptr, i32 %alignment, i32* %bias_storage) nounwind
 define i32 @n5_extrause2(i32 %ptr, i32 %alignment, i32* %mask_storage, i32* %bias_storage) nounwind {
 ; CHECK-LABEL: n5_extrause2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w8, w1, #1 // =1
+; CHECK-NEXT:    sub w8, w1, #1
 ; CHECK-NEXT:    str w8, [x2]
 ; CHECK-NEXT:    and w8, w0, w8
 ; CHECK-NEXT:    sub w0, w0, w8
@@ -101,7 +101,7 @@ define i32 @n5_extrause2(i32 %ptr, i32 %alignment, i32* %mask_storage, i32* %bia
 define i32 @n6_different_ptrs(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind {
 ; CHECK-LABEL: n6_different_ptrs:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w8, w2, #1 // =1
+; CHECK-NEXT:    sub w8, w2, #1
 ; CHECK-NEXT:    and w8, w1, w8
 ; CHECK-NEXT:    sub w0, w0, w8
 ; CHECK-NEXT:    ret
@@ -113,7 +113,7 @@ define i32 @n6_different_ptrs(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind {
 define i32 @n7_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %alignment) nounwind {
 ; CHECK-LABEL: n7_different_ptrs_commutative:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w8, w2, #1 // =1
+; CHECK-NEXT:    sub w8, w2, #1
 ; CHECK-NEXT:    and w8, w8, w1
 ; CHECK-NEXT:    sub w0, w0, w8
 ; CHECK-NEXT:    ret
@@ -126,7 +126,7 @@ define i32 @n7_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %alignment)
 define i32 @n8_not_lowbit_mask(i32 %ptr, i32 %alignment) nounwind {
 ; CHECK-LABEL: n8_not_lowbit_mask:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w1, #1 // =1
+; CHECK-NEXT:    add w8, w1, #1
 ; CHECK-NEXT:    bic w0, w0, w8
 ; CHECK-NEXT:    ret
   %mask = add i32 %alignment, 1 ; not -1
@@ -138,7 +138,7 @@ define i32 @n8_not_lowbit_mask(i32 %ptr, i32 %alignment) nounwind {
 define i32 @n9_sub_is_not_commutative(i32 %ptr, i32 %alignment) nounwind {
 ; CHECK-LABEL: n9_sub_is_not_commutative:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w8, w1, #1 // =1
+; CHECK-NEXT:    sub w8, w1, #1
 ; CHECK-NEXT:    and w8, w0, w8
 ; CHECK-NEXT:    sub w0, w8, w0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
index e904e86d8e6f0..893fb8c2c1b16 100644
--- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll
@@ -7,7 +7,7 @@
 define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7, i32 %a8, i32 %a9, ...) nounwind noinline ssp {
 ; CHECK-LABEL: fn9:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64 ; =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    ldr w8, [sp, #64]
 ; CHECK-NEXT:    stp w2, w1, [sp, #52]
 ; CHECK-NEXT:    stp w4, w3, [sp, #44]
@@ -17,12 +17,12 @@ define void @fn9(i32* %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6, i32 %a7,
 ; CHECK-NEXT:    ldr w9, [sp, #72]
 ; CHECK-NEXT:    ldr w8, [sp, #80]
 ; CHECK-NEXT:    stp w8, w9, [sp, #16]
-; CHECK-NEXT:    add x8, sp, #72 ; =72
-; CHECK-NEXT:    add x8, x8, #24 ; =24
+; CHECK-NEXT:    add x8, sp, #72
+; CHECK-NEXT:    add x8, x8, #24
 ; CHECK-NEXT:    str x8, [sp, #24]
 ; CHECK-NEXT:    ldr w8, [sp, #88]
 ; CHECK-NEXT:    str w8, [sp, #12]
-; CHECK-NEXT:    add sp, sp, #64 ; =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
   %1 = alloca i32, align 4
   %2 = alloca i32, align 4
@@ -62,7 +62,7 @@ declare void @llvm.va_start(i8*) nounwind
 define i32 @main() nounwind ssp {
 ; CHECK-LABEL: main:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    sub sp, sp, #96 ; =96
+; CHECK-NEXT:    sub sp, sp, #96
 ; CHECK-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    mov w8, #2
@@ -85,7 +85,7 @@ define i32 @main() nounwind ssp {
 ; CHECK-NEXT:    stp x10, x11, [sp, #16]
 ; CHECK-NEXT:    str x9, [sp, #8]
 ; CHECK-NEXT:    str w8, [sp]
-; CHECK-NEXT:    add x0, sp, #76 ; =76
+; CHECK-NEXT:    add x0, sp, #76
 ; CHECK-NEXT:    mov w1, #2
 ; CHECK-NEXT:    mov w2, #3
 ; CHECK-NEXT:    mov w3, #4
@@ -96,7 +96,7 @@ define i32 @main() nounwind ssp {
 ; CHECK-NEXT:    bl _fn9
 ; CHECK-NEXT:    mov w0, #0
 ; CHECK-NEXT:    ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #96 ; =96
+; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
   %a1 = alloca i32, align 4
   %a2 = alloca i32, align 4
@@ -143,13 +143,13 @@ define i32 @main() nounwind ssp {
 define void @foo(i8* %fmt, ...) nounwind {
 ; CHECK-LABEL: foo:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #48 ; =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    ldr w8, [sp, #48]
 ; CHECK-NEXT:    str w8, [sp, #28]
-; CHECK-NEXT:    add x8, sp, #48 ; =48
-; CHECK-NEXT:    add x8, x8, #23 ; =23
+; CHECK-NEXT:    add x8, sp, #48
+; CHECK-NEXT:    add x8, x8, #23
 ; CHECK-NEXT:    and x8, x8, #0xfffffffffffffff0
-; CHECK-NEXT:    add x9, x8, #16 ; =16
+; CHECK-NEXT:    add x9, x8, #16
 ; CHECK-NEXT:    stp x9, x0, [sp, #32]
 ; CHECK-NEXT:    ldr q0, [x8]
 ; CHECK-NEXT:    str q0, [sp], #48
@@ -172,7 +172,7 @@ entry:
 define void @bar(i32 %x, <4 x i32> %y) nounwind {
 ; CHECK-LABEL: bar:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #80 ; =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
 ; CHECK-NEXT:    ; kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    str w0, [sp, #60]
@@ -184,7 +184,7 @@ define void @bar(i32 %x, <4 x i32> %y) nounwind {
 ; CHECK-NEXT:    add x0, x0, l_.str@PAGEOFF
 ; CHECK-NEXT:    bl _foo
 ; CHECK-NEXT:    ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #80 ; =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpAdd Lloh0, Lloh1
 entry:
@@ -205,13 +205,13 @@ entry:
 define void @foo2(i8* %fmt, ...) nounwind {
 ; CHECK-LABEL: foo2:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #48 ; =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    ldr w8, [sp, #48]
 ; CHECK-NEXT:    str w8, [sp, #28]
-; CHECK-NEXT:    add x8, sp, #48 ; =48
-; CHECK-NEXT:    add x8, x8, #23 ; =23
+; CHECK-NEXT:    add x8, sp, #48
+; CHECK-NEXT:    add x8, x8, #23
 ; CHECK-NEXT:    and x8, x8, #0xfffffffffffffff0
-; CHECK-NEXT:    add x9, x8, #16 ; =16
+; CHECK-NEXT:    add x9, x8, #16
 ; CHECK-NEXT:    stp x9, x0, [sp, #32]
 ; CHECK-NEXT:    ldr q0, [x8]
 ; CHECK-NEXT:    str q0, [sp], #48
@@ -244,7 +244,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) n
 define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
 ; CHECK-LABEL: bar2:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #80 ; =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
 ; CHECK-NEXT:    ; kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    str w0, [sp, #60]
@@ -257,7 +257,7 @@ define void @bar2(i32 %x, i128 %s41.coerce) nounwind {
 ; CHECK-NEXT:    add x0, x0, l_.str@PAGEOFF
 ; CHECK-NEXT:    bl _foo2
 ; CHECK-NEXT:    ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #80 ; =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:    .loh AdrpAdd Lloh2, Lloh3
 entry:

diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
index 89ae80a710073..d6aabd1bb6911 100644
--- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll
@@ -306,7 +306,7 @@ define void @fetch_and_min(i128* %p, i128 %bits) {
 ; CHECK-NEXT:    cmp x8, x3
 ; CHECK-NEXT:    cset w11, le
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x10, x8, x3, ne
 ; CHECK-NEXT:    csel x11, x9, x2, ne
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
@@ -333,7 +333,7 @@ define void @fetch_and_max(i128* %p, i128 %bits) {
 ; CHECK-NEXT:    cmp x8, x3
 ; CHECK-NEXT:    cset w11, gt
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x10, x8, x3, ne
 ; CHECK-NEXT:    csel x11, x9, x2, ne
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
@@ -360,7 +360,7 @@ define void @fetch_and_umin(i128* %p, i128 %bits) {
 ; CHECK-NEXT:    cmp x8, x3
 ; CHECK-NEXT:    cset w11, ls
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x10, x8, x3, ne
 ; CHECK-NEXT:    csel x11, x9, x2, ne
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]
@@ -387,7 +387,7 @@ define void @fetch_and_umax(i128* %p, i128 %bits) {
 ; CHECK-NEXT:    cmp x8, x3
 ; CHECK-NEXT:    cset w11, hi
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x10, x8, x3, ne
 ; CHECK-NEXT:    csel x11, x9, x2, ne
 ; CHECK-NEXT:    stlxp w12, x11, x10, [x0]

diff --git a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
index 567337367f801..93af46d45da09 100644
--- a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll
@@ -813,7 +813,7 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp  {
 ; LLC-NEXT:    adrp x9, first_ones
 ; LLC-NEXT:    add x9, x9, :lo12:first_ones
 ; LLC-NEXT:    ldrb w8, [x9, x8]
-; LLC-NEXT:    add w0, w8, #16 // =16
+; LLC-NEXT:    add w0, w8, #16
 ; LLC-NEXT:    ret
 ; LLC-NEXT:  .LBB26_4: // %if.end13
 ; LLC-NEXT:    ubfx x8, x0, #16, #16
@@ -822,7 +822,7 @@ define i32 @fct19(i64 %arg1) nounwind readonly ssp  {
 ; LLC-NEXT:    adrp x9, first_ones
 ; LLC-NEXT:    add x9, x9, :lo12:first_ones
 ; LLC-NEXT:    ldrb w8, [x9, x8]
-; LLC-NEXT:    add w0, w8, #32 // =32
+; LLC-NEXT:    add w0, w8, #32
 ; LLC-NEXT:    ret
 ; LLC-NEXT:  .LBB26_6:
 ; LLC-NEXT:    mov w0, #64
@@ -932,7 +932,7 @@ define i80 @fct20(i128 %a, i128 %b) {
 ; LLC-NEXT:    movk x12, #45, lsl #48
 ; LLC-NEXT:    and x11, x9, x11
 ; LLC-NEXT:    and x12, x8, x12
-; LLC-NEXT:    cmp x10, #0 // =0
+; LLC-NEXT:    cmp x10, #0
 ; LLC-NEXT:    csel x0, x12, x8, eq
 ; LLC-NEXT:    csel x1, x11, x9, eq
 ; LLC-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
index 8e2ba9b11993f..cc05240800e82 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -5,7 +5,7 @@ target triple = "arm64-apple-ios"
 define i32 @single_same(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: single_same:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #5 ; =5
+; CHECK-NEXT:    cmp w0, #5
 ; CHECK-NEXT:    ccmp w1, #17, #4, ne
 ; CHECK-NEXT:    b.ne LBB0_2
 ; CHECK-NEXT:  ; %bb.1: ; %if.then
@@ -33,7 +33,7 @@ if.end:
 define i32 @single_different(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: single_different:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #6 ; =6
+; CHECK-NEXT:    cmp w0, #6
 ; CHECK-NEXT:    ccmp w1, #17, #0, ge
 ; CHECK-NEXT:    b.eq LBB1_2
 ; CHECK-NEXT:  ; %bb.1: ; %if.then
@@ -61,13 +61,13 @@ if.end:
 define i32 @single_flagclobber(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: single_flagclobber:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #5 ; =5
+; CHECK-NEXT:    cmp w0, #5
 ; CHECK-NEXT:    b.eq LBB2_2
 ; CHECK-NEXT:  ; %bb.1: ; %lor.lhs.false
 ; CHECK-NEXT:    lsl w8, w1, #1
-; CHECK-NEXT:    cmp w1, #7 ; =7
+; CHECK-NEXT:    cmp w1, #7
 ; CHECK-NEXT:    csinc w8, w8, w1, lt
-; CHECK-NEXT:    cmp w8, #16 ; =16
+; CHECK-NEXT:    cmp w8, #16
 ; CHECK-NEXT:    b.gt LBB2_3
 ; CHECK-NEXT:  LBB2_2: ; %if.then
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
@@ -100,11 +100,11 @@ if.end:                                           ; preds = %if.then, %lor.lhs.f
 define i32 @single_flagclobber_tbz(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: single_flagclobber_tbz:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #5 ; =5
+; CHECK-NEXT:    cmp w0, #5
 ; CHECK-NEXT:    b.eq LBB3_2
 ; CHECK-NEXT:  ; %bb.1: ; %lor.lhs.false
 ; CHECK-NEXT:    lsl w8, w1, #1
-; CHECK-NEXT:    cmp w1, #7 ; =7
+; CHECK-NEXT:    cmp w1, #7
 ; CHECK-NEXT:    csinc w8, w8, w1, lt
 ; CHECK-NEXT:    tbz w8, #3, LBB3_3
 ; CHECK-NEXT:  LBB3_2: ; %if.then
@@ -141,7 +141,7 @@ if.end:                                           ; preds = %if.then, %lor.lhs.f
 define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: speculate_division:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #1 ; =1
+; CHECK-NEXT:    cmp w0, #1
 ; CHECK-NEXT:    sdiv w8, w1, w0
 ; CHECK-NEXT:    ccmp w8, #16, #0, ge
 ; CHECK-NEXT:    b.le LBB4_2
@@ -175,7 +175,7 @@ if.end:
 define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
 ; CHECK-LABEL: single_fcmp:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #1 ; =1
+; CHECK-NEXT:    cmp w0, #1
 ; CHECK-NEXT:    scvtf s1, w0
 ; CHECK-NEXT:    fdiv s0, s0, s1
 ; CHECK-NEXT:    fmov s1, #17.00000000
@@ -244,7 +244,7 @@ if.end:
 define i32 @cbz_head(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: cbz_head:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ccmp w1, #17, #0, ne
 ; CHECK-NEXT:    b.eq LBB7_2
 ; CHECK-NEXT:  ; %bb.1: ; %if.then
@@ -274,10 +274,10 @@ if.end:
 define i32 @immediate_range(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: immediate_range:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #5 ; =5
+; CHECK-NEXT:    cmp w0, #5
 ; CHECK-NEXT:    b.eq LBB8_3
 ; CHECK-NEXT:  ; %bb.1: ; %entry
-; CHECK-NEXT:    cmp w1, #32 ; =32
+; CHECK-NEXT:    cmp w1, #32
 ; CHECK-NEXT:    b.eq LBB8_3
 ; CHECK-NEXT:  ; %bb.2: ; %if.end
 ; CHECK-NEXT:    mov w0, #7
@@ -306,7 +306,7 @@ if.end:
 define i32 @cbz_second(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: cbz_second:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ccmp w1, #0, #0, ne
 ; CHECK-NEXT:    b.eq LBB9_2
 ; CHECK-NEXT:  ; %bb.1: ; %if.then
@@ -334,7 +334,7 @@ if.end:
 define i32 @cbnz_second(i32 %a, i32 %b) nounwind ssp {
 ; CHECK-LABEL: cbnz_second:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ccmp w1, #0, #4, ne
 ; CHECK-NEXT:    b.ne LBB10_2
 ; CHECK-NEXT:  ; %bb.1: ; %if.then
@@ -367,7 +367,7 @@ declare i32 @foo()
 define void @build_modify_expr() nounwind ssp {
 ; CHECK-LABEL: build_modify_expr:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w8, #37 ; =37
+; CHECK-NEXT:    cmp w8, #37
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsl x8, x8, xzr
 ; CHECK-NEXT:    mov x9, #31
@@ -409,7 +409,7 @@ sw.bb.i.i:
 define i64 @select_and(i32 %w0, i32 %w1, i64 %x2, i64 %x3) {
 ; CHECK-LABEL: select_and:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w1, #5 ; =5
+; CHECK-NEXT:    cmp w1, #5
 ; CHECK-NEXT:    ccmp w0, w1, #0, ne
 ; CHECK-NEXT:    csel x0, x2, x3, lt
 ; CHECK-NEXT:    ret
@@ -423,7 +423,7 @@ define i64 @select_and(i32 %w0, i32 %w1, i64 %x2, i64 %x3) {
 define i64 @select_or(i32 %w0, i32 %w1, i64 %x2, i64 %x3) {
 ; CHECK-LABEL: select_or:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w1, #5 ; =5
+; CHECK-NEXT:    cmp w1, #5
 ; CHECK-NEXT:    ccmp w0, w1, #8, eq
 ; CHECK-NEXT:    csel x0, x2, x3, lt
 ; CHECK-NEXT:    ret
@@ -437,7 +437,7 @@ define i64 @select_or(i32 %w0, i32 %w1, i64 %x2, i64 %x3) {
 define i64 @gccbug(i64 %x0, i64 %x1) {
 ; CHECK-LABEL: gccbug:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp x0, #2 ; =2
+; CHECK-NEXT:    cmp x0, #2
 ; CHECK-NEXT:    ccmp x0, #4, #4, ne
 ; CHECK-NEXT:    ccmp x1, #0, #0, eq
 ; CHECK-NEXT:    mov w8, #1
@@ -457,7 +457,7 @@ define i64 @gccbug(i64 %x0, i64 %x1) {
 define i32 @select_ororand(i32 %w0, i32 %w1, i32 %w2, i32 %w3) {
 ; CHECK-LABEL: select_ororand:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w3, #4 ; =4
+; CHECK-NEXT:    cmp w3, #4
 ; CHECK-NEXT:    ccmp w2, #2, #0, gt
 ; CHECK-NEXT:    ccmp w1, #13, #2, ge
 ; CHECK-NEXT:    ccmp w0, #0, #4, ls
@@ -494,18 +494,18 @@ define i32 @select_andor(i32 %v1, i32 %v2, i32 %v3) {
 define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 ; CHECK-LABEL: select_noccmp1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp x0, #0 ; =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmp x0, #13 ; =13
+; CHECK-NEXT:    cmp x0, #13
 ; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    cmp x2, #2 ; =2
+; CHECK-NEXT:    cmp x2, #2
 ; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp x2, #4 ; =4
+; CHECK-NEXT:    cmp x2, #4
 ; CHECK-NEXT:    cset w11, gt
 ; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    and w9, w10, w11
 ; CHECK-NEXT:    orr w8, w8, w9
-; CHECK-NEXT:    cmp w8, #0 ; =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel x0, xzr, x3, ne
 ; CHECK-NEXT:    ret
   %c0 = icmp slt i64 %v1, 0
@@ -526,12 +526,12 @@ define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 ; CHECK-LABEL: select_noccmp2:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp x0, #0 ; =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmp x0, #13 ; =13
+; CHECK-NEXT:    cmp x0, #13
 ; CHECK-NEXT:    cset w9, gt
 ; CHECK-NEXT:    orr w8, w8, w9
-; CHECK-NEXT:    cmp w8, #0 ; =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel x0, xzr, x3, ne
 ; CHECK-NEXT:    sbfx w8, w8, #0, #1
 ; CHECK-NEXT:    adrp x9, _g@PAGE
@@ -551,17 +551,17 @@ define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 define i32 @select_noccmp3(i32 %v0, i32 %v1, i32 %v2) {
 ; CHECK-LABEL: select_noccmp3:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w8, lt
-; CHECK-NEXT:    cmp w0, #13 ; =13
+; CHECK-NEXT:    cmp w0, #13
 ; CHECK-NEXT:    cset w9, gt
-; CHECK-NEXT:    cmp w0, #22 ; =22
+; CHECK-NEXT:    cmp w0, #22
 ; CHECK-NEXT:    cset w10, lt
-; CHECK-NEXT:    cmp w0, #44 ; =44
+; CHECK-NEXT:    cmp w0, #44
 ; CHECK-NEXT:    cset w11, gt
-; CHECK-NEXT:    cmp w0, #99 ; =99
+; CHECK-NEXT:    cmp w0, #99
 ; CHECK-NEXT:    cset w12, eq
-; CHECK-NEXT:    cmp w0, #77 ; =77
+; CHECK-NEXT:    cmp w0, #77
 ; CHECK-NEXT:    cset w13, eq
 ; CHECK-NEXT:    orr w8, w8, w9
 ; CHECK-NEXT:    orr w9, w10, w11
@@ -787,7 +787,7 @@ define i32 @half_select_and_olt_one(half %v0, half %v1, half %v2, half %v3, i32
 define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: f128_select_and_olt_oge:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    sub sp, sp, #80 ; =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    stp x22, x21, [sp, #32] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] ; 16-byte Folded Spill
 ; CHECK-NEXT:    stp x29, x30, [sp, #64] ; 16-byte Folded Spill
@@ -795,18 +795,18 @@ define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3,
 ; CHECK-NEXT:    mov x20, x0
 ; CHECK-NEXT:    stp q2, q3, [sp] ; 32-byte Folded Spill
 ; CHECK-NEXT:    bl ___lttf2
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w21, lt
 ; CHECK-NEXT:    ldp q0, q1, [sp] ; 32-byte Folded Reload
 ; CHECK-NEXT:    bl ___getf2
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w8, ge
 ; CHECK-NEXT:    tst w8, w21
 ; CHECK-NEXT:    csel w0, w20, w19, ne
 ; CHECK-NEXT:    ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #80 ; =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
   %c0 = fcmp olt fp128 %v0, %v1
   %c1 = fcmp oge fp128 %v2, %v3
@@ -820,7 +820,7 @@ define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3,
 define i32 @deep_or(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
 ; CHECK-LABEL: deep_or:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w2, #20 ; =20
+; CHECK-NEXT:    cmp w2, #20
 ; CHECK-NEXT:    ccmp w2, #15, #4, ne
 ; CHECK-NEXT:    ccmp w1, #0, #4, eq
 ; CHECK-NEXT:    ccmp w0, #0, #4, ne
@@ -842,7 +842,7 @@ define i32 @deep_or(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
 define i32 @deep_or1(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
 ; CHECK-LABEL: deep_or1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w2, #20 ; =20
+; CHECK-NEXT:    cmp w2, #20
 ; CHECK-NEXT:    ccmp w2, #15, #4, ne
 ; CHECK-NEXT:    ccmp w0, #0, #4, eq
 ; CHECK-NEXT:    ccmp w1, #0, #4, ne
@@ -864,7 +864,7 @@ define i32 @deep_or1(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
 define i32 @deep_or2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
 ; CHECK-LABEL: deep_or2:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    cmp w2, #20 ; =20
+; CHECK-NEXT:    cmp w2, #20
 ; CHECK-NEXT:    ccmp w2, #15, #4, ne
 ; CHECK-NEXT:    ccmp w1, #0, #4, eq
 ; CHECK-NEXT:    ccmp w0, #0, #4, ne

diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 1386bc9de5836..fbc2bcf34eab8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -74,7 +74,7 @@ define fp128 @test_div() {
 define dso_local void @test_fptosi() {
 ; CHECK-LABEL: test_fptosi:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -89,7 +89,7 @@ define dso_local void @test_fptosi() {
 ; CHECK-NEXT:    adrp x8, var64
 ; CHECK-NEXT:    str x0, [x8, :lo12:var64]
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %val = load fp128, fp128* @lhs, align 16
 
@@ -105,7 +105,7 @@ define dso_local void @test_fptosi() {
 define dso_local void @test_fptoui() {
 ; CHECK-LABEL: test_fptoui:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -120,7 +120,7 @@ define dso_local void @test_fptoui() {
 ; CHECK-NEXT:    adrp x8, var64
 ; CHECK-NEXT:    str x0, [x8, :lo12:var64]
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %val = load fp128, fp128* @lhs, align 16
 
@@ -204,7 +204,7 @@ define dso_local i1 @test_setcc1() {
 ; CHECK-NEXT:    adrp x8, rhs
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    bl __letf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w0, le
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -230,7 +230,7 @@ define dso_local i1 @test_setcc2() {
 ; CHECK-NEXT:    adrp x8, rhs
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    bl __letf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w0, gt
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -246,7 +246,7 @@ define dso_local i1 @test_setcc2() {
 define dso_local i1 @test_setcc3() {
 ; CHECK-LABEL: test_setcc3:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    stp x30, x19, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    .cfi_offset w19, -8
@@ -257,15 +257,15 @@ define dso_local i1 @test_setcc3() {
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    stp q1, q0, [sp] // 32-byte Folded Spill
 ; CHECK-NEXT:    bl __eqtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w19, eq
 ; CHECK-NEXT:    ldp q1, q0, [sp] // 32-byte Folded Reload
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w8, ne
 ; CHECK-NEXT:    orr w0, w8, w19
 ; CHECK-NEXT:    ldp x30, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
 
   %lhs = load fp128, fp128* @lhs, align 16
@@ -288,7 +288,7 @@ define dso_local i32 @test_br_cc() {
 ; CHECK-NEXT:    adrp x8, rhs
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:rhs]
 ; CHECK-NEXT:    bl __lttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    b.ge .LBB11_2
 ; CHECK-NEXT:  // %bb.1: // %iftrue
 ; CHECK-NEXT:    mov w0, #42
@@ -336,7 +336,7 @@ define dso_local void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
 define dso_local void @test_round() {
 ; CHECK-LABEL: test_round:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -355,7 +355,7 @@ define dso_local void @test_round() {
 ; CHECK-NEXT:    adrp x8, vardouble
 ; CHECK-NEXT:    str d0, [x8, :lo12:vardouble]
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
 
   %val = load fp128, fp128* @lhs, align 16

diff --git a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
index a0b2e2168731c..0ebd35267f772 100644
--- a/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
@@ -233,7 +233,7 @@ define void @memset_8_stack() {
 ; CHECK-LABEL: memset_8_stack:
 ; CHECK:       mov x8, #-6148914691236517206
 ; CHECK-NEXT:  stp x30, x8, [sp, #-16]!
-; CHECK-NEXT:  add x0, sp, #8 // =8
+; CHECK-NEXT:  add x0, sp, #8
 ; CHECK-NEXT:  bl something
   %buf = alloca [8 x i8], align 1
   %cast = bitcast [8 x i8]* %buf to i8*

diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
index 2878811b063ab..b3828073fb08f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -1237,7 +1237,7 @@ define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
 ; CHECK-NEXT:    mov v1.h[2], v0.h[2]
 ; CHECK-NEXT:    mov v1.h[3], v0.h[3]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %tmp = extractelement <8 x i16> %x, i32 %idx
   %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 0
@@ -1253,11 +1253,11 @@ define <4 x i16> @test_extracts_inserts_varidx_extract(<8 x i16> %x, i32 %idx) {
 define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
 ; CHECK-LABEL: test_extracts_inserts_varidx_insert:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    and x8, x0, #0x3
-; CHECK-NEXT:    add x9, sp, #8 // =8
+; CHECK-NEXT:    add x9, sp, #8
 ; CHECK-NEXT:    bfi x9, x8, #1, #2
 ; CHECK-NEXT:    str h0, [x9]
 ; CHECK-NEXT:    ldr d1, [sp, #8]
@@ -1265,7 +1265,7 @@ define <4 x i16> @test_extracts_inserts_varidx_insert(<8 x i16> %x, i32 %idx) {
 ; CHECK-NEXT:    mov v1.h[2], v0.h[2]
 ; CHECK-NEXT:    mov v1.h[3], v0.h[3]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %tmp = extractelement <8 x i16> %x, i32 0
   %tmp2 = insertelement <4 x i16> undef, i16 %tmp, i32 %idx

diff --git a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
index ebaef0cbd4c20..327d37a6ab9e8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-nvcast.ll
@@ -3,7 +3,7 @@
 define void @test(float * %p1, i32 %v1) {
 ; CHECK-LABEL: test:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #16 ; =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ; kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    fmov.2d v0, #2.00000000
@@ -13,7 +13,7 @@ define void @test(float * %p1, i32 %v1) {
 ; CHECK-NEXT:    bfi x9, x8, #2, #2
 ; CHECK-NEXT:    ldr s0, [x9]
 ; CHECK-NEXT:    str s0, [x0]
-; CHECK-NEXT:    add sp, sp, #16 ; =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
 entry:
   %v2 = extractelement <3 x float> <float 0.000000e+00, float 2.000000e+00, float 0.000000e+00>, i32 %v1
@@ -24,7 +24,7 @@ entry:
 define void @test2(float * %p1, i32 %v1) {
 ; CHECK-LABEL: test2:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #16 ; =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ; kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    movi.16b v0, #63
@@ -34,7 +34,7 @@ define void @test2(float * %p1, i32 %v1) {
 ; CHECK-NEXT:    bfi x9, x8, #2, #2
 ; CHECK-NEXT:    ldr s0, [x9]
 ; CHECK-NEXT:    str s0, [x0]
-; CHECK-NEXT:    add sp, sp, #16 ; =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
 entry:
   %v2 = extractelement <3 x float> <float 0.7470588088035583, float 0.7470588088035583, float 0.7470588088035583>, i32 %v1

diff --git a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
index 1b465eb60adde..f05f025c199ec 100644
--- a/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-popcnt.ll
@@ -172,7 +172,7 @@ define i32 @ctpop_eq_one(i64 %x) nounwind readnone {
 ; CHECK-NEXT:    cnt.8b v0, v0
 ; CHECK-NEXT:    uaddlv.8b h0, v0
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    cmp x8, #1 // =1
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
 ;
@@ -190,7 +190,7 @@ define i32 @ctpop_eq_one(i64 %x) nounwind readnone {
 ; CHECK-NONEON-NEXT:    mov x9, #72340172838076673
 ; CHECK-NONEON-NEXT:    mul x8, x8, x9
 ; CHECK-NONEON-NEXT:    lsr x8, x8, #56
-; CHECK-NONEON-NEXT:    cmp x8, #1 // =1
+; CHECK-NONEON-NEXT:    cmp x8, #1
 ; CHECK-NONEON-NEXT:    cset w0, eq
 ; CHECK-NONEON-NEXT:    ret
   %count = tail call i64 @llvm.ctpop.i64(i64 %x)
@@ -206,7 +206,7 @@ define i32 @ctpop_ne_one(i64 %x) nounwind readnone {
 ; CHECK-NEXT:    cnt.8b v0, v0
 ; CHECK-NEXT:    uaddlv.8b h0, v0
 ; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    cmp x8, #1 // =1
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    cset w0, ne
 ; CHECK-NEXT:    ret
 ;
@@ -224,7 +224,7 @@ define i32 @ctpop_ne_one(i64 %x) nounwind readnone {
 ; CHECK-NONEON-NEXT:    mov x9, #72340172838076673
 ; CHECK-NONEON-NEXT:    mul x8, x8, x9
 ; CHECK-NONEON-NEXT:    lsr x8, x8, #56
-; CHECK-NONEON-NEXT:    cmp x8, #1 // =1
+; CHECK-NONEON-NEXT:    cmp x8, #1
 ; CHECK-NONEON-NEXT:    cset w0, ne
 ; CHECK-NONEON-NEXT:    ret
   %count = tail call i64 @llvm.ctpop.i64(i64 %x)

diff --git a/llvm/test/CodeGen/AArch64/arm64-rev.ll b/llvm/test/CodeGen/AArch64/arm64-rev.ll
index 0879184c507a9..5393cdc8ab5da 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rev.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rev.ll
@@ -535,7 +535,7 @@ define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst
 ; CHECK-LABEL: test_vrev64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    add x8, x1, #2 // =2
+; CHECK-NEXT:    add x8, x1, #2
 ; CHECK-NEXT:    st1.h { v0 }[5], [x8]
 ; CHECK-NEXT:    st1.h { v0 }[6], [x1]
 ; CHECK-NEXT:    ret
@@ -543,7 +543,7 @@ define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst
 ; GISEL-LABEL: test_vrev64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    ldr q0, [x0]
-; GISEL-NEXT:    add x8, x1, #2 // =2
+; GISEL-NEXT:    add x8, x1, #2
 ; GISEL-NEXT:    st1.h { v0 }[6], [x1]
 ; GISEL-NEXT:    st1.h { v0 }[5], [x8]
 ; GISEL-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
index 68664a35661fe..20cb62491c677 100644
--- a/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-shrink-wrapping.ll
@@ -12,26 +12,26 @@ define i32 @foo(i32 %a, i32 %b) {
 ; ENABLE-NEXT:    cmp w0, w1
 ; ENABLE-NEXT:    b.ge LBB0_2
 ; ENABLE-NEXT:  ; %bb.1: ; %true
-; ENABLE-NEXT:    sub sp, sp, #32 ; =32
+; ENABLE-NEXT:    sub sp, sp, #32
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
 ; ENABLE-NEXT:    stur w0, [x29, #-4]
-; ENABLE-NEXT:    sub x1, x29, #4 ; =4
+; ENABLE-NEXT:    sub x1, x29, #4
 ; ENABLE-NEXT:    mov w0, wzr
 ; ENABLE-NEXT:    bl _doSomething
 ; ENABLE-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; ENABLE-NEXT:    add sp, sp, #32 ; =32
+; ENABLE-NEXT:    add sp, sp, #32
 ; ENABLE-NEXT:  LBB0_2: ; %false
 ; ENABLE-NEXT:    ret
 ;
 ; DISABLE-LABEL: foo:
 ; DISABLE:       ; %bb.0:
-; DISABLE-NEXT:    sub sp, sp, #32 ; =32
+; DISABLE-NEXT:    sub sp, sp, #32
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -39,12 +39,12 @@ define i32 @foo(i32 %a, i32 %b) {
 ; DISABLE-NEXT:    b.ge LBB0_2
 ; DISABLE-NEXT:  ; %bb.1: ; %true
 ; DISABLE-NEXT:    stur w0, [x29, #-4]
-; DISABLE-NEXT:    sub x1, x29, #4 ; =4
+; DISABLE-NEXT:    sub x1, x29, #4
 ; DISABLE-NEXT:    mov w0, wzr
 ; DISABLE-NEXT:    bl _doSomething
 ; DISABLE-NEXT:  LBB0_2: ; %false
 ; DISABLE-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; DISABLE-NEXT:    add sp, sp, #32 ; =32
+; DISABLE-NEXT:    add sp, sp, #32
 ; DISABLE-NEXT:    ret
   %tmp = alloca i32, align 4
   %tmp2 = icmp slt i32 %a, %b
@@ -73,7 +73,7 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:  ; %bb.1: ; %for.body.preheader
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -84,7 +84,7 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:  LBB1_2: ; %for.body
 ; ENABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    bl _something
-; ENABLE-NEXT:    subs w20, w20, #1 ; =1
+; ENABLE-NEXT:    subs w20, w20, #1
 ; ENABLE-NEXT:    add w19, w0, w19
 ; ENABLE-NEXT:    b.ne LBB1_2
 ; ENABLE-NEXT:  ; %bb.3: ; %for.end
@@ -100,7 +100,7 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -113,7 +113,7 @@ define i32 @freqSaveAndRestoreOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:  LBB1_2: ; %for.body
 ; DISABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    bl _something
-; DISABLE-NEXT:    subs w20, w20, #1 ; =1
+; DISABLE-NEXT:    subs w20, w20, #1
 ; DISABLE-NEXT:    add w19, w0, w19
 ; DISABLE-NEXT:    b.ne LBB1_2
 ; DISABLE-NEXT:  ; %bb.3: ; %for.end
@@ -160,7 +160,7 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -171,7 +171,7 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; ENABLE-NEXT:  LBB2_1: ; %for.body
 ; ENABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    bl _something
-; ENABLE-NEXT:    subs w20, w20, #1 ; =1
+; ENABLE-NEXT:    subs w20, w20, #1
 ; ENABLE-NEXT:    add w19, w0, w19
 ; ENABLE-NEXT:    b.ne LBB2_1
 ; ENABLE-NEXT:  ; %bb.2: ; %for.end
@@ -184,7 +184,7 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -195,7 +195,7 @@ define i32 @freqSaveAndRestoreOutsideLoop2(i32 %cond) {
 ; DISABLE-NEXT:  LBB2_1: ; %for.body
 ; DISABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    bl _something
-; DISABLE-NEXT:    subs w20, w20, #1 ; =1
+; DISABLE-NEXT:    subs w20, w20, #1
 ; DISABLE-NEXT:    add w19, w0, w19
 ; DISABLE-NEXT:    b.ne LBB2_1
 ; DISABLE-NEXT:  ; %bb.2: ; %for.end
@@ -228,7 +228,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:  ; %bb.1: ; %for.body.preheader
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -239,7 +239,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:  LBB3_2: ; %for.body
 ; ENABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    bl _something
-; ENABLE-NEXT:    subs w20, w20, #1 ; =1
+; ENABLE-NEXT:    subs w20, w20, #1
 ; ENABLE-NEXT:    add w19, w0, w19
 ; ENABLE-NEXT:    b.ne LBB3_2
 ; ENABLE-NEXT:  ; %bb.3: ; %for.end
@@ -256,7 +256,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -269,7 +269,7 @@ define i32 @loopInfoSaveOutsideLoop(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:  LBB3_2: ; %for.body
 ; DISABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    bl _something
-; DISABLE-NEXT:    subs w20, w20, #1 ; =1
+; DISABLE-NEXT:    subs w20, w20, #1
 ; DISABLE-NEXT:    add w19, w0, w19
 ; DISABLE-NEXT:    b.ne LBB3_2
 ; DISABLE-NEXT:  ; %bb.3: ; %for.end
@@ -320,14 +320,14 @@ define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    bl _somethingElse
 ; ENABLE-NEXT:    mov w19, wzr
 ; ENABLE-NEXT:    mov w20, #10
 ; ENABLE-NEXT:  LBB4_2: ; %for.body
 ; ENABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    bl _something
-; ENABLE-NEXT:    subs w20, w20, #1 ; =1
+; ENABLE-NEXT:    subs w20, w20, #1
 ; ENABLE-NEXT:    add w19, w0, w19
 ; ENABLE-NEXT:    b.ne LBB4_2
 ; ENABLE-NEXT:  ; %bb.3: ; %for.end
@@ -343,7 +343,7 @@ define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    cbz w0, LBB4_4
 ; DISABLE-NEXT:  ; %bb.1: ; %if.then
 ; DISABLE-NEXT:    bl _somethingElse
@@ -352,7 +352,7 @@ define i32 @loopInfoRestoreOutsideLoop(i32 %cond, i32 %N) nounwind {
 ; DISABLE-NEXT:  LBB4_2: ; %for.body
 ; DISABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    bl _something
-; DISABLE-NEXT:    subs w20, w20, #1 ; =1
+; DISABLE-NEXT:    subs w20, w20, #1
 ; DISABLE-NEXT:    add w19, w0, w19
 ; DISABLE-NEXT:    b.ne LBB4_2
 ; DISABLE-NEXT:  ; %bb.3: ; %for.end
@@ -415,23 +415,23 @@ define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind {
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    cbz w0, LBB6_4
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
-; ENABLE-NEXT:    sub sp, sp, #16 ; =16
-; ENABLE-NEXT:    add x8, sp, #16 ; =16
-; ENABLE-NEXT:    cmp w1, #1 ; =1
+; ENABLE-NEXT:    sub sp, sp, #16
+; ENABLE-NEXT:    add x8, sp, #16
+; ENABLE-NEXT:    cmp w1, #1
 ; ENABLE-NEXT:    str x8, [sp, #8]
 ; ENABLE-NEXT:    mov w0, wzr
 ; ENABLE-NEXT:    b.lt LBB6_3
 ; ENABLE-NEXT:  LBB6_2: ; %for.body
 ; ENABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; ENABLE-NEXT:    ldr x8, [sp, #8]
-; ENABLE-NEXT:    add x9, x8, #8 ; =8
+; ENABLE-NEXT:    add x9, x8, #8
 ; ENABLE-NEXT:    str x9, [sp, #8]
 ; ENABLE-NEXT:    ldr w8, [x8]
-; ENABLE-NEXT:    subs w1, w1, #1 ; =1
+; ENABLE-NEXT:    subs w1, w1, #1
 ; ENABLE-NEXT:    add w0, w0, w8
 ; ENABLE-NEXT:    b.ne LBB6_2
 ; ENABLE-NEXT:  LBB6_3: ; %for.end
-; ENABLE-NEXT:    add sp, sp, #16 ; =16
+; ENABLE-NEXT:    add sp, sp, #16
 ; ENABLE-NEXT:    ret
 ; ENABLE-NEXT:  LBB6_4: ; %if.else
 ; ENABLE-NEXT:    lsl w0, w1, #1
@@ -439,29 +439,29 @@ define i32 @variadicFunc(i32 %cond, i32 %count, ...) nounwind {
 ;
 ; DISABLE-LABEL: variadicFunc:
 ; DISABLE:       ; %bb.0: ; %entry
-; DISABLE-NEXT:    sub sp, sp, #16 ; =16
+; DISABLE-NEXT:    sub sp, sp, #16
 ; DISABLE-NEXT:    cbz w0, LBB6_4
 ; DISABLE-NEXT:  ; %bb.1: ; %if.then
-; DISABLE-NEXT:    add x8, sp, #16 ; =16
-; DISABLE-NEXT:    cmp w1, #1 ; =1
+; DISABLE-NEXT:    add x8, sp, #16
+; DISABLE-NEXT:    cmp w1, #1
 ; DISABLE-NEXT:    str x8, [sp, #8]
 ; DISABLE-NEXT:    mov w0, wzr
 ; DISABLE-NEXT:    b.lt LBB6_3
 ; DISABLE-NEXT:  LBB6_2: ; %for.body
 ; DISABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; DISABLE-NEXT:    ldr x8, [sp, #8]
-; DISABLE-NEXT:    add x9, x8, #8 ; =8
+; DISABLE-NEXT:    add x9, x8, #8
 ; DISABLE-NEXT:    str x9, [sp, #8]
 ; DISABLE-NEXT:    ldr w8, [x8]
-; DISABLE-NEXT:    subs w1, w1, #1 ; =1
+; DISABLE-NEXT:    subs w1, w1, #1
 ; DISABLE-NEXT:    add w0, w0, w8
 ; DISABLE-NEXT:    b.ne LBB6_2
 ; DISABLE-NEXT:  LBB6_3: ; %if.end
-; DISABLE-NEXT:    add sp, sp, #16 ; =16
+; DISABLE-NEXT:    add sp, sp, #16
 ; DISABLE-NEXT:    ret
 ; DISABLE-NEXT:  LBB6_4: ; %if.else
 ; DISABLE-NEXT:    lsl w0, w1, #1
-; DISABLE-NEXT:    add sp, sp, #16 ; =16
+; DISABLE-NEXT:    add sp, sp, #16
 ; DISABLE-NEXT:    ret
 entry:
   %ap = alloca i8*, align 8
@@ -514,9 +514,9 @@ define i32 @inlineAsm(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    mov w8, #10
 ; ENABLE-NEXT:  LBB7_2: ; %for.body
 ; ENABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
-; ENABLE-NEXT:    subs w8, w8, #1 ; =1
+; ENABLE-NEXT:    subs w8, w8, #1
 ; ENABLE-NEXT:    ; InlineAsm Start
-; ENABLE-NEXT:    add x19, x19, #1 ; =1
+; ENABLE-NEXT:    add x19, x19, #1
 ; ENABLE-NEXT:    ; InlineAsm End
 ; ENABLE-NEXT:    b.ne LBB7_2
 ; ENABLE-NEXT:  ; %bb.3:
@@ -538,9 +538,9 @@ define i32 @inlineAsm(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:    mov w8, #10
 ; DISABLE-NEXT:  LBB7_2: ; %for.body
 ; DISABLE-NEXT:    ; =>This Inner Loop Header: Depth=1
-; DISABLE-NEXT:    subs w8, w8, #1 ; =1
+; DISABLE-NEXT:    subs w8, w8, #1
 ; DISABLE-NEXT:    ; InlineAsm Start
-; DISABLE-NEXT:    add x19, x19, #1 ; =1
+; DISABLE-NEXT:    add x19, x19, #1
 ; DISABLE-NEXT:    ; InlineAsm End
 ; DISABLE-NEXT:    b.ne LBB7_2
 ; DISABLE-NEXT:  ; %bb.3:
@@ -578,9 +578,9 @@ define i32 @callVariadicFunc(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    ; kill: def $w1 killed $w1 def $x1
 ; ENABLE-NEXT:    cbz w0, LBB8_2
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
-; ENABLE-NEXT:    sub sp, sp, #64 ; =64
+; ENABLE-NEXT:    sub sp, sp, #64
 ; ENABLE-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #48 ; =48
+; ENABLE-NEXT:    add x29, sp, #48
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -591,7 +591,7 @@ define i32 @callVariadicFunc(i32 %cond, i32 %N) {
 ; ENABLE-NEXT:    bl _someVariadicFunc
 ; ENABLE-NEXT:    lsl w0, w0, #3
 ; ENABLE-NEXT:    ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
-; ENABLE-NEXT:    add sp, sp, #64 ; =64
+; ENABLE-NEXT:    add sp, sp, #64
 ; ENABLE-NEXT:    ret
 ; ENABLE-NEXT:  LBB8_2: ; %if.else
 ; ENABLE-NEXT:    lsl w0, w1, #1
@@ -599,9 +599,9 @@ define i32 @callVariadicFunc(i32 %cond, i32 %N) {
 ;
 ; DISABLE-LABEL: callVariadicFunc:
 ; DISABLE:       ; %bb.0: ; %entry
-; DISABLE-NEXT:    sub sp, sp, #64 ; =64
+; DISABLE-NEXT:    sub sp, sp, #64
 ; DISABLE-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #48 ; =48
+; DISABLE-NEXT:    add x29, sp, #48
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -619,7 +619,7 @@ define i32 @callVariadicFunc(i32 %cond, i32 %N) {
 ; DISABLE-NEXT:    lsl w0, w1, #1
 ; DISABLE-NEXT:  LBB8_3: ; %if.end
 ; DISABLE-NEXT:    ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
-; DISABLE-NEXT:    add sp, sp, #64 ; =64
+; DISABLE-NEXT:    add sp, sp, #64
 ; DISABLE-NEXT:    ret
 entry:
   %tobool = icmp eq i32 %cond, 0
@@ -703,7 +703,7 @@ define void @infiniteloop() {
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -711,7 +711,7 @@ define void @infiniteloop() {
 ; ENABLE-NEXT:    .cfi_offset w20, -32
 ; ENABLE-NEXT:    cbnz wzr, LBB10_3
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
-; ENABLE-NEXT:    sub x19, sp, #16 ; =16
+; ENABLE-NEXT:    sub x19, sp, #16
 ; ENABLE-NEXT:    mov sp, x19
 ; ENABLE-NEXT:    mov w20, wzr
 ; ENABLE-NEXT:  LBB10_2: ; %for.body
@@ -721,7 +721,7 @@ define void @infiniteloop() {
 ; ENABLE-NEXT:    str w20, [x19]
 ; ENABLE-NEXT:    b LBB10_2
 ; ENABLE-NEXT:  LBB10_3: ; %if.end
-; ENABLE-NEXT:    sub sp, x29, #16 ; =16
+; ENABLE-NEXT:    sub sp, x29, #16
 ; ENABLE-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
 ; ENABLE-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; ENABLE-NEXT:    ret
@@ -730,7 +730,7 @@ define void @infiniteloop() {
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -738,7 +738,7 @@ define void @infiniteloop() {
 ; DISABLE-NEXT:    .cfi_offset w20, -32
 ; DISABLE-NEXT:    cbnz wzr, LBB10_3
 ; DISABLE-NEXT:  ; %bb.1: ; %if.then
-; DISABLE-NEXT:    sub x19, sp, #16 ; =16
+; DISABLE-NEXT:    sub x19, sp, #16
 ; DISABLE-NEXT:    mov sp, x19
 ; DISABLE-NEXT:    mov w20, wzr
 ; DISABLE-NEXT:  LBB10_2: ; %for.body
@@ -748,7 +748,7 @@ define void @infiniteloop() {
 ; DISABLE-NEXT:    str w20, [x19]
 ; DISABLE-NEXT:    b LBB10_2
 ; DISABLE-NEXT:  LBB10_3: ; %if.end
-; DISABLE-NEXT:    sub sp, x29, #16 ; =16
+; DISABLE-NEXT:    sub sp, x29, #16
 ; DISABLE-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
 ; DISABLE-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; DISABLE-NEXT:    ret
@@ -776,7 +776,7 @@ define void @infiniteloop2() {
 ; ENABLE:       ; %bb.0: ; %entry
 ; ENABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #16 ; =16
+; ENABLE-NEXT:    add x29, sp, #16
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
 ; ENABLE-NEXT:    .cfi_offset w29, -16
@@ -784,7 +784,7 @@ define void @infiniteloop2() {
 ; ENABLE-NEXT:    .cfi_offset w20, -32
 ; ENABLE-NEXT:    cbnz wzr, LBB11_3
 ; ENABLE-NEXT:  ; %bb.1: ; %if.then
-; ENABLE-NEXT:    sub x8, sp, #16 ; =16
+; ENABLE-NEXT:    sub x8, sp, #16
 ; ENABLE-NEXT:    mov sp, x8
 ; ENABLE-NEXT:    mov w9, wzr
 ; ENABLE-NEXT:    ; InlineAsm Start
@@ -800,7 +800,7 @@ define void @infiniteloop2() {
 ; ENABLE-NEXT:    mov w9, #1
 ; ENABLE-NEXT:    b LBB11_2
 ; ENABLE-NEXT:  LBB11_3: ; %if.end
-; ENABLE-NEXT:    sub sp, x29, #16 ; =16
+; ENABLE-NEXT:    sub sp, x29, #16
 ; ENABLE-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
 ; ENABLE-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; ENABLE-NEXT:    ret
@@ -809,7 +809,7 @@ define void @infiniteloop2() {
 ; DISABLE:       ; %bb.0: ; %entry
 ; DISABLE-NEXT:    stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #16 ; =16
+; DISABLE-NEXT:    add x29, sp, #16
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
 ; DISABLE-NEXT:    .cfi_offset w29, -16
@@ -817,7 +817,7 @@ define void @infiniteloop2() {
 ; DISABLE-NEXT:    .cfi_offset w20, -32
 ; DISABLE-NEXT:    cbnz wzr, LBB11_3
 ; DISABLE-NEXT:  ; %bb.1: ; %if.then
-; DISABLE-NEXT:    sub x8, sp, #16 ; =16
+; DISABLE-NEXT:    sub x8, sp, #16
 ; DISABLE-NEXT:    mov sp, x8
 ; DISABLE-NEXT:    mov w9, wzr
 ; DISABLE-NEXT:    ; InlineAsm Start
@@ -833,7 +833,7 @@ define void @infiniteloop2() {
 ; DISABLE-NEXT:    mov w9, #1
 ; DISABLE-NEXT:    b LBB11_2
 ; DISABLE-NEXT:  LBB11_3: ; %if.end
-; DISABLE-NEXT:    sub sp, x29, #16 ; =16
+; DISABLE-NEXT:    sub sp, x29, #16
 ; DISABLE-NEXT:    ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
 ; DISABLE-NEXT:    ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
 ; DISABLE-NEXT:    ret
@@ -947,7 +947,7 @@ define i32 @stack_realign(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) {
 ; ENABLE:       ; %bb.0:
 ; ENABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
 ; ENABLE-NEXT:    mov x29, sp
-; ENABLE-NEXT:    sub x9, sp, #16 ; =16
+; ENABLE-NEXT:    sub x9, sp, #16
 ; ENABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
@@ -969,7 +969,7 @@ define i32 @stack_realign(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) {
 ; DISABLE:       ; %bb.0:
 ; DISABLE-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
 ; DISABLE-NEXT:    mov x29, sp
-; DISABLE-NEXT:    sub x9, sp, #16 ; =16
+; DISABLE-NEXT:    sub x9, sp, #16
 ; DISABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
@@ -1018,8 +1018,8 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; ENABLE-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x20, x19, [sp, #64] ; 16-byte Folded Spill
 ; ENABLE-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
-; ENABLE-NEXT:    add x29, sp, #80 ; =80
-; ENABLE-NEXT:    sub x9, sp, #32 ; =32
+; ENABLE-NEXT:    add x29, sp, #80
+; ENABLE-NEXT:    sub x9, sp, #32
 ; ENABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; ENABLE-NEXT:    .cfi_def_cfa w29, 16
 ; ENABLE-NEXT:    .cfi_offset w30, -8
@@ -1060,7 +1060,7 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; ENABLE-NEXT:    stp w0, w1, [x2, #4]
 ; ENABLE-NEXT:    stp w16, w11, [x2, #12]
 ; ENABLE-NEXT:    stp w13, w14, [x2, #20]
-; ENABLE-NEXT:    sub sp, x29, #80 ; =80
+; ENABLE-NEXT:    sub sp, x29, #80
 ; ENABLE-NEXT:    ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
 ; ENABLE-NEXT:    ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
 ; ENABLE-NEXT:    ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
@@ -1077,8 +1077,8 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; DISABLE-NEXT:    stp x22, x21, [sp, #48] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x20, x19, [sp, #64] ; 16-byte Folded Spill
 ; DISABLE-NEXT:    stp x29, x30, [sp, #80] ; 16-byte Folded Spill
-; DISABLE-NEXT:    add x29, sp, #80 ; =80
-; DISABLE-NEXT:    sub x9, sp, #32 ; =32
+; DISABLE-NEXT:    add x29, sp, #80
+; DISABLE-NEXT:    sub x9, sp, #32
 ; DISABLE-NEXT:    and sp, x9, #0xffffffffffffffe0
 ; DISABLE-NEXT:    .cfi_def_cfa w29, 16
 ; DISABLE-NEXT:    .cfi_offset w30, -8
@@ -1119,7 +1119,7 @@ define void @stack_realign2(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2, i32* %ptr3,
 ; DISABLE-NEXT:    stp w0, w1, [x2, #4]
 ; DISABLE-NEXT:    stp w16, w11, [x2, #12]
 ; DISABLE-NEXT:    stp w13, w14, [x2, #20]
-; DISABLE-NEXT:    sub sp, x29, #80 ; =80
+; DISABLE-NEXT:    sub sp, x29, #80
 ; DISABLE-NEXT:    ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
 ; DISABLE-NEXT:    ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
 ; DISABLE-NEXT:    ldp x22, x21, [sp, #48] ; 16-byte Folded Reload

diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 24ddb890085a4..7e0e670d7682f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1628,12 +1628,12 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    sbcs x11, x12, x14
 ; CHECK-NEXT:    negs x12, x8
 ; CHECK-NEXT:    ngcs x13, x11
-; CHECK-NEXT:    cmp x11, #0 // =0
+; CHECK-NEXT:    cmp x11, #0
 ; CHECK-NEXT:    csel x2, x12, x8, lt
 ; CHECK-NEXT:    csel x3, x13, x11, lt
 ; CHECK-NEXT:    negs x8, x9
 ; CHECK-NEXT:    ngcs x11, x10
-; CHECK-NEXT:    cmp x10, #0 // =0
+; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    csel x8, x8, x9, lt
 ; CHECK-NEXT:    csel x1, x11, x10, lt
 ; CHECK-NEXT:    fmov d0, x8

diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
index 0f2be66dc46e5..06be6209f023e 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-O0.ll
@@ -7,7 +7,7 @@
 define i8 @test_rmw_add_8(i8* %dst)   {
 ; NOLSE-LABEL: test_rmw_add_8:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldrb w8, [x0]
@@ -18,7 +18,7 @@ define i8 @test_rmw_add_8(i8* %dst)   {
 ; NOLSE-NEXT:    // Child Loop BB0_2 Depth 2
 ; NOLSE-NEXT:    ldr w9, [sp, #28] // 4-byte Folded Reload
 ; NOLSE-NEXT:    ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT:    add w12, w9, #1 // =1
+; NOLSE-NEXT:    add w12, w9, #1
 ; NOLSE-NEXT:  .LBB0_2: // %atomicrmw.start
 ; NOLSE-NEXT:    // Parent Loop BB0_1 Depth=1
 ; NOLSE-NEXT:    // => This Inner Loop Header: Depth=2
@@ -34,13 +34,13 @@ define i8 @test_rmw_add_8(i8* %dst)   {
 ; NOLSE-NEXT:    subs w9, w8, w9, uxtb
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB0_1
 ; NOLSE-NEXT:    b .LBB0_5
 ; NOLSE-NEXT:  .LBB0_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_add_8:
@@ -56,7 +56,7 @@ entry:
 define i16 @test_rmw_add_16(i16* %dst)   {
 ; NOLSE-LABEL: test_rmw_add_16:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldrh w8, [x0]
@@ -67,7 +67,7 @@ define i16 @test_rmw_add_16(i16* %dst)   {
 ; NOLSE-NEXT:    // Child Loop BB1_2 Depth 2
 ; NOLSE-NEXT:    ldr w9, [sp, #28] // 4-byte Folded Reload
 ; NOLSE-NEXT:    ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT:    add w12, w9, #1 // =1
+; NOLSE-NEXT:    add w12, w9, #1
 ; NOLSE-NEXT:  .LBB1_2: // %atomicrmw.start
 ; NOLSE-NEXT:    // Parent Loop BB1_1 Depth=1
 ; NOLSE-NEXT:    // => This Inner Loop Header: Depth=2
@@ -83,13 +83,13 @@ define i16 @test_rmw_add_16(i16* %dst)   {
 ; NOLSE-NEXT:    subs w9, w8, w9, uxth
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB1_1
 ; NOLSE-NEXT:    b .LBB1_5
 ; NOLSE-NEXT:  .LBB1_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_add_16:
@@ -105,7 +105,7 @@ entry:
 define i32 @test_rmw_add_32(i32* %dst)   {
 ; NOLSE-LABEL: test_rmw_add_32:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldr w8, [x0]
@@ -116,7 +116,7 @@ define i32 @test_rmw_add_32(i32* %dst)   {
 ; NOLSE-NEXT:    // Child Loop BB2_2 Depth 2
 ; NOLSE-NEXT:    ldr w9, [sp, #28] // 4-byte Folded Reload
 ; NOLSE-NEXT:    ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT:    add w12, w9, #1 // =1
+; NOLSE-NEXT:    add w12, w9, #1
 ; NOLSE-NEXT:  .LBB2_2: // %atomicrmw.start
 ; NOLSE-NEXT:    // Parent Loop BB2_1 Depth=1
 ; NOLSE-NEXT:    // => This Inner Loop Header: Depth=2
@@ -132,13 +132,13 @@ define i32 @test_rmw_add_32(i32* %dst)   {
 ; NOLSE-NEXT:    subs w9, w8, w9
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB2_1
 ; NOLSE-NEXT:    b .LBB2_5
 ; NOLSE-NEXT:  .LBB2_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_add_32:
@@ -154,7 +154,7 @@ entry:
 define i64 @test_rmw_add_64(i64* %dst)   {
 ; NOLSE-LABEL: test_rmw_add_64:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldr x8, [x0]
@@ -165,7 +165,7 @@ define i64 @test_rmw_add_64(i64* %dst)   {
 ; NOLSE-NEXT:    // Child Loop BB3_2 Depth 2
 ; NOLSE-NEXT:    ldr x9, [sp, #24] // 8-byte Folded Reload
 ; NOLSE-NEXT:    ldr x11, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT:    add x12, x9, #1 // =1
+; NOLSE-NEXT:    add x12, x9, #1
 ; NOLSE-NEXT:  .LBB3_2: // %atomicrmw.start
 ; NOLSE-NEXT:    // Parent Loop BB3_1 Depth=1
 ; NOLSE-NEXT:    // => This Inner Loop Header: Depth=2
@@ -181,13 +181,13 @@ define i64 @test_rmw_add_64(i64* %dst)   {
 ; NOLSE-NEXT:    subs x9, x8, x9
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str x8, [sp, #8] // 8-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str x8, [sp, #24] // 8-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB3_1
 ; NOLSE-NEXT:    b .LBB3_5
 ; NOLSE-NEXT:  .LBB3_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr x0, [sp, #8] // 8-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_add_64:
@@ -204,7 +204,7 @@ entry:
 define i128 @test_rmw_add_128(i128* %dst)   {
 ; NOLSE-LABEL: test_rmw_add_128:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #48 // =48
+; NOLSE-NEXT:    sub sp, sp, #48
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 48
 ; NOLSE-NEXT:    str x0, [sp, #24] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldr x8, [x0, #8]
@@ -218,7 +218,7 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; NOLSE-NEXT:    ldr x11, [sp, #40] // 8-byte Folded Reload
 ; NOLSE-NEXT:    ldr x8, [sp, #32] // 8-byte Folded Reload
 ; NOLSE-NEXT:    ldr x13, [sp, #24] // 8-byte Folded Reload
-; NOLSE-NEXT:    adds x14, x8, #1 // =1
+; NOLSE-NEXT:    adds x14, x8, #1
 ; NOLSE-NEXT:    mov x9, xzr
 ; NOLSE-NEXT:    adcs x15, x11, x9
 ; NOLSE-NEXT:  .LBB4_2: // %atomicrmw.start
@@ -253,12 +253,12 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; NOLSE-NEXT:  .LBB4_6: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr x1, [sp, #8] // 8-byte Folded Reload
 ; NOLSE-NEXT:    ldr x0, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #48 // =48
+; NOLSE-NEXT:    add sp, sp, #48
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_add_128:
 ; LSE:       // %bb.0: // %entry
-; LSE-NEXT:    sub sp, sp, #80 // =80
+; LSE-NEXT:    sub sp, sp, #80
 ; LSE-NEXT:    .cfi_def_cfa_offset 80
 ; LSE-NEXT:    str x0, [sp, #56] // 8-byte Folded Spill
 ; LSE-NEXT:    ldr x8, [x0, #8]
@@ -271,7 +271,7 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; LSE-NEXT:    ldr x10, [sp, #72] // 8-byte Folded Reload
 ; LSE-NEXT:    ldr x8, [sp, #64] // 8-byte Folded Reload
 ; LSE-NEXT:    ldr x9, [sp, #56] // 8-byte Folded Reload
-; LSE-NEXT:    adds x2, x8, #1 // =1
+; LSE-NEXT:    adds x2, x8, #1
 ; LSE-NEXT:    mov x11, xzr
 ; LSE-NEXT:    adcs x11, x10, x11
 ; LSE-NEXT:    // kill: def $x2 killed $x2 def $x2_x3
@@ -295,7 +295,7 @@ define i128 @test_rmw_add_128(i128* %dst)   {
 ; LSE-NEXT:  .LBB4_2: // %atomicrmw.end
 ; LSE-NEXT:    ldr x1, [sp, #40] // 8-byte Folded Reload
 ; LSE-NEXT:    ldr x0, [sp, #48] // 8-byte Folded Reload
-; LSE-NEXT:    add sp, sp, #80 // =80
+; LSE-NEXT:    add sp, sp, #80
 ; LSE-NEXT:    ret
 entry:
   %res = atomicrmw add i128* %dst, i128 1 seq_cst
@@ -304,7 +304,7 @@ entry:
 define i8 @test_rmw_nand_8(i8* %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_8:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldrb w8, [x0]
@@ -332,18 +332,18 @@ define i8 @test_rmw_nand_8(i8* %dst)   {
 ; NOLSE-NEXT:    subs w9, w8, w9, uxtb
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB5_1
 ; NOLSE-NEXT:    b .LBB5_5
 ; NOLSE-NEXT:  .LBB5_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_nand_8:
 ; LSE:       // %bb.0: // %entry
-; LSE-NEXT:    sub sp, sp, #32 // =32
+; LSE-NEXT:    sub sp, sp, #32
 ; LSE-NEXT:    .cfi_def_cfa_offset 32
 ; LSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; LSE-NEXT:    ldrb w8, [x0]
@@ -361,13 +361,13 @@ define i8 @test_rmw_nand_8(i8* %dst)   {
 ; LSE-NEXT:    subs w9, w8, w9, uxtb
 ; LSE-NEXT:    cset w9, eq
 ; LSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; LSE-NEXT:    subs w9, w9, #1 // =1
+; LSE-NEXT:    subs w9, w9, #1
 ; LSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; LSE-NEXT:    b.ne .LBB5_1
 ; LSE-NEXT:    b .LBB5_2
 ; LSE-NEXT:  .LBB5_2: // %atomicrmw.end
 ; LSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; LSE-NEXT:    add sp, sp, #32 // =32
+; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
   %res = atomicrmw nand i8* %dst, i8 1 seq_cst
@@ -377,7 +377,7 @@ entry:
 define i16 @test_rmw_nand_16(i16* %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_16:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldrh w8, [x0]
@@ -405,18 +405,18 @@ define i16 @test_rmw_nand_16(i16* %dst)   {
 ; NOLSE-NEXT:    subs w9, w8, w9, uxth
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB6_1
 ; NOLSE-NEXT:    b .LBB6_5
 ; NOLSE-NEXT:  .LBB6_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_nand_16:
 ; LSE:       // %bb.0: // %entry
-; LSE-NEXT:    sub sp, sp, #32 // =32
+; LSE-NEXT:    sub sp, sp, #32
 ; LSE-NEXT:    .cfi_def_cfa_offset 32
 ; LSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; LSE-NEXT:    ldrh w8, [x0]
@@ -434,13 +434,13 @@ define i16 @test_rmw_nand_16(i16* %dst)   {
 ; LSE-NEXT:    subs w9, w8, w9, uxth
 ; LSE-NEXT:    cset w9, eq
 ; LSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; LSE-NEXT:    subs w9, w9, #1 // =1
+; LSE-NEXT:    subs w9, w9, #1
 ; LSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; LSE-NEXT:    b.ne .LBB6_1
 ; LSE-NEXT:    b .LBB6_2
 ; LSE-NEXT:  .LBB6_2: // %atomicrmw.end
 ; LSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; LSE-NEXT:    add sp, sp, #32 // =32
+; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
   %res = atomicrmw nand i16* %dst, i16 1 seq_cst
@@ -450,7 +450,7 @@ entry:
 define i32 @test_rmw_nand_32(i32* %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_32:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldr w8, [x0]
@@ -478,18 +478,18 @@ define i32 @test_rmw_nand_32(i32* %dst)   {
 ; NOLSE-NEXT:    subs w9, w8, w9
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB7_1
 ; NOLSE-NEXT:    b .LBB7_5
 ; NOLSE-NEXT:  .LBB7_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_nand_32:
 ; LSE:       // %bb.0: // %entry
-; LSE-NEXT:    sub sp, sp, #32 // =32
+; LSE-NEXT:    sub sp, sp, #32
 ; LSE-NEXT:    .cfi_def_cfa_offset 32
 ; LSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; LSE-NEXT:    ldr w8, [x0]
@@ -507,13 +507,13 @@ define i32 @test_rmw_nand_32(i32* %dst)   {
 ; LSE-NEXT:    subs w9, w8, w9
 ; LSE-NEXT:    cset w9, eq
 ; LSE-NEXT:    str w8, [sp, #12] // 4-byte Folded Spill
-; LSE-NEXT:    subs w9, w9, #1 // =1
+; LSE-NEXT:    subs w9, w9, #1
 ; LSE-NEXT:    str w8, [sp, #28] // 4-byte Folded Spill
 ; LSE-NEXT:    b.ne .LBB7_1
 ; LSE-NEXT:    b .LBB7_2
 ; LSE-NEXT:  .LBB7_2: // %atomicrmw.end
 ; LSE-NEXT:    ldr w0, [sp, #12] // 4-byte Folded Reload
-; LSE-NEXT:    add sp, sp, #32 // =32
+; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
   %res = atomicrmw nand i32* %dst, i32 1 seq_cst
@@ -523,7 +523,7 @@ entry:
 define i64 @test_rmw_nand_64(i64* %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_64:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldr x8, [x0]
@@ -554,18 +554,18 @@ define i64 @test_rmw_nand_64(i64* %dst)   {
 ; NOLSE-NEXT:    subs x9, x8, x9
 ; NOLSE-NEXT:    cset w9, eq
 ; NOLSE-NEXT:    str x8, [sp, #8] // 8-byte Folded Spill
-; NOLSE-NEXT:    subs w9, w9, #1 // =1
+; NOLSE-NEXT:    subs w9, w9, #1
 ; NOLSE-NEXT:    str x8, [sp, #24] // 8-byte Folded Spill
 ; NOLSE-NEXT:    b.ne .LBB8_1
 ; NOLSE-NEXT:    b .LBB8_5
 ; NOLSE-NEXT:  .LBB8_5: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr x0, [sp, #8] // 8-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #32 // =32
+; NOLSE-NEXT:    add sp, sp, #32
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_nand_64:
 ; LSE:       // %bb.0: // %entry
-; LSE-NEXT:    sub sp, sp, #32 // =32
+; LSE-NEXT:    sub sp, sp, #32
 ; LSE-NEXT:    .cfi_def_cfa_offset 32
 ; LSE-NEXT:    str x0, [sp, #16] // 8-byte Folded Spill
 ; LSE-NEXT:    ldr x8, [x0]
@@ -586,13 +586,13 @@ define i64 @test_rmw_nand_64(i64* %dst)   {
 ; LSE-NEXT:    subs x9, x8, x9
 ; LSE-NEXT:    cset w9, eq
 ; LSE-NEXT:    str x8, [sp, #8] // 8-byte Folded Spill
-; LSE-NEXT:    subs w9, w9, #1 // =1
+; LSE-NEXT:    subs w9, w9, #1
 ; LSE-NEXT:    str x8, [sp, #24] // 8-byte Folded Spill
 ; LSE-NEXT:    b.ne .LBB8_1
 ; LSE-NEXT:    b .LBB8_2
 ; LSE-NEXT:  .LBB8_2: // %atomicrmw.end
 ; LSE-NEXT:    ldr x0, [sp, #8] // 8-byte Folded Reload
-; LSE-NEXT:    add sp, sp, #32 // =32
+; LSE-NEXT:    add sp, sp, #32
 ; LSE-NEXT:    ret
 entry:
   %res = atomicrmw nand i64* %dst, i64 1 seq_cst
@@ -602,7 +602,7 @@ entry:
 define i128 @test_rmw_nand_128(i128* %dst)   {
 ; NOLSE-LABEL: test_rmw_nand_128:
 ; NOLSE:       // %bb.0: // %entry
-; NOLSE-NEXT:    sub sp, sp, #48 // =48
+; NOLSE-NEXT:    sub sp, sp, #48
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 48
 ; NOLSE-NEXT:    str x0, [sp, #24] // 8-byte Folded Spill
 ; NOLSE-NEXT:    ldr x8, [x0, #8]
@@ -654,12 +654,12 @@ define i128 @test_rmw_nand_128(i128* %dst)   {
 ; NOLSE-NEXT:  .LBB9_6: // %atomicrmw.end
 ; NOLSE-NEXT:    ldr x1, [sp, #8] // 8-byte Folded Reload
 ; NOLSE-NEXT:    ldr x0, [sp, #16] // 8-byte Folded Reload
-; NOLSE-NEXT:    add sp, sp, #48 // =48
+; NOLSE-NEXT:    add sp, sp, #48
 ; NOLSE-NEXT:    ret
 ;
 ; LSE-LABEL: test_rmw_nand_128:
 ; LSE:       // %bb.0: // %entry
-; LSE-NEXT:    sub sp, sp, #80 // =80
+; LSE-NEXT:    sub sp, sp, #80
 ; LSE-NEXT:    .cfi_def_cfa_offset 80
 ; LSE-NEXT:    str x0, [sp, #56] // 8-byte Folded Spill
 ; LSE-NEXT:    ldr x8, [x0, #8]
@@ -699,7 +699,7 @@ define i128 @test_rmw_nand_128(i128* %dst)   {
 ; LSE-NEXT:  .LBB9_2: // %atomicrmw.end
 ; LSE-NEXT:    ldr x1, [sp, #40] // 8-byte Folded Reload
 ; LSE-NEXT:    ldr x0, [sp, #48] // 8-byte Folded Reload
-; LSE-NEXT:    add sp, sp, #80 // =80
+; LSE-NEXT:    add sp, sp, #80
 ; LSE-NEXT:    ret
 entry:
   %res = atomicrmw nand i128* %dst, i128 1 seq_cst

diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll b/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
index 52d8ce9bcdb3d..2cc6b484d6b98 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-xchg-fp.ll
@@ -78,7 +78,7 @@ define double @test_rmw_xchg_f64(double* %dst, double %new) {
 define fp128 @test_rmw_xchg_f128(fp128* %dst, fp128 %new) {
 ; NOLSE-LABEL: test_rmw_xchg_f128:
 ; NOLSE:       // %bb.0:
-; NOLSE-NEXT:    sub sp, sp, #32 // =32
+; NOLSE-NEXT:    sub sp, sp, #32
 ; NOLSE-NEXT:    .cfi_def_cfa_offset 32
 ; NOLSE-NEXT:    str q0, [sp, #16]
 ; NOLSE-NEXT:    ldp x9, x8, [sp, #16]
@@ -94,7 +94,7 @@ define fp128 @test_rmw_xchg_f128(fp128* %dst, fp128 %new) {
 ;
 ; LSE-LABEL: test_rmw_xchg_f128:
 ; LSE:       // %bb.0:
-; LSE-NEXT:    sub sp, sp, #32 // =32
+; LSE-NEXT:    sub sp, sp, #32
 ; LSE-NEXT:    .cfi_def_cfa_offset 32
 ; LSE-NEXT:    str q0, [sp, #16]
 ; LSE-NEXT:    ldp x9, x8, [sp, #16]

diff --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
index 3f9be3a283018..1e3e16188adf2 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
@@ -44,14 +44,14 @@ declare i32 @foo() #0
 define i32 @block_split(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: block_split:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    cmp w0, #5 ; =5
+; CHECK-NEXT:    cmp w0, #5
 ; CHECK-NEXT:    b.ne LBB1_1
 ; CHECK-NEXT:    b LBB1_2
 ; CHECK-NEXT:  LBB1_1: ; %lor.lhs.false
 ; CHECK-NEXT:    lsl w8, w1, #1
-; CHECK-NEXT:    cmp w1, #7 ; =7
+; CHECK-NEXT:    cmp w1, #7
 ; CHECK-NEXT:    csinc w8, w8, w1, lt
-; CHECK-NEXT:    cmp w8, #16 ; =16
+; CHECK-NEXT:    cmp w8, #16
 ; CHECK-NEXT:    b.le LBB1_2
 ; CHECK-NEXT:    b LBB1_3
 ; CHECK-NEXT:  LBB1_2: ; %if.then

diff --git a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
index ae7600451bc23..22d9ffe2cc1ac 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll
@@ -6,7 +6,7 @@ define void @split_block_no_fallthrough(i64 %val) #0 {
 ; CHECK-LABEL: split_block_no_fallthrough:
 ; CHECK:       ; %bb.0: ; %bb
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; CHECK-NEXT:    cmn x0, #5 ; =5
+; CHECK-NEXT:    cmn x0, #5
 ; CHECK-NEXT:    b.le LBB0_3
 ; CHECK-NEXT:  ; %bb.1: ; %b3
 ; CHECK-NEXT:    ldr w8, [x8]

diff --git a/llvm/test/CodeGen/AArch64/cgp-usubo.ll b/llvm/test/CodeGen/AArch64/cgp-usubo.ll
index defe500d73068..58f367105ad11 100644
--- a/llvm/test/CodeGen/AArch64/cgp-usubo.ll
+++ b/llvm/test/CodeGen/AArch64/cgp-usubo.ll
@@ -38,7 +38,7 @@ define i1 @usubo_ugt_constant_op0_i8(i8 %x, i8* %p) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
 ; CHECK-NEXT:    mov w9, #42
-; CHECK-NEXT:    cmp w8, #42 // =42
+; CHECK-NEXT:    cmp w8, #42
 ; CHECK-NEXT:    sub w9, w9, w0
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    strb w9, [x1]
@@ -56,7 +56,7 @@ define i1 @usubo_ult_constant_op0_i16(i16 %x, i16* %p) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
 ; CHECK-NEXT:    mov w9, #43
-; CHECK-NEXT:    cmp w8, #43 // =43
+; CHECK-NEXT:    cmp w8, #43
 ; CHECK-NEXT:    sub w9, w9, w0
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    strh w9, [x1]
@@ -73,8 +73,8 @@ define i1 @usubo_ult_constant_op1_i16(i16 %x, i16* %p) nounwind {
 ; CHECK-LABEL: usubo_ult_constant_op1_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
-; CHECK-NEXT:    cmp w8, #44 // =44
-; CHECK-NEXT:    sub w9, w0, #44 // =44
+; CHECK-NEXT:    cmp w8, #44
+; CHECK-NEXT:    sub w9, w0, #44
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    strh w9, [x1]
 ; CHECK-NEXT:    ret
@@ -88,9 +88,9 @@ define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) nounwind {
 ; CHECK-LABEL: usubo_ugt_constant_op1_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
-; CHECK-NEXT:    cmp w8, #45 // =45
+; CHECK-NEXT:    cmp w8, #45
 ; CHECK-NEXT:    cset w8, lo
-; CHECK-NEXT:    sub w9, w0, #45 // =45
+; CHECK-NEXT:    sub w9, w0, #45
 ; CHECK-NEXT:    mov w0, w8
 ; CHECK-NEXT:    strb w9, [x1]
 ; CHECK-NEXT:    ret
@@ -105,8 +105,8 @@ define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) nounwind {
 define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) nounwind {
 ; CHECK-LABEL: usubo_eq_constant1_op1_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
-; CHECK-NEXT:    sub w8, w0, #1 // =1
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    sub w8, w0, #1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    str w8, [x1]
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
index 0d1d3b7ddca55..8fbed8bfdb3fd 100644
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -14,7 +14,7 @@ define i32 @f_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: f_i8_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w8, w1, w2, ge
 ; CHECK-NEXT:    add w0, w8, w0, uxtb
 ; CHECK-NEXT:    ret
@@ -36,7 +36,7 @@ define i32 @f_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: f_i16_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sxth w8, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w8, w1, w2, ge
 ; CHECK-NEXT:    add w0, w8, w0, uxth
 ; CHECK-NEXT:    ret
@@ -57,7 +57,7 @@ B:
 define i64 @f_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: f_i32_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel x8, x1, x2, ge
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret
@@ -79,7 +79,7 @@ define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: g_i8_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w8, w1, w2, lt
 ; CHECK-NEXT:    add w0, w8, w0, uxtb
 ; CHECK-NEXT:    ret
@@ -101,7 +101,7 @@ define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
 ; CHECK-LABEL: g_i16_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sxth w8, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w8, w1, w2, lt
 ; CHECK-NEXT:    add w0, w8, w0, uxth
 ; CHECK-NEXT:    ret
@@ -122,7 +122,7 @@ B:
 define i64 @g_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: g_i32_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel x8, x1, x2, lt
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret
@@ -145,7 +145,7 @@ define i64 @f_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x8, w0
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    csel x8, x1, x2, ge
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret
@@ -169,7 +169,7 @@ define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x8, w0
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    csel x8, x1, x2, lt
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index f58fb55c44054..043e1363eba0b 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -71,7 +71,7 @@ define i64 @sign_i64(i64 %a) {
 define i64 @not_sign_i64(i64 %a) {
 ; CHECK-LABEL: not_sign_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    cneg x0, x8, le
 ; CHECK-NEXT:    ret
@@ -172,7 +172,7 @@ define <4 x i32> @sign_4xi32(<4 x i32> %a) {
 define <4 x i32> @sign_4xi32_multi_use(<4 x i32> %a) {
 ; CHECK-LABEL: sign_4xi32_multi_use:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w30, -16
@@ -185,7 +185,7 @@ define <4 x i32> @sign_4xi32_multi_use(<4 x i32> %a) {
 ; CHECK-NEXT:    bl use_4xi1
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
   %c = icmp sgt <4 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1>
   %res = select <4 x i1> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>

diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index fca92a409f0d6..061d0f35a5d76 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -14,7 +14,7 @@ define i32 @combine_gt_ge_10() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #10 // =10
+; CHECK-NEXT:    cmp w8, #10
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
 ; CHECK-NEXT:    b.le .LBB0_3
@@ -79,7 +79,7 @@ define i32 @combine_gt_lt_5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #5 // =5
+; CHECK-NEXT:    cmp w8, #5
 ; CHECK-NEXT:    b.le .LBB1_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
@@ -146,7 +146,7 @@ define i32 @combine_lt_ge_5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #5 // =5
+; CHECK-NEXT:    cmp w8, #5
 ; CHECK-NEXT:    adrp x8, :got:b
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:b]
 ; CHECK-NEXT:    b.ge .LBB2_3
@@ -211,7 +211,7 @@ define i32 @combine_lt_gt_5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #5 // =5
+; CHECK-NEXT:    cmp w8, #5
 ; CHECK-NEXT:    b.ge .LBB3_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
@@ -278,7 +278,7 @@ define i32 @combine_gt_lt_n5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmn w8, #5 // =5
+; CHECK-NEXT:    cmn w8, #5
 ; CHECK-NEXT:    b.le .LBB4_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
@@ -345,7 +345,7 @@ define i32 @combine_lt_gt_n5() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmn w8, #5 // =5
+; CHECK-NEXT:    cmn w8, #5
 ; CHECK-NEXT:    b.ge .LBB5_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
@@ -427,17 +427,17 @@ define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdC
 ; CHECK-NEXT:    ldr x19, [x0]
 ; CHECK-NEXT:    mov w20, #24
 ; CHECK-NEXT:    adrp x22, glob
-; CHECK-NEXT:    add x21, x19, #2 // =2
+; CHECK-NEXT:    add x21, x19, #2
 ; CHECK-NEXT:  .LBB6_1: // %land.rhs
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldr x8, [x20]
-; CHECK-NEXT:    cmp x8, #1 // =1
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    b.lt .LBB6_3
 ; CHECK-NEXT:  // %bb.2: // %while.body
 ; CHECK-NEXT:    // in Loop: Header=BB6_1 Depth=1
 ; CHECK-NEXT:    ldr x0, [x22, :lo12:glob]
 ; CHECK-NEXT:    bl Update
-; CHECK-NEXT:    sub x21, x21, #2 // =2
+; CHECK-NEXT:    sub x21, x21, #2
 ; CHECK-NEXT:    cmp x19, x21
 ; CHECK-NEXT:    b.lt .LBB6_1
 ; CHECK-NEXT:  .LBB6_3: // %while.end
@@ -482,19 +482,19 @@ define i32 @do_nothing_if_resultant_opcodes_would_differ() #0 {
 ; CHECK-NEXT:    adrp x19, :got:a
 ; CHECK-NEXT:    ldr x19, [x19, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x19]
-; CHECK-NEXT:    cmn w8, #2 // =2
+; CHECK-NEXT:    cmn w8, #2
 ; CHECK-NEXT:    b.gt .LBB7_4
 ; CHECK-NEXT:  // %bb.1: // %while.body.preheader
-; CHECK-NEXT:    sub w20, w8, #1 // =1
+; CHECK-NEXT:    sub w20, w8, #1
 ; CHECK-NEXT:  .LBB7_2: // %while.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    bl do_something
-; CHECK-NEXT:    adds w20, w20, #1 // =1
+; CHECK-NEXT:    adds w20, w20, #1
 ; CHECK-NEXT:    b.mi .LBB7_2
 ; CHECK-NEXT:  // %bb.3: // %while.cond.while.end_crit_edge
 ; CHECK-NEXT:    ldr w8, [x19]
 ; CHECK-NEXT:  .LBB7_4: // %while.end
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    b.gt .LBB7_7
 ; CHECK-NEXT:  // %bb.5: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
@@ -562,20 +562,20 @@ define i32 @do_nothing_if_compares_can_not_be_adjusted_to_each_other() #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    b.gt .LBB8_3
 ; CHECK-NEXT:  // %bb.1: // %while.body.preheader
-; CHECK-NEXT:    sub w19, w8, #1 // =1
+; CHECK-NEXT:    sub w19, w8, #1
 ; CHECK-NEXT:  .LBB8_2: // %while.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    bl do_something
-; CHECK-NEXT:    adds w19, w19, #1 // =1
+; CHECK-NEXT:    adds w19, w19, #1
 ; CHECK-NEXT:    b.mi .LBB8_2
 ; CHECK-NEXT:  .LBB8_3: // %while.end
 ; CHECK-NEXT:    adrp x8, :got:c
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:c]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmn w8, #2 // =2
+; CHECK-NEXT:    cmn w8, #2
 ; CHECK-NEXT:    b.lt .LBB8_6
 ; CHECK-NEXT:  // %bb.4: // %land.lhs.true
 ; CHECK-NEXT:    adrp x8, :got:b
@@ -647,7 +647,7 @@ define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) {
 ; CHECK-NEXT:    .cfi_offset w19, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    .cfi_offset b8, -32
-; CHECK-NEXT:    cmp w0, #2 // =2
+; CHECK-NEXT:    cmp w0, #2
 ; CHECK-NEXT:    b.lt .LBB9_3
 ; CHECK-NEXT:  // %bb.1: // %land.lhs.true
 ; CHECK-NEXT:    ldr x8, [x1, #8]
@@ -661,7 +661,7 @@ define i32 @fcmpri(i32 %argc, i8** nocapture readonly %argv) {
 ; CHECK-NEXT:    mov w19, w0
 ; CHECK-NEXT:    mov w0, #-1
 ; CHECK-NEXT:    bl yoo
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    cinc w0, w19, gt
 ; CHECK-NEXT:    mov w1, #2
 ; CHECK-NEXT:    mov v8.16b, v0.16b
@@ -720,7 +720,7 @@ define void @cmp_shifted(i32 %in, i32 %lhs, i32 %rhs) {
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #42
 ; CHECK-NEXT:    csinc w8, w8, wzr, gt
 ; CHECK-NEXT:    cmp w0, #2, lsl #12 // =8192
@@ -757,12 +757,12 @@ define i32 @combine_gt_ge_sel(i64 %v, i64* %p) #0 {
 ; CHECK-NEXT:    adrp x8, :got:a
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:a]
 ; CHECK-NEXT:    ldr w8, [x8]
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel x9, x0, xzr, gt
 ; CHECK-NEXT:    str x9, [x1]
 ; CHECK-NEXT:    b.le .LBB11_2
 ; CHECK-NEXT:  // %bb.1: // %lor.lhs.false
-; CHECK-NEXT:    cmp w8, #2 // =2
+; CHECK-NEXT:    cmp w8, #2
 ; CHECK-NEXT:    b.ge .LBB11_4
 ; CHECK-NEXT:    b .LBB11_6
 ; CHECK-NEXT:  .LBB11_2: // %land.lhs.true

diff --git a/llvm/test/CodeGen/AArch64/extract-bits.ll b/llvm/test/CodeGen/AArch64/extract-bits.ll
index 6bec84a14c4e2..92e2ad227d715 100644
--- a/llvm/test/CodeGen/AArch64/extract-bits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-bits.ll
@@ -24,7 +24,7 @@ define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
 ; CHECK-NEXT:    lsr w8, w0, w1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %shifted = lshr i32 %val, %numskipbits
@@ -40,7 +40,7 @@ define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) n
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
 ; CHECK-NEXT:    asr w8, w0, w1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %shifted = ashr i32 %val, %numskipbits
@@ -56,7 +56,7 @@ define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
 ; CHECK-NEXT:    lsr w8, w0, w1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %skip = zext i8 %numskipbits to i32
@@ -74,7 +74,7 @@ define i32 @bextr32_a2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    lsr w8, w8, w1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
@@ -92,7 +92,7 @@ define i32 @bextr32_a3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroe
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    lsr w8, w8, w1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
@@ -112,7 +112,7 @@ define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits)
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
 ; CHECK-NEXT:    lsr w8, w0, w1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w8, w9
 ; CHECK-NEXT:    ret
   %shifted = lshr i32 %val, %numskipbits
@@ -130,7 +130,7 @@ define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl x9, x9, x2
 ; CHECK-NEXT:    lsr x8, x0, x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
@@ -146,7 +146,7 @@ define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) n
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl x9, x9, x2
 ; CHECK-NEXT:    asr x8, x0, x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
   %shifted = ashr i64 %val, %numskipbits
@@ -164,7 +164,7 @@ define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %
 ; CHECK-NEXT:    lsl x9, x9, x2
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsr x8, x0, x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
   %skip = zext i8 %numskipbits to i64
@@ -182,7 +182,7 @@ define i64 @bextr64_a2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl x9, x9, x2
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    lsr x8, x8, x1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
@@ -202,7 +202,7 @@ define i64 @bextr64_a3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroe
 ; CHECK-NEXT:    // kill: def $w2 killed $w2 def $x2
 ; CHECK-NEXT:    lsl x9, x9, x2
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    lsr x8, x8, x1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
@@ -222,7 +222,7 @@ define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits)
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl x9, x9, x2
 ; CHECK-NEXT:    lsr x8, x0, x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    and x0, x8, x9
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
@@ -241,7 +241,7 @@ define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl x9, x9, x2
 ; CHECK-NEXT:    lsr x8, x0, x1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
@@ -259,7 +259,7 @@ define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
 ; CHECK-NEXT:    lsr x8, x0, x1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
@@ -278,7 +278,7 @@ define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w2
 ; CHECK-NEXT:    lsr x8, x0, x1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %shifted = lshr i64 %val, %numskipbits
@@ -974,7 +974,7 @@ define void @pr38938(i32* %a0, i64* %a1) nounwind {
 ; CHECK-NEXT:    ubfx x8, x8, #21, #10
 ; CHECK-NEXT:    lsl x8, x8, #2
 ; CHECK-NEXT:    ldr w9, [x0, x8]
-; CHECK-NEXT:    add w9, w9, #1 // =1
+; CHECK-NEXT:    add w9, w9, #1
 ; CHECK-NEXT:    str w9, [x0, x8]
 ; CHECK-NEXT:    ret
   %tmp = load i64, i64* %a1, align 8

diff --git a/llvm/test/CodeGen/AArch64/extract-lowbits.ll b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
index 22c699dc1a121..b58e9cbd9be3a 100644
--- a/llvm/test/CodeGen/AArch64/extract-lowbits.ll
+++ b/llvm/test/CodeGen/AArch64/extract-lowbits.ll
@@ -23,7 +23,7 @@ define i32 @bzhi32_a0(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsl w8, w8, w1
-; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    sub w8, w8, #1
 ; CHECK-NEXT:    and w0, w8, w0
 ; CHECK-NEXT:    ret
   %onebit = shl i32 1, %numlowbits
@@ -37,7 +37,7 @@ define i32 @bzhi32_a1_indexzext(i32 %val, i8 zeroext %numlowbits) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsl w8, w8, w1
-; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    sub w8, w8, #1
 ; CHECK-NEXT:    and w0, w8, w0
 ; CHECK-NEXT:    ret
   %conv = zext i8 %numlowbits to i32
@@ -53,7 +53,7 @@ define i32 @bzhi32_a2_load(i32* %w, i32 %numlowbits) nounwind {
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %val = load i32, i32* %w
@@ -69,7 +69,7 @@ define i32 @bzhi32_a3_load_indexzext(i32* %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    ldr w8, [x0]
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl w9, w9, w1
-; CHECK-NEXT:    sub w9, w9, #1 // =1
+; CHECK-NEXT:    sub w9, w9, #1
 ; CHECK-NEXT:    and w0, w9, w8
 ; CHECK-NEXT:    ret
   %val = load i32, i32* %w
@@ -85,7 +85,7 @@ define i32 @bzhi32_a4_commutative(i32 %val, i32 %numlowbits) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsl w8, w8, w1
-; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    sub w8, w8, #1
 ; CHECK-NEXT:    and w0, w0, w8
 ; CHECK-NEXT:    ret
   %onebit = shl i32 1, %numlowbits
@@ -101,7 +101,7 @@ define i64 @bzhi64_a0(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsl x8, x8, x1
-; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    and x0, x8, x0
 ; CHECK-NEXT:    ret
   %onebit = shl i64 1, %numlowbits
@@ -116,7 +116,7 @@ define i64 @bzhi64_a1_indexzext(i64 %val, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsl x8, x8, x1
-; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    and x0, x8, x0
 ; CHECK-NEXT:    ret
   %conv = zext i8 %numlowbits to i64
@@ -132,7 +132,7 @@ define i64 @bzhi64_a2_load(i64* %w, i64 %numlowbits) nounwind {
 ; CHECK-NEXT:    ldr x8, [x0]
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    lsl x9, x9, x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
   %val = load i64, i64* %w
@@ -149,7 +149,7 @@ define i64 @bzhi64_a3_load_indexzext(i64* %w, i8 zeroext %numlowbits) nounwind {
 ; CHECK-NEXT:    mov w9, #1
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsl x9, x9, x1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    and x0, x9, x8
 ; CHECK-NEXT:    ret
   %val = load i64, i64* %w
@@ -165,7 +165,7 @@ define i64 @bzhi64_a4_commutative(i64 %val, i64 %numlowbits) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsl x8, x8, x1
-; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    and x0, x0, x8
 ; CHECK-NEXT:    ret
   %onebit = shl i64 1, %numlowbits

diff --git a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
index 29ee9c4c24e88..2facd7337e9a8 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll
@@ -138,9 +138,9 @@ bb4:
 define i64 @test_or_unpredictable(i32 %a, i32 %b) {
 ; CHECK-LABEL: test_or_unpredictable:
 ; CHECK:       ; %bb.0: ; %bb1
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w8, eq
-; CHECK-NEXT:    cmp w1, #0 ; =0
+; CHECK-NEXT:    cmp w1, #0
 ; CHECK-NEXT:    cset w9, eq
 ; CHECK-NEXT:    orr w8, w8, w9
 ; CHECK-NEXT:    mov x0, xzr
@@ -171,9 +171,9 @@ bb4:
 define i64 @test_and_unpredictable(i32 %a, i32 %b) {
 ; CHECK-LABEL: test_and_unpredictable:
 ; CHECK:       ; %bb.0: ; %bb1
-; CHECK-NEXT:    cmp w0, #0 ; =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cset w8, ne
-; CHECK-NEXT:    cmp w1, #0 ; =0
+; CHECK-NEXT:    cmp w1, #0
 ; CHECK-NEXT:    cset w9, ne
 ; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    mov x0, xzr

diff --git a/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll b/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
index d36b92b8c26b8..539fe7e7d3c83 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
@@ -14,8 +14,8 @@ define i32 @sdiv_i32_exact(i32 %a) {
 define i32 @sdiv_i32_pos(i32 %a) {
 ; CHECK-LABEL: sdiv_i32_pos:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #7 // =7
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #7
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    asr w0, w8, #3
 ; CHECK-NEXT:    ret
@@ -26,8 +26,8 @@ define i32 @sdiv_i32_pos(i32 %a) {
 define i32 @sdiv_i32_neg(i32 %a) {
 ; CHECK-LABEL: sdiv_i32_neg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #7 // =7
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #7
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    neg w0, w8, asr #3
 ; CHECK-NEXT:    ret
@@ -47,8 +47,8 @@ define i64 @sdiv_i64_exact(i64 %a) {
 define i64 @sdiv_i64_pos(i64 %a) {
 ; CHECK-LABEL: sdiv_i64_pos:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #15 // =15
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    add x8, x0, #15
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x8, x8, x0, lt
 ; CHECK-NEXT:    asr x0, x8, #4
 ; CHECK-NEXT:    ret
@@ -59,8 +59,8 @@ define i64 @sdiv_i64_pos(i64 %a) {
 define i64 @sdiv_i64_neg(i64 %a) {
 ; CHECK-LABEL: sdiv_i64_neg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #15 // =15
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    add x8, x0, #15
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x8, x8, x0, lt
 ; CHECK-NEXT:    neg x0, x8, asr #4
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
index 2f194a5746100..dd097be8b40d2 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -272,7 +272,7 @@ declare <4 x i32> @llvm.fptosi.sat.v4f128.v4i32 (<4 x fp128>)
 define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v1f128_v1i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
@@ -284,7 +284,7 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    mov w19, w0
 ; CHECK-NEXT:    bl __fixtfsi
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    mov w8, #-2147483648
 ; CHECK-NEXT:    csel w19, w8, w0, lt
 ; CHECK-NEXT:    adrp x8, .LCPI14_1
@@ -292,16 +292,16 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #2147483647
 ; CHECK-NEXT:    csel w19, w8, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
     %x = call <1 x i32> @llvm.fptosi.sat.v1f128.v1i32(<1 x fp128> %f)
     ret <1 x i32> %x
@@ -310,7 +310,7 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v2f128_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #112 // =112
+; CHECK-NEXT:    sub sp, sp, #112
 ; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
@@ -333,20 +333,20 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI15_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI15_1]
 ; CHECK-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    mov w20, #-2147483648
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w21, #2147483647
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w22, wzr, w19, ne
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -354,15 +354,15 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixtfsi
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    fmov s0, w8
 ; CHECK-NEXT:    mov v0.s[1], w22
@@ -370,7 +370,7 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-NEXT:    ldp x22, x21, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #64] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT:    add sp, sp, #112 // =112
+; CHECK-NEXT:    add sp, sp, #112
 ; CHECK-NEXT:    ret
     %x = call <2 x i32> @llvm.fptosi.sat.v2f128.v2i32(<2 x fp128> %f)
     ret <2 x i32> %x
@@ -379,7 +379,7 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v3f128_v3i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #128 // =128
+; CHECK-NEXT:    sub sp, sp, #128
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
@@ -403,20 +403,20 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI16_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI16_1]
 ; CHECK-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    mov w20, #-2147483648
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w21, #2147483647
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w22, wzr, w19, ne
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -424,15 +424,15 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixtfsi
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    fmov s0, w8
 ; CHECK-NEXT:    mov v0.s[1], w22
@@ -445,22 +445,22 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixtfsi
 ; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    ldp x20, x19, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x22, x21, [sp, #96] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #80] // 8-byte Folded Reload
 ; CHECK-NEXT:    mov v0.s[2], w8
-; CHECK-NEXT:    add sp, sp, #128 // =128
+; CHECK-NEXT:    add sp, sp, #128
 ; CHECK-NEXT:    ret
     %x = call <3 x i32> @llvm.fptosi.sat.v3f128.v3i32(<3 x fp128> %f)
     ret <3 x i32> %x
@@ -469,7 +469,7 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-LABEL: test_signed_v4f128_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #144 // =144
+; CHECK-NEXT:    sub sp, sp, #144
 ; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x22, x21, [sp, #112] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #128] // 16-byte Folded Spill
@@ -494,19 +494,19 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI17_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI17_1]
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    mov w20, #-2147483648
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w21, #2147483647
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
 ; CHECK-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w22, wzr, w19, ne
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -514,15 +514,15 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixtfsi
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    fmov s0, w8
 ; CHECK-NEXT:    mov v0.s[1], w22
@@ -535,16 +535,16 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixtfsi
 ; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
 ; CHECK-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    mov v0.s[2], w8
 ; CHECK-NEXT:    str q0, [sp, #48] // 16-byte Folded Spill
@@ -555,22 +555,22 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixtfsi
 ; CHECK-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, w20, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w19, w21, w19, gt
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    bl __unordtf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    csel w8, wzr, w19, ne
 ; CHECK-NEXT:    ldp x20, x19, [sp, #128] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldp x22, x21, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #96] // 8-byte Folded Reload
 ; CHECK-NEXT:    mov v0.s[3], w8
-; CHECK-NEXT:    add sp, sp, #144 // =144
+; CHECK-NEXT:    add sp, sp, #144
 ; CHECK-NEXT:    ret
     %x = call <4 x i32> @llvm.fptosi.sat.v4f128.v4i32(<4 x fp128> %f)
     ret <4 x i32> %x
@@ -1004,7 +1004,7 @@ define <2 x i64> @test_signed_v2f32_v2i64(<2 x float> %f) {
 define <2 x i100> @test_signed_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-LABEL: test_signed_v2f32_v2i100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #80 // =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1062,7 +1062,7 @@ define <2 x i100> @test_signed_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #80 // =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
     %x = call <2 x i100> @llvm.fptosi.sat.v2f32.v2i100(<2 x float> %f)
     ret <2 x i100> %x
@@ -1071,7 +1071,7 @@ define <2 x i100> @test_signed_v2f32_v2i100(<2 x float> %f) {
 define <2 x i128> @test_signed_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-LABEL: test_signed_v2f32_v2i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #80 // =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1129,7 +1129,7 @@ define <2 x i128> @test_signed_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #80 // =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
     %x = call <2 x i128> @llvm.fptosi.sat.v2f32.v2i128(<2 x float> %f)
     ret <2 x i128> %x
@@ -1337,7 +1337,7 @@ define <2 x i64> @test_signed_v2f64_v2i64(<2 x double> %f) {
 define <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-LABEL: test_signed_v2f64_v2i100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #80 // =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1394,7 +1394,7 @@ define <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #80 // =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
     %x = call <2 x i100> @llvm.fptosi.sat.v2f64.v2i100(<2 x double> %f)
     ret <2 x i100> %x
@@ -1403,7 +1403,7 @@ define <2 x i100> @test_signed_v2f64_v2i100(<2 x double> %f) {
 define <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-LABEL: test_signed_v2f64_v2i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #80 // =80
+; CHECK-NEXT:    sub sp, sp, #80
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1460,7 +1460,7 @@ define <2 x i128> @test_signed_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #80 // =80
+; CHECK-NEXT:    add sp, sp, #80
 ; CHECK-NEXT:    ret
     %x = call <2 x i128> @llvm.fptosi.sat.v2f64.v2i128(<2 x double> %f)
     ret <2 x i128> %x
@@ -1808,7 +1808,7 @@ define <4 x i64> @test_signed_v4f16_v4i64(<4 x half> %f) {
 define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-LABEL: test_signed_v4f16_v4i100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #112 // =112
+; CHECK-NEXT:    sub sp, sp, #112
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -1907,7 +1907,7 @@ define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #112 // =112
+; CHECK-NEXT:    add sp, sp, #112
 ; CHECK-NEXT:    ret
     %x = call <4 x i100> @llvm.fptosi.sat.v4f16.v4i100(<4 x half> %f)
     ret <4 x i100> %x
@@ -1916,7 +1916,7 @@ define <4 x i100> @test_signed_v4f16_v4i100(<4 x half> %f) {
 define <4 x i128> @test_signed_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-LABEL: test_signed_v4f16_v4i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #112 // =112
+; CHECK-NEXT:    sub sp, sp, #112
 ; CHECK-NEXT:    str d10, [sp, #16] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp d9, d8, [sp, #24] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #40] // 8-byte Folded Spill
@@ -2015,7 +2015,7 @@ define <4 x i128> @test_signed_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-NEXT:    fmov d0, x9
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #112 // =112
+; CHECK-NEXT:    add sp, sp, #112
 ; CHECK-NEXT:    ret
     %x = call <4 x i128> @llvm.fptosi.sat.v4f16.v4i128(<4 x half> %f)
     ret <4 x i128> %x

diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index 05bb87ba00e86..f0e934c5c0fd6 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -272,7 +272,7 @@ declare <4 x i32> @llvm.fptoui.sat.v4f128.v4i32 (<4 x fp128>)
 define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v1f128_v1i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    stp x30, x19, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    .cfi_offset w19, -8
@@ -287,14 +287,14 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI14_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI14_1]
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    ldp x30, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
     %x = call <1 x i32> @llvm.fptoui.sat.v1f128.v1i32(<1 x fp128> %f)
     ret <1 x i32> %x
@@ -303,7 +303,7 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v2f128_v2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #96 // =96
+; CHECK-NEXT:    sub sp, sp, #96
 ; CHECK-NEXT:    str x30, [sp, #64] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #80] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 96
@@ -323,13 +323,13 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI15_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI15_1]
 ; CHECK-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w20, w19, wzr, le
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -337,17 +337,17 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixunstfsi
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    fmov s0, w8
 ; CHECK-NEXT:    mov v0.s[1], w20
 ; CHECK-NEXT:    ldp x20, x19, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #64] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT:    add sp, sp, #96 // =96
+; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
     %x = call <2 x i32> @llvm.fptoui.sat.v2f128.v2i32(<2 x fp128> %f)
     ret <2 x i32> %x
@@ -356,7 +356,7 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v3f128_v3i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #112 // =112
+; CHECK-NEXT:    sub sp, sp, #112
 ; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #96] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 112
@@ -377,12 +377,12 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI16_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI16_1]
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w20, w19, wzr, le
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -390,10 +390,10 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixunstfsi
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    fmov s0, w8
 ; CHECK-NEXT:    mov v0.s[1], w20
@@ -406,16 +406,16 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixunstfsi
 ; CHECK-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    ldp x20, x19, [sp, #96] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #80] // 8-byte Folded Reload
 ; CHECK-NEXT:    mov v0.s[2], w8
-; CHECK-NEXT:    add sp, sp, #112 // =112
+; CHECK-NEXT:    add sp, sp, #112
 ; CHECK-NEXT:    ret
     %x = call <3 x i32> @llvm.fptoui.sat.v3f128.v3i32(<3 x fp128> %f)
     ret <3 x i32> %x
@@ -424,7 +424,7 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-LABEL: test_unsigned_v4f128_v4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #128 // =128
+; CHECK-NEXT:    sub sp, sp, #128
 ; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #112] // 16-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 128
@@ -446,13 +446,13 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    adrp x8, .LCPI17_1
 ; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI17_1]
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    str q1, [sp, #48] // 16-byte Folded Spill
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w20, w19, wzr, le
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -460,10 +460,10 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixunstfsi
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    fmov s0, w8
 ; CHECK-NEXT:    mov v0.s[1], w20
@@ -475,11 +475,11 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    mov w19, w0
 ; CHECK-NEXT:    bl __fixunstfsi
 ; CHECK-NEXT:    ldp q0, q1, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    mov v0.s[2], w8
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
@@ -490,16 +490,16 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-NEXT:    bl __fixunstfsi
 ; CHECK-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr q1, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT:    cmp w19, #0 // =0
+; CHECK-NEXT:    cmp w19, #0
 ; CHECK-NEXT:    csel w19, wzr, w0, lt
 ; CHECK-NEXT:    bl __gttf2
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    csinv w8, w19, wzr, le
 ; CHECK-NEXT:    ldp x20, x19, [sp, #112] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #96] // 8-byte Folded Reload
 ; CHECK-NEXT:    mov v0.s[3], w8
-; CHECK-NEXT:    add sp, sp, #128 // =128
+; CHECK-NEXT:    add sp, sp, #128
 ; CHECK-NEXT:    ret
     %x = call <4 x i32> @llvm.fptoui.sat.v4f128.v4i32(<4 x fp128> %f)
     ret <4 x i32> %x
@@ -902,7 +902,7 @@ define <2 x i64> @test_unsigned_v2f32_v2i64(<2 x float> %f) {
 define <2 x i100> @test_unsigned_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v2f32_v2i100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64 // =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -945,7 +945,7 @@ define <2 x i100> @test_unsigned_v2f32_v2i100(<2 x float> %f) {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #64 // =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
     %x = call <2 x i100> @llvm.fptoui.sat.v2f32.v2i100(<2 x float> %f)
     ret <2 x i100> %x
@@ -954,7 +954,7 @@ define <2 x i100> @test_unsigned_v2f32_v2i100(<2 x float> %f) {
 define <2 x i128> @test_unsigned_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-LABEL: test_unsigned_v2f32_v2i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64 // =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -995,7 +995,7 @@ define <2 x i128> @test_unsigned_v2f32_v2i128(<2 x float> %f) {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #64 // =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
     %x = call <2 x i128> @llvm.fptoui.sat.v2f32.v2i128(<2 x float> %f)
     ret <2 x i128> %x
@@ -1174,7 +1174,7 @@ define <2 x i64> @test_unsigned_v2f64_v2i64(<2 x double> %f) {
 define <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-LABEL: test_unsigned_v2f64_v2i100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64 // =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x21, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -1216,7 +1216,7 @@ define <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #64 // =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
     %x = call <2 x i100> @llvm.fptoui.sat.v2f64.v2i100(<2 x double> %f)
     ret <2 x i100> %x
@@ -1225,7 +1225,7 @@ define <2 x i100> @test_unsigned_v2f64_v2i100(<2 x double> %f) {
 define <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-LABEL: test_unsigned_v2f64_v2i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64 // =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x20, x19, [sp, #48] // 16-byte Folded Spill
@@ -1265,7 +1265,7 @@ define <2 x i128> @test_unsigned_v2f64_v2i128(<2 x double> %f) {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #64 // =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
     %x = call <2 x i128> @llvm.fptoui.sat.v2f64.v2i128(<2 x double> %f)
     ret <2 x i128> %x
@@ -1558,7 +1558,7 @@ define <4 x i64> @test_unsigned_v4f16_v4i64(<4 x half> %f) {
 define <4 x i100> @test_unsigned_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v4f16_v4i100:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #96 // =96
+; CHECK-NEXT:    sub sp, sp, #96
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x30, x25, [sp, #32] // 16-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
@@ -1636,7 +1636,7 @@ define <4 x i100> @test_unsigned_v4f16_v4i100(<4 x half> %f) {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #96 // =96
+; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
     %x = call <4 x i100> @llvm.fptoui.sat.v4f16.v4i100(<4 x half> %f)
     ret <4 x i100> %x
@@ -1645,7 +1645,7 @@ define <4 x i100> @test_unsigned_v4f16_v4i100(<4 x half> %f) {
 define <4 x i128> @test_unsigned_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-LABEL: test_unsigned_v4f16_v4i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #96 // =96
+; CHECK-NEXT:    sub sp, sp, #96
 ; CHECK-NEXT:    stp d9, d8, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
 ; CHECK-NEXT:    stp x24, x23, [sp, #48] // 16-byte Folded Spill
@@ -1721,7 +1721,7 @@ define <4 x i128> @test_unsigned_v4f16_v4i128(<4 x half> %f) {
 ; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    mov v0.d[1], x1
 ; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    add sp, sp, #96 // =96
+; CHECK-NEXT:    add sp, sp, #96
 ; CHECK-NEXT:    ret
     %x = call <4 x i128> @llvm.fptoui.sat.v4f16.v4i128(<4 x half> %f)
     ret <4 x i128> %x

diff --git a/llvm/test/CodeGen/AArch64/funnel-shift.ll b/llvm/test/CodeGen/AArch64/funnel-shift.ll
index 18a192084fdfc..7e3b284d0b595 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift.ll
@@ -193,7 +193,7 @@ define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) {
 ; CHECK-NEXT:    lsr x8, x8, #5
 ; CHECK-NEXT:    msub w8, w8, w9, w2
 ; CHECK-NEXT:    lsl x10, x1, #27
-; CHECK-NEXT:    add w8, w8, #27 // =27
+; CHECK-NEXT:    add w8, w8, #27
 ; CHECK-NEXT:    lsr x9, x10, x8
 ; CHECK-NEXT:    mvn w8, w8
 ; CHECK-NEXT:    lsl x10, x0, #1

diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 670f198fd0d3b..351eeaa826143 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -301,7 +301,7 @@ define i1 @scalar_i32_x_is_const2_eq(i32 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #1
 ; CHECK-NEXT:    lsr w8, w8, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %t0 = lshr i32 1, %y
@@ -328,7 +328,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    lsr w8, w8, w1
 ; CHECK-NEXT:    and w8, w8, w0
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %t0 = lshr i8 128, %y

diff --git a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 00bf33360871d..32a62453202f4 100644
--- a/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/AArch64/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -339,7 +339,7 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    lsl w8, w8, w1
 ; CHECK-NEXT:    and w8, w8, w0
 ; CHECK-NEXT:    and w8, w8, #0x80
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %t0 = shl i8 128, %y

diff --git a/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll b/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
index 3a2bf36450070..f3447b18ec3db 100644
--- a/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
+++ b/llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
@@ -38,11 +38,11 @@ define void @test3() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, x
 ; CHECK-NEXT:    add x8, x8, :lo12:x
-; CHECK-NEXT:    add x8, x8, #512 // =512
+; CHECK-NEXT:    add x8, x8, #512
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    adrp x10, y
 ; CHECK-NEXT:    add x10, x10, :lo12:y
-; CHECK-NEXT:    add x10, x10, #512 // =512
+; CHECK-NEXT:    add x10, x10, #512
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
   %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 512) to i128*)
@@ -70,11 +70,11 @@ define void @test5() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, x
 ; CHECK-NEXT:    add x8, x8, :lo12:x
-; CHECK-NEXT:    sub x8, x8, #520 // =520
+; CHECK-NEXT:    sub x8, x8, #520
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    adrp x10, y
 ; CHECK-NEXT:    add x10, x10, :lo12:y
-; CHECK-NEXT:    sub x10, x10, #520 // =520
+; CHECK-NEXT:    sub x10, x10, #520
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
   %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
@@ -87,11 +87,11 @@ define void @test6() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, x
 ; CHECK-NEXT:    add x8, x8, :lo12:x
-; CHECK-NEXT:    sub x8, x8, #520 // =520
+; CHECK-NEXT:    sub x8, x8, #520
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    adrp x10, y
 ; CHECK-NEXT:    add x10, x10, :lo12:y
-; CHECK-NEXT:    sub x10, x10, #520 // =520
+; CHECK-NEXT:    sub x10, x10, #520
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
   %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
@@ -104,11 +104,11 @@ define void @test7() {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, x
 ; CHECK-NEXT:    add x8, x8, :lo12:x
-; CHECK-NEXT:    add x8, x8, #503 // =503
+; CHECK-NEXT:    add x8, x8, #503
 ; CHECK-NEXT:    ldp x8, x9, [x8]
 ; CHECK-NEXT:    adrp x10, y
 ; CHECK-NEXT:    add x10, x10, :lo12:y
-; CHECK-NEXT:    add x10, x10, #503 // =503
+; CHECK-NEXT:    add x10, x10, #503
 ; CHECK-NEXT:    stp x8, x9, [x10]
 ; CHECK-NEXT:    ret
   %tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 503) to i128*)

diff --git a/llvm/test/CodeGen/AArch64/implicit-null-check.ll b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
index 39f04e9662822..6ba87f5e9543e 100644
--- a/llvm/test/CodeGen/AArch64/implicit-null-check.ll
+++ b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
@@ -284,7 +284,7 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
 ; CHECK-NEXT:  // %bb.1: // %not_null
 ; CHECK-NEXT:    add w9, w0, w1
 ; CHECK-NEXT:    add w8, w9, w8
-; CHECK-NEXT:    add w0, w8, #4 // =4
+; CHECK-NEXT:    add w0, w8, #4
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB11_2:
 ; CHECK-NEXT:    mov w0, #42

diff --git a/llvm/test/CodeGen/AArch64/inc-of-add.ll b/llvm/test/CodeGen/AArch64/inc-of-add.ll
index 5c52fc398f119..b5ce2ab5d409f 100644
--- a/llvm/test/CodeGen/AArch64/inc-of-add.ll
+++ b/llvm/test/CodeGen/AArch64/inc-of-add.ll
@@ -10,7 +10,7 @@ define i8 @scalar_i8(i8 %x, i8 %y) nounwind {
 ; CHECK-LABEL: scalar_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    add w0, w8, #1 // =1
+; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
   %t0 = add i8 %x, 1
   %t1 = add i8 %y, %t0
@@ -21,7 +21,7 @@ define i16 @scalar_i16(i16 %x, i16 %y) nounwind {
 ; CHECK-LABEL: scalar_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    add w0, w8, #1 // =1
+; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
   %t0 = add i16 %x, 1
   %t1 = add i16 %y, %t0
@@ -32,7 +32,7 @@ define i32 @scalar_i32(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: scalar_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    add w0, w8, #1 // =1
+; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
   %t0 = add i32 %x, 1
   %t1 = add i32 %y, %t0
@@ -43,7 +43,7 @@ define i64 @scalar_i64(i64 %x, i64 %y) nounwind {
 ; CHECK-LABEL: scalar_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, x1
-; CHECK-NEXT:    add x0, x8, #1 // =1
+; CHECK-NEXT:    add x0, x8, #1
 ; CHECK-NEXT:    ret
   %t0 = add i64 %x, 1
   %t1 = add i64 %y, %t0

diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
index 7ffbcbac48e86..ecc210691da2d 100644
--- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -101,9 +101,9 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(<vscale x 8 x
 ; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    cnth x9
-; CHECK-NEXT:    sub x9, x9, #8 // =8
+; CHECK-NEXT:    sub x9, x9, #8
 ; CHECK-NEXT:    mov w8, #8
-; CHECK-NEXT:    cmp x9, #8 // =8
+; CHECK-NEXT:    cmp x9, #8
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    lsl x8, x8, #1
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
@@ -150,9 +150,9 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(<vscale x 4
 ; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    cntw x9
-; CHECK-NEXT:    sub x9, x9, #4 // =4
+; CHECK-NEXT:    sub x9, x9, #4
 ; CHECK-NEXT:    mov w8, #4
-; CHECK-NEXT:    cmp x9, #4 // =4
+; CHECK-NEXT:    cmp x9, #4
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    lsl x8, x8, #2
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
@@ -199,9 +199,9 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(<vscale x 2
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ldr d1, [x1]
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    lsl x8, x8, #3
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
@@ -228,10 +228,10 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(<vsca
 ; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld1w { z1.s }, p1/z, [x1]
 ; CHECK-NEXT:    cntd x8
-; CHECK-NEXT:    subs x8, x8, #8 // =8
+; CHECK-NEXT:    subs x8, x8, #8
 ; CHECK-NEXT:    csel x8, xzr, x8, lo
 ; CHECK-NEXT:    mov w9, #8
-; CHECK-NEXT:    cmp x8, #8 // =8
+; CHECK-NEXT:    cmp x8, #8
 ; CHECK-NEXT:    ptrue p1.d, vl8
 ; CHECK-NEXT:    csel x8, x8, x9, lo
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]

diff --git a/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
index f4680354d7e42..6b6682cd92195 100644
--- a/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
@@ -273,7 +273,7 @@ define i1 @add_ugecmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cmp w8, #255
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, %y
@@ -285,7 +285,7 @@ define i1 @add_ugecmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_cmp:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffff
 ; CHECK-NEXT:    cmp w8, w1, uxth
 ; CHECK-NEXT:    cset w0, hs
@@ -299,9 +299,9 @@ define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i8_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #127 // =127
+; CHECK-NEXT:    cmp w8, #127
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -313,9 +313,9 @@ define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #192 // =192
+; CHECK-NEXT:    add w8, w0, #192
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cmp w8, #255
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -327,9 +327,9 @@ define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #767 // =767
+; CHECK-NEXT:    cmp w8, #767
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -341,9 +341,9 @@ define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #64 // =64
+; CHECK-NEXT:    add w8, w0, #64
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cmp w8, #255
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -355,9 +355,9 @@ define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #8 // =8
+; CHECK-NEXT:    add w8, w0, #8
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #15 // =15
+; CHECK-NEXT:    cmp w8, #15
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -369,9 +369,9 @@ define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i24_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffffff
-; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cmp w8, #255
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i24 %x, 128 ; 1U << (8-1)

diff --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
index baba8936aa5a4..e8fbf89d9588f 100644
--- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
@@ -12,20 +12,20 @@ define i32 @main() local_unnamed_addr #1 {
 ; Make sure the stores happen in the correct order (the exact instructions could change).
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0: // %for.body.lr.ph.i.i.i.i.i.i63
-; CHECK-NEXT:    sub sp, sp, #112 // =112
+; CHECK-NEXT:    sub sp, sp, #112
 ; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 112
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl _Z5setupv
 ; CHECK-NEXT:    movi v0.4s, #1
 ; CHECK-NEXT:    mov w9, #1
-; CHECK-NEXT:    add x0, sp, #48 // =48
+; CHECK-NEXT:    add x0, sp, #48
 ; CHECK-NEXT:    mov x1, sp
 ; CHECK-NEXT:    str xzr, [sp, #80]
 ; CHECK-NEXT:    str w9, [sp, #80]
 ; CHECK-NEXT:    stp q0, q0, [sp, #48]
 ; CHECK-NEXT:    ldr w8, [sp, #48]
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    b.ne .LBB0_2
 ; CHECK-NEXT:  // %bb.1: // %for.inc
 ; CHECK-NEXT:    bl f
@@ -35,7 +35,7 @@ define i32 @main() local_unnamed_addr #1 {
 ; CHECK-NEXT:  .LBB0_3: // %common.ret
 ; CHECK-NEXT:    ldr x30, [sp, #96] // 8-byte Folded Reload
 ; CHECK-NEXT:    mov w0, wzr
-; CHECK-NEXT:    add sp, sp, #112 // =112
+; CHECK-NEXT:    add sp, sp, #112
 ; CHECK-NEXT:    ret
 
 

diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index cb6b38c2b0307..2c2c50004b014 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -266,7 +266,7 @@ define void @flag_setting() {
 ; CHECK-NEXT:    b.lt .LBB2_4
 ; CHECK-NEXT:  // %bb.2: // %test3
 ; CHECK-NEXT:    and x10, x9, x10, asr #12
-; CHECK-NEXT:    cmp x10, #1 // =1
+; CHECK-NEXT:    cmp x10, #1
 ; CHECK-NEXT:    b.ge .LBB2_4
 ; CHECK-NEXT:  // %bb.3: // %other_exit
 ; CHECK-NEXT:    str x9, [x8]

diff --git a/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll b/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
index b1dfe85a04cbc..fd6f43fbc1dc7 100644
--- a/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
+++ b/llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
@@ -42,7 +42,7 @@ entry:
 define void @store2(i32* %in, i8* %addr) {
 ; CHECK-LABEL: store2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub sp, sp, #64 // =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    .cfi_def_cfa_offset 64
 ; CHECK-NEXT:    ldpsw x2, x3, [x0]
 ; CHECK-NEXT:    ldrsw x4, [x0, #16]
@@ -54,7 +54,7 @@ define void @store2(i32* %in, i8* %addr) {
 ; CHECK-NEXT:    //APP
 ; CHECK-NEXT:    st64b x2, [x1]
 ; CHECK-NEXT:    //NO_APP
-; CHECK-NEXT:    add sp, sp, #64 // =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
 entry:
   %0 = load i32, i32* %in, align 4

diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
index f8d53a574dd2f..f1cac68a914da 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -16,7 +16,7 @@ define i32 @sink_load_and_copy(i32 %n) {
 ; CHECK-NEXT:    .cfi_offset w21, -24
 ; CHECK-NEXT:    .cfi_offset w30, -32
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    cmp w0, #1 // =1
+; CHECK-NEXT:    cmp w0, #1
 ; CHECK-NEXT:    b.lt .LBB0_3
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
 ; CHECK-NEXT:    adrp x8, A
@@ -26,7 +26,7 @@ define i32 @sink_load_and_copy(i32 %n) {
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mov w0, w21
 ; CHECK-NEXT:    bl _Z3usei
-; CHECK-NEXT:    subs w19, w19, #1 // =1
+; CHECK-NEXT:    subs w19, w19, #1
 ; CHECK-NEXT:    sdiv w20, w20, w0
 ; CHECK-NEXT:    b.ne .LBB0_2
 ; CHECK-NEXT:    b .LBB0_4
@@ -70,7 +70,7 @@ define i32 @cant_sink_successive_call(i32 %n) {
 ; CHECK-NEXT:    .cfi_offset w21, -24
 ; CHECK-NEXT:    .cfi_offset w30, -32
 ; CHECK-NEXT:    mov w19, w0
-; CHECK-NEXT:    cmp w0, #1 // =1
+; CHECK-NEXT:    cmp w0, #1
 ; CHECK-NEXT:    b.lt .LBB1_3
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
 ; CHECK-NEXT:    adrp x8, A
@@ -82,7 +82,7 @@ define i32 @cant_sink_successive_call(i32 %n) {
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mov w0, w20
 ; CHECK-NEXT:    bl _Z3usei
-; CHECK-NEXT:    subs w19, w19, #1 // =1
+; CHECK-NEXT:    subs w19, w19, #1
 ; CHECK-NEXT:    sdiv w21, w21, w0
 ; CHECK-NEXT:    b.ne .LBB1_2
 ; CHECK-NEXT:    b .LBB1_4
@@ -127,7 +127,7 @@ define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
 ; CHECK-NEXT:    .cfi_offset w21, -24
 ; CHECK-NEXT:    .cfi_offset w30, -32
 ; CHECK-NEXT:    mov w19, w1
-; CHECK-NEXT:    cmp w1, #1 // =1
+; CHECK-NEXT:    cmp w1, #1
 ; CHECK-NEXT:    b.lt .LBB2_3
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
 ; CHECK-NEXT:    adrp x8, A
@@ -139,7 +139,7 @@ define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    mov w0, w20
 ; CHECK-NEXT:    bl _Z3usei
-; CHECK-NEXT:    subs w19, w19, #1 // =1
+; CHECK-NEXT:    subs w19, w19, #1
 ; CHECK-NEXT:    sdiv w21, w21, w0
 ; CHECK-NEXT:    b.ne .LBB2_2
 ; CHECK-NEXT:    b .LBB2_4

diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll b/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
index 2fd2bfb8e8024..eae121ff17f7f 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
@@ -13,7 +13,7 @@ define i32 @a() {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl [[OUTLINED_DIRECT:OUTLINED_FUNCTION_[0-9]+]]
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -29,7 +29,7 @@ define i32 @b() {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl [[OUTLINED_DIRECT]]
-; CHECK-NEXT:    add w0, w0, #88 // =88
+; CHECK-NEXT:    add w0, w0, #88
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -45,7 +45,7 @@ define hidden i32 @c(i32 (i32, i32, i32, i32)* %fptr) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl [[OUTLINED_INDIRECT:OUTLINED_FUNCTION_[0-9]+]]
-; CHECK-NEXT:    add w0, w0, #8 // =8
+; CHECK-NEXT:    add w0, w0, #8
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -61,7 +61,7 @@ define hidden i32 @d(i32 (i32, i32, i32, i32)* %fptr) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl [[OUTLINED_INDIRECT]]
-; CHECK-NEXT:    add w0, w0, #88 // =88
+; CHECK-NEXT:    add w0, w0, #88
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll
index 0e6d19e451dc1..7fbed2d37e833 100644
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffle-reverse-neon.ll
@@ -106,7 +106,7 @@ define <8 x i32> @reverse_v8i32(<8 x i32> %a) #0 {
 ;
 ; CHECK-FASTISEL-LABEL: reverse_v8i32:
 ; CHECK-FASTISEL:       // %bb.0:
-; CHECK-FASTISEL-NEXT:    sub sp, sp, #16 // =16
+; CHECK-FASTISEL-NEXT:    sub sp, sp, #16
 ; CHECK-FASTISEL-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-FASTISEL-NEXT:    mov v1.16b, v0.16b
 ; CHECK-FASTISEL-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -114,7 +114,7 @@ define <8 x i32> @reverse_v8i32(<8 x i32> %a) #0 {
 ; CHECK-FASTISEL-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
 ; CHECK-FASTISEL-NEXT:    rev64 v1.4s, v1.4s
 ; CHECK-FASTISEL-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
-; CHECK-FASTISEL-NEXT:    add sp, sp, #16 // =16
+; CHECK-FASTISEL-NEXT:    add sp, sp, #16
 ; CHECK-FASTISEL-NEXT:    ret
 
   %res = call <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32> %a)
@@ -137,7 +137,7 @@ define <16 x float> @reverse_v16f32(<16 x float> %a) #0 {
 ;
 ; CHECK-FASTISEL-LABEL: reverse_v16f32:
 ; CHECK-FASTISEL:       // %bb.0:
-; CHECK-FASTISEL-NEXT:    sub sp, sp, #32 // =32
+; CHECK-FASTISEL-NEXT:    sub sp, sp, #32
 ; CHECK-FASTISEL-NEXT:    str q3, [sp, #16] // 16-byte Folded Spill
 ; CHECK-FASTISEL-NEXT:    str q2, [sp] // 16-byte Folded Spill
 ; CHECK-FASTISEL-NEXT:    mov v2.16b, v1.16b
@@ -152,7 +152,7 @@ define <16 x float> @reverse_v16f32(<16 x float> %a) #0 {
 ; CHECK-FASTISEL-NEXT:    ext v2.16b, v2.16b, v2.16b, #8
 ; CHECK-FASTISEL-NEXT:    rev64 v3.4s, v3.4s
 ; CHECK-FASTISEL-NEXT:    ext v3.16b, v3.16b, v3.16b, #8
-; CHECK-FASTISEL-NEXT:    add sp, sp, #32 // =32
+; CHECK-FASTISEL-NEXT:    add sp, sp, #32
 ; CHECK-FASTISEL-NEXT:    ret
 
   %res = call <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float> %a)

diff --git a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
index e723126108046..00aedf135f209 100644
--- a/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
+++ b/llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -32,11 +32,11 @@ define <vscale x 16 x i8> @splice_nxv16i8_clamped_idx(<vscale x 16 x i8> %a, <vs
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    rdvl x9, #1
-; CHECK-NEXT:    sub x9, x9, #1 // =1
+; CHECK-NEXT:    sub x9, x9, #1
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #16
-; CHECK-NEXT:    cmp x9, #16 // =16
+; CHECK-NEXT:    cmp x9, #16
 ; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
 ; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -73,11 +73,11 @@ define <vscale x 8 x i16> @splice_nxv8i16_clamped_idx(<vscale x 8 x i16> %a, <vs
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cnth x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #8
-; CHECK-NEXT:    cmp x10, #8 // =8
+; CHECK-NEXT:    cmp x10, #8
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -114,11 +114,11 @@ define <vscale x 4 x i32> @splice_nxv4i32_clamped_idx(<vscale x 4 x i32> %a, <vs
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntw x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x10, #4 // =4
+; CHECK-NEXT:    cmp x10, #4
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -155,11 +155,11 @@ define <vscale x 2 x i64> @splice_nxv2i64_clamped_idx(<vscale x 2 x i64> %a, <vs
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntd x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2 // =2
+; CHECK-NEXT:    cmp x10, #2
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -227,9 +227,9 @@ define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vsc
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntd x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2 // =2
+; CHECK-NEXT:    cmp x10, #2
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -301,9 +301,9 @@ define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vsc
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntw x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x10, #4 // =4
+; CHECK-NEXT:    cmp x10, #4
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -345,11 +345,11 @@ define <vscale x 8 x half> @splice_nxv8f16_clamped_idx(<vscale x 8 x half> %a, <
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cnth x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #8
-; CHECK-NEXT:    cmp x10, #8 // =8
+; CHECK-NEXT:    cmp x10, #8
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -417,9 +417,9 @@ define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <v
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntd x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2 // =2
+; CHECK-NEXT:    cmp x10, #2
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -460,11 +460,11 @@ define <vscale x 4 x float> @splice_nxv4f32_clamped_idx(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntw x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x10, #4 // =4
+; CHECK-NEXT:    cmp x10, #4
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -501,11 +501,11 @@ define <vscale x 2 x double> @splice_nxv2f64_clamped_idx(<vscale x 2 x double> %
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-2
 ; CHECK-NEXT:    cntd x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #2
-; CHECK-NEXT:    cmp x10, #2 // =2
+; CHECK-NEXT:    cmp x10, #2
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -616,11 +616,11 @@ define <vscale x 16 x float> @splice_nxv16f32_clamped_idx(<vscale x 16 x float>
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-8
 ; CHECK-NEXT:    rdvl x10, #1
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #16
-; CHECK-NEXT:    cmp x10, #16 // =16
+; CHECK-NEXT:    cmp x10, #16
 ; CHECK-NEXT:    st1w { z3.s }, p0, [x8, #3, mul vl]
 ; CHECK-NEXT:    st1w { z2.s }, p0, [x8, #2, mul vl]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
@@ -687,7 +687,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_clamped(<vscale x 16 x i8> %a, <vscale
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #17
-; CHECK-NEXT:    cmp x9, #17 // =17
+; CHECK-NEXT:    cmp x9, #17
 ; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
 ; CHECK-NEXT:    st1b { z1.b }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -742,7 +742,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_clamped(<vscale x 8 x i16> %a, <vscale
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #18
-; CHECK-NEXT:    cmp x9, #18 // =18
+; CHECK-NEXT:    cmp x9, #18
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -797,7 +797,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_clamped(<vscale x 4 x i32> %a, <vscale
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #20
-; CHECK-NEXT:    cmp x9, #20 // =20
+; CHECK-NEXT:    cmp x9, #20
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -852,7 +852,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_clamped(<vscale x 2 x i64> %a, <vscale
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #24
-; CHECK-NEXT:    cmp x9, #24 // =24
+; CHECK-NEXT:    cmp x9, #24
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -907,7 +907,7 @@ define <vscale x 8 x half> @splice_nxv8f16_clamped(<vscale x 8 x half> %a, <vsca
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #18
-; CHECK-NEXT:    cmp x9, #18 // =18
+; CHECK-NEXT:    cmp x9, #18
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -962,7 +962,7 @@ define <vscale x 4 x float> @splice_nxv4f32_clamped(<vscale x 4 x float> %a, <vs
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #20
-; CHECK-NEXT:    cmp x9, #20 // =20
+; CHECK-NEXT:    cmp x9, #20
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -1017,7 +1017,7 @@ define <vscale x 2 x double> @splice_nxv2f64_clamped(<vscale x 2 x double> %a, <
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #24
-; CHECK-NEXT:    cmp x9, #24 // =24
+; CHECK-NEXT:    cmp x9, #24
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    csel x9, x9, x10, lo
@@ -1130,7 +1130,7 @@ define <vscale x 8 x i32> @splice_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i
 ; CHECK-NEXT:    st1w { z2.s }, p0, [x8, #2, mul vl]
 ; CHECK-NEXT:    addvl x8, x8, #2
 ; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
-; CHECK-NEXT:    sub x8, x8, #32 // =32
+; CHECK-NEXT:    sub x8, x8, #32
 ; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x8, #1, mul vl]
 ; CHECK-NEXT:    addvl sp, sp, #4
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1149,7 +1149,7 @@ define <vscale x 16 x float> @splice_nxv16f32_clamped(<vscale x 16 x float> %a,
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w10, #68
-; CHECK-NEXT:    cmp x9, #68 // =68
+; CHECK-NEXT:    cmp x9, #68
 ; CHECK-NEXT:    st1w { z3.s }, p0, [x8, #3, mul vl]
 ; CHECK-NEXT:    st1w { z2.s }, p0, [x8, #2, mul vl]
 ; CHECK-NEXT:    st1w { z1.s }, p0, [x8, #1, mul vl]

diff --git a/llvm/test/CodeGen/AArch64/neg-abs.ll b/llvm/test/CodeGen/AArch64/neg-abs.ll
index 88cca29ef94ea..e3b11fbe6c8fc 100644
--- a/llvm/test/CodeGen/AArch64/neg-abs.ll
+++ b/llvm/test/CodeGen/AArch64/neg-abs.ll
@@ -7,7 +7,7 @@ declare i64 @llvm.abs.i64(i64, i1 immarg)
 define i64 @neg_abs64(i64 %x) {
 ; CHECK-LABEL: neg_abs64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    cneg x8, x0, mi
 ; CHECK-NEXT:    neg x0, x8
 ; CHECK-NEXT:    ret
@@ -21,7 +21,7 @@ declare i32 @llvm.abs.i32(i32, i1 immarg)
 define i32 @neg_abs32(i32 %x) {
 ; CHECK-LABEL: neg_abs32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cneg w8, w0, mi
 ; CHECK-NEXT:    neg w0, w8
 ; CHECK-NEXT:    ret
@@ -66,7 +66,7 @@ define i128 @neg_abs128(i128 %x) {
 define i64 @abs64(i64 %x) {
 ; CHECK-LABEL: abs64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    cneg x0, x0, mi
 ; CHECK-NEXT:    ret
   %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
@@ -76,7 +76,7 @@ define i64 @abs64(i64 %x) {
 define i32 @abs32(i32 %x) {
 ; CHECK-LABEL: abs32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    cneg w0, w0, mi
 ; CHECK-NEXT:    ret
   %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
@@ -87,7 +87,7 @@ define i16 @abs16(i16 %x) {
 ; CHECK-LABEL: abs16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sxth w8, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    cneg w0, w8, mi
 ; CHECK-NEXT:    ret
   %abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
@@ -99,7 +99,7 @@ define i128 @abs128(i128 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    negs x8, x0
 ; CHECK-NEXT:    ngcs x9, x1
-; CHECK-NEXT:    cmp x1, #0 // =0
+; CHECK-NEXT:    cmp x1, #0
 ; CHECK-NEXT:    csel x0, x8, x0, lt
 ; CHECK-NEXT:    csel x1, x9, x1, lt
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/pow.ll b/llvm/test/CodeGen/AArch64/pow.ll
index c8e8ab9fc9f7d..0302368e55b25 100644
--- a/llvm/test/CodeGen/AArch64/pow.ll
+++ b/llvm/test/CodeGen/AArch64/pow.ll
@@ -68,7 +68,7 @@ define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
 define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind {
 ; CHECK-LABEL: pow_v4f32_one_fourth_not_enough_fmf:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    fmov s1, #0.25000000
@@ -100,7 +100,7 @@ define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[3], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call afn nsz <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-1, float 2.5e-1, float 2.5e-01, float 2.5e-01>)
   ret <4 x float> %r
@@ -109,7 +109,7 @@ define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind
 define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwind {
 ; CHECK-LABEL: pow_v2f64_one_fourth_not_enough_fmf:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov d0, v0.d[1]
 ; CHECK-NEXT:    fmov d1, #0.25000000
@@ -124,7 +124,7 @@ define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwi
 ; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call nsz nnan reassoc <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-1, double 2.5e-1>)
   ret <2 x double> %r

diff --git a/llvm/test/CodeGen/AArch64/pr48188.ll b/llvm/test/CodeGen/AArch64/pr48188.ll
index 2da02e640ec1d..599021674e858 100644
--- a/llvm/test/CodeGen/AArch64/pr48188.ll
+++ b/llvm/test/CodeGen/AArch64/pr48188.ll
@@ -5,7 +5,7 @@
 define void @test() nounwind {
 ; CHECK-LABEL: test:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    mov x1, xzr
 ; CHECK-NEXT:    mov x0, x1
 ; CHECK-NEXT:    str x1, [sp] // 8-byte Folded Spill

diff --git a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
index da5d1c6fff5fa..811d4746ff00f 100644
--- a/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
+++ b/llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -138,10 +138,10 @@ define dso_local void @run_test() local_unnamed_addr #0 {
 ; CHECK-NEXT:    add v28.2d, v28.2d, v14.2d
 ; CHECK-NEXT:    fmov d14, x17
 ; CHECK-NEXT:    mov v14.d[1], x16
-; CHECK-NEXT:    add x8, x8, #8 // =8
+; CHECK-NEXT:    add x8, x8, #8
 ; CHECK-NEXT:    add v27.2d, v27.2d, v14.2d
-; CHECK-NEXT:    cmp x8, #64 // =64
-; CHECK-NEXT:    add x9, x9, #1 // =1
+; CHECK-NEXT:    cmp x8, #64
+; CHECK-NEXT:    add x9, x9, #1
 ; CHECK-NEXT:    b.ne .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %for.cond.cleanup
 ; CHECK-NEXT:    adrp x8, C

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat.ll b/llvm/test/CodeGen/AArch64/sadd_sat.ll
index 99711660615e6..89ff9fad542bd 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat.ll
@@ -13,7 +13,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds w8, w0, w1
 ; CHECK-NEXT:    mov w9, #2147483647
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    cinv w8, w9, ge
 ; CHECK-NEXT:    adds w9, w0, w1
 ; CHECK-NEXT:    csel w0, w8, w9, vs
@@ -27,7 +27,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x0, x1
 ; CHECK-NEXT:    mov x9, #9223372036854775807
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    cinv x8, x9, ge
 ; CHECK-NEXT:    adds x9, x0, x1
 ; CHECK-NEXT:    csel x0, x8, x9, vs
@@ -58,9 +58,9 @@ define i8 @func8(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    sxtb w8, w0
 ; CHECK-NEXT:    add w8, w8, w1, sxtb
 ; CHECK-NEXT:    mov w9, #127
-; CHECK-NEXT:    cmp w8, #127 // =127
+; CHECK-NEXT:    cmp w8, #127
 ; CHECK-NEXT:    csel w8, w8, w9, lt
-; CHECK-NEXT:    cmn w8, #128 // =128
+; CHECK-NEXT:    cmn w8, #128
 ; CHECK-NEXT:    mov w9, #-128
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret
@@ -75,9 +75,9 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; CHECK-NEXT:    sbfx w9, w0, #0, #4
 ; CHECK-NEXT:    add w8, w9, w8, asr #28
 ; CHECK-NEXT:    mov w10, #7
-; CHECK-NEXT:    cmp w8, #7 // =7
+; CHECK-NEXT:    cmp w8, #7
 ; CHECK-NEXT:    csel w8, w8, w10, lt
-; CHECK-NEXT:    cmn w8, #8 // =8
+; CHECK-NEXT:    cmn w8, #8
 ; CHECK-NEXT:    mov w9, #-8
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll b/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
index 4acda701b1976..1b631448181a7 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
@@ -13,7 +13,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; CHECK-NEXT:    mul w8, w1, w2
 ; CHECK-NEXT:    adds w10, w0, w8
 ; CHECK-NEXT:    mov w9, #2147483647
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    cinv w9, w9, ge
 ; CHECK-NEXT:    adds w8, w0, w8
 ; CHECK-NEXT:    csel w0, w9, w8, vs
@@ -28,7 +28,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adds x8, x0, x2
 ; CHECK-NEXT:    mov x9, #9223372036854775807
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    cinv x8, x9, ge
 ; CHECK-NEXT:    adds x9, x0, x2
 ; CHECK-NEXT:    csel x0, x8, x9, vs
@@ -63,9 +63,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; CHECK-NEXT:    mul w9, w1, w2
 ; CHECK-NEXT:    add w8, w8, w9, sxtb
 ; CHECK-NEXT:    mov w10, #127
-; CHECK-NEXT:    cmp w8, #127 // =127
+; CHECK-NEXT:    cmp w8, #127
 ; CHECK-NEXT:    csel w8, w8, w10, lt
-; CHECK-NEXT:    cmn w8, #128 // =128
+; CHECK-NEXT:    cmn w8, #128
 ; CHECK-NEXT:    mov w9, #-128
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret
@@ -82,9 +82,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; CHECK-NEXT:    lsl w9, w9, #28
 ; CHECK-NEXT:    add w8, w8, w9, asr #28
 ; CHECK-NEXT:    mov w10, #7
-; CHECK-NEXT:    cmp w8, #7 // =7
+; CHECK-NEXT:    cmp w8, #7
 ; CHECK-NEXT:    csel w8, w8, w10, lt
-; CHECK-NEXT:    cmn w8, #8 // =8
+; CHECK-NEXT:    cmn w8, #8
 ; CHECK-NEXT:    mov w9, #-8
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
index 9c654f6719b18..53e3563e7cea9 100644
--- a/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -135,8 +135,8 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.b }[0], [x1]
 ; CHECK-NEXT:    ld1 { v1.b }[0], [x0]
-; CHECK-NEXT:    add x8, x0, #1 // =1
-; CHECK-NEXT:    add x9, x1, #1 // =1
+; CHECK-NEXT:    add x8, x0, #1
+; CHECK-NEXT:    add x9, x1, #1
 ; CHECK-NEXT:    ld1 { v0.b }[4], [x9]
 ; CHECK-NEXT:    ld1 { v1.b }[4], [x8]
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #24
@@ -175,8 +175,8 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.h }[0], [x1]
 ; CHECK-NEXT:    ld1 { v1.h }[0], [x0]
-; CHECK-NEXT:    add x8, x0, #2 // =2
-; CHECK-NEXT:    add x9, x1, #2 // =2
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    add x9, x1, #2
 ; CHECK-NEXT:    ld1 { v0.h }[2], [x9]
 ; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #16
@@ -354,7 +354,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    adcs x12, x3, x7
 ; CHECK-NEXT:    mov x9, #9223372036854775807
 ; CHECK-NEXT:    eor x10, x3, x7
-; CHECK-NEXT:    cmp x12, #0 // =0
+; CHECK-NEXT:    cmp x12, #0
 ; CHECK-NEXT:    eor x13, x3, x12
 ; CHECK-NEXT:    cinv x14, x9, ge
 ; CHECK-NEXT:    bics xzr, x13, x10
@@ -364,7 +364,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    adds x8, x0, x4
 ; CHECK-NEXT:    adcs x10, x1, x5
 ; CHECK-NEXT:    eor x11, x1, x5
-; CHECK-NEXT:    cmp x10, #0 // =0
+; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    eor x12, x1, x10
 ; CHECK-NEXT:    cinv x9, x9, ge
 ; CHECK-NEXT:    bics xzr, x12, x11

diff --git a/llvm/test/CodeGen/AArch64/sat-add.ll b/llvm/test/CodeGen/AArch64/sat-add.ll
index c38f505be5d7d..605ee5148f2f4 100644
--- a/llvm/test/CodeGen/AArch64/sat-add.ll
+++ b/llvm/test/CodeGen/AArch64/sat-add.ll
@@ -10,10 +10,10 @@ define i8 @unsigned_sat_constant_i8_using_min(i8 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i8_using_min:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
-; CHECK-NEXT:    cmp w8, #213 // =213
+; CHECK-NEXT:    cmp w8, #213
 ; CHECK-NEXT:    mov w8, #-43
 ; CHECK-NEXT:    csel w8, w0, w8, lo
-; CHECK-NEXT:    add w0, w8, #42 // =42
+; CHECK-NEXT:    add w0, w8, #42
 ; CHECK-NEXT:    ret
   %c = icmp ult i8 %x, -43
   %s = select i1 %c, i8 %x, i8 -43
@@ -25,7 +25,7 @@ define i8 @unsigned_sat_constant_i8_using_cmp_sum(i8 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i8_using_cmp_sum:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
-; CHECK-NEXT:    add w8, w8, #42 // =42
+; CHECK-NEXT:    add w8, w8, #42
 ; CHECK-NEXT:    tst w8, #0x100
 ; CHECK-NEXT:    csinv w0, w8, wzr, eq
 ; CHECK-NEXT:    ret
@@ -39,8 +39,8 @@ define i8 @unsigned_sat_constant_i8_using_cmp_notval(i8 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i8_using_cmp_notval:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
-; CHECK-NEXT:    add w9, w0, #42 // =42
-; CHECK-NEXT:    cmp w8, #213 // =213
+; CHECK-NEXT:    add w9, w0, #42
+; CHECK-NEXT:    cmp w8, #213
 ; CHECK-NEXT:    csinv w0, w9, wzr, ls
 ; CHECK-NEXT:    ret
   %a = add i8 %x, 42
@@ -56,7 +56,7 @@ define i16 @unsigned_sat_constant_i16_using_min(i16 %x) {
 ; CHECK-NEXT:    cmp w8, w0, uxth
 ; CHECK-NEXT:    mov w8, #-43
 ; CHECK-NEXT:    csel w8, w0, w8, hi
-; CHECK-NEXT:    add w0, w8, #42 // =42
+; CHECK-NEXT:    add w0, w8, #42
 ; CHECK-NEXT:    ret
   %c = icmp ult i16 %x, -43
   %s = select i1 %c, i16 %x, i16 -43
@@ -68,7 +68,7 @@ define i16 @unsigned_sat_constant_i16_using_cmp_sum(i16 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i16_using_cmp_sum:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
-; CHECK-NEXT:    add w8, w8, #42 // =42
+; CHECK-NEXT:    add w8, w8, #42
 ; CHECK-NEXT:    tst w8, #0x10000
 ; CHECK-NEXT:    csinv w0, w8, wzr, eq
 ; CHECK-NEXT:    ret
@@ -82,7 +82,7 @@ define i16 @unsigned_sat_constant_i16_using_cmp_notval(i16 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i16_using_cmp_notval:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w9, #65493
-; CHECK-NEXT:    add w8, w0, #42 // =42
+; CHECK-NEXT:    add w8, w0, #42
 ; CHECK-NEXT:    cmp w9, w0, uxth
 ; CHECK-NEXT:    csinv w0, w8, wzr, hs
 ; CHECK-NEXT:    ret
@@ -95,10 +95,10 @@ define i16 @unsigned_sat_constant_i16_using_cmp_notval(i16 %x) {
 define i32 @unsigned_sat_constant_i32_using_min(i32 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i32_using_min:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmn w0, #43 // =43
+; CHECK-NEXT:    cmn w0, #43
 ; CHECK-NEXT:    mov w8, #-43
 ; CHECK-NEXT:    csel w8, w0, w8, lo
-; CHECK-NEXT:    add w0, w8, #42 // =42
+; CHECK-NEXT:    add w0, w8, #42
 ; CHECK-NEXT:    ret
   %c = icmp ult i32 %x, -43
   %s = select i1 %c, i32 %x, i32 -43
@@ -109,7 +109,7 @@ define i32 @unsigned_sat_constant_i32_using_min(i32 %x) {
 define i32 @unsigned_sat_constant_i32_using_cmp_sum(i32 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i32_using_cmp_sum:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds w8, w0, #42 // =42
+; CHECK-NEXT:    adds w8, w0, #42
 ; CHECK-NEXT:    csinv w0, w8, wzr, lo
 ; CHECK-NEXT:    ret
   %a = add i32 %x, 42
@@ -121,7 +121,7 @@ define i32 @unsigned_sat_constant_i32_using_cmp_sum(i32 %x) {
 define i32 @unsigned_sat_constant_i32_using_cmp_notval(i32 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i32_using_cmp_notval:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds w8, w0, #42 // =42
+; CHECK-NEXT:    adds w8, w0, #42
 ; CHECK-NEXT:    csinv w0, w8, wzr, lo
 ; CHECK-NEXT:    ret
   %a = add i32 %x, 42
@@ -133,10 +133,10 @@ define i32 @unsigned_sat_constant_i32_using_cmp_notval(i32 %x) {
 define i64 @unsigned_sat_constant_i64_using_min(i64 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i64_using_min:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmn x0, #43 // =43
+; CHECK-NEXT:    cmn x0, #43
 ; CHECK-NEXT:    mov x8, #-43
 ; CHECK-NEXT:    csel x8, x0, x8, lo
-; CHECK-NEXT:    add x0, x8, #42 // =42
+; CHECK-NEXT:    add x0, x8, #42
 ; CHECK-NEXT:    ret
   %c = icmp ult i64 %x, -43
   %s = select i1 %c, i64 %x, i64 -43
@@ -147,7 +147,7 @@ define i64 @unsigned_sat_constant_i64_using_min(i64 %x) {
 define i64 @unsigned_sat_constant_i64_using_cmp_sum(i64 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i64_using_cmp_sum:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds x8, x0, #42 // =42
+; CHECK-NEXT:    adds x8, x0, #42
 ; CHECK-NEXT:    csinv x0, x8, xzr, lo
 ; CHECK-NEXT:    ret
   %a = add i64 %x, 42
@@ -159,7 +159,7 @@ define i64 @unsigned_sat_constant_i64_using_cmp_sum(i64 %x) {
 define i64 @unsigned_sat_constant_i64_using_cmp_notval(i64 %x) {
 ; CHECK-LABEL: unsigned_sat_constant_i64_using_cmp_notval:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds x8, x0, #42 // =42
+; CHECK-NEXT:    adds x8, x0, #42
 ; CHECK-NEXT:    csinv x0, x8, xzr, lo
 ; CHECK-NEXT:    ret
   %a = add i64 %x, 42

diff --git a/llvm/test/CodeGen/AArch64/sdivpow2.ll b/llvm/test/CodeGen/AArch64/sdivpow2.ll
index 25e89176ad04f..22a11e4a83404 100644
--- a/llvm/test/CodeGen/AArch64/sdivpow2.ll
+++ b/llvm/test/CodeGen/AArch64/sdivpow2.ll
@@ -5,8 +5,8 @@
 define i32 @test1(i32 %x) {
 ; CHECK-LABEL: test1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #7 // =7
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #7
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    asr w0, w8, #3
 ; CHECK-NEXT:    ret
@@ -17,8 +17,8 @@ define i32 @test1(i32 %x) {
 define i32 @test2(i32 %x) {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #7 // =7
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #7
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    neg w0, w8, asr #3
 ; CHECK-NEXT:    ret
@@ -29,8 +29,8 @@ define i32 @test2(i32 %x) {
 define i32 @test3(i32 %x) {
 ; CHECK-LABEL: test3:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #31 // =31
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #31
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    asr w0, w8, #5
 ; CHECK-NEXT:    ret
@@ -41,8 +41,8 @@ define i32 @test3(i32 %x) {
 define i64 @test4(i64 %x) {
 ; CHECK-LABEL: test4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #7 // =7
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    add x8, x0, #7
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x8, x8, x0, lt
 ; CHECK-NEXT:    asr x0, x8, #3
 ; CHECK-NEXT:    ret
@@ -53,8 +53,8 @@ define i64 @test4(i64 %x) {
 define i64 @test5(i64 %x) {
 ; CHECK-LABEL: test5:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #7 // =7
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    add x8, x0, #7
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x8, x8, x0, lt
 ; CHECK-NEXT:    neg x0, x8, asr #3
 ; CHECK-NEXT:    ret
@@ -65,8 +65,8 @@ define i64 @test5(i64 %x) {
 define i64 @test6(i64 %x) {
 ; CHECK-LABEL: test6:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #63 // =63
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    add x8, x0, #63
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x8, x8, x0, lt
 ; CHECK-NEXT:    asr x0, x8, #6
 ; CHECK-NEXT:    ret
@@ -79,7 +79,7 @@ define i64 @test7(i64 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #281474976710655
 ; CHECK-NEXT:    add x8, x0, x8
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x8, x8, x0, lt
 ; CHECK-NEXT:    asr x0, x8, #48
 ; CHECK-NEXT:    ret
@@ -90,15 +90,15 @@ define i64 @test7(i64 %x) {
 define i64 @test8(i64 %x) {
 ; ISEL-LABEL: test8:
 ; ISEL:       // %bb.0:
-; ISEL-NEXT:    cmp x0, #0 // =0
+; ISEL-NEXT:    cmp x0, #0
 ; ISEL-NEXT:    cinc x8, x0, lt
 ; ISEL-NEXT:    asr x0, x8, #1
 ; ISEL-NEXT:    ret
 ;
 ; FAST-LABEL: test8:
 ; FAST:       // %bb.0:
-; FAST-NEXT:    add x8, x0, #1 // =1
-; FAST-NEXT:    cmp x0, #0 // =0
+; FAST-NEXT:    add x8, x0, #1
+; FAST-NEXT:    cmp x0, #0
 ; FAST-NEXT:    csel x8, x8, x0, lt
 ; FAST-NEXT:    asr x0, x8, #1
 ; FAST-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/select_const.ll b/llvm/test/CodeGen/AArch64/select_const.ll
index f58232e2ee898..a033bb61833e1 100644
--- a/llvm/test/CodeGen/AArch64/select_const.ll
+++ b/llvm/test/CodeGen/AArch64/select_const.ll
@@ -69,7 +69,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
 ; CHECK-LABEL: select_0_or_neg1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0x1
-; CHECK-NEXT:    sub w0, w8, #1 // =1
+; CHECK-NEXT:    sub w0, w8, #1
 ; CHECK-NEXT:    ret
   %sel = select i1 %cond, i32 0, i32 -1
   ret i32 %sel
@@ -78,7 +78,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
 define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_0_or_neg1_zeroext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w0, w0, #1 // =1
+; CHECK-NEXT:    sub w0, w0, #1
 ; CHECK-NEXT:    ret
   %sel = select i1 %cond, i32 0, i32 -1
   ret i32 %sel
@@ -137,7 +137,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
 define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_Cplus1_C_zeroext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #41
 ; CHECK-NEXT:    cinc w0, w8, ne
 ; CHECK-NEXT:    ret
@@ -172,7 +172,7 @@ define i32 @select_C_Cplus1(i1 %cond) {
 define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_C_Cplus1_zeroext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #41
 ; CHECK-NEXT:    cinc w0, w8, eq
 ; CHECK-NEXT:    ret
@@ -209,7 +209,7 @@ define i32 @select_C1_C2(i1 %cond) {
 define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
 ; CHECK-LABEL: select_C1_C2_zeroext:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #42
 ; CHECK-NEXT:    mov w9, #421
 ; CHECK-NEXT:    csel w0, w9, w8, ne

diff --git a/llvm/test/CodeGen/AArch64/shift-mod.ll b/llvm/test/CodeGen/AArch64/shift-mod.ll
index 6c3521d260085..eee65fa23d4e0 100644
--- a/llvm/test/CodeGen/AArch64/shift-mod.ll
+++ b/llvm/test/CodeGen/AArch64/shift-mod.ll
@@ -78,7 +78,7 @@ entry:
 define i64 @ashr_add_shl_i32(i64 %r) {
 ; CHECK-LABEL: ashr_add_shl_i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #1 // =1
+; CHECK-NEXT:    add w8, w0, #1
 ; CHECK-NEXT:    sxtw x0, w8
 ; CHECK-NEXT:    ret
   %conv = shl i64 %r, 32
@@ -90,7 +90,7 @@ define i64 @ashr_add_shl_i32(i64 %r) {
 define i64 @ashr_add_shl_i8(i64 %r) {
 ; CHECK-LABEL: ashr_add_shl_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #1 // =1
+; CHECK-NEXT:    add w8, w0, #1
 ; CHECK-NEXT:    sxtb x0, w8
 ; CHECK-NEXT:    ret
   %conv = shl i64 %r, 56

diff --git a/llvm/test/CodeGen/AArch64/signbit-shift.ll b/llvm/test/CodeGen/AArch64/signbit-shift.ll
index 250290aa2348f..747eda4bf2ada 100644
--- a/llvm/test/CodeGen/AArch64/signbit-shift.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-shift.ll
@@ -18,7 +18,7 @@ define i32 @add_zext_ifpos(i32 %x) {
 ; CHECK-LABEL: add_zext_ifpos:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #31
-; CHECK-NEXT:    add w0, w8, #42 // =42
+; CHECK-NEXT:    add w0, w8, #42
 ; CHECK-NEXT:    ret
   %c = icmp sgt i32 %x, -1
   %e = zext i1 %c to i32
@@ -43,7 +43,7 @@ define <4 x i32> @add_zext_ifpos_vec_splat(<4 x i32> %x) {
 define i32 @sel_ifpos_tval_bigger(i32 %x) {
 ; CHECK-LABEL: sel_ifpos_tval_bigger:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #41
 ; CHECK-NEXT:    cinc w0, w8, ge
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define i32 @add_sext_ifpos(i32 %x) {
 ; CHECK-LABEL: add_sext_ifpos:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #31
-; CHECK-NEXT:    add w0, w8, #41 // =41
+; CHECK-NEXT:    add w0, w8, #41
 ; CHECK-NEXT:    ret
   %c = icmp sgt i32 %x, -1
   %e = sext i1 %c to i32
@@ -92,7 +92,7 @@ define <4 x i32> @add_sext_ifpos_vec_splat(<4 x i32> %x) {
 define i32 @sel_ifpos_fval_bigger(i32 %x) {
 ; CHECK-LABEL: sel_ifpos_fval_bigger:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #41
 ; CHECK-NEXT:    cinc w0, w8, lt
 ; CHECK-NEXT:    ret
@@ -117,7 +117,7 @@ define i32 @add_zext_ifneg(i32 %x) {
 ; CHECK-LABEL: add_zext_ifneg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    lsr w8, w0, #31
-; CHECK-NEXT:    add w0, w8, #41 // =41
+; CHECK-NEXT:    add w0, w8, #41
 ; CHECK-NEXT:    ret
   %c = icmp slt i32 %x, 0
   %e = zext i1 %c to i32
@@ -128,7 +128,7 @@ define i32 @add_zext_ifneg(i32 %x) {
 define i32 @sel_ifneg_tval_bigger(i32 %x) {
 ; CHECK-LABEL: sel_ifneg_tval_bigger:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #41
 ; CHECK-NEXT:    cinc w0, w8, lt
 ; CHECK-NEXT:    ret
@@ -151,7 +151,7 @@ define i32 @add_sext_ifneg(i32 %x) {
 ; CHECK-LABEL: add_sext_ifneg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #31
-; CHECK-NEXT:    add w0, w8, #42 // =42
+; CHECK-NEXT:    add w0, w8, #42
 ; CHECK-NEXT:    ret
   %c = icmp slt i32 %x, 0
   %e = sext i1 %c to i32
@@ -162,7 +162,7 @@ define i32 @add_sext_ifneg(i32 %x) {
 define i32 @sel_ifneg_fval_bigger(i32 %x) {
 ; CHECK-LABEL: sel_ifneg_fval_bigger:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    mov w8, #41
 ; CHECK-NEXT:    cinc w0, w8, ge
 ; CHECK-NEXT:    ret
@@ -175,7 +175,7 @@ define i32 @add_lshr_not(i32 %x) {
 ; CHECK-LABEL: add_lshr_not:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #31
-; CHECK-NEXT:    add w0, w8, #42 // =42
+; CHECK-NEXT:    add w0, w8, #42
 ; CHECK-NEXT:    ret
   %not = xor i32 %x, -1
   %sh = lshr i32 %not, 31
@@ -247,7 +247,7 @@ define i32 @sub_const_op_lshr(i32 %x) {
 ; CHECK-LABEL: sub_const_op_lshr:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w8, w0, #31
-; CHECK-NEXT:    add w0, w8, #43 // =43
+; CHECK-NEXT:    add w0, w8, #43
 ; CHECK-NEXT:    ret
   %sh = lshr i32 %x, 31
   %r = sub i32 43, %sh

diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
index e976144861dbd..905a128438661 100644
--- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -273,7 +273,7 @@ define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #256 // =256
+; CHECK-NEXT:    cmp w8, #256
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, %y
@@ -285,7 +285,7 @@ define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_cmp:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffff
 ; CHECK-NEXT:    cmp w8, w1, uxth
 ; CHECK-NEXT:    cset w0, lo
@@ -300,7 +300,7 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i8_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xffff
-; CHECK-NEXT:    add w8, w8, #128 // =128
+; CHECK-NEXT:    add w8, w8, #128
 ; CHECK-NEXT:    lsr w0, w8, #16
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -312,9 +312,9 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #192 // =192
+; CHECK-NEXT:    add w8, w0, #192
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #256 // =256
+; CHECK-NEXT:    cmp w8, #256
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -326,9 +326,9 @@ define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #768 // =768
+; CHECK-NEXT:    cmp w8, #768
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -340,9 +340,9 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #64 // =64
+; CHECK-NEXT:    add w8, w0, #64
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #256 // =256
+; CHECK-NEXT:    cmp w8, #256
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -354,9 +354,9 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #8 // =8
+; CHECK-NEXT:    add w8, w0, #8
 ; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #16 // =16
+; CHECK-NEXT:    cmp w8, #16
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -368,9 +368,9 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i24_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128 // =128
+; CHECK-NEXT:    add w8, w0, #128
 ; CHECK-NEXT:    and w8, w8, #0xffffff
-; CHECK-NEXT:    cmp w8, #256 // =256
+; CHECK-NEXT:    cmp w8, #256
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i24 %x, 128 ; 1U << (8-1)

diff --git a/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll b/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll
index d55310cad76b7..604e1a9718bab 100644
--- a/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll
+++ b/llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll
@@ -10,7 +10,7 @@ define i32 @sink_add_of_const_to_add0(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_add_of_const_to_add0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    add w0, w8, #32 // =32
+; CHECK-NEXT:    add w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = add i32 %a, 32 ; constant always on RHS
   %r = add i32 %t0, %b
@@ -20,7 +20,7 @@ define i32 @sink_add_of_const_to_add1(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_add_of_const_to_add1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    add w0, w8, #32 // =32
+; CHECK-NEXT:    add w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = add i32 %a, 32 ; constant always on RHS
   %r = add i32 %b, %t0
@@ -34,7 +34,7 @@ define i32 @sink_sub_of_const_to_add0(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_of_const_to_add0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    sub w0, w8, #32 // =32
+; CHECK-NEXT:    sub w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %a, 32
   %r = add i32 %t0, %b
@@ -44,7 +44,7 @@ define i32 @sink_sub_of_const_to_add1(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_of_const_to_add1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    sub w0, w8, #32 // =32
+; CHECK-NEXT:    sub w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %a, 32
   %r = add i32 %b, %t0
@@ -58,7 +58,7 @@ define i32 @sink_sub_from_const_to_add0(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_from_const_to_add0:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w1, w0
-; CHECK-NEXT:    add w0, w8, #32 // =32
+; CHECK-NEXT:    add w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 32, %a
   %r = add i32 %t0, %b
@@ -68,7 +68,7 @@ define i32 @sink_sub_from_const_to_add1(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_from_const_to_add1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w1, w0
-; CHECK-NEXT:    add w0, w8, #32 // =32
+; CHECK-NEXT:    add w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 32, %a
   %r = add i32 %b, %t0
@@ -82,7 +82,7 @@ define i32 @sink_add_of_const_to_sub(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_add_of_const_to_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, w1
-; CHECK-NEXT:    add w0, w8, #32 // =32
+; CHECK-NEXT:    add w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = add i32 %a, 32 ; constant always on RHS
   %r = sub i32 %t0, %b
@@ -92,7 +92,7 @@ define i32 @sink_add_of_const_to_sub2(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_add_of_const_to_sub2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w1, w0
-; CHECK-NEXT:    sub w0, w8, #32 // =32
+; CHECK-NEXT:    sub w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = add i32 %a, 32 ; constant always on RHS
   %r = sub i32 %b, %t0
@@ -106,7 +106,7 @@ define i32 @sink_sub_of_const_to_sub(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_of_const_to_sub:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w0, w1
-; CHECK-NEXT:    sub w0, w8, #32 // =32
+; CHECK-NEXT:    sub w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %a, 32
   %r = sub i32 %t0, %b
@@ -116,7 +116,7 @@ define i32 @sink_sub_of_const_to_sub2(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_of_const_to_sub2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    sub w8, w1, w0
-; CHECK-NEXT:    add w0, w8, #32 // =32
+; CHECK-NEXT:    add w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 %a, 32
   %r = sub i32 %b, %t0
@@ -141,7 +141,7 @@ define i32 @sink_sub_from_const_to_sub2(i32 %a, i32 %b) {
 ; CHECK-LABEL: sink_sub_from_const_to_sub2:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, w1
-; CHECK-NEXT:    sub w0, w8, #32 // =32
+; CHECK-NEXT:    sub w0, w8, #32
 ; CHECK-NEXT:    ret
   %t0 = sub i32 32, %a
   %r = sub i32 %b, %t0

diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
index ee69b7945fa45..1ff9c0bb0a523 100644
--- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll
+++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -24,10 +24,10 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [sp]
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -35,7 +35,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
 ; CHECK-NEXT:    str q2, [x10, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov w8, #4
-; CHECK-NEXT:    cmp x9, #4 // =4
+; CHECK-NEXT:    cmp x9, #4
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -43,7 +43,7 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
 ; CHECK-NEXT:    str q3, [x10, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    mov w8, #6
-; CHECK-NEXT:    cmp x9, #6 // =6
+; CHECK-NEXT:    cmp x9, #6
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -74,10 +74,10 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
 ; CHECK-NEXT:    cntd x9
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    str q1, [sp]
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp]
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    addvl x10, sp, #1
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -85,7 +85,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
 ; CHECK-NEXT:    str q2, [x10, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #1, mul vl]
 ; CHECK-NEXT:    mov w8, #4
-; CHECK-NEXT:    cmp x9, #4 // =4
+; CHECK-NEXT:    cmp x9, #4
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    addvl x10, sp, #2
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -93,7 +93,7 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
 ; CHECK-NEXT:    str q3, [x10, x8]
 ; CHECK-NEXT:    ld1d { z0.d }, p0/z, [sp, #2, mul vl]
 ; CHECK-NEXT:    mov w8, #6
-; CHECK-NEXT:    cmp x9, #6 // =6
+; CHECK-NEXT:    cmp x9, #6
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    addvl x10, sp, #3
 ; CHECK-NEXT:    lsl x8, x8, #3

diff --git a/llvm/test/CodeGen/AArch64/srem-lkk.ll b/llvm/test/CodeGen/AArch64/srem-lkk.ll
index 321791e9228fc..97af0578520d4 100644
--- a/llvm/test/CodeGen/AArch64/srem-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/srem-lkk.ll
@@ -95,8 +95,8 @@ define i32 @combine_srem_sdiv(i32 %x) {
 define i32 @dont_fold_srem_power_of_two(i32 %x) {
 ; CHECK-LABEL: dont_fold_srem_power_of_two:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #63 // =63
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #63
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    and w8, w8, #0xffffffc0
 ; CHECK-NEXT:    sub w0, w0, w8
@@ -121,7 +121,7 @@ define i32 @dont_fold_srem_i32_smax(i32 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147483647
 ; CHECK-NEXT:    add w8, w0, w8
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    and w8, w8, #0x80000000
 ; CHECK-NEXT:    add w0, w0, w8

diff --git a/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll
index f4c86c082332c..0d63fa216bfde 100644
--- a/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll
@@ -32,7 +32,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
 ; CHECK-NEXT:    add w9, w9, w10
 ; CHECK-NEXT:    mov w10, #6
 ; CHECK-NEXT:    msub w8, w9, w10, w8
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    cset w0, eq
 ; CHECK-NEXT:    ret
   %srem = srem i4 %X, 6

diff --git a/llvm/test/CodeGen/AArch64/srem-seteq.ll b/llvm/test/CodeGen/AArch64/srem-seteq.ll
index f91f485cc3bcb..e70ec99e4e583 100644
--- a/llvm/test/CodeGen/AArch64/srem-seteq.ll
+++ b/llvm/test/CodeGen/AArch64/srem-seteq.ll
@@ -50,7 +50,7 @@ define i32 @test_srem_odd_bit30(i32 %X) nounwind {
 ; CHECK-NEXT:    movk w8, #27306, lsl #16
 ; CHECK-NEXT:    orr w9, wzr, #0x1
 ; CHECK-NEXT:    madd w8, w0, w8, w9
-; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cmp w8, #3
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 1073741827
@@ -67,7 +67,7 @@ define i32 @test_srem_odd_bit31(i32 %X) nounwind {
 ; CHECK-NEXT:    movk w8, #54613, lsl #16
 ; CHECK-NEXT:    orr w9, wzr, #0x1
 ; CHECK-NEXT:    madd w8, w0, w8, w9
-; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cmp w8, #3
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 2147483651
@@ -126,7 +126,7 @@ define i32 @test_srem_even_bit30(i32 %X) nounwind {
 ; CHECK-NEXT:    orr w9, wzr, #0x8
 ; CHECK-NEXT:    madd w8, w0, w8, w9
 ; CHECK-NEXT:    ror w8, w8, #3
-; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cmp w8, #3
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 1073741928
@@ -144,7 +144,7 @@ define i32 @test_srem_even_bit31(i32 %X) nounwind {
 ; CHECK-NEXT:    orr w9, wzr, #0x2
 ; CHECK-NEXT:    madd w8, w0, w8, w9
 ; CHECK-NEXT:    ror w8, w8, #1
-; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cmp w8, #3
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %srem = srem i32 %X, 2147483750
@@ -234,8 +234,8 @@ define i32 @test_srem_one(i32 %X) nounwind {
 define i32 @test_srem_pow2(i32 %X) nounwind {
 ; CHECK-LABEL: test_srem_pow2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #15 // =15
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    add w8, w0, #15
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    and w8, w8, #0xfffffff0
 ; CHECK-NEXT:    cmp w0, w8
@@ -253,7 +253,7 @@ define i32 @test_srem_int_min(i32 %X) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #2147483647
 ; CHECK-NEXT:    add w8, w0, w8
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w8, w0, lt
 ; CHECK-NEXT:    and w8, w8, #0x80000000
 ; CHECK-NEXT:    cmn w0, w8

diff --git a/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll b/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
index 5597e16576ccc..6d5e8c63341b8 100644
--- a/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
+++ b/llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
@@ -157,8 +157,8 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.h[1]
-; CHECK-NEXT:    add w12, w8, #31 // =31
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    add w12, w8, #31
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    mov w11, #37253
 ; CHECK-NEXT:    csel w12, w12, w8, lt
 ; CHECK-NEXT:    smov w9, v0.h[0]
@@ -166,9 +166,9 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) {
 ; CHECK-NEXT:    movk w11, #44150, lsl #16
 ; CHECK-NEXT:    and w12, w12, #0xffffffe0
 ; CHECK-NEXT:    sub w8, w8, w12
-; CHECK-NEXT:    add w12, w9, #63 // =63
+; CHECK-NEXT:    add w12, w9, #63
 ; CHECK-NEXT:    smull x11, w10, w11
-; CHECK-NEXT:    cmp w9, #0 // =0
+; CHECK-NEXT:    cmp w9, #0
 ; CHECK-NEXT:    lsr x11, x11, #32
 ; CHECK-NEXT:    csel w12, w12, w9, lt
 ; CHECK-NEXT:    add w11, w11, w10
@@ -178,8 +178,8 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) {
 ; CHECK-NEXT:    add w11, w12, w11, lsr #31
 ; CHECK-NEXT:    smov w12, v0.h[2]
 ; CHECK-NEXT:    fmov s0, w9
-; CHECK-NEXT:    add w9, w12, #7 // =7
-; CHECK-NEXT:    cmp w12, #0 // =0
+; CHECK-NEXT:    add w9, w12, #7
+; CHECK-NEXT:    cmp w12, #0
 ; CHECK-NEXT:    csel w9, w9, w12, lt
 ; CHECK-NEXT:    and w9, w9, #0xfffffff8
 ; CHECK-NEXT:    sub w9, w12, w9
@@ -263,7 +263,7 @@ define <4 x i16> @dont_fold_srem_i16_smax(<4 x i16> %x) {
 ; CHECK-NEXT:    add w10, w10, w11
 ; CHECK-NEXT:    mov w11, #32767
 ; CHECK-NEXT:    add w11, w8, w11
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w11, w11, w8, lt
 ; CHECK-NEXT:    and w11, w11, #0xffff8000
 ; CHECK-NEXT:    sub w8, w8, w11

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat.ll b/llvm/test/CodeGen/AArch64/ssub_sat.ll
index 4fab863460f31..2dd854d2898b8 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat.ll
@@ -13,7 +13,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs w8, w0, w1
 ; CHECK-NEXT:    mov w9, #2147483647
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    cinv w8, w9, ge
 ; CHECK-NEXT:    subs w9, w0, w1
 ; CHECK-NEXT:    csel w0, w8, w9, vs
@@ -27,7 +27,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, x1
 ; CHECK-NEXT:    mov x9, #9223372036854775807
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    cinv x8, x9, ge
 ; CHECK-NEXT:    subs x9, x0, x1
 ; CHECK-NEXT:    csel x0, x8, x9, vs
@@ -58,9 +58,9 @@ define i8 @func8(i8 %x, i8 %y) nounwind {
 ; CHECK-NEXT:    sxtb w8, w0
 ; CHECK-NEXT:    sub w8, w8, w1, sxtb
 ; CHECK-NEXT:    mov w9, #127
-; CHECK-NEXT:    cmp w8, #127 // =127
+; CHECK-NEXT:    cmp w8, #127
 ; CHECK-NEXT:    csel w8, w8, w9, lt
-; CHECK-NEXT:    cmn w8, #128 // =128
+; CHECK-NEXT:    cmn w8, #128
 ; CHECK-NEXT:    mov w9, #-128
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret
@@ -75,9 +75,9 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; CHECK-NEXT:    sbfx w9, w0, #0, #4
 ; CHECK-NEXT:    sub w8, w9, w8, asr #28
 ; CHECK-NEXT:    mov w10, #7
-; CHECK-NEXT:    cmp w8, #7 // =7
+; CHECK-NEXT:    cmp w8, #7
 ; CHECK-NEXT:    csel w8, w8, w10, lt
-; CHECK-NEXT:    cmn w8, #8 // =8
+; CHECK-NEXT:    cmn w8, #8
 ; CHECK-NEXT:    mov w9, #-8
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll b/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
index 1528b752c24ec..8a753b071c76a 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
@@ -13,7 +13,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; CHECK-NEXT:    mul w8, w1, w2
 ; CHECK-NEXT:    subs w10, w0, w8
 ; CHECK-NEXT:    mov w9, #2147483647
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    cinv w9, w9, ge
 ; CHECK-NEXT:    subs w8, w0, w8
 ; CHECK-NEXT:    csel w0, w9, w8, vs
@@ -28,7 +28,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    subs x8, x0, x2
 ; CHECK-NEXT:    mov x9, #9223372036854775807
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    cinv x8, x9, ge
 ; CHECK-NEXT:    subs x9, x0, x2
 ; CHECK-NEXT:    csel x0, x8, x9, vs
@@ -63,9 +63,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; CHECK-NEXT:    mul w9, w1, w2
 ; CHECK-NEXT:    sub w8, w8, w9, sxtb
 ; CHECK-NEXT:    mov w10, #127
-; CHECK-NEXT:    cmp w8, #127 // =127
+; CHECK-NEXT:    cmp w8, #127
 ; CHECK-NEXT:    csel w8, w8, w10, lt
-; CHECK-NEXT:    cmn w8, #128 // =128
+; CHECK-NEXT:    cmn w8, #128
 ; CHECK-NEXT:    mov w9, #-128
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret
@@ -82,9 +82,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; CHECK-NEXT:    lsl w9, w9, #28
 ; CHECK-NEXT:    sub w8, w8, w9, asr #28
 ; CHECK-NEXT:    mov w10, #7
-; CHECK-NEXT:    cmp w8, #7 // =7
+; CHECK-NEXT:    cmp w8, #7
 ; CHECK-NEXT:    csel w8, w8, w10, lt
-; CHECK-NEXT:    cmn w8, #8 // =8
+; CHECK-NEXT:    cmn w8, #8
 ; CHECK-NEXT:    mov w9, #-8
 ; CHECK-NEXT:    csel w0, w8, w9, gt
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
index 7c2e2330608e8..8babee052fed6 100644
--- a/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -136,8 +136,8 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.b }[0], [x1]
 ; CHECK-NEXT:    ld1 { v1.b }[0], [x0]
-; CHECK-NEXT:    add x8, x0, #1 // =1
-; CHECK-NEXT:    add x9, x1, #1 // =1
+; CHECK-NEXT:    add x8, x0, #1
+; CHECK-NEXT:    add x9, x1, #1
 ; CHECK-NEXT:    ld1 { v0.b }[4], [x9]
 ; CHECK-NEXT:    ld1 { v1.b }[4], [x8]
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #24
@@ -176,8 +176,8 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ld1 { v0.h }[0], [x1]
 ; CHECK-NEXT:    ld1 { v1.h }[0], [x0]
-; CHECK-NEXT:    add x8, x0, #2 // =2
-; CHECK-NEXT:    add x9, x1, #2 // =2
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    add x9, x1, #2
 ; CHECK-NEXT:    ld1 { v0.h }[2], [x9]
 ; CHECK-NEXT:    ld1 { v1.h }[2], [x8]
 ; CHECK-NEXT:    shl v0.2s, v0.2s, #16
@@ -357,7 +357,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    sbcs x12, x3, x7
 ; CHECK-NEXT:    mov x9, #9223372036854775807
 ; CHECK-NEXT:    eor x10, x3, x7
-; CHECK-NEXT:    cmp x12, #0 // =0
+; CHECK-NEXT:    cmp x12, #0
 ; CHECK-NEXT:    eor x13, x3, x12
 ; CHECK-NEXT:    cinv x14, x9, ge
 ; CHECK-NEXT:    tst x10, x13
@@ -367,7 +367,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    subs x8, x0, x4
 ; CHECK-NEXT:    sbcs x10, x1, x5
 ; CHECK-NEXT:    eor x11, x1, x5
-; CHECK-NEXT:    cmp x10, #0 // =0
+; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    eor x12, x1, x10
 ; CHECK-NEXT:    cinv x9, x9, ge
 ; CHECK-NEXT:    tst x11, x12

diff --git a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
index c4f3059ba2876..527f6fa309609 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
@@ -8,9 +8,9 @@
 define i32 @test_stack_guard_remat2() ssp {
 ; CHECK-LABEL: test_stack_guard_remat2:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    sub sp, sp, #64 ; =64
+; CHECK-NEXT:    sub sp, sp, #64
 ; CHECK-NEXT:    stp x29, x30, [sp, #48] ; 16-byte Folded Spill
-; CHECK-NEXT:    add x29, sp, #48 ; =48
+; CHECK-NEXT:    add x29, sp, #48
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
@@ -41,7 +41,7 @@ define i32 @test_stack_guard_remat2() ssp {
 ; CHECK-NEXT:  ; %bb.1: ; %entry
 ; CHECK-NEXT:    ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
 ; CHECK-NEXT:    mov w0, #-1
-; CHECK-NEXT:    add sp, sp, #64 ; =64
+; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  LBB0_2: ; %entry
 ; CHECK-NEXT:    bl ___stack_chk_fail

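The file above is a Darwin-flavored test, so the dropped annotation used ";" rather than "//": the value comment rides on the assembler comment stream and inherits the target's comment string (MCAsmInfo::getCommentString() in LLVM). A simplified illustration of that indirection, with made-up helper names:

    #include <cstdint>
    #include <string>

    // Stand-in for the per-target comment marker: Mach-O AArch64 asm uses
    // ";" where ELF AArch64 asm uses "//".
    const char *commentString(bool isMachO) { return isMachO ? ";" : "//"; }

    // Attach the effective-value annotation using the target's marker.
    std::string annotate(const std::string &inst, uint64_t value, bool isMachO) {
      return inst + " " + commentString(isMachO) + " =" + std::to_string(value);
    }
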
diff --git a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
index 60b39282b158f..148e9bdcf7aa1 100644
--- a/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
+++ b/llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
@@ -43,7 +43,7 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
 ; CHECK:         // %bb.0: // %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
 ; CHECK-NEXT:    mov x29, sp
-; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
@@ -58,7 +58,7 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
 ; CHECK-MINUS-257-OFFSET:      sub x8, x8, #257
 ; CHECK-MINUS-257-OFFSET-NEXT: ldr x8, [x8]
 ; CHECK-NEXT:    lsl x9, x0, #2
-; CHECK-NEXT:    add x9, x9, #15 // =15
+; CHECK-NEXT:    add x9, x9, #15
 ; CHECK-NEXT:    and x9, x9, #0xfffffffffffffff0
 ; CHECK-NEXT:    stur x8, [x29, #-8]
 ; CHECK-NEXT:    mov x8, sp

diff --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
index 6f8e9424c2e39..6a20d8ece72c2 100644
--- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
@@ -81,13 +81,13 @@ entry:
 define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
 ; CHECK-LABEL: test_relocate:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    str x0, [sp, #8]
 ; CHECK-NEXT:    bl return_i1
 ; CHECK-NEXT:  .Ltmp7:
 ; CHECK-NEXT:    and w0, w0, #0x1
-; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
 ; Check that an unused relocate has no code-generation impact
 entry:
@@ -176,7 +176,7 @@ declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval(%struct2))
 define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-example" {
 ; CHECK-LABEL: test_attributes:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
 ; CHECK-NEXT:    ldr x8, [sp, #48]
 ; CHECK-NEXT:    ldr q0, [sp, #32]
@@ -187,7 +187,7 @@ define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-exampl
 ; CHECK-NEXT:    str q0, [sp]
 ; CHECK-NEXT:    bl consume_attributes
 ; CHECK-NEXT:  .Ltmp11:
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    ret
 entry:
 ; Check that arguments with attributes are lowered correctly.

diff --git a/llvm/test/CodeGen/AArch64/sub-of-not.ll b/llvm/test/CodeGen/AArch64/sub-of-not.ll
index abd5692bf9805..18055b58ec34f 100644
--- a/llvm/test/CodeGen/AArch64/sub-of-not.ll
+++ b/llvm/test/CodeGen/AArch64/sub-of-not.ll
@@ -10,7 +10,7 @@ define i8 @scalar_i8(i8 %x, i8 %y) nounwind {
 ; CHECK-LABEL: scalar_i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w1, w0
-; CHECK-NEXT:    add w0, w8, #1 // =1
+; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
   %t0 = xor i8 %x, -1
   %t1 = sub i8 %y, %t0
@@ -21,7 +21,7 @@ define i16 @scalar_i16(i16 %x, i16 %y) nounwind {
 ; CHECK-LABEL: scalar_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w1, w0
-; CHECK-NEXT:    add w0, w8, #1 // =1
+; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
   %t0 = xor i16 %x, -1
   %t1 = sub i16 %y, %t0
@@ -32,7 +32,7 @@ define i32 @scalar_i32(i32 %x, i32 %y) nounwind {
 ; CHECK-LABEL: scalar_i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w1, w0
-; CHECK-NEXT:    add w0, w8, #1 // =1
+; CHECK-NEXT:    add w0, w8, #1
 ; CHECK-NEXT:    ret
   %t0 = xor i32 %x, -1
   %t1 = sub i32 %y, %t0
@@ -43,7 +43,7 @@ define i64 @scalar_i64(i64 %x, i64 %y) nounwind {
 ; CHECK-LABEL: scalar_i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x1, x0
-; CHECK-NEXT:    add x0, x8, #1 // =1
+; CHECK-NEXT:    add x0, x8, #1
 ; CHECK-NEXT:    ret
   %t0 = xor i64 %x, -1
   %t1 = sub i64 %y, %t0

diff --git a/llvm/test/CodeGen/AArch64/sub1.ll b/llvm/test/CodeGen/AArch64/sub1.ll
index f882adfe178af..bb6a116ea12ff 100644
--- a/llvm/test/CodeGen/AArch64/sub1.ll
+++ b/llvm/test/CodeGen/AArch64/sub1.ll
@@ -4,7 +4,7 @@
 define i64 @sub1_disguised_constant(i64 %x) {
 ; CHECK-LABEL: sub1_disguised_constant:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub w8, w0, #1 // =1
+; CHECK-NEXT:    sub w8, w0, #1
 ; CHECK-NEXT:    and w8, w0, w8
 ; CHECK-NEXT:    and x0, x8, #0xffff
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
index 806dd7e57dee8..2a9cbf4c9d414 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -42,13 +42,13 @@ define float @foo2(double* %x0, double* %x1) nounwind {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-4
-; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld4d { z1.d, z2.d, z3.d, z4.d }, p0/z, [x0]
 ; CHECK-NEXT:    ld4d { z16.d, z17.d, z18.d, z19.d }, p0/z, [x1]
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    add x8, sp, #16 // =16
-; CHECK-NEXT:    add x9, sp, #16 // =16
+; CHECK-NEXT:    add x8, sp, #16
+; CHECK-NEXT:    add x9, sp, #16
 ; CHECK-NEXT:    fmov s0, #1.00000000
 ; CHECK-NEXT:    mov w1, #1
 ; CHECK-NEXT:    mov w2, #2
@@ -65,7 +65,7 @@ define float @foo2(double* %x0, double* %x1) nounwind {
 ; CHECK-NEXT:    str x8, [sp]
 ; CHECK-NEXT:    bl callee2
 ; CHECK-NEXT:    addvl sp, sp, #4
-; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/AArch64/sve-extract-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-vector.ll
index 8940441764567..22e5e8ee36139 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-vector.ll
@@ -18,9 +18,9 @@ define <2 x i64> @extract_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec) nounwind {
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
@@ -51,9 +51,9 @@ define <4 x i32> @extract_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec) nounwind {
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntw x9
-; CHECK-NEXT:    sub x9, x9, #4 // =4
+; CHECK-NEXT:    sub x9, x9, #4
 ; CHECK-NEXT:    mov w8, #4
-; CHECK-NEXT:    cmp x9, #4 // =4
+; CHECK-NEXT:    cmp x9, #4
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
@@ -84,9 +84,9 @@ define <8 x i16> @extract_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec) nounwind {
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cnth x9
-; CHECK-NEXT:    sub x9, x9, #8 // =8
+; CHECK-NEXT:    sub x9, x9, #8
 ; CHECK-NEXT:    mov w8, #8
-; CHECK-NEXT:    cmp x9, #8 // =8
+; CHECK-NEXT:    cmp x9, #8
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
@@ -117,10 +117,10 @@ define <16 x i8> @extract_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec) nounwind
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    rdvl x9, #1
-; CHECK-NEXT:    sub x9, x9, #16 // =16
+; CHECK-NEXT:    sub x9, x9, #16
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov w8, #16
-; CHECK-NEXT:    cmp x9, #16 // =16
+; CHECK-NEXT:    cmp x9, #16
 ; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    mov x9, sp
@@ -159,9 +159,9 @@ define <2 x i64> @extract_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
@@ -181,11 +181,11 @@ define <4 x i64> @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    subs x9, x9, #4 // =4
+; CHECK-NEXT:    subs x9, x9, #4
 ; CHECK-NEXT:    csel x9, xzr, x9, lo
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov w10, #4
-; CHECK-NEXT:    cmp x9, #4 // =4
+; CHECK-NEXT:    cmp x9, #4
 ; CHECK-NEXT:    ptrue p1.d, vl4
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    csel x9, x9, x10, lo

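The SVE hunks repeat the same cleanup at scale. The underlying encoding fact is that AArch64 add/sub-class immediates carry a 12-bit payload with an optional "lsl #12", so in the unshifted form the "#N" token already is the effective value. A small worked example under that assumption (the helper name is made up):

    #include <cassert>
    #include <cstdint>

    // Effective value of an add/sub immediate: a 12-bit payload, optionally
    // shifted left by 12 bits.
    uint64_t effectiveAddSubImm(uint64_t imm12, bool lsl12) {
      assert(imm12 < 4096 && "payload is 12 bits");
      return lsl12 ? imm12 << 12 : imm12;
    }

    int main() {
      assert(effectiveAddSubImm(16, false) == 16);  // "sub x9, x9, #16": comment adds nothing
      assert(effectiveAddSubImm(1, true) == 4096);  // "#1, lsl #12": "=4096" is useful
    }
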
diff --git a/llvm/test/CodeGen/AArch64/sve-insert-element.ll b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
index 4ef66be15ac63..cb2bc60791328 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -506,7 +506,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val,
 ; CHECK-NEXT:    rdvl x10, #2
 ; CHECK-NEXT:    // kill: def $w1 killed $w1 def $x1
 ; CHECK-NEXT:    sxtw x9, w1
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    cmp x9, x10
 ; CHECK-NEXT:    mov z0.b, p1/z, #1 // =0x1
 ; CHECK-NEXT:    ptrue p1.b

diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
index 669c65e1e4a89..7e8d9deeb6e7e 100644
--- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -23,9 +23,9 @@ define <vscale x 2 x i64> @insert_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec, <2
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -62,9 +62,9 @@ define <vscale x 4 x i32> @insert_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec, <4
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntw x9
-; CHECK-NEXT:    sub x9, x9, #4 // =4
+; CHECK-NEXT:    sub x9, x9, #4
 ; CHECK-NEXT:    mov w8, #4
-; CHECK-NEXT:    cmp x9, #4 // =4
+; CHECK-NEXT:    cmp x9, #4
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    lsl x8, x8, #2
@@ -101,9 +101,9 @@ define <vscale x 8 x i16> @insert_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec, <8
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cnth x9
-; CHECK-NEXT:    sub x9, x9, #8 // =8
+; CHECK-NEXT:    sub x9, x9, #8
 ; CHECK-NEXT:    mov w8, #8
-; CHECK-NEXT:    cmp x9, #8 // =8
+; CHECK-NEXT:    cmp x9, #8
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    lsl x8, x8, #1
@@ -140,9 +140,9 @@ define <vscale x 16 x i8> @insert_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec, <
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    rdvl x9, #1
-; CHECK-NEXT:    sub x9, x9, #16 // =16
+; CHECK-NEXT:    sub x9, x9, #16
 ; CHECK-NEXT:    mov w8, #16
-; CHECK-NEXT:    cmp x9, #16 // =16
+; CHECK-NEXT:    cmp x9, #16
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    mov x9, sp
@@ -307,9 +307,9 @@ define <vscale x 2 x i64> @insert_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    addvl sp, sp, #-1
 ; CHECK-NEXT:    cntd x9
-; CHECK-NEXT:    sub x9, x9, #2 // =2
+; CHECK-NEXT:    sub x9, x9, #2
 ; CHECK-NEXT:    mov w8, #2
-; CHECK-NEXT:    cmp x9, #2 // =2
+; CHECK-NEXT:    cmp x9, #2
 ; CHECK-NEXT:    csel x8, x9, x8, lo
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    lsl x8, x8, #3
@@ -332,10 +332,10 @@ define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, <
 ; CHECK-NEXT:    ptrue p0.d, vl4
 ; CHECK-NEXT:    cntd x8
 ; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0]
-; CHECK-NEXT:    subs x8, x8, #4 // =4
+; CHECK-NEXT:    subs x8, x8, #4
 ; CHECK-NEXT:    csel x8, xzr, x8, lo
 ; CHECK-NEXT:    mov w9, #4
-; CHECK-NEXT:    cmp x8, #4 // =4
+; CHECK-NEXT:    cmp x8, #4
 ; CHECK-NEXT:    ptrue p1.d
 ; CHECK-NEXT:    csel x8, x8, x9, lo
 ; CHECK-NEXT:    mov x9, sp

diff --git a/llvm/test/CodeGen/AArch64/sve-ld1r.ll b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
index f428f98845c37..3a964e2b0a57f 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1r.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -17,7 +17,7 @@
 define <vscale x 16 x i8> @ld1r_stack() {
 ; CHECK-LABEL: ld1r_stack:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16 // =16
+; CHECK-NEXT:    sub sp, sp, #16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, :got:g8
 ; CHECK-NEXT:    ldr x8, [x8, :got_lo12:g8]
@@ -25,7 +25,7 @@ define <vscale x 16 x i8> @ld1r_stack() {
 ; CHECK-NEXT:    ldrb w8, [x8]
 ; CHECK-NEXT:    strb w8, [sp, #12]
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [sp, #14]
-; CHECK-NEXT:    add sp, sp, #16 // =16
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %valp = alloca i8
   %valp2  = load volatile i8, i8* @g8
@@ -65,7 +65,7 @@ define <vscale x 16 x i8> @ld1rb_gep(i8* %valp) {
 define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(i8* %valp) {
 ; CHECK-LABEL: ld1rb_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #64 // =64
+; CHECK-NEXT:    add x8, x0, #64
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -79,7 +79,7 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(i8* %valp) {
 define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(i8* %valp) {
 ; CHECK-LABEL: ld1rb_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #1 // =1
+; CHECK-NEXT:    sub x8, x0, #1
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    ld1rb { z0.b }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -196,7 +196,7 @@ define <vscale x 8 x i16> @ld1rh_gep(i16* %valp) {
 define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(i16* %valp) {
 ; CHECK-LABEL: ld1rh_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #128 // =128
+; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -210,7 +210,7 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(i16* %valp) {
 define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(i16* %valp) {
 ; CHECK-LABEL: ld1rh_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #2 // =2
+; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -301,7 +301,7 @@ define <vscale x 4 x i32> @ld1rw_gep(i32* %valp) {
 define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(i32* %valp) {
 ; CHECK-LABEL: ld1rw_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #256 // =256
+; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -315,7 +315,7 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(i32* %valp) {
 define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(i32* %valp) {
 ; CHECK-LABEL: ld1rw_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #4 // =4
+; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -380,7 +380,7 @@ define <vscale x 2 x i64> @ld1rd_gep(i64* %valp) {
 define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(i64* %valp) {
 ; CHECK-LABEL: ld1rd_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #512 // =512
+; CHECK-NEXT:    add x8, x0, #512
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -394,7 +394,7 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(i64* %valp) {
 define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(i64* %valp) {
 ; CHECK-LABEL: ld1rd_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #8 // =8
+; CHECK-NEXT:    sub x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -433,7 +433,7 @@ define <vscale x 8 x half> @ld1rh_half_gep(half* %valp) {
 define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(half* %valp) {
 ; CHECK-LABEL: ld1rh_half_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #128 // =128
+; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -447,7 +447,7 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(half* %valp) {
 define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(half* %valp) {
 ; CHECK-LABEL: ld1rh_half_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #2 // =2
+; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    ld1rh { z0.h }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -486,7 +486,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(half* %valp) {
 define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(half* %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #128 // =128
+; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -500,7 +500,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(half* %valp
 define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(half* %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #2 // =2
+; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rh { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -539,7 +539,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(half* %valp) {
 define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(half* %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #128 // =128
+; CHECK-NEXT:    add x8, x0, #128
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -553,7 +553,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(half* %valp
 define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(half* %valp) {
 ; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #2 // =2
+; CHECK-NEXT:    sub x8, x0, #2
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rh { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -592,7 +592,7 @@ define <vscale x 4 x float> @ld1rw_float_gep(float* %valp) {
 define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(float* %valp) {
 ; CHECK-LABEL: ld1rw_float_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #256 // =256
+; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -606,7 +606,7 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(float* %valp) {
 define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(float* %valp) {
 ; CHECK-LABEL: ld1rw_float_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #4 // =4
+; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    ld1rw { z0.s }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -645,7 +645,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(float* %valp) {
 define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(float* %valp) {
 ; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #256 // =256
+; CHECK-NEXT:    add x8, x0, #256
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -659,7 +659,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(float* %v
 define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(float* %valp) {
 ; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #4 // =4
+; CHECK-NEXT:    sub x8, x0, #4
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rw { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -698,7 +698,7 @@ define <vscale x 2 x double> @ld1rd_double_gep(double* %valp) {
 define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(double* %valp) {
 ; CHECK-LABEL: ld1rd_double_gep_out_of_range_up:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #512 // =512
+; CHECK-NEXT:    add x8, x0, #512
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret
@@ -712,7 +712,7 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(double* %valp) {
 define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(double* %valp) {
 ; CHECK-LABEL: ld1rd_double_gep_out_of_range_down:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub x8, x0, #8 // =8
+; CHECK-NEXT:    sub x8, x0, #8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    ld1rd { z0.d }, p0/z, [x8]
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
index e86b6e7112d68..e274bce1e8cd0 100644
--- a/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-lsr-scaled-index-addressing-mode.ll
@@ -47,7 +47,7 @@ define void @ld_st_nxv8i16(i16* %in, i16* %out) {
 ; ASM-NEXT:    add z1.h, z1.h, z0.h
 ; ASM-NEXT:    st1h { z1.h }, p0, [x1, x8, lsl #1]
 ; ASM-NEXT:    add x8, x8, x9
-; ASM-NEXT:    cmp x8, #1024 // =1024
+; ASM-NEXT:    cmp x8, #1024
 ; ASM-NEXT:    b.ne .LBB0_1
 ; ASM-NEXT:  // %bb.2: // %exit
 ; ASM-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
index 291eadca07638..a64a4fdab0895 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
@@ -26,7 +26,7 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) {
 ; CHECK-NEXT:    rdvl x10, #2
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    cmp x9, x10
@@ -51,7 +51,7 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) {
 ; CHECK-NEXT:    rdvl x10, #1
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    cmp x9, x10
@@ -76,7 +76,7 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) {
 ; CHECK-NEXT:    cnth x10
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    cmp x9, x10
@@ -101,7 +101,7 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) {
 ; CHECK-NEXT:    cnth x10
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x9, w0
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    cmp x9, x10
@@ -146,11 +146,11 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    rdvl x10, #1
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #128
-; CHECK-NEXT:    cmp x10, #128 // =128
+; CHECK-NEXT:    cmp x10, #128
 ; CHECK-NEXT:    st1h { z1.h }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -172,7 +172,7 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) {
 ; CHECK-NEXT:    mov w9, #34464
 ; CHECK-NEXT:    rdvl x10, #1
 ; CHECK-NEXT:    movk w9, #1, lsl #16
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    cmp x10, x9
@@ -197,11 +197,11 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) {
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cntw x10
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    mov w9, #10
-; CHECK-NEXT:    cmp x10, #10 // =10
+; CHECK-NEXT:    cmp x10, #10
 ; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #1, mul vl]
 ; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
 ; CHECK-NEXT:    csel x9, x10, x9, lo

diff --git a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
index 5e6dedf4a4cc1..2f055b6f9fac3 100644
--- a/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
@@ -24,7 +24,7 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt,
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    rdvl x8, #2
-; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    cmp x1, x8
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    csel x8, x1, x8, lo
@@ -49,7 +49,7 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
-; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    cmp x0, x8
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    csel x8, x0, x8, lo
@@ -74,7 +74,7 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    cnth x8
-; CHECK-NEXT:    sub x8, x8, #1 // =1
+; CHECK-NEXT:    sub x8, x8, #1
 ; CHECK-NEXT:    cmp x1, x8
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    csel x8, x1, x8, lo
@@ -136,9 +136,9 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt)
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
 ; CHECK-NEXT:    .cfi_offset w29, -16
 ; CHECK-NEXT:    rdvl x10, #2
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    mov w9, #128
-; CHECK-NEXT:    cmp x10, #128 // =128
+; CHECK-NEXT:    cmp x10, #128
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov x8, sp
 ; CHECK-NEXT:    csel x9, x10, x9, lo
@@ -168,7 +168,7 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) {
 ; CHECK-NEXT:    mov w9, #16960
 ; CHECK-NEXT:    cnth x10
 ; CHECK-NEXT:    movk w9, #15, lsl #16
-; CHECK-NEXT:    sub x10, x10, #1 // =1
+; CHECK-NEXT:    sub x10, x10, #1
 ; CHECK-NEXT:    cmp x10, x9
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov x8, sp

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat.ll b/llvm/test/CodeGen/AArch64/uadd_sat.ll
index 120f101d1e68d..7b48130e3f161 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat.ll
@@ -45,7 +45,7 @@ define i8 @func8(i8 %x, i8 %y) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0xff
 ; CHECK-NEXT:    add w8, w8, w1, uxtb
-; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cmp w8, #255
 ; CHECK-NEXT:    mov w9, #255
 ; CHECK-NEXT:    csel w0, w8, w9, lo
 ; CHECK-NEXT:    ret
@@ -59,7 +59,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
 ; CHECK-NEXT:    and w8, w1, #0xf
 ; CHECK-NEXT:    and w9, w0, #0xf
 ; CHECK-NEXT:    add w8, w9, w8
-; CHECK-NEXT:    cmp w8, #15 // =15
+; CHECK-NEXT:    cmp w8, #15
 ; CHECK-NEXT:    mov w9, #15
 ; CHECK-NEXT:    csel w0, w8, w9, lo
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll b/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
index 277f40d3755d4..4d98af6355dfe 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
@@ -51,7 +51,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; CHECK-NEXT:    and w8, w0, #0xff
 ; CHECK-NEXT:    mul w9, w1, w2
 ; CHECK-NEXT:    add w8, w8, w9, uxtb
-; CHECK-NEXT:    cmp w8, #255 // =255
+; CHECK-NEXT:    cmp w8, #255
 ; CHECK-NEXT:    mov w9, #255
 ; CHECK-NEXT:    csel w0, w8, w9, lo
 ; CHECK-NEXT:    ret
@@ -67,7 +67,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; CHECK-NEXT:    and w8, w0, #0xf
 ; CHECK-NEXT:    and w9, w9, #0xf
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    cmp w8, #15 // =15
+; CHECK-NEXT:    cmp w8, #15
 ; CHECK-NEXT:    mov w9, #15
 ; CHECK-NEXT:    csel w0, w8, w9, lo
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
index 2b52e4c934c9d..b4b08e60e06d1 100644
--- a/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -355,7 +355,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    cmp x9, x3
 ; CHECK-NEXT:    cset w11, lo
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csinv x3, x9, xzr, eq
 ; CHECK-NEXT:    csinv x2, x8, xzr, eq
 ; CHECK-NEXT:    adds x8, x0, x4
@@ -365,7 +365,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    cmp x9, x1
 ; CHECK-NEXT:    cset w11, lo
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csinv x8, x8, xzr, eq
 ; CHECK-NEXT:    csinv x1, x9, xzr, eq
 ; CHECK-NEXT:    fmov d0, x8

diff --git a/llvm/test/CodeGen/AArch64/uaddo.ll b/llvm/test/CodeGen/AArch64/uaddo.ll
index cb34de5f3992c..275d9a2fd771d 100644
--- a/llvm/test/CodeGen/AArch64/uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/uaddo.ll
@@ -8,7 +8,7 @@
 define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
 ; CHECK-LABEL: uaddo_i64_increment_alt:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds x8, x0, #1 // =1
+; CHECK-NEXT:    adds x8, x0, #1
 ; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
@@ -23,7 +23,7 @@ define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
 define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
 ; CHECK-LABEL: uaddo_i64_increment_alt_dom:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    adds x8, x0, #1 // =1
+; CHECK-NEXT:    adds x8, x0, #1
 ; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
@@ -38,7 +38,7 @@ define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
 define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
 ; CHECK-LABEL: uaddo_i64_decrement_alt:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    subs x8, x0, #1 // =1
+; CHECK-NEXT:    subs x8, x0, #1
 ; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret
@@ -53,7 +53,7 @@ define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
 define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
 ; CHECK-LABEL: uaddo_i64_decrement_alt_dom:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    subs x8, x0, #1 // =1
+; CHECK-NEXT:    subs x8, x0, #1
 ; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    str x8, [x1]
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
index 4f28392f4bdde..1fb80f6ec7a75 100644
--- a/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
+++ b/llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
@@ -4,10 +4,10 @@
 define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
 ; AARCH-LABEL: muloti_test:
 ; AARCH:       // %bb.0: // %start
-; AARCH-NEXT:    cmp x3, #0 // =0
+; AARCH-NEXT:    cmp x3, #0
 ; AARCH-NEXT:    umulh x8, x1, x2
 ; AARCH-NEXT:    cset w10, ne
-; AARCH-NEXT:    cmp x1, #0 // =0
+; AARCH-NEXT:    cmp x1, #0
 ; AARCH-NEXT:    mul x9, x3, x0
 ; AARCH-NEXT:    cset w11, ne
 ; AARCH-NEXT:    cmp xzr, x8

diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index 0652b7b24d78e..94af074748739 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -264,7 +264,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK:       .Lfunc_begin1:
 ; CHECK-NEXT:    .cfi_startproc
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #304 // =304
+; CHECK-NEXT:    sub sp, sp, #304
 ; CHECK-NEXT:    stp q23, q22, [sp, #32] // 32-byte Folded Spill
 ; CHECK-NEXT:    stp q21, q20, [sp, #64] // 32-byte Folded Spill
 ; CHECK-NEXT:    stp q19, q18, [sp, #96] // 32-byte Folded Spill
@@ -310,7 +310,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    ldp q19, q18, [sp, #96] // 32-byte Folded Reload
 ; CHECK-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; CHECK-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #304 // =304
+; CHECK-NEXT:    add sp, sp, #304
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB1_2: // %.Lunwind
 ; CHECK-NEXT:  .Ltmp5:
@@ -324,14 +324,14 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    ldp q19, q18, [sp, #96] // 32-byte Folded Reload
 ; CHECK-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; CHECK-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #304 // =304
+; CHECK-NEXT:    add sp, sp, #304
 ; CHECK-NEXT:    ret
 ;
 ; GISEL-LABEL: invoke_callee_may_throw_neon:
 ; GISEL:       .Lfunc_begin1:
 ; GISEL-NEXT:    .cfi_startproc
 ; GISEL-NEXT:  // %bb.0:
-; GISEL-NEXT:    sub sp, sp, #304 // =304
+; GISEL-NEXT:    sub sp, sp, #304
 ; GISEL-NEXT:    stp q23, q22, [sp, #32] // 32-byte Folded Spill
 ; GISEL-NEXT:    stp q21, q20, [sp, #64] // 32-byte Folded Spill
 ; GISEL-NEXT:    stp q19, q18, [sp, #96] // 32-byte Folded Spill
@@ -377,7 +377,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; GISEL-NEXT:    ldp q19, q18, [sp, #96] // 32-byte Folded Reload
 ; GISEL-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; GISEL-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
-; GISEL-NEXT:    add sp, sp, #304 // =304
+; GISEL-NEXT:    add sp, sp, #304
 ; GISEL-NEXT:    ret
 ; GISEL-NEXT:  .LBB1_2: // %.Lunwind
 ; GISEL-NEXT:  .Ltmp5:
@@ -391,7 +391,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; GISEL-NEXT:    ldp q19, q18, [sp, #96] // 32-byte Folded Reload
 ; GISEL-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
 ; GISEL-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
-; GISEL-NEXT:    add sp, sp, #304 // =304
+; GISEL-NEXT:    add sp, sp, #304
 ; GISEL-NEXT:    ret
   %result = invoke aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
 .Lcontinue:

diff --git a/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll
index fbd44970f1175..4e10b50ba57c3 100644
--- a/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/AArch64/urem-seteq-illegal-types.ll
@@ -7,7 +7,7 @@ define i1 @test_urem_odd(i13 %X) nounwind {
 ; CHECK-NEXT:    mov w8, #3277
 ; CHECK-NEXT:    mul w8, w0, w8
 ; CHECK-NEXT:    and w8, w8, #0x1fff
-; CHECK-NEXT:    cmp w8, #1639 // =1639
+; CHECK-NEXT:    cmp w8, #1639
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i13 %X, 5
@@ -40,7 +40,7 @@ define i1 @test_urem_odd_setne(i4 %X) nounwind {
 ; CHECK-NEXT:    mov w8, #13
 ; CHECK-NEXT:    mul w8, w0, w8
 ; CHECK-NEXT:    and w8, w8, #0xf
-; CHECK-NEXT:    cmp w8, #3 // =3
+; CHECK-NEXT:    cmp w8, #3
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %urem = urem i4 %X, 5
@@ -54,7 +54,7 @@ define i1 @test_urem_negative_odd(i9 %X) nounwind {
 ; CHECK-NEXT:    mov w8, #307
 ; CHECK-NEXT:    mul w8, w0, w8
 ; CHECK-NEXT:    and w8, w8, #0x1ff
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %urem = urem i9 %X, -5

diff --git a/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll b/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll
index a3d5c7da7b6d4..6771882ee98fa 100644
--- a/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll
+++ b/llvm/test/CodeGen/AArch64/urem-seteq-nonzero.ll
@@ -139,7 +139,7 @@ define i1 @t32_6_3(i32 %X) nounwind {
 ; CHECK-NEXT:    mov w8, #43691
 ; CHECK-NEXT:    movk w8, #43690, lsl #16
 ; CHECK-NEXT:    mul w8, w0, w8
-; CHECK-NEXT:    sub w8, w8, #1 // =1
+; CHECK-NEXT:    sub w8, w8, #1
 ; CHECK-NEXT:    mov w9, #43691
 ; CHECK-NEXT:    ror w8, w8, #1
 ; CHECK-NEXT:    movk w9, #10922, lsl #16
@@ -212,9 +212,9 @@ define i1 @t8_3_2(i8 %X) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #-85
 ; CHECK-NEXT:    mul w8, w0, w8
-; CHECK-NEXT:    sub w8, w8, #86 // =86
+; CHECK-NEXT:    sub w8, w8, #86
 ; CHECK-NEXT:    and w8, w8, #0xff
-; CHECK-NEXT:    cmp w8, #85 // =85
+; CHECK-NEXT:    cmp w8, #85
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i8 %X, 3

diff --git a/llvm/test/CodeGen/AArch64/urem-seteq.ll b/llvm/test/CodeGen/AArch64/urem-seteq.ll
index 74659f44808bf..7f0611f40ac11 100644
--- a/llvm/test/CodeGen/AArch64/urem-seteq.ll
+++ b/llvm/test/CodeGen/AArch64/urem-seteq.ll
@@ -46,7 +46,7 @@ define i32 @test_urem_odd_bit30(i32 %X) nounwind {
 ; CHECK-NEXT:    mov w8, #43691
 ; CHECK-NEXT:    movk w8, #27306, lsl #16
 ; CHECK-NEXT:    mul w8, w0, w8
-; CHECK-NEXT:    cmp w8, #4 // =4
+; CHECK-NEXT:    cmp w8, #4
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, 1073741827
@@ -62,7 +62,7 @@ define i32 @test_urem_odd_bit31(i32 %X) nounwind {
 ; CHECK-NEXT:    mov w8, #43691
 ; CHECK-NEXT:    movk w8, #10922, lsl #16
 ; CHECK-NEXT:    mul w8, w0, w8
-; CHECK-NEXT:    cmp w8, #2 // =2
+; CHECK-NEXT:    cmp w8, #2
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, 2147483651
@@ -84,7 +84,7 @@ define i16 @test_urem_even(i16 %X) nounwind {
 ; CHECK-NEXT:    lsr w9, w9, #1
 ; CHECK-NEXT:    bfi w9, w8, #15, #17
 ; CHECK-NEXT:    ubfx w8, w9, #1, #15
-; CHECK-NEXT:    cmp w8, #2340 // =2340
+; CHECK-NEXT:    cmp w8, #2340
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %urem = urem i16 %X, 14
@@ -119,7 +119,7 @@ define i32 @test_urem_even_bit30(i32 %X) nounwind {
 ; CHECK-NEXT:    movk w8, #64748, lsl #16
 ; CHECK-NEXT:    mul w8, w0, w8
 ; CHECK-NEXT:    ror w8, w8, #3
-; CHECK-NEXT:    cmp w8, #4 // =4
+; CHECK-NEXT:    cmp w8, #4
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, 1073741928
@@ -136,7 +136,7 @@ define i32 @test_urem_even_bit31(i32 %X) nounwind {
 ; CHECK-NEXT:    movk w8, #47866, lsl #16
 ; CHECK-NEXT:    mul w8, w0, w8
 ; CHECK-NEXT:    ror w8, w8, #1
-; CHECK-NEXT:    cmp w8, #2 // =2
+; CHECK-NEXT:    cmp w8, #2
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, 2147483750
@@ -172,7 +172,7 @@ define i32 @test_urem_negative_odd(i32 %X) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #858993459
 ; CHECK-NEXT:    mul w8, w0, w8
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, -5
@@ -187,7 +187,7 @@ define i32 @test_urem_negative_even(i32 %X) nounwind {
 ; CHECK-NEXT:    movk w8, #51492, lsl #16
 ; CHECK-NEXT:    mul w8, w0, w8
 ; CHECK-NEXT:    ror w8, w8, #1
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, -14
@@ -243,7 +243,7 @@ define i32 @test_urem_allones(i32 %X) nounwind {
 ; CHECK-LABEL: test_urem_allones:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    neg w8, w0
-; CHECK-NEXT:    cmp w8, #2 // =2
+; CHECK-NEXT:    cmp w8, #2
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %urem = urem i32 %X, 4294967295

diff --git a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
index 43f52d437012b..b2c413659c6ec 100644
--- a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
+++ b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
@@ -18,7 +18,7 @@ define i64 @ll_a_op_b__2(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_op_b__2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl x8, x0, x1
-; CHECK-NEXT:    cmn x8, #2 // =2
+; CHECK-NEXT:    cmn x8, #2
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -42,10 +42,10 @@ define i64 @ll_a_op_b__1(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_op_b__1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl x8, x0, x1
-; CHECK-NEXT:    cmn x8, #1 // =1
+; CHECK-NEXT:    cmn x8, #1
 ; CHECK-NEXT:    csinc x9, x1, xzr, eq
 ; CHECK-NEXT:    mul x9, x9, x0
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    csel x0, x1, x9, ge
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ define i64 @ll_a_op_b_0(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_op_b_0:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl x8, x0, x1
-; CHECK-NEXT:    cmp x8, #0 // =0
+; CHECK-NEXT:    cmp x8, #0
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -91,7 +91,7 @@ define i64 @ll_a_op_b_1(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_op_b_1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl x8, x0, x1
-; CHECK-NEXT:    cmp x8, #1 // =1
+; CHECK-NEXT:    cmp x8, #1
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -115,7 +115,7 @@ define i64 @ll_a_op_b_2(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_op_b_2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl x8, x0, x1
-; CHECK-NEXT:    cmp x8, #2 // =2
+; CHECK-NEXT:    cmp x8, #2
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -138,7 +138,7 @@ return:                                           ; preds = %entry
 define i64 @ll_a__2(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a__2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmn x0, #2 // =2
+; CHECK-NEXT:    cmn x0, #2
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -160,10 +160,10 @@ return:                                           ; preds = %entry
 define i64 @ll_a__1(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a__1:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmn x0, #1 // =1
+; CHECK-NEXT:    cmn x0, #1
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csel x0, x1, x8, ge
 ; CHECK-NEXT:    ret
 entry:
@@ -183,7 +183,7 @@ return:                                           ; preds = %entry
 define i64 @ll_a_0(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_0:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp x0, #0 // =0
+; CHECK-NEXT:    cmp x0, #0
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -205,7 +205,7 @@ return:                                           ; preds = %entry
 define i64 @ll_a_1(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_1:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp x0, #1 // =1
+; CHECK-NEXT:    cmp x0, #1
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -227,7 +227,7 @@ return:                                           ; preds = %entry
 define i64 @ll_a_2(i64 %a, i64 %b) {
 ; CHECK-LABEL: ll_a_2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp x0, #2 // =2
+; CHECK-NEXT:    cmp x0, #2
 ; CHECK-NEXT:    csinc x8, x1, xzr, eq
 ; CHECK-NEXT:    mul x8, x8, x0
 ; CHECK-NEXT:    csel x0, x1, x8, gt
@@ -250,7 +250,7 @@ define i64 @i_a_op_b__2(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_op_b__2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl w8, w0, w1
-; CHECK-NEXT:    cmn w8, #2 // =2
+; CHECK-NEXT:    cmn w8, #2
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -277,10 +277,10 @@ define i64 @i_a_op_b__1(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_op_b__1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl w8, w0, w1
-; CHECK-NEXT:    cmn w8, #1 // =1
+; CHECK-NEXT:    cmn w8, #1
 ; CHECK-NEXT:    csinc w9, w1, wzr, eq
 ; CHECK-NEXT:    mul w9, w9, w0
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w8, w1, w9, ge
 ; CHECK-NEXT:    sxtw x0, w8
 ; CHECK-NEXT:    ret
@@ -305,7 +305,7 @@ define i64 @i_a_op_b_0(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_op_b_0:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl w8, w0, w1
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -332,7 +332,7 @@ define i64 @i_a_op_b_1(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_op_b_1:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl w8, w0, w1
-; CHECK-NEXT:    cmp w8, #1 // =1
+; CHECK-NEXT:    cmp w8, #1
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -359,7 +359,7 @@ define i64 @i_a_op_b_2(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_op_b_2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    lsl w8, w0, w1
-; CHECK-NEXT:    cmp w8, #2 // =2
+; CHECK-NEXT:    cmp w8, #2
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -385,7 +385,7 @@ return:                                           ; preds = %if.end, %entry
 define i64 @i_a__2(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a__2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmn w0, #2 // =2
+; CHECK-NEXT:    cmn w0, #2
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -410,10 +410,10 @@ return:                                           ; preds = %if.end, %entry
 define i64 @i_a__1(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a__1:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmn w0, #1 // =1
+; CHECK-NEXT:    cmn w0, #1
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csel w8, w1, w8, ge
 ; CHECK-NEXT:    sxtw x0, w8
 ; CHECK-NEXT:    ret
@@ -436,7 +436,7 @@ return:                                           ; preds = %if.end, %entry
 define i64 @i_a_0(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_0:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp w0, #0 // =0
+; CHECK-NEXT:    cmp w0, #0
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -461,7 +461,7 @@ return:                                           ; preds = %if.end, %entry
 define i64 @i_a_1(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_1:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp w0, #1 // =1
+; CHECK-NEXT:    cmp w0, #1
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt
@@ -486,7 +486,7 @@ return:                                           ; preds = %if.end, %entry
 define i64 @i_a_2(i32 signext %a, i32 signext %b) {
 ; CHECK-LABEL: i_a_2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp w0, #2 // =2
+; CHECK-NEXT:    cmp w0, #2
 ; CHECK-NEXT:    csinc w8, w1, wzr, eq
 ; CHECK-NEXT:    mul w8, w8, w0
 ; CHECK-NEXT:    csel w8, w1, w8, gt

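Several hunks in the file above also drop the annotation from cmn lines. "cmn Xn, #imm" is an alias of "adds xzr, Xn, #imm", a flag-setting compare of Xn against -imm, so the old "// =N" restated the raw encoded immediate rather than the negated value actually being compared; it told the reader nothing new. A sketch of the alias semantics, simplified to the N and Z flags only (not LLVM code):

    #include <cstdint>

    struct Flags { bool N, Z; };

    // cmn wn, #imm == adds wzr, wn, #imm: compute wn + imm, discard the sum
    // (wzr destination) and keep only the flags.
    Flags cmn(int32_t wn, uint32_t imm) {
      int32_t sum = (int32_t)((uint32_t)wn + imm);
      return {sum < 0, sum == 0}; // same outcome as comparing wn against -imm
    }
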
diff --git a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
index 63bbac3be3fb8..4e3461457f891 100644
--- a/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/usub_sat_vec.ll
@@ -351,7 +351,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    cmp x9, x3
 ; CHECK-NEXT:    cset w11, hi
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x3, xzr, x9, ne
 ; CHECK-NEXT:    csel x2, xzr, x8, ne
 ; CHECK-NEXT:    subs x8, x0, x4
@@ -361,7 +361,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
 ; CHECK-NEXT:    cmp x9, x1
 ; CHECK-NEXT:    cset w11, hi
 ; CHECK-NEXT:    csel w10, w10, w11, eq
-; CHECK-NEXT:    cmp w10, #0 // =0
+; CHECK-NEXT:    cmp w10, #0
 ; CHECK-NEXT:    csel x8, xzr, x8, ne
 ; CHECK-NEXT:    csel x1, xzr, x9, ne
 ; CHECK-NEXT:    fmov d0, x8

diff --git a/llvm/test/CodeGen/AArch64/vec-libcalls.ll b/llvm/test/CodeGen/AArch64/vec-libcalls.ll
index e96a4b815d6bb..a602db157461e 100644
--- a/llvm/test/CodeGen/AArch64/vec-libcalls.ll
+++ b/llvm/test/CodeGen/AArch64/vec-libcalls.ll
@@ -50,7 +50,7 @@ define <1 x float> @sin_v1f32(<1 x float> %x) nounwind {
 define <2 x float> @sin_v2f32(<2 x float> %x) nounwind {
 ; CHECK-LABEL: sin_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
@@ -65,7 +65,7 @@ define <2 x float> @sin_v2f32(<2 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v0.s[1], v1.s[0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <2 x float> @llvm.sin.v2f32(<2 x float> %x)
   ret <2 x float> %r
@@ -74,7 +74,7 @@ define <2 x float> @sin_v2f32(<2 x float> %x) nounwind {
 define <3 x float> @sin_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: sin_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -95,7 +95,7 @@ define <3 x float> @sin_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.sin.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -104,7 +104,7 @@ define <3 x float> @sin_v3f32(<3 x float> %x) nounwind {
 define <4 x float> @sin_v4f32(<4 x float> %x) nounwind {
 ; CHECK-LABEL: sin_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -132,7 +132,7 @@ define <4 x float> @sin_v4f32(<4 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[3], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <4 x float> @llvm.sin.v4f32(<4 x float> %x)
   ret <4 x float> %r
@@ -266,7 +266,7 @@ define <3 x float> @ceil_v3f32(<3 x float> %x) nounwind {
 define <3 x float> @cos_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: cos_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -287,7 +287,7 @@ define <3 x float> @cos_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.cos.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -296,7 +296,7 @@ define <3 x float> @cos_v3f32(<3 x float> %x) nounwind {
 define <3 x float> @exp_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: exp_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -317,7 +317,7 @@ define <3 x float> @exp_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.exp.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -326,7 +326,7 @@ define <3 x float> @exp_v3f32(<3 x float> %x) nounwind {
 define <3 x float> @exp2_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: exp2_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -347,7 +347,7 @@ define <3 x float> @exp2_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.exp2.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -365,7 +365,7 @@ define <3 x float> @floor_v3f32(<3 x float> %x) nounwind {
 define <3 x float> @log_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: log_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -386,7 +386,7 @@ define <3 x float> @log_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.log.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -395,7 +395,7 @@ define <3 x float> @log_v3f32(<3 x float> %x) nounwind {
 define <3 x float> @log10_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: log10_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -416,7 +416,7 @@ define <3 x float> @log10_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.log10.v3f32(<3 x float> %x)
   ret <3 x float> %r
@@ -425,7 +425,7 @@ define <3 x float> @log10_v3f32(<3 x float> %x) nounwind {
 define <3 x float> @log2_v3f32(<3 x float> %x) nounwind {
 ; CHECK-LABEL: log2_v3f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov s0, v0.s[1]
 ; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
@@ -446,7 +446,7 @@ define <3 x float> @log2_v3f32(<3 x float> %x) nounwind {
 ; CHECK-NEXT:    // kill: def $s0 killed $s0 def $q0
 ; CHECK-NEXT:    mov v1.s[2], v0.s[0]
 ; CHECK-NEXT:    mov v0.16b, v1.16b
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret
   %r = call <3 x float> @llvm.log2.v3f32(<3 x float> %x)
   ret <3 x float> %r

diff --git a/llvm/test/CodeGen/AArch64/vec_uaddo.ll b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
index 6374196fe232b..ba2bf6769fcab 100644
--- a/llvm/test/CodeGen/AArch64/vec_uaddo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_uaddo.ll
@@ -50,7 +50,7 @@ define <3 x i32> @uaddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; CHECK-LABEL: uaddo_v3i32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
-; CHECK-NEXT:    add x8, x0, #8 // =8
+; CHECK-NEXT:    add x8, x0, #8
 ; CHECK-NEXT:    cmhi v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    st1 { v1.s }[2], [x8]
 ; CHECK-NEXT:    str d1, [x0]
@@ -86,8 +86,8 @@ define <6 x i32> @uaddo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    mov v2.s[1], w7
 ; CHECK-NEXT:    ld1 { v2.s }[2], [x9]
-; CHECK-NEXT:    add x8, sp, #24 // =24
-; CHECK-NEXT:    add x10, sp, #8 // =8
+; CHECK-NEXT:    add x8, sp, #24
+; CHECK-NEXT:    add x10, sp, #8
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x8]
 ; CHECK-NEXT:    fmov s3, w0
 ; CHECK-NEXT:    ldr x11, [sp, #32]

diff --git a/llvm/test/CodeGen/AArch64/vec_umulo.ll b/llvm/test/CodeGen/AArch64/vec_umulo.ll
index d43c37721312a..0aa4eb4365d51 100644
--- a/llvm/test/CodeGen/AArch64/vec_umulo.ll
+++ b/llvm/test/CodeGen/AArch64/vec_umulo.ll
@@ -57,7 +57,7 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; CHECK-NEXT:    umull v3.2d, v0.2s, v1.2s
 ; CHECK-NEXT:    mul v1.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    uzp2 v0.4s, v3.4s, v2.4s
-; CHECK-NEXT:    add x8, x0, #8 // =8
+; CHECK-NEXT:    add x8, x0, #8
 ; CHECK-NEXT:    cmtst v0.4s, v0.4s, v0.4s
 ; CHECK-NEXT:    st1 { v1.s }[2], [x8]
 ; CHECK-NEXT:    str d1, [x0]
@@ -97,8 +97,8 @@ define <6 x i32> @umulo_v6i32(<6 x i32> %a0, <6 x i32> %a1, <6 x i32>* %p2) noun
 ; CHECK-NEXT:    mov x9, sp
 ; CHECK-NEXT:    mov v2.s[1], w7
 ; CHECK-NEXT:    ld1 { v2.s }[2], [x9]
-; CHECK-NEXT:    add x8, sp, #24 // =24
-; CHECK-NEXT:    add x10, sp, #8 // =8
+; CHECK-NEXT:    add x8, sp, #24
+; CHECK-NEXT:    add x10, sp, #8
 ; CHECK-NEXT:    ld1 { v0.s }[1], [x8]
 ; CHECK-NEXT:    fmov s3, w0
 ; CHECK-NEXT:    ldr x11, [sp, #32]
@@ -316,10 +316,10 @@ define <4 x i32> @umulo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
 define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2) nounwind {
 ; CHECK-LABEL: umulo_v2i128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x7, #0 // =0
+; CHECK-NEXT:    cmp x7, #0
 ; CHECK-NEXT:    umulh x8, x3, x6
 ; CHECK-NEXT:    cset w13, ne
-; CHECK-NEXT:    cmp x3, #0 // =0
+; CHECK-NEXT:    cmp x3, #0
 ; CHECK-NEXT:    umulh x9, x7, x2
 ; CHECK-NEXT:    mul x10, x7, x2
 ; CHECK-NEXT:    cset w14, ne
@@ -336,11 +336,11 @@ define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; CHECK-NEXT:    mul x12, x2, x6
 ; CHECK-NEXT:    orr w13, w13, w14
 ; CHECK-NEXT:    cset w14, hs
-; CHECK-NEXT:    cmp x5, #0 // =0
+; CHECK-NEXT:    cmp x5, #0
 ; CHECK-NEXT:    umulh x17, x1, x4
 ; CHECK-NEXT:    stp x12, x10, [x8, #16]
 ; CHECK-NEXT:    cset w10, ne
-; CHECK-NEXT:    cmp x1, #0 // =0
+; CHECK-NEXT:    cmp x1, #0
 ; CHECK-NEXT:    umulh x9, x5, x0
 ; CHECK-NEXT:    mul x11, x5, x0
 ; CHECK-NEXT:    cset w12, ne

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
index 98d5bf43b9664..58020d28702b2 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -20,7 +20,7 @@ define i32 @reduce_and_v1(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.b[0]
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w0, w0, w1, lt
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i8> %a0, zeroinitializer
@@ -114,7 +114,7 @@ define i32 @reduce_or_v1(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.b[0]
-; CHECK-NEXT:    cmp w8, #0 // =0
+; CHECK-NEXT:    cmp w8, #0
 ; CHECK-NEXT:    csel w0, w0, w1, lt
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i8> %a0, zeroinitializer

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
index 17b26ca01b2a2..93707c3c4c837 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-legalization-strict.ll
@@ -141,7 +141,7 @@ define float @test_v5f32_neutral(<5 x float> %a) nounwind {
 define fp128 @test_v2f128(<2 x fp128> %a, fp128 %s) nounwind {
 ; CHECK-LABEL: test_v2f128:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32 // =32
+; CHECK-NEXT:    sub sp, sp, #32
 ; CHECK-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-NEXT:    mov v1.16b, v0.16b
 ; CHECK-NEXT:    mov v0.16b, v2.16b
@@ -149,7 +149,7 @@ define fp128 @test_v2f128(<2 x fp128> %a, fp128 %s) nounwind {
 ; CHECK-NEXT:    bl __addtf3
 ; CHECK-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT:    add sp, sp, #32 // =32
+; CHECK-NEXT:    add sp, sp, #32
 ; CHECK-NEXT:    b __addtf3
   %b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 %s, <2 x fp128> %a)
   ret fp128 %b

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
index db70219f43760..6b9d81ddd2f89 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmax-legalization.ll
@@ -186,7 +186,7 @@ define half @test_v11f16(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    movi v16.8h, #252, lsl #8
 ; CHECK-FP-NEXT:    mov x8, sp
 ; CHECK-FP-NEXT:    ld1 { v16.h }[0], [x8]
-; CHECK-FP-NEXT:    add x8, sp, #8 // =8
+; CHECK-FP-NEXT:    add x8, sp, #8
 ; CHECK-FP-NEXT:    // kill: def $h0 killed $h0 def $q0
 ; CHECK-FP-NEXT:    // kill: def $h1 killed $h1 def $q1
 ; CHECK-FP-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -199,7 +199,7 @@ define half @test_v11f16(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    ld1 { v16.h }[1], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[2], v2.h[0]
 ; CHECK-FP-NEXT:    mov v0.h[3], v3.h[0]
-; CHECK-FP-NEXT:    add x8, sp, #16 // =16
+; CHECK-FP-NEXT:    add x8, sp, #16
 ; CHECK-FP-NEXT:    mov v0.h[4], v4.h[0]
 ; CHECK-FP-NEXT:    ld1 { v16.h }[2], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[5], v5.h[0]
@@ -294,7 +294,7 @@ define half @test_v11f16_ninf(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    mvni v16.8h, #4, lsl #8
 ; CHECK-FP-NEXT:    mov x8, sp
 ; CHECK-FP-NEXT:    ld1 { v16.h }[0], [x8]
-; CHECK-FP-NEXT:    add x8, sp, #8 // =8
+; CHECK-FP-NEXT:    add x8, sp, #8
 ; CHECK-FP-NEXT:    // kill: def $h0 killed $h0 def $q0
 ; CHECK-FP-NEXT:    // kill: def $h1 killed $h1 def $q1
 ; CHECK-FP-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -307,7 +307,7 @@ define half @test_v11f16_ninf(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    ld1 { v16.h }[1], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[2], v2.h[0]
 ; CHECK-FP-NEXT:    mov v0.h[3], v3.h[0]
-; CHECK-FP-NEXT:    add x8, sp, #16 // =16
+; CHECK-FP-NEXT:    add x8, sp, #16
 ; CHECK-FP-NEXT:    mov v0.h[4], v4.h[0]
 ; CHECK-FP-NEXT:    ld1 { v16.h }[2], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[5], v5.h[0]

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
index 4925f049f9532..07ddb507a7ed6 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
@@ -186,7 +186,7 @@ define half @test_v11f16(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    movi v16.8h, #124, lsl #8
 ; CHECK-FP-NEXT:    mov x8, sp
 ; CHECK-FP-NEXT:    ld1 { v16.h }[0], [x8]
-; CHECK-FP-NEXT:    add x8, sp, #8 // =8
+; CHECK-FP-NEXT:    add x8, sp, #8
 ; CHECK-FP-NEXT:    // kill: def $h0 killed $h0 def $q0
 ; CHECK-FP-NEXT:    // kill: def $h1 killed $h1 def $q1
 ; CHECK-FP-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -199,7 +199,7 @@ define half @test_v11f16(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    ld1 { v16.h }[1], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[2], v2.h[0]
 ; CHECK-FP-NEXT:    mov v0.h[3], v3.h[0]
-; CHECK-FP-NEXT:    add x8, sp, #16 // =16
+; CHECK-FP-NEXT:    add x8, sp, #16
 ; CHECK-FP-NEXT:    mov v0.h[4], v4.h[0]
 ; CHECK-FP-NEXT:    ld1 { v16.h }[2], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[5], v5.h[0]
@@ -294,7 +294,7 @@ define half @test_v11f16_ninf(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    mvni v16.8h, #132, lsl #8
 ; CHECK-FP-NEXT:    mov x8, sp
 ; CHECK-FP-NEXT:    ld1 { v16.h }[0], [x8]
-; CHECK-FP-NEXT:    add x8, sp, #8 // =8
+; CHECK-FP-NEXT:    add x8, sp, #8
 ; CHECK-FP-NEXT:    // kill: def $h0 killed $h0 def $q0
 ; CHECK-FP-NEXT:    // kill: def $h1 killed $h1 def $q1
 ; CHECK-FP-NEXT:    // kill: def $h2 killed $h2 def $q2
@@ -307,7 +307,7 @@ define half @test_v11f16_ninf(<11 x half> %a) nounwind {
 ; CHECK-FP-NEXT:    ld1 { v16.h }[1], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[2], v2.h[0]
 ; CHECK-FP-NEXT:    mov v0.h[3], v3.h[0]
-; CHECK-FP-NEXT:    add x8, sp, #16 // =16
+; CHECK-FP-NEXT:    add x8, sp, #16
 ; CHECK-FP-NEXT:    mov v0.h[4], v4.h[0]
 ; CHECK-FP-NEXT:    ld1 { v16.h }[2], [x8]
 ; CHECK-FP-NEXT:    mov v0.h[5], v5.h[0]

diff --git a/llvm/test/CodeGen/AArch64/vldn_shuffle.ll b/llvm/test/CodeGen/AArch64/vldn_shuffle.ll
index 99100a2ab4c82..9f890c6370a6c 100644
--- a/llvm/test/CodeGen/AArch64/vldn_shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/vldn_shuffle.ll
@@ -11,7 +11,7 @@ define void @vld2(float* nocapture readonly %pSrc, float* noalias nocapture %pDs
 ; CHECK-NEXT:    fmul v2.4s, v0.4s, v0.4s
 ; CHECK-NEXT:    fmla v2.4s, v1.4s, v1.4s
 ; CHECK-NEXT:    str q2, [x1, x8]
-; CHECK-NEXT:    add x8, x8, #16 // =16
+; CHECK-NEXT:    add x8, x8, #16
 ; CHECK-NEXT:    cmp x8, #1, lsl #12 // =4096
 ; CHECK-NEXT:    b.ne .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %while.end
@@ -52,7 +52,7 @@ define void @vld3(float* nocapture readonly %pSrc, float* noalias nocapture %pDs
 ; CHECK-NEXT:    fmla v3.4s, v1.4s, v1.4s
 ; CHECK-NEXT:    fmla v3.4s, v2.4s, v2.4s
 ; CHECK-NEXT:    str q3, [x1, x8]
-; CHECK-NEXT:    add x8, x8, #16 // =16
+; CHECK-NEXT:    add x8, x8, #16
 ; CHECK-NEXT:    cmp x8, #1, lsl #12 // =4096
 ; CHECK-NEXT:    b.ne .LBB1_1
 ; CHECK-NEXT:  // %bb.2: // %while.end
@@ -93,7 +93,7 @@ define void @vld4(float* nocapture readonly %pSrc, float* noalias nocapture %pDs
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0], #64
 ; CHECK-NEXT:    add x9, x1, x8
-; CHECK-NEXT:    add x8, x8, #32 // =32
+; CHECK-NEXT:    add x8, x8, #32
 ; CHECK-NEXT:    cmp x8, #2, lsl #12 // =8192
 ; CHECK-NEXT:    fmul v4.4s, v0.4s, v0.4s
 ; CHECK-NEXT:    fmla v4.4s, v1.4s, v1.4s
@@ -145,7 +145,7 @@ define void @twosrc(float* nocapture readonly %pSrc, float* nocapture readonly %
 ; CHECK-NEXT:    add x10, x1, x8
 ; CHECK-NEXT:    ld2 { v0.4s, v1.4s }, [x9]
 ; CHECK-NEXT:    ld2 { v2.4s, v3.4s }, [x10]
-; CHECK-NEXT:    add x8, x8, #32 // =32
+; CHECK-NEXT:    add x8, x8, #32
 ; CHECK-NEXT:    cmp x8, #2, lsl #12 // =8192
 ; CHECK-NEXT:    fmul v4.4s, v2.4s, v0.4s
 ; CHECK-NEXT:    fmla v4.4s, v1.4s, v3.4s

diff --git a/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll b/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
index 1552a554eb4e5..5573a8215babc 100644
--- a/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
+++ b/llvm/test/CodeGen/AArch64/wineh-try-catch-nobase.ll
@@ -9,7 +9,7 @@
 ; CHECK:             stp     x29, x30, [sp, #-16]!   // 16-byte Folded Spill
 ; CHECK-NEXT:        .seh_save_fplr_x 16
 ; CHECK-NEXT:        .seh_endprologue
-; CHECK-NEXT:        sub     x0, x29, #16            // =16
+; CHECK-NEXT:        sub     x0, x29, #16
 ; CHECK-NEXT:        mov     x1, xzr
 ; CHECK-NEXT:        bl      "?bb@@YAXPEAHH@Z"
 ; CHECK-NEXT:        adrp    x0, .LBB0_1

diff --git a/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll b/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll
index 3b2f98335652e..cf3795474c5b3 100644
--- a/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll
+++ b/llvm/test/Transforms/CanonicalizeFreezeInLoops/aarch64.ll
@@ -7,12 +7,12 @@
 define void @f(i8* %p, i32 %n, i32 %m) {
 ; CHECK-LABEL: f:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add w8, w2, #1 // =1
+; CHECK-NEXT:    add w8, w2, #1
 ; CHECK-NEXT:  .LBB0_1: // %loop
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    strb wzr, [x0, w8, sxtw]
-; CHECK-NEXT:    subs w1, w1, #1 // =1
-; CHECK-NEXT:    add w8, w8, #1 // =1
+; CHECK-NEXT:    subs w1, w1, #1
+; CHECK-NEXT:    add w8, w8, #1
 ; CHECK-NEXT:    b.ne .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %exit
 ; CHECK-NEXT:    ret
@@ -34,12 +34,12 @@ exit:
 define void @f_without_freeze(i8* %p, i32 %n, i32 %m) {
 ; CHECK-LABEL: f_without_freeze:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add w8, w2, #1 // =1
+; CHECK-NEXT:    add w8, w2, #1
 ; CHECK-NEXT:  .LBB1_1: // %loop
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    strb wzr, [x0, w8, sxtw]
-; CHECK-NEXT:    subs w1, w1, #1 // =1
-; CHECK-NEXT:    add w8, w8, #1 // =1
+; CHECK-NEXT:    subs w1, w1, #1
+; CHECK-NEXT:    add w8, w8, #1
 ; CHECK-NEXT:    b.ne .LBB1_1
 ; CHECK-NEXT:  // %bb.2: // %exit
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll
index 1aab663f1fe4f..5c1cb2197bda4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-pre-inc-offset-check.ll
@@ -18,15 +18,15 @@
 define void @test_lsr_pre_inc_offset_check(%"Type"* %p) {
 ; CHECK-LABEL: test_lsr_pre_inc_offset_check:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    add x8, x0, #340 // =340
+; CHECK-NEXT:    add x8, x0, #340
 ; CHECK-NEXT:    mov w9, #165
 ; CHECK-NEXT:    mov w10, #2
 ; CHECK-NEXT:  .LBB0_1: // %main
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    stur wzr, [x8, #-1]
 ; CHECK-NEXT:    strb w10, [x8]
-; CHECK-NEXT:    subs x9, x9, #1 // =1
-; CHECK-NEXT:    add x8, x8, #338 // =338
+; CHECK-NEXT:    subs x9, x9, #1
+; CHECK-NEXT:    add x8, x8, #338
 ; CHECK-NEXT:    b.ne .LBB0_1
 ; CHECK-NEXT:  // %bb.2: // %exit
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
index af39bec33013e..0353b2296bf67 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
@@ -20,7 +20,7 @@ define float @test1(float* nocapture readonly %arr, i64 %start, float %threshold
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x1, .LBB0_4
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
-; CHECK-NEXT:    add x8, x0, #28 // =28
+; CHECK-NEXT:    add x8, x0, #28
 ; CHECK-NEXT:  .LBB0_2: // %for.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldr s1, [x8, x1, lsl #2]
@@ -28,7 +28,7 @@ define float @test1(float* nocapture readonly %arr, i64 %start, float %threshold
 ; CHECK-NEXT:    b.gt .LBB0_5
 ; CHECK-NEXT:  // %bb.3: // %for.cond
 ; CHECK-NEXT:    // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT:    add x1, x1, #1 // =1
+; CHECK-NEXT:    add x1, x1, #1
 ; CHECK-NEXT:    cbnz x1, .LBB0_2
 ; CHECK-NEXT:  .LBB0_4:
 ; CHECK-NEXT:    fmov s0, #-7.00000000
@@ -65,7 +65,7 @@ define float @test2(float* nocapture readonly %arr, i64 %start, float %threshold
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cbz x1, .LBB1_4
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
-; CHECK-NEXT:    add x8, x0, #28 // =28
+; CHECK-NEXT:    add x8, x0, #28
 ; CHECK-NEXT:  .LBB1_2: // %for.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ldr s1, [x8, x1, lsl #2]
@@ -75,7 +75,7 @@ define float @test2(float* nocapture readonly %arr, i64 %start, float %threshold
 ; CHECK-NEXT:    b.gt .LBB1_5
 ; CHECK-NEXT:  // %bb.3: // %for.cond
 ; CHECK-NEXT:    // in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    add x1, x1, #1 // =1
+; CHECK-NEXT:    add x1, x1, #1
 ; CHECK-NEXT:    cbnz x1, .LBB1_2
 ; CHECK-NEXT:  .LBB1_4:
 ; CHECK-NEXT:    fmov s0, #-7.00000000

diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
index 39a189d1c5f6f..13264bbf585cd 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.generated.expected
@@ -64,9 +64,9 @@ define dso_local i32 @main() #0 {
 attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-LABEL: check_boundaries:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT:    add x29, sp, #32 // =32
+; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
@@ -94,9 +94,9 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ;
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT:    add x29, sp, #32 // =32
+; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
@@ -128,5 +128,5 @@ attributes #0 = { noredzone nounwind ssp uwtable "frame-pointer"="all" }
 ; CHECK-LABEL: OUTLINED_FUNCTION_1:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w0, wzr
-; CHECK-NEXT:    add sp, sp, #48 // =48
+; CHECK-NEXT:    add sp, sp, #48
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
index 77568d028225d..4aaac20201d92 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/aarch64_generated_funcs.ll.nogenerated.expected
@@ -5,9 +5,9 @@
 define dso_local i32 @check_boundaries() #0 {
 ; CHECK-LABEL: check_boundaries:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT:    add x29, sp, #32 // =32
+; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16
@@ -71,9 +71,9 @@ define dso_local i32 @check_boundaries() #0 {
 define dso_local i32 @main() #0 {
 ; CHECK-LABEL: main:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #48 // =48
+; CHECK-NEXT:    sub sp, sp, #48
 ; CHECK-NEXT:    stp x29, x30, [sp, #32] // 16-byte Folded Spill
-; CHECK-NEXT:    add x29, sp, #32 // =32
+; CHECK-NEXT:    add x29, sp, #32
 ; CHECK-NEXT:    .cfi_def_cfa w29, 16
 ; CHECK-NEXT:    .cfi_offset w30, -8
 ; CHECK-NEXT:    .cfi_offset w29, -16

diff --git a/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s b/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s
index 30528d94cd339..09696f6dd04fd 100644
--- a/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s
+++ b/llvm/test/tools/llvm-objdump/ELF/AArch64/disassemble-align.s
@@ -3,7 +3,7 @@
 
 ## Use '|' to show where the tabs line up.
 #       CHECK:0000000000000000 <$x.0>:
-#  CHECK-NEXT:       0: 62 10 00 91  |add|x2, x3, #4              // =4
+#  CHECK-NEXT:       0: 62 10 00 91  |add|x2, x3, #4{{$}}
 #  CHECK-NEXT:       4: 1f 20 03 d5  |nop
 # CHECK-EMPTY:
 #  CHECK-NEXT:0000000000000008 <$d.1>:
