[llvm] [llvm][test] Fix filecheck annotation typos [2/n] (PR #95433)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 13 09:36:13 PDT 2024


llvmbot wrote:


@llvm/pr-subscribers-backend-loongarch

@llvm/pr-subscribers-backend-x86

Author: klensy (klensy)

Changes:

Blocked on #93673; the actual changes are in the last commit only, the earlier commits carry over from the previous PR.
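For context, the typos being fixed are malformed FileCheck directive prefixes (e.g. `CHECK-NETX:`, `CHECK-LABAL:`, `CHECK=LABEL:`). FileCheck only recognizes an exact prefix followed by a known suffix as a directive, so a misspelled line is treated as an ordinary comment and silently skipped, leaving that assertion unchecked while the test still passes. A minimal, hypothetical sketch (the `@id` test below is illustrative only, not a file from this patch):

```llvm
; RUN: llc -mtriple=aarch64 < %s | FileCheck %s

; The misspelled prefix below is not recognized as a directive, so FileCheck
; silently ignores the line and the intended label check never runs:
; CHECK-LABAL: id:
define i32 @id(i32 %x) {
; With the prefix corrected, the label is actually verified:
; CHECK-LABEL: id:
; CHECK: ret
  ret i32 %x
}
```

This is why the fixes in the diff below are mechanical: either the prefix is corrected in place, or lines that are intentionally disabled are turned into explicit `COM:` / `FIXME:` comments.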

---

Patch is 261.42 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/95433.diff


184 Files Affected:

- (modified) llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll (+1-1) 
- (modified) llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll (+2-2) 
- (modified) llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll (+2-2) 
- (modified) llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll (+1-1) 
- (modified) llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll (+2-2) 
- (modified) llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll (+1-1) 
- (modified) llvm/test/Assembler/bfloat.ll (+4-4) 
- (modified) llvm/test/Bitcode/convergence-control.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll (+2-2) 
- (modified) llvm/test/CodeGen/AArch64/aarch64-mulv.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/arm64_32-atomics.ll (+10-10) 
- (modified) llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/cxx-tlscc.ll (+2-2) 
- (modified) llvm/test/CodeGen/AArch64/fp16-fmla.ll (+22-22) 
- (modified) llvm/test/CodeGen/AArch64/fpimm.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/misched-fusion-lit.ll (+7-7) 
- (modified) llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll (+3-3) 
- (modified) llvm/test/CodeGen/AArch64/stp-opt-with-renaming-undef-assert.mir (+1-1) 
- (modified) llvm/test/CodeGen/AArch64/sve-calling-convention.ll (+2-2) 
- (modified) llvm/test/CodeGen/AArch64/swift-error.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/and.ll (+9-9) 
- (modified) llvm/test/CodeGen/AMDGPU/attr-amdgpu-num-sgpr.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/callee-special-input-vgprs.ll (+3-3) 
- (modified) llvm/test/CodeGen/AMDGPU/calling-conventions.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/cgp-addressing-modes.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/commute-compares.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/default-fp-mode.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/fmuladd.f32.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/internalize.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sdot4.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/load-global-i8.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/load-local-i16.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/load-local-i8.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/local-atomics.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/local-atomics64.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.ll (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/mfma-loop.ll (+94-94) 
- (modified) llvm/test/CodeGen/AMDGPU/reduction.ll (+15-15) 
- (modified) llvm/test/CodeGen/AMDGPU/remove-incompatible-functions.ll (+2-2) 
- (modified) llvm/test/CodeGen/ARM/ParallelDSP/complex_dot_prod.ll (+1-1) 
- (modified) llvm/test/CodeGen/ARM/armv8.2a-fp16-vector-intrinsics.ll (+71-71) 
- (modified) llvm/test/CodeGen/ARM/debug-frame-large-stack.ll (+10-11) 
- (modified) llvm/test/CodeGen/ARM/dsp-loop-indexing.ll (+1-1) 
- (modified) llvm/test/CodeGen/ARM/shifter_operand.ll (-1) 
- (modified) llvm/test/CodeGen/ARM/speculation-hardening-sls.ll (+2-2) 
- (modified) llvm/test/CodeGen/ARM/sxt_rot.ll (-1) 
- (modified) llvm/test/CodeGen/Hexagon/inline-division.ll (+1-1) 
- (modified) llvm/test/CodeGen/Hexagon/verify-liveness-at-def.mir (+1-1) 
- (modified) llvm/test/CodeGen/Lanai/lshift64.ll (+1-1) 
- (modified) llvm/test/CodeGen/MIR/AArch64/function-info-noredzone-present.mir (+1-1) 
- (modified) llvm/test/CodeGen/MIR/AArch64/unnamed-stack.ll (+2-2) 
- (modified) llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll (+2-2) 
- (modified) llvm/test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll (+1-1) 
- (modified) llvm/test/CodeGen/Mips/optimizeAndPlusShift.ll (+9-9) 
- (modified) llvm/test/CodeGen/Mips/tailcall/tailcall.ll (+1-1) 
- (modified) llvm/test/CodeGen/NVPTX/idioms.ll (+5-5) 
- (modified) llvm/test/CodeGen/PowerPC/convert-ri-addi-to-ri.mir (+2-2) 
- (modified) llvm/test/CodeGen/PowerPC/fixup-kill-dead-flag-crash.mir (+2-2) 
- (modified) llvm/test/CodeGen/PowerPC/livevars-crash1.mir (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/fallback.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/patchable-function-entry.ll (+4-4) 
- (modified) llvm/test/CodeGen/SPARC/inlineasm.ll (+1-1) 
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll (+3-3) 
- (modified) llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_usm_storage_classes/intel-usm-addrspaces.ll (+2-2) 
- (modified) llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse-subbyte.ll (+1-1) 
- (modified) llvm/test/CodeGen/SPIRV/transcoding/OpBitReverse_i2.ll (+1-1) 
- (modified) llvm/test/CodeGen/SystemZ/prefetch-04.ll (+1-1) 
- (modified) llvm/test/CodeGen/Thumb2/LowOverheadLoops/branch-targets.ll (+3-3) 
- (modified) llvm/test/CodeGen/Thumb2/float-intrinsics-double.ll (+3-3) 
- (modified) llvm/test/CodeGen/Thumb2/float-ops.ll (+1-1) 
- (modified) llvm/test/CodeGen/Thumb2/pacbti-m-outliner-1.ll (+3-3) 
- (modified) llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir (-1) 
- (modified) llvm/test/CodeGen/X86/dynamic-regmask-preserve-all.ll (+1-1) 
- (modified) llvm/test/CodeGen/X86/global-sections.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/haddsub.ll (+17-17) 
- (modified) llvm/test/CodeGen/X86/sjlj.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/tailregccpic.ll (+2-2) 
- (modified) llvm/test/DebugInfo/COFF/jump-table.ll (+2-2) 
- (modified) llvm/test/DebugInfo/MIR/InstrRef/deref-spills-with-size.mir (+1-1) 
- (modified) llvm/test/DebugInfo/MIR/InstrRef/livedebugvalues_illegal_locs.mir (+3-3) 
- (modified) llvm/test/DebugInfo/MIR/InstrRef/single-assign-propagation.mir (+3-3) 
- (modified) llvm/test/DebugInfo/MIR/InstrRef/x86-fixup-bw-inst-subreb.mir (+2-2) 
- (modified) llvm/test/DebugInfo/MIR/InstrRef/x86-lea-fixup-2.mir (+2-2) 
- (modified) llvm/test/DebugInfo/MIR/X86/multiple-param-dbg-value-entry.mir (+1-1) 
- (modified) llvm/test/DebugInfo/MSP430/ranges_always.ll (+1-1) 
- (modified) llvm/test/DebugInfo/PDB/DIA/pdbdump-symbol-format.test (+1-1) 
- (modified) llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll (+3-3) 
- (modified) llvm/test/DebugInfo/X86/ranges_always.ll (+1-1) 
- (modified) llvm/test/DebugInfo/assignment-tracking/X86/dbg-phi-produces-undef.ll (+1-1) 
- (modified) llvm/test/DebugInfo/assignment-tracking/X86/sdag-dangling-dbgassign.ll (+1-1) 
- (modified) llvm/test/DebugInfo/dwarfdump-dump-gdbindex-v8.test (+1-1) 
- (modified) llvm/test/Feature/optnone-llc.ll (+1) 
- (modified) llvm/test/Instrumentation/AddressSanitizer/aarch64be.ll (+1-1) 
- (modified) llvm/test/Instrumentation/InstrProfiling/inline-data-var.ll (+1-1) 
- (modified) llvm/test/MC/AArch64/SME/feature.s (+1-1) 
- (modified) llvm/test/MC/AArch64/armv8.7a-xs.s (+79-79) 
- (modified) llvm/test/MC/AArch64/basic-a64-diagnostics.s (+57-57) 
- (modified) llvm/test/MC/ARM/coff-relocations.s (+1-1) 
- (modified) llvm/test/MC/ARM/neon-complex.s (+20-20) 
- (modified) llvm/test/MC/AsmParser/labels.s (+2-2) 
- (modified) llvm/test/MC/COFF/cv-inline-linetable.s (+3-3) 
- (modified) llvm/test/MC/Disassembler/AArch64/armv8.6a-bf16.txt (+8-8) 
- (modified) llvm/test/MC/Disassembler/AArch64/armv8.7a-xs.txt (+80-80) 
- (modified) llvm/test/MC/Disassembler/AArch64/tme.txt (+3-3) 
- (modified) llvm/test/MC/Disassembler/ARM/arm-tests.txt (+1-1) 
- (modified) llvm/test/MC/Disassembler/Mips/mips32r6/valid-mips32r6.txt (+3-3) 
- (modified) llvm/test/MC/Disassembler/Mips/mips64r6/valid-mips64r6.txt (+3-3) 
- (modified) llvm/test/MC/Disassembler/PowerPC/ppc64-encoding-dfp.txt (+1-1) 
- (modified) llvm/test/MC/Disassembler/PowerPC/ppc64-encoding.txt (+1-1) 
- (modified) llvm/test/MC/Disassembler/PowerPC/ppc64le-encoding.txt (+1-1) 
- (modified) llvm/test/MC/Disassembler/X86/x86-16.txt (+16-16) 
- (modified) llvm/test/MC/Hexagon/hvx-tmp-accum-no-erros.s (+1-1) 
- (modified) llvm/test/MC/LoongArch/Relocations/relax-align.s (+1-1) 
- (modified) llvm/test/MC/M68k/Arith/Classes/MxFBinary_FF.s (+8-8) 
- (modified) llvm/test/MC/MachO/lto-set-conditional.s (+2-2) 
- (modified) llvm/test/MC/Mips/expansion-jal-sym-pic.s (+8-8) 
- (modified) llvm/test/MC/Mips/macro-rem.s (+1-1) 
- (modified) llvm/test/MC/Mips/micromips-dsp/invalid.s (+4-6) 
- (modified) llvm/test/MC/Mips/micromips/valid.s (+2-2) 
- (modified) llvm/test/MC/Mips/mips-pdr-bad.s (+2-2) 
- (modified) llvm/test/MC/Mips/mips32r6/invalid.s (+8-9) 
- (modified) llvm/test/MC/Mips/mips64r6/invalid.s (+8-9) 
- (modified) llvm/test/MC/PowerPC/ppc64-encoding-ISA31.s (+13-13) 
- (modified) llvm/test/MC/PowerPC/ppc64-encoding-vmx.s (+3-3) 
- (modified) llvm/test/MC/RISCV/compress-rv64i.s (+2-2) 
- (modified) llvm/test/MC/RISCV/csr-aliases.s (+10-10) 
- (modified) llvm/test/MC/RISCV/elf-flags.s (+2-2) 
- (modified) llvm/test/MC/RISCV/zicfiss-valid.s (+6-6) 
- (modified) llvm/test/MC/WebAssembly/globals.s (+1-1) 
- (modified) llvm/test/MC/X86/apx/evex-format-intel.s (+2-2) 
- (modified) llvm/test/MC/Xtensa/Relocations/relocations.s (+31-31) 
- (modified) llvm/test/Other/constant-fold-gep-address-spaces.ll (+47-47) 
- (modified) llvm/test/Other/new-pm-thinlto-postlink-defaults.ll (+1-1) 
- (modified) llvm/test/TableGen/MixedCasedMnemonic.td (+15-15) 
- (modified) llvm/test/Transforms/Attributor/returned.ll (+1-1) 
- (modified) llvm/test/Transforms/CallSiteSplitting/callsite-split.ll (+1-1) 
- (modified) llvm/test/Transforms/Coroutines/coro-await-suspend-lower-invoke.ll (+1-1) 
- (modified) llvm/test/Transforms/Coroutines/coro-debug-coro-frame.ll (+1-1) 
- (modified) llvm/test/Transforms/FunctionAttrs/nonnull.ll (-1) 
- (modified) llvm/test/Transforms/GVNSink/sink-common-code.ll (+6-6) 
- (modified) llvm/test/Transforms/Inline/update_invoke_prof.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/lifetime-sanitizer.ll (+1-1) 
- (modified) llvm/test/Transforms/InstCombine/str-int-2.ll (+2-2) 
- (modified) llvm/test/Transforms/InstCombine/str-int.ll (+2-2) 
- (modified) llvm/test/Transforms/LoopUnroll/peel-loop2.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/nontemporal-load-store.ll (+11-11) 
- (modified) llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVectorize/branch-weights.ll (+2-2) 
- (modified) llvm/test/Transforms/LoopVectorize/memdep.ll (+1-1) 
- (modified) llvm/test/Transforms/LoopVersioning/wrapping-pointer-non-integral-addrspace.ll (+1-1) 
- (modified) llvm/test/Transforms/ObjCARC/rv.ll (+3-3) 
- (modified) llvm/test/Transforms/PGOProfile/counter_promo_exit_catchswitch.ll (+1-1) 
- (modified) llvm/test/Transforms/PGOProfile/icp_covariant_invoke_return.ll (+4-4) 
- (modified) llvm/test/Transforms/PartiallyInlineLibCalls/X86/good-prototype.ll (+1-1) 
- (modified) llvm/test/Transforms/SampleProfile/pseudo-probe-dangle.ll (+4-4) 
- (modified) llvm/test/Transforms/SampleProfile/pseudo-probe-selectionDAG.ll (+2-2) 
- (modified) llvm/test/Verifier/convergencectrl-invalid.ll (+2-2) 
- (modified) llvm/test/tools/dsymutil/fat-binary-output.test (+1-1) 
- (modified) llvm/test/tools/gold/X86/global_with_section.ll (+1-1) 
- (modified) llvm/test/tools/llvm-ar/replace-update.test (+1-1) 
- (modified) llvm/test/tools/llvm-cov/Inputs/binary-formats.canonical.json (+1-1) 
- (modified) llvm/test/tools/llvm-cov/coverage_watermark.test (+5-5) 
- (modified) llvm/test/tools/llvm-cov/zeroFunctionFile.c (+1-1) 
- (modified) llvm/test/tools/llvm-dwarfdump/X86/simplified-template-names-fail.s (+1-1) 
- (modified) llvm/test/tools/llvm-dwarfutil/ELF/X86/dwarf5-rnglists.test (+1-1) 
- (modified) llvm/test/tools/llvm-lib/duplicate.test (+1-1) 
- (modified) llvm/test/tools/llvm-objcopy/ELF/update-section.test (+1-1) 
- (modified) llvm/test/tools/llvm-objcopy/MachO/code_signature_lc_update.test (+18-18) 
- (modified) llvm/test/tools/llvm-objdump/ELF/ARM/v5te-subarch.s (+1-1) 
- (modified) llvm/test/tools/llvm-objdump/X86/start-stop-address.test (+1-1) 
- (modified) llvm/test/tools/llvm-profgen/disassemble.test (+7-7) 
- (modified) llvm/test/tools/llvm-profgen/filter-ambiguous-profile.test (+5-5) 
- (modified) llvm/test/tools/llvm-profgen/recursion-compression-pseudoprobe.test (+2-2) 
- (modified) llvm/test/tools/llvm-readobj/COFF/codeview-linetables.test (+78-78) 
- (modified) llvm/test/tools/llvm-reduce/skip-delta-passes.ll (+1-1) 
- (modified) llvm/test/tools/llvm-remarkutil/no-instruction-count.test (+1-1) 
- (modified) llvm/test/tools/llvm-symbolizer/flag-grouping.test (+1-1) 
- (modified) llvm/test/tools/lto/discard-value-names.ll (+1-1) 


``````````diff
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll b/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll
index a2526d9f5591a..c2aab35194831 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-shuffle-broadcast.ll
@@ -31,7 +31,7 @@ define void  @broadcast() #0{
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction:   %22 = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction:   %23 = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction:   %24 = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
-; CHECK-NETX: Cost Model: Found an estimated cost of 0 for instruction:   ret void
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction:   ret void
 
   %zero = shufflevector <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
   %1 = shufflevector <vscale x 32 x i8> undef, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll b/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
index a214451bfd3fd..f0a5e9045c3b2 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/pointer-phis.ll
@@ -293,7 +293,7 @@ define i32 @store_with_pointer_phi_incoming_phi(ptr %A, ptr %B, ptr %C, i1 %c.0,
 ; CHECK-EMPTY:
 ; CHECK-NEXT:      Expressions re-written:
 ;
-; CHECK-EMPTY
+; CHECK-EMPTY:
 entry:
   br label %loop.header
 
@@ -376,7 +376,7 @@ define i32 @store_with_pointer_phi_incoming_phi_irreducible_cycle(ptr %A, ptr %B
 ; CHECK-EMPTY:
 ; CHECK-NEXT:      Expressions re-written:
 ;
-; CHECK-EMPTY
+; CHECK-EMPTY:
 entry:
   br label %loop.header
 
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll
index 7fd8ac40e4bec..500fcc41dc40c 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/branch-outside.ll
@@ -1,6 +1,6 @@
 ; RUN: opt %s -mtriple amdgcn-- -passes='print<uniformity>' -disable-output 2>&1 | FileCheck %s
 
-; CHECK=LABEL: UniformityInfo for function 'basic':
+; CHECK-LABEL: UniformityInfo for function 'basic':
 ; CHECK: CYCLES ASSSUMED DIVERGENT:
 ; CHECK:   depth=1: entries(P T) Q
 define amdgpu_kernel void @basic(i32 %a, i32 %b, i32 %c) {
@@ -37,7 +37,7 @@ exit:
   ret void
 }
 
-; CHECK=LABEL: UniformityInfo for function 'nested':
+; CHECK-LABEL: UniformityInfo for function 'nested':
 ; CHECK: CYCLES ASSSUMED DIVERGENT:
 ; CHECK:  depth=1: entries(P T) Q A C B
 define amdgpu_kernel void @nested(i32 %a, i32 %b, i32 %c) {
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll
index 2a3ff4166213d..4b6fced1d58dd 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/exit-divergence.ll
@@ -1,6 +1,6 @@
 ; RUN: opt %s -mtriple amdgcn-- -passes='print<uniformity>' -disable-output 2>&1 | FileCheck %s
 
-; CHECK=LABEL: UniformityInfo for function 'basic':
+; CHECK-LABEL: UniformityInfo for function 'basic':
 ; CHECK-NOT: CYCLES ASSSUMED DIVERGENT:
 ; CHECK: CYCLES WITH DIVERGENT EXIT:
 ; CHECK:   depth=1: entries(P T) Q
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll
index feb29497f80c9..6edd6384db7d1 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/irreducible/reducible-headers.ll
@@ -31,7 +31,7 @@
 ; at P should not be marked divergent.
 
 define amdgpu_kernel void @nested_irreducible(i32 %a, i32 %b, i32 %c) {
-; CHECK=LABEL: UniformityInfo for function 'nested_irreducible':
+; CHECK-LABEL: UniformityInfo for function 'nested_irreducible':
 ; CHECK-NOT: CYCLES ASSSUMED DIVERGENT:
 ; CHECK: CYCLES WITH DIVERGENT EXIT:
 ; CHECK-DAG:   depth=2: entries(P T) R Q
@@ -118,7 +118,7 @@ exit:
 ; Thus, any PHI at P should not be marked divergent.
 
 define amdgpu_kernel void @header_label_1(i32 %a, i32 %b, i32 %c) {
-; CHECK=LABEL: UniformityInfo for function 'header_label_1':
+; CHECK-LABEL: UniformityInfo for function 'header_label_1':
 ; CHECK-NOT: CYCLES ASSSUMED DIVERGENT:
 ; CHECK: CYCLES WITH DIVERGENT EXIT:
 ; CHECK:  depth=1: entries(H) Q P U T R
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll
index 395d7125e3c8d..3015e1326a406 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/temporal_diverge.ll
@@ -169,7 +169,7 @@ X:
   br label %G
 
 G:
-; C HECK: DIVERGENT: %div.user =
+; CHECK: DIVERGENT: %div.user =
   %div.user = add i32 %uni.inc, 5
   br i1 %uni.cond, label %G, label %Y
 ; CHECK: DIVERGENT: %div.user =
diff --git a/llvm/test/Assembler/bfloat.ll b/llvm/test/Assembler/bfloat.ll
index 3a3b4c2b277db..6f935c5dac154 100644
--- a/llvm/test/Assembler/bfloat.ll
+++ b/llvm/test/Assembler/bfloat.ll
@@ -37,25 +37,25 @@ define float @check_bfloat_convert() {
   ret float %tmp
 }
 
-; ASSEM-DISASS-LABEL @snan_bfloat
+; ASSEM-DISASS-LABEL: @snan_bfloat
 define bfloat @snan_bfloat() {
 ; ASSEM-DISASS: ret bfloat 0xR7F81
     ret bfloat 0xR7F81
 }
 
-; ASSEM-DISASS-LABEL @qnan_bfloat
+; ASSEM-DISASS-LABEL: @qnan_bfloat
 define bfloat @qnan_bfloat() {
 ; ASSEM-DISASS: ret bfloat 0xR7FC0
     ret bfloat 0xR7FC0
 }
 
-; ASSEM-DISASS-LABEL @pos_inf_bfloat
+; ASSEM-DISASS-LABEL: @pos_inf_bfloat
 define bfloat @pos_inf_bfloat() {
 ; ASSEM-DISASS: ret bfloat 0xR7F80
     ret bfloat 0xR7F80
 }
 
-; ASSEM-DISASS-LABEL @neg_inf_bfloat
+; ASSEM-DISASS-LABEL: @neg_inf_bfloat
 define bfloat @neg_inf_bfloat() {
 ; ASSEM-DISASS: ret bfloat 0xRFF80
     ret bfloat 0xRFF80
diff --git a/llvm/test/Bitcode/convergence-control.ll b/llvm/test/Bitcode/convergence-control.ll
index 7ba5609b6a7cc..6988ab029f42a 100644
--- a/llvm/test/Bitcode/convergence-control.ll
+++ b/llvm/test/Bitcode/convergence-control.ll
@@ -18,7 +18,7 @@ B:
 C:
   ; CHECK-LABEL: C:
   ; CHECK: [[C:%.*]] = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token [[B]]) ]
-  ; CHEC K: call void @f() [ "convergencectrl"(token [[C]]) ]
+  ; CHECK: call void @f() [ "convergencectrl"(token [[C]]) ]
   ;
   %c = call token @llvm.experimental.convergence.loop() [ "convergencectrl"(token %b) ]
   call void @f() [ "convergencectrl"(token %c) ]
diff --git a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
index b2643dc8f9dcb..44071a113a4a0 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-bf16-ldst-intrinsics.ll
@@ -320,8 +320,8 @@ declare { <4 x bfloat>, <4 x bfloat>, <4 x bfloat> } @llvm.aarch64.neon.ld3lane.
 define %struct.bfloat16x8x3_t @test_vld3q_lane_bf16(ptr %ptr, [3 x <8 x bfloat>] %src.coerce) local_unnamed_addr nounwind {
 ; CHECK-LABEL: test_vld3q_lane_bf16:
 ; CHECK:       // %bb.0: // %entry
-; CHECKT:    ld3 { v0.h, v1.h, v2.h }[7], [x0]
-; CHECKT:    ret
+; CHECK:    ld3 { v0.h, v1.h, v2.h }[7], [x0]
+; CHECK:    ret
 entry:
   %src.coerce.fca.0.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 0
   %src.coerce.fca.1.extract = extractvalue [3 x <8 x bfloat>] %src.coerce, 1
diff --git a/llvm/test/CodeGen/AArch64/aarch64-mulv.ll b/llvm/test/CodeGen/AArch64/aarch64-mulv.ll
index e11ae9a251590..aa4f374d5d7e7 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mulv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-mulv.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=aarch64 -aarch64-enable-sink-fold=true -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc -mtriple=aarch64 -aarch64-enable-sink-fold=true -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK_GI:        warning: Instruction selection used fallback path for mulv_v3i64
+; CHECK-GI:        warning: Instruction selection used fallback path for mulv_v3i64
 
 declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
 declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>)
diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
index 6bcd2f04849b2..d999959bba46f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
@@ -8,7 +8,7 @@ define i32 @fct(i32 %i1, i32 %i2) {
 ; Sign extension is used more than once, thus it should not be folded.
 ; CodeGenPrepare is not sharing sext across uses, thus this is folded because
 ; of that.
-; _CHECK-NOT: , sxtw]
+; CHECK-NOT: , sxtw]
 entry:
   %idxprom = sext i32 %i1 to i64
   %0 = load ptr, ptr @block, align 8
diff --git a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
index 0000262e833da..19b9205dc1786 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
@@ -2,70 +2,70 @@
 ; RUN: llc -mtriple=arm64_32-apple-ios7.0 -mattr=+outline-atomics -o - %s | FileCheck %s -check-prefix=OUTLINE-ATOMICS
 
 define i8 @test_load_8(ptr %addr) {
-; CHECK-LABAL: test_load_8:
+; CHECK-LABEL: test_load_8:
 ; CHECK: ldarb w0, [x0]
   %val = load atomic i8, ptr %addr seq_cst, align 1
   ret i8 %val
 }
 
 define i16 @test_load_16(ptr %addr) {
-; CHECK-LABAL: test_load_16:
+; CHECK-LABEL: test_load_16:
 ; CHECK: ldarh w0, [x0]
   %val = load atomic i16, ptr %addr acquire, align 2
   ret i16 %val
 }
 
 define i32 @test_load_32(ptr %addr) {
-; CHECK-LABAL: test_load_32:
+; CHECK-LABEL: test_load_32:
 ; CHECK: ldar w0, [x0]
   %val = load atomic i32, ptr %addr seq_cst, align 4
   ret i32 %val
 }
 
 define i64 @test_load_64(ptr %addr) {
-; CHECK-LABAL: test_load_64:
+; CHECK-LABEL: test_load_64:
 ; CHECK: ldar x0, [x0]
   %val = load atomic i64, ptr %addr seq_cst, align 8
   ret i64 %val
 }
 
 define ptr @test_load_ptr(ptr %addr) {
-; CHECK-LABAL: test_load_ptr:
+; CHECK-LABEL: test_load_ptr:
 ; CHECK: ldar w0, [x0]
   %val = load atomic ptr, ptr %addr seq_cst, align 8
   ret ptr %val
 }
 
 define void @test_store_8(ptr %addr) {
-; CHECK-LABAL: test_store_8:
+; CHECK-LABEL: test_store_8:
 ; CHECK: stlrb wzr, [x0]
   store atomic i8 0, ptr %addr seq_cst, align 1
   ret void
 }
 
 define void @test_store_16(ptr %addr) {
-; CHECK-LABAL: test_store_16:
+; CHECK-LABEL: test_store_16:
 ; CHECK: stlrh wzr, [x0]
   store atomic i16 0, ptr %addr seq_cst, align 2
   ret void
 }
 
 define void @test_store_32(ptr %addr) {
-; CHECK-LABAL: test_store_32:
+; CHECK-LABEL: test_store_32:
 ; CHECK: stlr wzr, [x0]
   store atomic i32 0, ptr %addr seq_cst, align 4
   ret void
 }
 
 define void @test_store_64(ptr %addr) {
-; CHECK-LABAL: test_store_64:
+; CHECK-LABEL: test_store_64:
 ; CHECK: stlr xzr, [x0]
   store atomic i64 0, ptr %addr seq_cst, align 8
   ret void
 }
 
 define void @test_store_ptr(ptr %addr) {
-; CHECK-LABAL: test_store_ptr:
+; CHECK-LABEL: test_store_ptr:
 ; CHECK: stlr wzr, [x0]
   store atomic ptr null, ptr %addr seq_cst, align 8
   ret void
diff --git a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
index e9556b9d5cbee..c550a24754c96 100644
--- a/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64ec-entry-thunks.ll
@@ -1,7 +1,7 @@
 ; RUN: llc -mtriple=arm64ec-pc-windows-msvc < %s | FileCheck %s
 
 define void @no_op() nounwind {
-; CHECK-LABEL     .def    $ientry_thunk$cdecl$v$v;
+; CHECK-LABEL:    .def    $ientry_thunk$cdecl$v$v;
 ; CHECK:          .section        .wowthk$aa,"xr",discard,$ientry_thunk$cdecl$v$v
 ; CHECK:          // %bb.0:
 ; CHECK-NEXT:     stp     q6, q7, [sp, #-176]!            // 32-byte Folded Spill
diff --git a/llvm/test/CodeGen/AArch64/cxx-tlscc.ll b/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
index 21367aaa8b07f..5a2be8e0e47a9 100644
--- a/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
+++ b/llvm/test/CodeGen/AArch64/cxx-tlscc.ll
@@ -46,7 +46,7 @@ __tls_init.exit:
 ; CHECK-NOT: stp x20, x19
 ; FIXME: The splitting logic in the register allocator fails to split along
 ;        control flow here, we used to get this right by accident before...
-; CHECK-NOTXX: stp x14, x13
+; COM: CHECK-NOT: stp x14, x13
 ; CHECK-NOT: stp x12, x11
 ; CHECK-NOT: stp x10, x9
 ; CHECK-NOT: stp x8, x7
@@ -65,7 +65,7 @@ __tls_init.exit:
 ; CHECK-NOT: ldp x8, x7
 ; CHECK-NOT: ldp x10, x9
 ; CHECK-NOT: ldp x12, x11
-; CHECK-NOTXX: ldp x14, x13
+; COM: CHECK-NOT: ldp x14, x13
 ; CHECK-NOT: ldp x20, x19
 ; CHECK-NOT: ldp d1, d0
 ; CHECK-NOT: ldp d3, d2
diff --git a/llvm/test/CodeGen/AArch64/fp16-fmla.ll b/llvm/test/CodeGen/AArch64/fp16-fmla.ll
index a81721afb8453..916fbeb94dcf8 100644
--- a/llvm/test/CodeGen/AArch64/fp16-fmla.ll
+++ b/llvm/test/CodeGen/AArch64/fp16-fmla.ll
@@ -84,11 +84,11 @@ entry:
 
 define <4 x half> @test_FMLAv4i16_indexed_OP1(<4 x half> %a, <4 x i16> %b, <4 x i16> %c) {
 ; CHECK-LABEL: test_FMLAv4i16_indexed_OP1:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla    {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla    {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
 entry:
   %mul = mul <4 x i16> %c, %b
   %m = bitcast <4 x i16> %mul to <4 x half>
@@ -98,11 +98,11 @@ entry:
 
 define <4 x half> @test_FMLAv4i16_indexed_OP2(<4 x half> %a, <4 x i16> %b, <4 x i16> %c) {
 ; CHECK-LABEL: test_FMLAv4i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla    {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla    {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
 entry:
   %mul = mul <4 x i16> %c, %b
   %m = bitcast <4 x i16> %mul to <4 x half>
@@ -112,11 +112,11 @@ entry:
 
 define <8 x half> @test_FMLAv8i16_indexed_OP1(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
 ; CHECK-LABEL: test_FMLAv8i16_indexed_OP1:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 entry:
   %mul = mul <8 x i16> %c, %b
   %m = bitcast <8 x i16> %mul to <8 x half>
@@ -126,11 +126,11 @@ entry:
 
 define <8 x half> @test_FMLAv8i16_indexed_OP2(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
 ; CHECK-LABEL: test_FMLAv8i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fadd
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmla    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmla    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 entry:
   %mul = mul <8 x i16> %c, %b
   %m = bitcast <8 x i16> %mul to <8 x half>
@@ -178,11 +178,11 @@ entry:
 
 define <4 x half> @test_FMLSv4i16_indexed_OP2(<4 x half> %a, <4 x i16> %b, <4 x i16> %c) {
 ; CHECK-LABEL: test_FMLSv4i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fsub
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmls    {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmls    {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
 entry:
   %mul = mul <4 x i16> %c, %b
   %m = bitcast <4 x i16> %mul to <4 x half>
@@ -192,12 +192,12 @@ entry:
 
 define <8 x half> @test_FMLSv8i16_indexed_OP1(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
 ; CHECK-LABEL: test_FMLSv8i16_indexed_OP1:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fsub
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fneg    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-; CHECK-FIXME: fmla    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fneg    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; COM: CHECK: fmla    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 entry:
   %mul = mul <8 x i16> %c, %b
   %m = bitcast <8 x i16> %mul to <8 x half>
@@ -207,11 +207,11 @@ entry:
 
 define <8 x half> @test_FMLSv8i16_indexed_OP2(<8 x half> %a, <8 x i16> %b, <8 x i16> %c) {
 ; CHECK-LABEL: test_FMLSv8i16_indexed_OP2:
-; CHECK-FIXME: Currently LLVM produces inefficient code:
+; FIXME: Currently LLVM produces inefficient code:
 ; CHECK: mul
 ; CHECK: fsub
-; CHECK-FIXME: It should instead produce the following instruction:
-; CHECK-FIXME: fmls    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
+; FIXME: It should instead produce the following instruction:
+; COM: CHECK: fmls    {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
 entry:
   %mul = mul <8 x i16> %c, %b
   %m = bitcast <8 x i16> %mul to <8 x half>
diff --git a/llvm/test/CodeGen/AArch64/fpimm.ll b/llvm/test/CodeGen/AArch64/fpimm.ll
index b92bb4245c7f3..e2944243338f5 100644
--- a/llvm/test/CodeGen/AArch64/fpimm.ll
+++ b/llvm/test/CodeGen/AArch64/fpimm.ll
@@ -38,7 +38,7 @@ define void @check_double() {
 ; 64-bit ORR followed by MOVK.
 ; CHECK-DAG: mov  [[XFP0:x[0-9]+]], #1082331758844
 ; CHECK-DAG: movk [[XFP0]], #64764, lsl #16
-; CHECk-DAG: fmov {{d[0-9]+}}, [[XFP0]]
+; CHECK-DAG: fmov {{d[0-9]+}}, [[XFP0]]
   %newval3 = fadd double %val, 0xFCFCFC00FC
   store volatile double %newval3, ptr @varf64
 
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
index a5757a70843a9..fa63df35ac857 100644
--- a/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-retaddr-sign-diff-scope-same-key.ll
@@ -28,7 +28,7 @@ define void @a() "sign-return-address"="all" {
 }
 
 define void @b() "sign-return-address"="non-leaf" {
-; CHECK-LABE:      b:                                     // @b
+; CHECK-LABEL:     b:                                     // @b
 ; V8A-NOT:         hint #25
 ; V83A-NOT:        paciasp
 ; CHECK-NOT:       .cfi_negate_ra_state
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll b/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
index fedbb642a3620..c589d356e6937 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-lit.ll
@@ -83,11 +83,11 @@ entry:
   ret double 0x400921FB54442D18
 
 ; CHECK-LABEL: litf:
-; CHECK-DONT:      adrp [[ADDR:x[0-9]+]], [[CSTLABEL:.LCP.*]]
-; CHECK-DONT-NEXT: ldr  {{d[0-9]+}}, {{[[]}}[[ADDR]], :lo12:[[CSTLABEL]]{{[]]}}
-; CHECK-FUSE:      mov  [[R:x[0-9]+]], #11544
-; CHECK-FUSE:      movk [[R]], #21572, lsl #16
-; CHECK-FUSE:      movk [[R]], #8699, lsl #32
-; CHECK-FUSE:      movk [[R]], #16393, lsl #48
-; CHECK-FUSE:      fmov {{d[0-9]+}}, [[R]]
+; CHECKDONT:      adrp [[ADDR:x[0-9]+]], [[CSTLABEL:.LCP.*]]
+; CHECKDONT-NEXT: ldr  {{d[0-9]+}}, {{[[]}}[[ADDR]], :lo12:[[CSTLABEL]]{{[]]}}
+; CHECKFUSE:      mov  [[R:...
[truncated]

``````````



https://github.com/llvm/llvm-project/pull/95433

