[llvm] [AsmPrinter] Renumber basic blocks before printing (PR #186688)

via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 15 11:06:44 PDT 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-debuginfo
@llvm/pr-subscribers-backend-x86
@llvm/pr-subscribers-backend-risc-v

@llvm/pr-subscribers-backend-loongarch

Author: Alexis Engelke (aengelke)

<details>
<summary>Changes</summary>

Make the final assembly label names independent of whether previous passes renumbered the basic blocks by unconditionally renumbering before printing.

The diff is somewhat huge, but this should avoid churn in later changes when passes start or stop renumbering blocks.

---

Patch is 4.64 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/186688.diff


357 Files Affected:

- (modified) llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (+5-1) 
- (modified) llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp (+2) 
- (modified) llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll (+3-3) 
- (modified) llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll (+2-2) 
- (modified) llvm/test/CodeGen/AArch64/branch-relax-b.ll (+19-19) 
- (modified) llvm/test/CodeGen/AArch64/branch-relax-bcc.ll (+6-6) 
- (modified) llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll (+10-10) 
- (modified) llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.mir (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll (+138-138) 
- (modified) llvm/test/CodeGen/AMDGPU/branch-relaxation-inst-size-gfx11.ll (+3-3) 
- (modified) llvm/test/CodeGen/AMDGPU/branch-relaxation.ll (+126-126) 
- (modified) llvm/test/CodeGen/AMDGPU/dagcombine-reassociate-multi-memop.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll (+48-48) 
- (modified) llvm/test/CodeGen/AMDGPU/literal-constant-like-operand-instruction-size.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/long-branch-reserve-register.ll (+18-18) 
- (modified) llvm/test/CodeGen/AMDGPU/preload-implicit-kernargs.ll (+197-197) 
- (modified) llvm/test/CodeGen/AMDGPU/preload-kernarg-header.ll (+4-4) 
- (modified) llvm/test/CodeGen/AMDGPU/preload-kernargs.ll (+311-311) 
- (modified) llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll (+4-4) 
- (modified) llvm/test/CodeGen/BPF/gotol.ll (+2-2) 
- (modified) llvm/test/CodeGen/LoongArch/atomicrmw-cond-sub-clamp.ll (+80-80) 
- (modified) llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll (+80-80) 
- (modified) llvm/test/CodeGen/LoongArch/branch-relaxation-spill-32.ll (+8-8) 
- (modified) llvm/test/CodeGen/LoongArch/branch-relaxation-spill-64.ll (+11-11) 
- (modified) llvm/test/CodeGen/LoongArch/branch-relaxation.ll (+17-17) 
- (modified) llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll (+400-400) 
- (modified) llvm/test/CodeGen/NVPTX/i128.ll (+16-16) 
- (modified) llvm/test/CodeGen/NVPTX/jump-table.ll (+59-59) 
- (modified) llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll (+83-83) 
- (modified) llvm/test/CodeGen/RISCV/atomic-signext.ll (+210-210) 
- (modified) llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll (+98-98) 
- (modified) llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll (+161-161) 
- (modified) llvm/test/CodeGen/RISCV/branch-relaxation-rv32.ll (+70-70) 
- (modified) llvm/test/CodeGen/RISCV/branch-relaxation-rv32e.ll (+35-35) 
- (modified) llvm/test/CodeGen/RISCV/branch-relaxation-rv64.ll (+70-70) 
- (modified) llvm/test/CodeGen/RISCV/pr65025.ll (+9-9) 
- (modified) llvm/test/CodeGen/RISCV/rvv/expandload.ll (+2692-2692) 
- (modified) llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll (+36-36) 
- (modified) llvm/test/CodeGen/SPIRV/const-array-gep.ll (+1-1) 
- (modified) llvm/test/CodeGen/SPIRV/pointers/structured-buffer-access.ll (+1-1) 
- (modified) llvm/test/CodeGen/SPIRV/pointers/structured-buffer-vector-access.ll (+1-1) 
- (modified) llvm/test/CodeGen/VE/Scalar/atomic_cmp_swap.ll (+54-54) 
- (modified) llvm/test/CodeGen/VE/Scalar/br_analyze.ll (+12-12) 
- (modified) llvm/test/CodeGen/VE/Scalar/br_jt.ll (+91-91) 
- (modified) llvm/test/CodeGen/VE/Scalar/brind.ll (+2-2) 
- (modified) llvm/test/CodeGen/VE/Scalar/builtin_sjlj.ll (+53-19) 
- (modified) llvm/test/CodeGen/VE/Scalar/builtin_sjlj_bp.ll (+31-8) 
- (modified) llvm/test/CodeGen/VE/Scalar/builtin_sjlj_callsite.ll (+28-28) 
- (modified) llvm/test/CodeGen/VE/Scalar/builtin_sjlj_landingpad.ll (+32-32) 
- (modified) llvm/test/CodeGen/VE/Scalar/load_stk.ll (+24-24) 
- (modified) llvm/test/CodeGen/VE/Scalar/store_stk.ll (+24-24) 
- (modified) llvm/test/CodeGen/VE/Vector/load_stk_ldvm.ll (+36-36) 
- (modified) llvm/test/CodeGen/VE/Vector/store_stk_stvm.ll (+30-30) 
- (modified) llvm/test/CodeGen/WinEH/wineh-dynamic-alloca.ll (+11-11) 
- (modified) llvm/test/CodeGen/WinEH/wineh-inlined-inalloca.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll (+127-127) 
- (modified) llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll (+25-25) 
- (modified) llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll (+14-14) 
- (modified) llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/2008-04-16-ReMatBug.ll (+13-13) 
- (modified) llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll (+42-42) 
- (modified) llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/2009-08-12-badswitch.ll (+60-60) 
- (modified) llvm/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll (+50-50) 
- (modified) llvm/test/CodeGen/X86/AMX/amx-across-func.ll (+14-14) 
- (modified) llvm/test/CodeGen/X86/AMX/amx-ldtilecfg-insert.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/AMX/amx-tile-basic.ll (+11-11) 
- (modified) llvm/test/CodeGen/X86/MachineSink-Issue98477.ll (+13-13) 
- (modified) llvm/test/CodeGen/X86/MachineSink-eflags.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/PR40322.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/PR71178-register-coalescer-crash.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/SwitchLowering.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/abs.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/absolute-bt.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/absolute-constant.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/addr-mode-matcher-2.ll (+60-60) 
- (modified) llvm/test/CodeGen/X86/align-branch-boundary-suppressions.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/and-sink.ll (+14-14) 
- (modified) llvm/test/CodeGen/X86/andnot-patterns.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/apx/ccmp.ll (+224-224) 
- (modified) llvm/test/CodeGen/X86/apx/check-nf-in-suppress-reloc-pass.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/apx/ctest.ll (+124-124) 
- (modified) llvm/test/CodeGen/X86/apx/kmov-postrapseudos.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/apx/nf-regressions.ll (+16-16) 
- (modified) llvm/test/CodeGen/X86/asm-label.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/atomic-bit-test.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/atomic-flags.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/atomic-rm-bit-test-64.ll (+33-33) 
- (modified) llvm/test/CodeGen/X86/atomic-rm-bit-test.ll (+219-219) 
- (modified) llvm/test/CodeGen/X86/atomic-unordered.ll (+36-36) 
- (modified) llvm/test/CodeGen/X86/atomic32.ll (+40-40) 
- (modified) llvm/test/CodeGen/X86/atomic64.ll (+20-20) 
- (modified) llvm/test/CodeGen/X86/atomicrmw-cond-sub-clamp.ll (+63-63) 
- (modified) llvm/test/CodeGen/X86/avx2-masked-gather.ll (+84-84) 
- (modified) llvm/test/CodeGen/X86/avx2-vbroadcast.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/avx512-cmp.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/avx512-i1test.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/avx512-mask-op.ll (+150-150) 
- (modified) llvm/test/CodeGen/X86/avx512-select.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/avx512vnni-combine.ll (+22-22) 
- (modified) llvm/test/CodeGen/X86/avxvnni-combine.ll (+88-88) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-cloning-1.ll (+11-11) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-cloning-2.ll (+13-13) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-cloning-indirect.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-cloning-invalid.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-clusters-bb-hash.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-clusters-branches.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-clusters-eh.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-clusters.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/basic-block-sections-entryblock.ll (+1-1) 
- (modified) llvm/test/CodeGen/X86/bfloat.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/bittest-big-integer.ll (+57-57) 
- (modified) llvm/test/CodeGen/X86/block-placement.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/bmi-select-distrib.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/bmi.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/branch-hint.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/break-false-dep.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/bsf.ll (+68-68) 
- (modified) llvm/test/CodeGen/X86/bsr.ll (+76-76) 
- (modified) llvm/test/CodeGen/X86/bt.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/bypass-slow-division-32.ll (+22-22) 
- (modified) llvm/test/CodeGen/X86/bypass-slow-division-64.ll (+18-18) 
- (modified) llvm/test/CodeGen/X86/bypass-slow-division-tune.ll (+27-27) 
- (modified) llvm/test/CodeGen/X86/callbr-asm-sink.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/callbr-asm.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/cgp-usubo.ll (+10-10) 
- (modified) llvm/test/CodeGen/X86/clear-highbits.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/clear-lowbits.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/cmov-fp.ll (+72-72) 
- (modified) llvm/test/CodeGen/X86/cmov-into-branch.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/cmovcmov.ll (+19-19) 
- (modified) llvm/test/CodeGen/X86/cmp-bool.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/cmp.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/coalesce-esp.ll (+11-11) 
- (modified) llvm/test/CodeGen/X86/coalescer-commute4.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll (+14-14) 
- (modified) llvm/test/CodeGen/X86/coalescer-implicit-def-regression.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/code-align-loops.ll (+1-2) 
- (modified) llvm/test/CodeGen/X86/combine-sbb.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/combine-storetomstore.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/concat-fpext-v2bf16.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/conditional-tailcall.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/copy-eflags.ll (+13-13) 
- (modified) llvm/test/CodeGen/X86/ctlo.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/ctlz.ll (+63-63) 
- (modified) llvm/test/CodeGen/X86/cttz.ll (+39-39) 
- (modified) llvm/test/CodeGen/X86/dag-update-nodetomatch.ll (+46-46) 
- (modified) llvm/test/CodeGen/X86/dagcombine-select.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/div-rem-pair-recomposition-signed.ll (+14-14) 
- (modified) llvm/test/CodeGen/X86/div-rem-pair-recomposition-unsigned.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/dup-cost.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/expand-large-fp-optnone.ll (+18-18) 
- (modified) llvm/test/CodeGen/X86/extract-bits.ll (+18-18) 
- (modified) llvm/test/CodeGen/X86/extract-lowbits.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/fdiv-combine.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/fma-intrinsics-phi-213-to-231.ll (+96-96) 
- (modified) llvm/test/CodeGen/X86/fold-add-32.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/fold-int-pow2-with-fmul-or-fdiv.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/fold-load.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/fold-loop-of-urem.ll (+79-79) 
- (modified) llvm/test/CodeGen/X86/fp-int-fp-cvt.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/fp-strict-scalar-cmp-fp16.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll (+144-144) 
- (modified) llvm/test/CodeGen/X86/fp-une-cmp.ll (+18-18) 
- (modified) llvm/test/CodeGen/X86/fp128-i128.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/fp128-libcalls-strict.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/fp128-select.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/fp80-strict-scalar-cmp.ll (+72-72) 
- (modified) llvm/test/CodeGen/X86/fptosi-sat-scalar.ll (+90-90) 
- (modified) llvm/test/CodeGen/X86/fptoui-sat-scalar.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/fshl.ll (+21-21) 
- (modified) llvm/test/CodeGen/X86/fshr.ll (+20-20) 
- (modified) llvm/test/CodeGen/X86/funnel-shift-i256.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/funnel-shift.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/gc-empty-basic-blocks.ll (+3-4) 
- (modified) llvm/test/CodeGen/X86/half.ll (+30-30) 
- (modified) llvm/test/CodeGen/X86/hipe-prologue.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/hoist-invariant-load.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/icall-branch-funnel.ll (+105-100) 
- (modified) llvm/test/CodeGen/X86/implicit-null-check.ll (+72-72) 
- (modified) llvm/test/CodeGen/X86/indirect-branch-tracking-eh.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/indirect-branch-tracking-eh2.ll (+38-38) 
- (modified) llvm/test/CodeGen/X86/indirect-branch-tracking.ll (+1-1) 
- (modified) llvm/test/CodeGen/X86/inline-asm-pic.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/inline-spiller-impdef-on-implicit-def-regression.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/isel-br.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/isel-brcond-fcmp.ll (+220-220) 
- (modified) llvm/test/CodeGen/X86/isel-brcond-icmp.ll (+240-240) 
- (modified) llvm/test/CodeGen/X86/isel-int-to-fp.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/isel-phi.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/isel-select-cmov.ll (+43-43) 
- (modified) llvm/test/CodeGen/X86/isel-sink2.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/issue76416.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/jcc-indirect-thunk-kernel.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/jump_sign.ll (+14-14) 
- (modified) llvm/test/CodeGen/X86/large-constants.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/large-pic-jump-table.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/lea-opt-memop-check-2.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/legalize-shift-64.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/lifetime-alias.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/load-local-v3i1.ll (+22-22) 
- (modified) llvm/test/CodeGen/X86/loop-blocks.ll (+191-82) 
- (modified) llvm/test/CodeGen/X86/loop-search.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/loop-strength-reduce7.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/lrshrink-debug.ll (+11-11) 
- (modified) llvm/test/CodeGen/X86/lrshrink-ehpad-phis.ll (+10-10) 
- (modified) llvm/test/CodeGen/X86/lsr-addrecloops.ll (+37-37) 
- (modified) llvm/test/CodeGen/X86/lvi-hardening-loads.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/lzcnt.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/masked_compressstore.ll (+1665-1665) 
- (modified) llvm/test/CodeGen/X86/masked_expandload.ll (+1473-1473) 
- (modified) llvm/test/CodeGen/X86/masked_gather.ll (+717-717) 
- (modified) llvm/test/CodeGen/X86/masked_gather_scatter.ll (+538-538) 
- (modified) llvm/test/CodeGen/X86/masked_gather_scatter_widen.ll (+105-105) 
- (modified) llvm/test/CodeGen/X86/masked_load.ll (+2394-2394) 
- (modified) llvm/test/CodeGen/X86/masked_store.ll (+2537-2537) 
- (modified) llvm/test/CodeGen/X86/masked_store_trunc.ll (+3249-3249) 
- (modified) llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll (+3237-3237) 
- (modified) llvm/test/CodeGen/X86/masked_store_trunc_usat.ll (+3233-3233) 
- (modified) llvm/test/CodeGen/X86/memcmp-more-load-pairs-x32.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/memcmp-optsize-x32.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/memcmp-optsize.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/memcmp-pgso-x32.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/memcmp-pgso.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/memcmp-x32.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/memcmp.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/mmx-arith.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/mmx-coalescing.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/mul-constant-result.ll (+77-77) 
- (modified) llvm/test/CodeGen/X86/muloti.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/negative-stride-fptosi-user.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/no-split-size.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/optimize-max-0.ll (+36-36) 
- (modified) llvm/test/CodeGen/X86/partial-tail-dup.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/peep-test-5.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll (+40-40) 
- (modified) llvm/test/CodeGen/X86/pic.ll (+20-20) 
- (modified) llvm/test/CodeGen/X86/postalloc-coalescing.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/pr142513.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/pr174871.ll (+13-13) 
- (modified) llvm/test/CodeGen/X86/pr31271.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/pr32282.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/pr33828.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/pr38539.ll (+23-23) 
- (modified) llvm/test/CodeGen/X86/pr38743.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/pr38795.ll (+77-77) 
- (modified) llvm/test/CodeGen/X86/pr39666.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/pr49451.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/pr50431.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/pr50782.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/pr53990-incorrect-machine-sink.ll (+11-11) 
- (modified) llvm/test/CodeGen/X86/pr57402.ll (+5-5) 
- (modified) llvm/test/CodeGen/X86/pr61524.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/pr62145.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/pr63692.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/pr94829.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/probe-stack-eflags.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/pseudo_cmov_lower2.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll (+113-113) 
- (modified) llvm/test/CodeGen/X86/reverse_branches.ll (+35-35) 
- (modified) llvm/test/CodeGen/X86/scalar-int-to-fp.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/scheduler-backtracking.ll (+15-15) 
- (modified) llvm/test/CodeGen/X86/segmented-stacks-dynamic.ll (+30-30) 
- (modified) llvm/test/CodeGen/X86/segmented-stacks.ll (+311-311) 
- (modified) llvm/test/CodeGen/X86/seh-catchpad.ll (+2-2) 
- (modified) llvm/test/CodeGen/X86/seh-except-finally.ll (+1-1) 
- (modified) llvm/test/CodeGen/X86/seh-except-restore.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/select-constant-xor.ll (+9-9) 
- (modified) llvm/test/CodeGen/X86/select-mmx.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/select-smin-smax.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/select-testb-volatile-load.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/select-to-and-zext.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/select.ll (+51-51) 
- (modified) llvm/test/CodeGen/X86/select_const.ll (+27-27) 
- (modified) llvm/test/CodeGen/X86/setcc-freeze.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/setuge.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/shadow-stack.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/shift-combine.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/shrink-compare-pgso.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/shrink-compare.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/shuffle-half.ll (+125-125) 
- (modified) llvm/test/CodeGen/X86/sibcall.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/smul-with-overflow.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll (+76-76) 
- (modified) llvm/test/CodeGen/X86/speculative-load-hardening.ll (+53-53) 
- (modified) llvm/test/CodeGen/X86/split-reg-with-hint.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/srem-seteq-optsize.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/sse-load-ret.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/sse-scalar-fp-arith.ll (+12-12) 
- (modified) llvm/test/CodeGen/X86/sse1.ll (+28-28) 
- (modified) llvm/test/CodeGen/X86/stack-clash-dynamic-alloca.ll (+8-8) 
- (modified) llvm/test/CodeGen/X86/stack-clash-small-alloc-medium-align.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/stack-coloring-wineh.ll (+17-17) 
- (modified) llvm/test/CodeGen/X86/stack-protector-msvc.ll (+24-24) 
- (modified) llvm/test/CodeGen/X86/stack-protector-phi.ll (+3-3) 
- (modified) llvm/test/CodeGen/X86/statepoint-invoke.ll (+20-20) 
- (modified) llvm/test/CodeGen/X86/sub-with-overflow.ll (+6-6) 
- (modified) llvm/test/CodeGen/X86/subreg-to-reg-6.ll (+3-3) 


``````````diff
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 40a80576ba86b..7f350557692d4 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2056,6 +2056,9 @@ void AsmPrinter::emitDanglingPrefetchTargets() {
 /// EmitFunctionBody - This method emits the body and trailer for a
 /// function.
 void AsmPrinter::emitFunctionBody() {
+  // Renumber blocks for consistent output of labels.
+  MF->RenumberBlocks();
+
   emitFunctionHeader();
 
   // Emit target-specific gunk before the function body.
@@ -2068,7 +2071,8 @@ void AsmPrinter::emitFunctionBody() {
       OwnedMDT = std::make_unique<MachineDominatorTree>();
       OwnedMDT->recalculate(*MF);
       MDT = OwnedMDT.get();
-    }
+    } else
+      MDT->updateBlockNumbers(); // We renumbered the function above.
 
     // Get MachineLoopInfo or compute it on the fly if it's unavailable
     MLI = GetMLI(*MF);
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index 38bc72ff154fa..2a8cda775997f 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -2686,6 +2686,8 @@ static void addMBBNames(const Module &M, const SPIRVInstrInfo &TII,
             .isValid())
       continue;
     MachineRegisterInfo &MRI = MF->getRegInfo();
+    // Ensure that blocks are numbered in a consistent order.
+    MF->RenumberBlocks();
     for (auto &MBB : *MF) {
       if (!MBB.hasName() || MBB.empty())
         continue;
diff --git a/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll b/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll
index 6641ef6a51c14..ed9ab70bd7fe8 100644
--- a/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll
+++ b/llvm/test/CodeGen/AArch64/basic-block-sections-cold.ll
@@ -41,11 +41,11 @@ declare i32 @_Z3foov() #1
 ; SECTIONS: .section	.text.split._Z3bazb,"ax", at progbits
 ; SECTIONS: _Z3bazb.cold:
 ; SECTIONS-NOT: .section        .text.hot._Z3bazb._Z3bazb.2,"ax", at progbits,unique
-; SECTIONS: .LBB0_2:
+; SECTIONS: .LBB0_5:
 ; SECTIONS: .size   _Z3bazb, .Lfunc_end{{[0-9]}}-_Z3bazb
 
 ; SPLIT:      .section	.text.unlikely._Z3bazb,"ax", at progbits
 ; SPLIT-NEXT: _Z3bazb.cold:
 ; SPLIT-NEXT:   bl _Z3barv
-; SPLIT:      .LBB0_2:
-; SPLIT:      .LBB_END0_2:
+; SPLIT:      .LBB0_5:
+; SPLIT:      .LBB_END0_5:
diff --git a/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll b/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll
index a83a47c9c129c..20c65463e8075 100644
--- a/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll
+++ b/llvm/test/CodeGen/AArch64/basic-block-sections-unsafe.ll
@@ -20,9 +20,9 @@ define void @_Z3asm_goto(i1 zeroext %0, i1 zeroext %1) nounwind {
   ; CHECK:        .section	.text.unlikely._Z3asm_goto,"ax", at progbits
   ; CHECK-NEXT:     _Z3asm_goto.cold:
   ; CHECK-NEXT:       bl bam
-  ; CHECK:          .LBB0_4:
+  ; CHECK:          .LBB0_7:
   ; CHECK:            ret
-  ; CHECK:          .LBB_END0_4:
+  ; CHECK:          .LBB_END0_7:
 
   br i1 %0, label %3, label %5
 
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-b.ll b/llvm/test/CodeGen/AArch64/branch-relax-b.ll
index 44b730f2207ff..04cc98a592728 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-b.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-b.ll
@@ -4,15 +4,15 @@ define void @relax_b_nospill(i1 zeroext %0) {
 ; CHECK-LABEL: relax_b_nospill:
 ; CHECK:       // %bb.0:                               // %entry
 ; CHECK-NEXT:    tbnz w0,
-; CHECK-SAME:                 LBB0_1
-; CHECK-NEXT:  // %bb.3:                               // %entry
-; CHECK-NEXT:          b      .LBB0_2
-; CHECK-NEXT:  .LBB0_1:                                // %iftrue
+; CHECK-SAME:                 LBB0_2
+; CHECK-NEXT:  // %bb.1:                               // %entry
+; CHECK-NEXT:          b      .LBB0_3
+; CHECK-NEXT:  .LBB0_2:                                // %iftrue
 ; CHECK-NEXT:          //APP
 ; CHECK-NEXT:          .zero   2048
 ; CHECK-NEXT:          //NO_APP
 ; CHECK-NEXT:          ret
-; CHECK-NEXT:  .LBB0_2:                                // %iffalse
+; CHECK-NEXT:  .LBB0_3:                                // %iffalse
 ; CHECK-NEXT:          //APP
 ; CHECK-NEXT:          .zero   8
 ; CHECK-NEXT:          //NO_APP
@@ -38,25 +38,25 @@ define void @relax_b_spill() {
 ; CHECK-COUNT-29:         mov     {{x[0-9]+}},
 ; CHECK-NOT:              mov     {{x[0-9]+}},
 ; CHECK-NEXT:             //NO_APP
-; CHECK-NEXT:             b.eq    .LBB1_1
-; CHECK-NEXT:     // %bb.4:                               // %entry
+; CHECK-NEXT:             b.eq    .LBB1_2
+; CHECK-NEXT:     // %bb.1:                               // %entry
 ; CHECK-NEXT:             str     [[SPILL_REGISTER:x[0-9]+]], [sp,
 ; CHECK-SAME:                                                       -16]!
-; CHECK-NEXT:             b       .LBB1_5
-; CHECK-NEXT:     .LBB1_1:                                // %iftrue
+; CHECK-NEXT:             b       .LBB1_3
+; CHECK-NEXT:     .LBB1_2:                                // %iftrue
 ; CHECK-NEXT:             //APP
 ; CHECK-NEXT:             .zero   2048
 ; CHECK-NEXT:             //NO_APP
-; CHECK-NEXT:             b       .LBB1_3
-; CHECK-NEXT:     .LBB1_5:                                // %iffalse
+; CHECK-NEXT:             b       .LBB1_5
+; CHECK-NEXT:     .LBB1_3:                                // %iffalse
 ; CHECK-NEXT:             ldr     [[SPILL_REGISTER]], [sp], 
 ; CHECK-SAME:                                                        16
-; CHECK-NEXT:     // %bb.2:                               // %iffalse
+; CHECK-NEXT:     // %bb.4:                               // %iffalse
 ; CHECK-NEXT:             //APP
 ; CHECK-COUNT-29:         // reg use {{x[0-9]+}}
 ; CHECK-NOT:              // reg use {{x[0-9]+}}
 ; CHECK-NEXT:             //NO_APP
-; CHECK-NEXT:     .LBB1_3:                                // %common.ret
+; CHECK-NEXT:     .LBB1_5:                                // %common.ret
 ; CHECK-COUNT-5:          // 16-byte Folded Reload
 ; CHECK-NOT:              // 16-byte Folded Reload
 ; CHECK-NEXT:             ret
@@ -141,20 +141,20 @@ define void @relax_b_x16_taken() {
 ; CHECK-NEXT:             //APP
 ; CHECK-NEXT:             mov     x16, #1
 ; CHECK-NEXT:             //NO_APP
-; CHECK-NEXT:             cbnz    x16, .LBB2_1
-; CHECK-NEXT:     // %bb.3:                               // %entry
+; CHECK-NEXT:             cbnz    x16, .LBB2_2
+; CHECK-NEXT:     // %bb.1:                               // %entry
 ; CHECK-NEXT:             str     [[SPILL_REGISTER]], [sp,
 ; CHECK-SAME:                                                       -16]!
-; CHECK-NEXT:             b       .LBB2_4
-; CHECK-NEXT:     .LBB2_1:                                // %iftrue
+; CHECK-NEXT:             b       .LBB2_3
+; CHECK-NEXT:     .LBB2_2:                                // %iftrue
 ; CHECK-NEXT:             //APP
 ; CHECK-NEXT:             .zero   2048
 ; CHECK-NEXT:             //NO_APP
 ; CHECK-NEXT:             ret
-; CHECK-NEXT:     .LBB2_4:                                // %iffalse
+; CHECK-NEXT:     .LBB2_3:                                // %iffalse
 ; CHECK-NEXT:             ldr     [[SPILL_REGISTER]], [sp], 
 ; CHECK-SAME:                                                        16
-; CHECK-NEXT:     // %bb.2:                               // %iffalse
+; CHECK-NEXT:     // %bb.4:                               // %iffalse
 ; CHECK-NEXT:             //APP
 ; CHECK-NEXT:             // reg use x16
 ; CHECK-NEXT:             //NO_APP
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
index 1a901dc40f14c..f75a99ceb7d0b 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
@@ -7,14 +7,14 @@ define i32 @invert_bcc(float %x, float %y) #0 {
 ; CHECK-NEXT:    fcmp s0, s1
 ; CHECK-NEXT:    mov w0, wzr
 ; CHECK-NEXT:    mov w8, #42 ; =0x2a
-; CHECK-NEXT:    b.pl LBB0_3
-; CHECK-NEXT:    b LBB0_2
-; CHECK-NEXT:  LBB0_3:
-; CHECK-NEXT:    b.gt LBB0_2
-; CHECK-NEXT:  ; %bb.1: ; %common.ret
+; CHECK-NEXT:    b.pl LBB0_1
+; CHECK-NEXT:    b LBB0_3
+; CHECK-NEXT:  LBB0_1:
+; CHECK-NEXT:    b.gt LBB0_3
+; CHECK-NEXT:  ; %bb.2: ; %common.ret
 ; CHECK-NEXT:    str w8, [x8]
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:  LBB0_2: ; %bb2
+; CHECK-NEXT:  LBB0_3: ; %bb2
 ; CHECK-NEXT:    mov w0, #1 ; =0x1
 ; CHECK-NEXT:    mov w8, #9 ; =0x9
 ; CHECK-NEXT:    ; InlineAsm Start
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll b/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
index ab2ad19d0f1bf..dbec0fe3151aa 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
@@ -323,8 +323,8 @@ define amdgpu_kernel void @spill(ptr addrspace(1) %arg, i32 %cnd) #0 {
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    s_mov_b32 vcc_hi, 0
 ; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    s_cbranch_scc0 .LBB0_1
-; CHECK-NEXT:  ; %bb.3: ; %entry
+; CHECK-NEXT:    s_cbranch_scc0 .LBB0_2
+; CHECK-NEXT:  ; %bb.1: ; %entry
 ; CHECK-NEXT:    s_not_b64 exec, exec
 ; CHECK-NEXT:    buffer_store_dword v0, off, s[96:99], 0
 ; CHECK-NEXT:    v_writelane_b32 v0, s0, 0
@@ -334,7 +334,7 @@ define amdgpu_kernel void @spill(ptr addrspace(1) %arg, i32 %cnd) #0 {
 ; CHECK-NEXT:    s_add_u32 s0, s0, (.LBB0_4-.Lpost_getpc0)&4294967295
 ; CHECK-NEXT:    s_addc_u32 s1, s1, (.LBB0_4-.Lpost_getpc0)>>32
 ; CHECK-NEXT:    s_setpc_b64 s[0:1]
-; CHECK-NEXT:  .LBB0_1: ; %bb2
+; CHECK-NEXT:  .LBB0_2: ; %bb2
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    v_nop_e64
 ; CHECK-NEXT:    v_nop_e64
@@ -345,13 +345,13 @@ define amdgpu_kernel void @spill(ptr addrspace(1) %arg, i32 %cnd) #0 {
 ; CHECK-NEXT:    v_nop_e64
 ; CHECK-NEXT:    v_nop_e64
 ; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    s_branch .LBB0_2
+; CHECK-NEXT:    s_branch .LBB0_40
 ; CHECK-NEXT:  .LBB0_4: ; %bb3
 ; CHECK-NEXT:    v_readlane_b32 s0, v0, 0
 ; CHECK-NEXT:    v_readlane_b32 s1, v0, 1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[96:99], 0
 ; CHECK-NEXT:    s_not_b64 exec, exec
-; CHECK-NEXT:  .LBB0_2: ; %bb3
+; CHECK-NEXT:  .LBB0_40: ; %bb3
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; reg use s0
 ; CHECK-NEXT:    ;;#ASMEND
@@ -1255,8 +1255,8 @@ define void @spill_func(ptr addrspace(1) %arg) #0 {
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    s_mov_b32 vcc_hi, 0
 ; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    s_cbranch_scc0 .LBB1_1
-; CHECK-NEXT:  ; %bb.3: ; %entry
+; CHECK-NEXT:    s_cbranch_scc0 .LBB1_2
+; CHECK-NEXT:  ; %bb.1: ; %entry
 ; CHECK-NEXT:    s_not_b64 exec, exec
 ; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4
 ; CHECK-NEXT:    v_writelane_b32 v1, s0, 0
@@ -1266,7 +1266,7 @@ define void @spill_func(ptr addrspace(1) %arg) #0 {
 ; CHECK-NEXT:    s_add_u32 s0, s0, (.LBB1_4-.Lpost_getpc1)&4294967295
 ; CHECK-NEXT:    s_addc_u32 s1, s1, (.LBB1_4-.Lpost_getpc1)>>32
 ; CHECK-NEXT:    s_setpc_b64 s[0:1]
-; CHECK-NEXT:  .LBB1_1: ; %bb2
+; CHECK-NEXT:  .LBB1_2: ; %bb2
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    v_nop_e64
 ; CHECK-NEXT:    v_nop_e64
@@ -1277,13 +1277,13 @@ define void @spill_func(ptr addrspace(1) %arg) #0 {
 ; CHECK-NEXT:    v_nop_e64
 ; CHECK-NEXT:    v_nop_e64
 ; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    s_branch .LBB1_2
+; CHECK-NEXT:    s_branch .LBB1_40
 ; CHECK-NEXT:  .LBB1_4: ; %bb3
 ; CHECK-NEXT:    v_readlane_b32 s0, v1, 0
 ; CHECK-NEXT:    v_readlane_b32 s1, v1, 1
 ; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4
 ; CHECK-NEXT:    s_not_b64 exec, exec
-; CHECK-NEXT:  .LBB1_2: ; %bb3
+; CHECK-NEXT:  .LBB1_40: ; %bb3
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; reg use s0
 ; CHECK-NEXT:    ;;#ASMEND
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.mir b/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.mir
index 5f0f2dd1e8b08..e7eac7c837172 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.mir
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.mir
@@ -4,7 +4,7 @@
 # block as the branch expansion.
 
 # GCN-LABEL: long_branch_dbg_value:
-# GCN:  ; %bb.5: ; %bb
+# GCN:  ; %bb.1: ; %bb
 # GCN-NEXT:    ;DEBUG_VALUE: test_debug_value:globalptr_arg <- [DW_OP_plus_uconst 12, DW_OP_stack_value]
 # GCN-NEXT:    .loc 1 0 42 is_stmt 0 ; /tmp/test_debug_value.cl:0:42
 # GCN-NEXT:    s_getpc_b64 s[[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]]
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
index 779118bd33027..fadb34646f8c8 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-gfx1250.ll
@@ -23,25 +23,25 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
 define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(ptr addrspace(1) %arg, i32 %cnd) #0 {
 ; GCN-LABEL: uniform_conditional_max_short_forward_branch:
 ; GCN:       ; %bb.0: ; %bb
-; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x2c
+; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x2c nv
 ; GCN-NEXT:    s_wait_kmcnt 0x0
 ; GCN-NEXT:    s_cmp_eq_u32 s0, 0
-; GCN-NEXT:    s_cbranch_scc0 .LBB0_1
-; GCN-NEXT:  ; %bb.3: ; %bb
+; GCN-NEXT:    s_cbranch_scc0 .LBB0_20
+; GCN-NEXT:  ; %bb.1: ; %bb
 ; GCN-NEXT:    s_get_pc_i64 s[2:3]
 ; GCN-NEXT:  .Lpost_getpc0:
 ; GCN-NEXT:    s_add_co_u32 s2, s2, (.LBB0_2-.Lpost_getpc0)&4294967295
 ; GCN-NEXT:    s_add_co_ci_u32 s3, s3, (.LBB0_2-.Lpost_getpc0)>>32
 ; GCN-NEXT:    s_set_pc_i64 s[2:3]
-; GCN-NEXT:  .LBB0_1: ; %bb2
+; GCN-NEXT:  .LBB0_20: ; %bb2
 ; GCN-NEXT:    ;;#ASMSTART
 ; GCN-NEXT:    v_nop_e64
 ; GCN-NEXT:    v_nop_e64
 ; GCN-NEXT:    ;;#ASMEND
 ; GCN-NEXT:    s_sleep 0
 ; GCN-NEXT:  .LBB0_2: ; %bb3
-; GCN-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24
+; GCN-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24 nv
 ; GCN-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
 ; GCN-NEXT:    s_wait_kmcnt 0x0
 ; GCN-NEXT:    global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
@@ -50,22 +50,22 @@ define amdgpu_kernel void @uniform_conditional_max_short_forward_branch(ptr addr
 ;
 ; GCN-ADD-PC64-LABEL: uniform_conditional_max_short_forward_branch:
 ; GCN-ADD-PC64:       ; %bb.0: ; %bb
-; GCN-ADD-PC64-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GCN-ADD-PC64-NEXT:    s_load_b32 s0, s[4:5], 0x2c
+; GCN-ADD-PC64-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-ADD-PC64-NEXT:    s_load_b32 s0, s[4:5], 0x2c nv
 ; GCN-ADD-PC64-NEXT:    s_wait_kmcnt 0x0
 ; GCN-ADD-PC64-NEXT:    s_cmp_eq_u32 s0, 0
-; GCN-ADD-PC64-NEXT:    s_cbranch_scc0 .LBB0_1
-; GCN-ADD-PC64-NEXT:  ; %bb.3: ; %bb
+; GCN-ADD-PC64-NEXT:    s_cbranch_scc0 .LBB0_20
+; GCN-ADD-PC64-NEXT:  ; %bb.1: ; %bb
 ; GCN-ADD-PC64-NEXT:    s_add_pc_i64 .LBB0_2-.Lpost_addpc0
 ; GCN-ADD-PC64-NEXT:  .Lpost_addpc0:
-; GCN-ADD-PC64-NEXT:  .LBB0_1: ; %bb2
+; GCN-ADD-PC64-NEXT:  .LBB0_20: ; %bb2
 ; GCN-ADD-PC64-NEXT:    ;;#ASMSTART
 ; GCN-ADD-PC64-NEXT:    v_nop_e64
 ; GCN-ADD-PC64-NEXT:    v_nop_e64
 ; GCN-ADD-PC64-NEXT:    ;;#ASMEND
 ; GCN-ADD-PC64-NEXT:    s_sleep 0
 ; GCN-ADD-PC64-NEXT:  .LBB0_2: ; %bb3
-; GCN-ADD-PC64-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24
+; GCN-ADD-PC64-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24 nv
 ; GCN-ADD-PC64-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
 ; GCN-ADD-PC64-NEXT:    s_wait_kmcnt 0x0
 ; GCN-ADD-PC64-NEXT:    global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
@@ -114,18 +114,18 @@ bb3:
 define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrspace(1) %arg, i32 %cnd) #0 {
 ; GCN-LABEL: uniform_conditional_min_long_forward_branch:
 ; GCN:       ; %bb.0: ; %bb0
-; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x2c
+; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x2c nv
 ; GCN-NEXT:    s_wait_kmcnt 0x0
 ; GCN-NEXT:    s_cmp_eq_u32 s0, 0
-; GCN-NEXT:    s_cbranch_scc0 .LBB1_1
-; GCN-NEXT:  ; %bb.3: ; %bb0
+; GCN-NEXT:    s_cbranch_scc0 .LBB1_20
+; GCN-NEXT:  ; %bb.1: ; %bb0
 ; GCN-NEXT:    s_get_pc_i64 s[2:3]
 ; GCN-NEXT:  .Lpost_getpc1:
 ; GCN-NEXT:    s_add_co_u32 s2, s2, (.LBB1_2-.Lpost_getpc1)&4294967295
 ; GCN-NEXT:    s_add_co_ci_u32 s3, s3, (.LBB1_2-.Lpost_getpc1)>>32
 ; GCN-NEXT:    s_set_pc_i64 s[2:3]
-; GCN-NEXT:  .LBB1_1: ; %bb2
+; GCN-NEXT:  .LBB1_20: ; %bb2
 ; GCN-NEXT:    ;;#ASMSTART
 ; GCN-NEXT:    v_nop_e64
 ; GCN-NEXT:    v_nop_e64
@@ -133,7 +133,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrs
 ; GCN-NEXT:    s_sleep 0
 ; GCN-NEXT:    s_sleep 0
 ; GCN-NEXT:  .LBB1_2: ; %bb3
-; GCN-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24
+; GCN-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24 nv
 ; GCN-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
 ; GCN-NEXT:    s_wait_kmcnt 0x0
 ; GCN-NEXT:    global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
@@ -142,15 +142,15 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrs
 ;
 ; GCN-ADD-PC64-LABEL: uniform_conditional_min_long_forward_branch:
 ; GCN-ADD-PC64:       ; %bb.0: ; %bb0
-; GCN-ADD-PC64-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GCN-ADD-PC64-NEXT:    s_load_b32 s0, s[4:5], 0x2c
+; GCN-ADD-PC64-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-ADD-PC64-NEXT:    s_load_b32 s0, s[4:5], 0x2c nv
 ; GCN-ADD-PC64-NEXT:    s_wait_kmcnt 0x0
 ; GCN-ADD-PC64-NEXT:    s_cmp_eq_u32 s0, 0
-; GCN-ADD-PC64-NEXT:    s_cbranch_scc0 .LBB1_1
-; GCN-ADD-PC64-NEXT:  ; %bb.3: ; %bb0
+; GCN-ADD-PC64-NEXT:    s_cbranch_scc0 .LBB1_20
+; GCN-ADD-PC64-NEXT:  ; %bb.1: ; %bb0
 ; GCN-ADD-PC64-NEXT:    s_add_pc_i64 .LBB1_2-.Lpost_addpc1
 ; GCN-ADD-PC64-NEXT:  .Lpost_addpc1:
-; GCN-ADD-PC64-NEXT:  .LBB1_1: ; %bb2
+; GCN-ADD-PC64-NEXT:  .LBB1_20: ; %bb2
 ; GCN-ADD-PC64-NEXT:    ;;#ASMSTART
 ; GCN-ADD-PC64-NEXT:    v_nop_e64
 ; GCN-ADD-PC64-NEXT:    v_nop_e64
@@ -158,7 +158,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_branch(ptr addrs
 ; GCN-ADD-PC64-NEXT:    s_sleep 0
 ; GCN-ADD-PC64-NEXT:    s_sleep 0
 ; GCN-ADD-PC64-NEXT:  .LBB1_2: ; %bb3
-; GCN-ADD-PC64-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24
+; GCN-ADD-PC64-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24 nv
 ; GCN-ADD-PC64-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
 ; GCN-ADD-PC64-NEXT:    s_wait_kmcnt 0x0
 ; GCN-ADD-PC64-NEXT:    global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
@@ -209,18 +209,18 @@ bb3:
 define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(ptr addrspace(1) %arg, float %cnd) #0 {
 ; GCN-LABEL: uniform_conditional_min_long_forward_vcnd_branch:
 ; GCN:       ; %bb.0: ; %bb0
-; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1
-; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x2c
+; GCN-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_WAVE_MODE, 25, 1), 1 ; msbs: dst=0 src0=0 src1=0 src2=0
+; GCN-NEXT:    s_load_b32 s0, s[4:5], 0x2c nv
 ; GCN-NEXT:    s_wait_kmcnt 0x0
 ; GCN-NEXT:    s_cmp_eq_f32 s0, 0
-; GCN-NEXT:    s_cbranch_scc0 .LBB2_1
-; GCN-NEXT:  ; %bb.3: ; %bb0
+; GCN-NEXT:    s_cbranch_scc0 .LBB2_20
+; GCN-NEXT:  ; %bb.1: ; %bb0
 ; GCN-NEXT:    s_get_pc_i64 s[2:3]
 ; GCN-NEXT:  .Lpost_getpc2:
 ; GCN-NEXT:    s_add_co_u32 s2, s2, (.LBB2_2-.Lpost_getpc2)&4294967295
 ; GCN-NEXT:    s_add_co_ci_u32 s3, s3, (.LBB2_2-.Lpost_getpc2)>>32
 ; GCN-NEXT:    s_set_pc_i64 s[2:3]
-; GCN-NEXT:  .LBB2_1: ; %bb2
+; GCN-NEXT:  .LBB2_20: ; %bb2
 ; GCN-NEXT:    ;;#ASMSTART
 ; GCN-NEXT:     ; 32 bytes
 ; GCN-NEXT:    v_nop_e64
@@ -229,7 +229,7 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(ptr
 ; GCN-NEXT:    s_sleep 0
 ; GCN-NEXT:    s_sleep 0
 ; GCN-NEXT:  .LBB2_2: ; %bb3
-; GCN-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24
+; GCN-NEXT:    s_load_b64 s[2:3], s[4:5], 0x24 nv
 ; GCN-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s0
 ; GCN-NEXT:    s_wait_kmcnt 0x0
 ; GCN-NEXT:    global_store_b32 v0, v1, s[2:3] scope:SCOPE_SYS
@@ -238,15 +238,15 @@ define amdgpu_kernel void @uniform_conditional_min_long_forward_vcnd_branch(ptr
 ;
 ; GCN-ADD-PC64-LABEL: uniform_conditional_min_long_forward_vcnd...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/186688


More information about the llvm-commits mailing list