[llvm] r323922 - Followup on Proposal to move MIR physical register namespace to '$' sigil.

Puyan Lotfi via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jan 31 14:04:29 PST 2018


Author: zer0
Date: Wed Jan 31 14:04:26 2018
New Revision: 323922

URL: http://llvm.org/viewvc/llvm-project?rev=323922&view=rev
Log:
Followup on Proposal to move MIR physical register namespace to '$' sigil.

Discussed here:

http://lists.llvm.org/pipermail/llvm-dev/2018-January/120320.html

In preparation for adding support for named vregs, we are changing the sigil
for physical registers in MIR from '%' to '$'. This will prevent name clashes
between named physical registers and named vregs.
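
For illustration, a minimal sketch of the resulting syntax (the AArch64
operands here are hypothetical, not copied from a specific test in this
patch):

  Before this change, physical registers and virtual registers shared the
  '%' sigil, so a named physical register looked like a named vreg:

    %0:gpr64 = COPY %x0    ; '%x0' is the physical register

  After this change, physical registers use '$' while virtual registers
  keep '%', so the two namespaces can no longer collide:

    %0:gpr64 = COPY $x0    ; '$x0' is the physical register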



Modified:
    llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp
    llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-br.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bswap.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-constant.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fma.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-imm.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-load.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-mul.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-phi.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-store.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-xor.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/select.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/vastart.ll
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
    llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
    llvm/trunk/test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir
    llvm/trunk/test/CodeGen/AArch64/arm64-csldst-mmo.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir
    llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll
    llvm/trunk/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
    llvm/trunk/test/CodeGen/AArch64/ccmp-successor-probs.mir
    llvm/trunk/test/CodeGen/AArch64/cfi_restore.mir
    llvm/trunk/test/CodeGen/AArch64/falkor-hwpf-fix.mir
    llvm/trunk/test/CodeGen/AArch64/fast-regalloc-empty-bb-with-liveins.mir
    llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir
    llvm/trunk/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir
    llvm/trunk/test/CodeGen/AArch64/ldst-opt.mir
    llvm/trunk/test/CodeGen/AArch64/live-interval-analysis.mir
    llvm/trunk/test/CodeGen/AArch64/loh.mir
    llvm/trunk/test/CodeGen/AArch64/machine-combiner.mir
    llvm/trunk/test/CodeGen/AArch64/machine-copy-remove.mir
    llvm/trunk/test/CodeGen/AArch64/machine-dead-copy.mir
    llvm/trunk/test/CodeGen/AArch64/machine-outliner.mir
    llvm/trunk/test/CodeGen/AArch64/machine-scheduler.mir
    llvm/trunk/test/CodeGen/AArch64/machine-sink-zr.mir
    llvm/trunk/test/CodeGen/AArch64/machine-zero-copy-remove.mir
    llvm/trunk/test/CodeGen/AArch64/movimm-wzr.mir
    llvm/trunk/test/CodeGen/AArch64/phi-dbg.ll
    llvm/trunk/test/CodeGen/AArch64/reg-scavenge-frame.mir
    llvm/trunk/test/CodeGen/AArch64/regcoal-physreg.mir
    llvm/trunk/test/CodeGen/AArch64/scheduledag-constreg.mir
    llvm/trunk/test/CodeGen/AArch64/spill-fold.mir
    llvm/trunk/test/CodeGen/AArch64/spill-undef.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
    llvm/trunk/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir
    llvm/trunk/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
    llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
    llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir
    llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads.mir
    llvm/trunk/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
    llvm/trunk/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
    llvm/trunk/test/CodeGen/AMDGPU/dead_copy.mir
    llvm/trunk/test/CodeGen/AMDGPU/debug-value2.ll
    llvm/trunk/test/CodeGen/AMDGPU/detect-dead-lanes.mir
    llvm/trunk/test/CodeGen/AMDGPU/endpgm-dce.mir
    llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
    llvm/trunk/test/CodeGen/AMDGPU/fix-wwm-liveness.mir
    llvm/trunk/test/CodeGen/AMDGPU/flat-load-clustering.mir
    llvm/trunk/test/CodeGen/AMDGPU/fold-cndmask.mir
    llvm/trunk/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
    llvm/trunk/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
    llvm/trunk/test/CodeGen/AMDGPU/fold-multiple.mir
    llvm/trunk/test/CodeGen/AMDGPU/fold-operands-order.mir
    llvm/trunk/test/CodeGen/AMDGPU/hazard-inlineasm.mir
    llvm/trunk/test/CodeGen/AMDGPU/hazard.mir
    llvm/trunk/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
    llvm/trunk/test/CodeGen/AMDGPU/insert-waits-callee.mir
    llvm/trunk/test/CodeGen/AMDGPU/insert-waits-exp.mir
    llvm/trunk/test/CodeGen/AMDGPU/inserted-wait-states.mir
    llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
    llvm/trunk/test/CodeGen/AMDGPU/limit-coalesce.mir
    llvm/trunk/test/CodeGen/AMDGPU/liveness.mir
    llvm/trunk/test/CodeGen/AMDGPU/llvm.dbg.value.ll
    llvm/trunk/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
    llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
    llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
    llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
    llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
    llvm/trunk/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
    llvm/trunk/test/CodeGen/AMDGPU/merge-load-store.mir
    llvm/trunk/test/CodeGen/AMDGPU/merge-m0.mir
    llvm/trunk/test/CodeGen/AMDGPU/misched-killflags.mir
    llvm/trunk/test/CodeGen/AMDGPU/movrels-bug.mir
    llvm/trunk/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
    llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
    llvm/trunk/test/CodeGen/AMDGPU/readlane_exec0.mir
    llvm/trunk/test/CodeGen/AMDGPU/reduce-saveexec.mir
    llvm/trunk/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
    llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
    llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-prune.mir
    llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
    llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs.mir
    llvm/trunk/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
    llvm/trunk/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
    llvm/trunk/test/CodeGen/AMDGPU/schedule-regpressure.mir
    llvm/trunk/test/CodeGen/AMDGPU/sdwa-gfx9.mir
    llvm/trunk/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
    llvm/trunk/test/CodeGen/AMDGPU/sdwa-preserve.mir
    llvm/trunk/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
    llvm/trunk/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
    llvm/trunk/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
    llvm/trunk/test/CodeGen/AMDGPU/shrink-carry.mir
    llvm/trunk/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
    llvm/trunk/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
    llvm/trunk/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
    llvm/trunk/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
    llvm/trunk/test/CodeGen/AMDGPU/splitkit.mir
    llvm/trunk/test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir
    llvm/trunk/test/CodeGen/AMDGPU/subreg-intervals.mir
    llvm/trunk/test/CodeGen/AMDGPU/subreg_interference.mir
    llvm/trunk/test/CodeGen/AMDGPU/syncscopes.ll
    llvm/trunk/test/CodeGen/AMDGPU/twoaddr-mad.mir
    llvm/trunk/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
    llvm/trunk/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
    llvm/trunk/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir
    llvm/trunk/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir
    llvm/trunk/test/CodeGen/AMDGPU/waitcnt-permute.mir
    llvm/trunk/test/CodeGen/AMDGPU/waitcnt.mir
    llvm/trunk/test/CodeGen/AMDGPU/wqm.mir
    llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
    llvm/trunk/test/CodeGen/ARM/ARMLoadStoreDBG.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-call-lowering.ll
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-instruction-select-cmp.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-instruction-select-combos.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-legalize-fp.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-legalize-vfp4.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-param-lowering.ll
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-select-copy_to_regclass-of-fptosi.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-select-globals-pic.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-select-globals-ropi-rwpi.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-select-globals-static.mir
    llvm/trunk/test/CodeGen/ARM/GlobalISel/select-pr35926.mir
    llvm/trunk/test/CodeGen/ARM/PR32721_ifcvt_triangle_unanalyzable.mir
    llvm/trunk/test/CodeGen/ARM/Windows/vla-cpsr.ll
    llvm/trunk/test/CodeGen/ARM/cmp1-peephole-thumb.mir
    llvm/trunk/test/CodeGen/ARM/cmp2-peephole-thumb.mir
    llvm/trunk/test/CodeGen/ARM/constant-islands-cfg.mir
    llvm/trunk/test/CodeGen/ARM/dbg-range-extension.mir
    llvm/trunk/test/CodeGen/ARM/debug-info-arg.ll
    llvm/trunk/test/CodeGen/ARM/debug-info-branch-folding.ll
    llvm/trunk/test/CodeGen/ARM/expand-pseudos.mir
    llvm/trunk/test/CodeGen/ARM/fpoffset_overflow.mir
    llvm/trunk/test/CodeGen/ARM/ifcvt_canFallThroughTo.mir
    llvm/trunk/test/CodeGen/ARM/ifcvt_diamond_unanalyzable.mir
    llvm/trunk/test/CodeGen/ARM/ifcvt_forked_diamond_unanalyzable.mir
    llvm/trunk/test/CodeGen/ARM/ifcvt_simple_bad_zero_prob_succ.mir
    llvm/trunk/test/CodeGen/ARM/ifcvt_simple_unanalyzable.mir
    llvm/trunk/test/CodeGen/ARM/ifcvt_triangleWoCvtToNextEdge.mir
    llvm/trunk/test/CodeGen/ARM/imm-peephole-arm.mir
    llvm/trunk/test/CodeGen/ARM/imm-peephole-thumb.mir
    llvm/trunk/test/CodeGen/ARM/load_store_opt_kill.mir
    llvm/trunk/test/CodeGen/ARM/machine-copyprop.mir
    llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
    llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
    llvm/trunk/test/CodeGen/ARM/peephole-phi.mir
    llvm/trunk/test/CodeGen/ARM/pei-swiftself.mir
    llvm/trunk/test/CodeGen/ARM/prera-ldst-aliasing.mir
    llvm/trunk/test/CodeGen/ARM/prera-ldst-insertpt.mir
    llvm/trunk/test/CodeGen/ARM/scavenging.mir
    llvm/trunk/test/CodeGen/ARM/sched-it-debug-nodes.mir
    llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
    llvm/trunk/test/CodeGen/ARM/tail-dup-bundle.mir
    llvm/trunk/test/CodeGen/ARM/thumb1-ldst-opt.ll
    llvm/trunk/test/CodeGen/ARM/v6-jumptable-clobber.mir
    llvm/trunk/test/CodeGen/ARM/virtregrewriter-subregliveness.mir
    llvm/trunk/test/CodeGen/ARM/vldm-liveness.mir
    llvm/trunk/test/CodeGen/BPF/sockex2.ll
    llvm/trunk/test/CodeGen/Hexagon/addrmode-globoff.mir
    llvm/trunk/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir
    llvm/trunk/test/CodeGen/Hexagon/addrmode-rr-to-io.mir
    llvm/trunk/test/CodeGen/Hexagon/anti-dep-partial.mir
    llvm/trunk/test/CodeGen/Hexagon/bank-conflict-load.mir
    llvm/trunk/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
    llvm/trunk/test/CodeGen/Hexagon/branchfolder-insert-impdef.mir
    llvm/trunk/test/CodeGen/Hexagon/cext-opt-basic.mir
    llvm/trunk/test/CodeGen/Hexagon/cext-opt-numops.mir
    llvm/trunk/test/CodeGen/Hexagon/cext-opt-range-assert.mir
    llvm/trunk/test/CodeGen/Hexagon/cext-opt-range-offset.mir
    llvm/trunk/test/CodeGen/Hexagon/cext-opt-shifted-range.mir
    llvm/trunk/test/CodeGen/Hexagon/duplex-addi-global-imm.mir
    llvm/trunk/test/CodeGen/Hexagon/early-if-debug.mir
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-def-undef.mir
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-imm.mir
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-impuse.mir
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-rm-reg.mir
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-same-inputs.mir
    llvm/trunk/test/CodeGen/Hexagon/hwloop-redef-imm.mir
    llvm/trunk/test/CodeGen/Hexagon/ifcvt-common-kill.mir
    llvm/trunk/test/CodeGen/Hexagon/ifcvt-impuse-livein.mir
    llvm/trunk/test/CodeGen/Hexagon/ifcvt-live-subreg.mir
    llvm/trunk/test/CodeGen/Hexagon/invalid-dotnew-attempt.mir
    llvm/trunk/test/CodeGen/Hexagon/livephysregs-add-pristines.mir
    llvm/trunk/test/CodeGen/Hexagon/livephysregs-lane-masks.mir
    llvm/trunk/test/CodeGen/Hexagon/livephysregs-lane-masks2.mir
    llvm/trunk/test/CodeGen/Hexagon/mux-kill1.mir
    llvm/trunk/test/CodeGen/Hexagon/mux-kill2.mir
    llvm/trunk/test/CodeGen/Hexagon/mux-kill3.mir
    llvm/trunk/test/CodeGen/Hexagon/newvaluejump-c4.mir
    llvm/trunk/test/CodeGen/Hexagon/newvaluejump-kill2.mir
    llvm/trunk/test/CodeGen/Hexagon/newvaluejump-solo.mir
    llvm/trunk/test/CodeGen/Hexagon/packetize-load-store-aliasing.mir
    llvm/trunk/test/CodeGen/Hexagon/packetize-nvj-no-prune.mir
    llvm/trunk/test/CodeGen/Hexagon/post-ra-kill-update.mir
    llvm/trunk/test/CodeGen/Hexagon/postinc-baseoffset.mir
    llvm/trunk/test/CodeGen/Hexagon/rdf-copy-renamable-reserved.mir
    llvm/trunk/test/CodeGen/Hexagon/rdf-ehlabel-live.mir
    llvm/trunk/test/CodeGen/Hexagon/regalloc-bad-undef.mir
    llvm/trunk/test/CodeGen/Hexagon/regalloc-liveout-undef.mir
    llvm/trunk/test/CodeGen/Hexagon/target-flag-ext.mir
    llvm/trunk/test/CodeGen/Hexagon/unreachable-mbb-phi-subreg.mir
    llvm/trunk/test/CodeGen/Hexagon/vextract-basic.mir
    llvm/trunk/test/CodeGen/Lanai/peephole-compare.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/addrspace-memoperands.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/atomic-memoperands.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/cfi.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/expected-target-flag-name.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/generic-virtual-registers-error.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/generic-virtual-registers-with-regbank-error.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/intrinsics.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/invalid-target-flag-name.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/invalid-target-memoperands.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/multiple-lhs-operands.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/register-operand-bank.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/swp.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/target-flags.mir
    llvm/trunk/test/CodeGen/MIR/AArch64/target-memoperands.mir
    llvm/trunk/test/CodeGen/MIR/AMDGPU/expected-target-index-name.mir
    llvm/trunk/test/CodeGen/MIR/AMDGPU/invalid-target-index-operand.mir
    llvm/trunk/test/CodeGen/MIR/AMDGPU/syncscopes.mir
    llvm/trunk/test/CodeGen/MIR/AMDGPU/target-flags.mir
    llvm/trunk/test/CodeGen/MIR/AMDGPU/target-index-operands.mir
    llvm/trunk/test/CodeGen/MIR/ARM/bundled-instructions.mir
    llvm/trunk/test/CodeGen/MIR/ARM/cfi-same-value.mir
    llvm/trunk/test/CodeGen/MIR/ARM/expected-closing-brace.mir
    llvm/trunk/test/CodeGen/MIR/ARM/extraneous-closing-brace-error.mir
    llvm/trunk/test/CodeGen/MIR/ARM/nested-instruction-bundle-error.mir
    llvm/trunk/test/CodeGen/MIR/Hexagon/parse-lane-masks.mir
    llvm/trunk/test/CodeGen/MIR/Hexagon/target-flags.mir
    llvm/trunk/test/CodeGen/MIR/Mips/expected-global-value-or-symbol-after-call-entry.mir
    llvm/trunk/test/CodeGen/MIR/Mips/memory-operands.mir
    llvm/trunk/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
    llvm/trunk/test/CodeGen/MIR/X86/auto-successor.mir
    llvm/trunk/test/CodeGen/MIR/X86/basic-block-liveins.mir
    llvm/trunk/test/CodeGen/MIR/X86/basic-block-not-at-start-of-line-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/block-address-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/branch-probabilities.mir
    llvm/trunk/test/CodeGen/MIR/X86/callee-saved-info.mir
    llvm/trunk/test/CodeGen/MIR/X86/cfi-def-cfa-offset.mir
    llvm/trunk/test/CodeGen/MIR/X86/cfi-def-cfa-register.mir
    llvm/trunk/test/CodeGen/MIR/X86/cfi-offset.mir
    llvm/trunk/test/CodeGen/MIR/X86/constant-pool.mir
    llvm/trunk/test/CodeGen/MIR/X86/dead-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/def-register-already-tied-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/diexpr-win32.mir
    llvm/trunk/test/CodeGen/MIR/X86/duplicate-memory-operand-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/duplicate-register-flag-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/early-clobber-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-align-in-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-alignment-after-align-in-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-basic-block-at-start-of-body.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-block-reference-in-blockaddress.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-comma-after-cfi-register.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-comma-after-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-different-implicit-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-different-implicit-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-function-reference-after-blockaddress.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-global-value-after-blockaddress.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-integer-after-offset-sign.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-integer-after-tied-def.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-integer-in-successor-weight.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-load-or-store-in-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-machine-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-metadata-node-after-debug-location.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-metadata-node-after-exclaim.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-metadata-node-in-stack-object.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-named-register-in-allocation-hint.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-named-register-in-callee-saved-register.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-named-register-livein.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-newline-at-end-of-list.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-number-after-bb.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-offset-after-cfi-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-pointer-value-in-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-positive-alignment-after-align.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-register-after-cfi-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-register-after-flags.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-size-integer-after-memory-operation.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-stack-object.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-subregister-after-colon.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-target-flag-name.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-tied-def-after-lparen.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-value-in-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/expected-virtual-register-in-functions-livein.mir
    llvm/trunk/test/CodeGen/MIR/X86/external-symbol-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/fixed-stack-memory-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/fixed-stack-object-redefinition-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/fixed-stack-objects.mir
    llvm/trunk/test/CodeGen/MIR/X86/frame-info-save-restore-points.mir
    llvm/trunk/test/CodeGen/MIR/X86/frame-info-stack-references.mir
    llvm/trunk/test/CodeGen/MIR/X86/frame-setup-instruction-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/function-liveins.mir
    llvm/trunk/test/CodeGen/MIR/X86/generic-instr-type.mir
    llvm/trunk/test/CodeGen/MIR/X86/global-value-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/immediate-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/implicit-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/inline-asm-registers.mir
    llvm/trunk/test/CodeGen/MIR/X86/instructions-debug-location.mir
    llvm/trunk/test/CodeGen/MIR/X86/invalid-constant-pool-item.mir
    llvm/trunk/test/CodeGen/MIR/X86/invalid-target-flag-name.mir
    llvm/trunk/test/CodeGen/MIR/X86/invalid-tied-def-index-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/jump-table-info.mir
    llvm/trunk/test/CodeGen/MIR/X86/jump-table-redefinition-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/killed-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/large-cfi-offset-number-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/large-immediate-operand-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/large-index-number-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/large-offset-number-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/large-size-in-memory-operand-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/liveout-register-mask.mir
    llvm/trunk/test/CodeGen/MIR/X86/machine-basic-block-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/machine-instructions.mir
    llvm/trunk/test/CodeGen/MIR/X86/machine-verifier.mir
    llvm/trunk/test/CodeGen/MIR/X86/memory-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/metadata-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/missing-closing-quote.mir
    llvm/trunk/test/CodeGen/MIR/X86/missing-comma.mir
    llvm/trunk/test/CodeGen/MIR/X86/missing-implicit-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/named-registers.mir
    llvm/trunk/test/CodeGen/MIR/X86/newline-handling.mir
    llvm/trunk/test/CodeGen/MIR/X86/null-register-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/register-mask-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/register-operand-class-invalid0.mir
    llvm/trunk/test/CodeGen/MIR/X86/register-operand-class-invalid1.mir
    llvm/trunk/test/CodeGen/MIR/X86/register-operand-class.mir
    llvm/trunk/test/CodeGen/MIR/X86/register-operands-target-flag-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/renamable-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/roundtrip.mir
    llvm/trunk/test/CodeGen/MIR/X86/simple-register-allocation-hints.mir
    llvm/trunk/test/CodeGen/MIR/X86/spill-slot-fixed-stack-objects.mir
    llvm/trunk/test/CodeGen/MIR/X86/stack-object-invalid-name.mir
    llvm/trunk/test/CodeGen/MIR/X86/stack-object-operand-name-mismatch-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/stack-object-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/stack-object-redefinition-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/stack-objects.mir
    llvm/trunk/test/CodeGen/MIR/X86/standalone-register-error.mir
    llvm/trunk/test/CodeGen/MIR/X86/subreg-on-physreg.mir
    llvm/trunk/test/CodeGen/MIR/X86/subregister-index-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/subregister-operands.mir
    llvm/trunk/test/CodeGen/MIR/X86/successor-basic-blocks-weights.mir
    llvm/trunk/test/CodeGen/MIR/X86/successor-basic-blocks.mir
    llvm/trunk/test/CodeGen/MIR/X86/tied-def-operand-invalid.mir
    llvm/trunk/test/CodeGen/MIR/X86/tied-physical-regs-match.mir
    llvm/trunk/test/CodeGen/MIR/X86/undef-register-flag.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-fixed-stack-object.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-global-value.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-ir-block-in-blockaddress.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-ir-block-slot-in-blockaddress.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-jump-table-id.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-named-global-value.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-stack-object.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-value-in-memory-operand.mir
    llvm/trunk/test/CodeGen/MIR/X86/undefined-virtual-register.mir
    llvm/trunk/test/CodeGen/MIR/X86/unexpected-type-phys.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-machine-basic-block.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-metadata-keyword.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-metadata-node.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-named-machine-basic-block.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-register.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-subregister-index-op.mir
    llvm/trunk/test/CodeGen/MIR/X86/unknown-subregister-index.mir
    llvm/trunk/test/CodeGen/MIR/X86/variable-sized-stack-objects.mir
    llvm/trunk/test/CodeGen/MIR/X86/virtual-registers.mir
    llvm/trunk/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir
    llvm/trunk/test/CodeGen/Mips/compactbranches/empty-block.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dext-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dext-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextm-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextm-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextm-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextu-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextu-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextu-size-valid.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dextu-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dins-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dins-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dins-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dinsm-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dinsm-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dinsm-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dinsu-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dinsu-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/dinsu-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/ext-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/ext-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/ext-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/ins-pos-size.mir
    llvm/trunk/test/CodeGen/Mips/instverify/ins-pos.mir
    llvm/trunk/test/CodeGen/Mips/instverify/ins-size.mir
    llvm/trunk/test/CodeGen/Mips/llvm-ir/call.ll
    llvm/trunk/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir
    llvm/trunk/test/CodeGen/Mips/mirparser/target-flags-pic-o32.mir
    llvm/trunk/test/CodeGen/Mips/mirparser/target-flags-pic.mir
    llvm/trunk/test/CodeGen/Mips/mirparser/target-flags-static-tls.mir
    llvm/trunk/test/CodeGen/Mips/msa/emergency-spill.mir
    llvm/trunk/test/CodeGen/Mips/sll-micromips-r6-encoding.mir
    llvm/trunk/test/CodeGen/PowerPC/aantidep-def-ec.mir
    llvm/trunk/test/CodeGen/PowerPC/addegluecrash.ll
    llvm/trunk/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir
    llvm/trunk/test/CodeGen/PowerPC/aggressive-anti-dep-breaker-subreg.ll
    llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll
    llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
    llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir
    llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
    llvm/trunk/test/CodeGen/PowerPC/debuginfo-split-int.ll
    llvm/trunk/test/CodeGen/PowerPC/debuginfo-stackarg.ll
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-1.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-10.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-2.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-3.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-4.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-5.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-6.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-7.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-8.mir
    llvm/trunk/test/CodeGen/PowerPC/expand-isel-9.mir
    llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll
    llvm/trunk/test/CodeGen/PowerPC/livephysregs.mir
    llvm/trunk/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir
    llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
    llvm/trunk/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir
    llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll
    llvm/trunk/test/CodeGen/PowerPC/scavenging.mir
    llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
    llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
    llvm/trunk/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
    llvm/trunk/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
    llvm/trunk/test/CodeGen/SystemZ/cond-move-04.mir
    llvm/trunk/test/CodeGen/SystemZ/cond-move-05.mir
    llvm/trunk/test/CodeGen/SystemZ/fp-cmp-07.mir
    llvm/trunk/test/CodeGen/SystemZ/fp-conv-17.mir
    llvm/trunk/test/CodeGen/SystemZ/load-and-test.mir
    llvm/trunk/test/CodeGen/SystemZ/lower-copy-undef-src.mir
    llvm/trunk/test/CodeGen/SystemZ/pr32505.ll
    llvm/trunk/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
    llvm/trunk/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll
    llvm/trunk/test/CodeGen/Thumb/machine-cse-physreg.mir
    llvm/trunk/test/CodeGen/Thumb/tbb-reuse.mir
    llvm/trunk/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir
    llvm/trunk/test/CodeGen/Thumb2/t2sizereduction.mir
    llvm/trunk/test/CodeGen/Thumb2/tbb-removeadd.mir
    llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll
    llvm/trunk/test/CodeGen/X86/2010-05-28-Crash.ll
    llvm/trunk/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
    llvm/trunk/test/CodeGen/X86/3addr-or.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-constant.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-GV.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v128.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-x32.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-add.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsi.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsr.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-cmp.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-constant.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-gep.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-inc.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-trunc.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-undef.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
    llvm/trunk/test/CodeGen/X86/add-i64.ll
    llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll
    llvm/trunk/test/CodeGen/X86/add.ll
    llvm/trunk/test/CodeGen/X86/addcarry.ll
    llvm/trunk/test/CodeGen/X86/and-encoding.ll
    llvm/trunk/test/CodeGen/X86/anyext.ll
    llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll
    llvm/trunk/test/CodeGen/X86/avx-cast.ll
    llvm/trunk/test/CodeGen/X86/avx-cmp.ll
    llvm/trunk/test/CodeGen/X86/avx-insertelt.ll
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx-load-store.ll
    llvm/trunk/test/CodeGen/X86/avx-splat.ll
    llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll
    llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll
    llvm/trunk/test/CodeGen/X86/avx2-conversions.ll
    llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll
    llvm/trunk/test/CodeGen/X86/avx2-shift.ll
    llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll
    llvm/trunk/test/CodeGen/X86/avx512-arith.ll
    llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
    llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll
    llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
    llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
    llvm/trunk/test/CodeGen/X86/avx512-ext.ll
    llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll
    llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll
    llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
    llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
    llvm/trunk/test/CodeGen/X86/avx512-memfold.ll
    llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll
    llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll
    llvm/trunk/test/CodeGen/X86/avx512-schedule.ll
    llvm/trunk/test/CodeGen/X86/avx512-select.ll
    llvm/trunk/test/CodeGen/X86/avx512-shift.ll
    llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
    llvm/trunk/test/CodeGen/X86/avx512-trunc.ll
    llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
    llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
    llvm/trunk/test/CodeGen/X86/avx512-vec3-crash.ll
    llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll
    llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
    llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll
    llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512f-vec-test-testn.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
    llvm/trunk/test/CodeGen/X86/avx512vl-vec-test-testn.ll
    llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-128.ll
    llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-256.ll
    llvm/trunk/test/CodeGen/X86/bitcast-and-setcc-512.ll
    llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
    llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
    llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
    llvm/trunk/test/CodeGen/X86/bitcast-int-to-vector.ll
    llvm/trunk/test/CodeGen/X86/bitcast-setcc-128.ll
    llvm/trunk/test/CodeGen/X86/bitcast-setcc-256.ll
    llvm/trunk/test/CodeGen/X86/bitcast-setcc-512.ll
    llvm/trunk/test/CodeGen/X86/bitreverse.ll
    llvm/trunk/test/CodeGen/X86/block-placement.mir
    llvm/trunk/test/CodeGen/X86/bmi-schedule.ll
    llvm/trunk/test/CodeGen/X86/bmi.ll
    llvm/trunk/test/CodeGen/X86/bool-simplify.ll
    llvm/trunk/test/CodeGen/X86/bool-vector.ll
    llvm/trunk/test/CodeGen/X86/branchfolding-undef.mir
    llvm/trunk/test/CodeGen/X86/broadcastm-lowering.ll
    llvm/trunk/test/CodeGen/X86/bypass-slow-division-32.ll
    llvm/trunk/test/CodeGen/X86/bypass-slow-division-64.ll
    llvm/trunk/test/CodeGen/X86/clz.ll
    llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll
    llvm/trunk/test/CodeGen/X86/cmov-promotion.ll
    llvm/trunk/test/CodeGen/X86/cmov.ll
    llvm/trunk/test/CodeGen/X86/combine-abs.ll
    llvm/trunk/test/CodeGen/X86/compress_expand.ll
    llvm/trunk/test/CodeGen/X86/conditional-tailcall-samedest.mir
    llvm/trunk/test/CodeGen/X86/critical-edge-split-2.ll
    llvm/trunk/test/CodeGen/X86/ctpop-combine.ll
    llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll
    llvm/trunk/test/CodeGen/X86/divide-by-constant.ll
    llvm/trunk/test/CodeGen/X86/divrem.ll
    llvm/trunk/test/CodeGen/X86/divrem8_ext.ll
    llvm/trunk/test/CodeGen/X86/domain-reassignment.mir
    llvm/trunk/test/CodeGen/X86/dynamic-regmask.ll
    llvm/trunk/test/CodeGen/X86/eflags-copy-expansion.mir
    llvm/trunk/test/CodeGen/X86/evex-to-vex-compress.mir
    llvm/trunk/test/CodeGen/X86/expand-vr64-gr64-copy.mir
    llvm/trunk/test/CodeGen/X86/extractelement-index.ll
    llvm/trunk/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-cmp.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-nontemporal.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-sext-zext.ll
    llvm/trunk/test/CodeGen/X86/fast-isel-shift.ll
    llvm/trunk/test/CodeGen/X86/fixup-bw-copy.ll
    llvm/trunk/test/CodeGen/X86/fixup-bw-copy.mir
    llvm/trunk/test/CodeGen/X86/fixup-bw-inst.mir
    llvm/trunk/test/CodeGen/X86/gpr-to-mask.ll
    llvm/trunk/test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll
    llvm/trunk/test/CodeGen/X86/half.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-smax.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-smin.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-umax.ll
    llvm/trunk/test/CodeGen/X86/horizontal-reduce-umin.ll
    llvm/trunk/test/CodeGen/X86/iabs.ll
    llvm/trunk/test/CodeGen/X86/illegal-bitfield-loadstore.ll
    llvm/trunk/test/CodeGen/X86/implicit-null-checks.mir
    llvm/trunk/test/CodeGen/X86/implicit-use-spill.mir
    llvm/trunk/test/CodeGen/X86/imul.ll
    llvm/trunk/test/CodeGen/X86/invalid-liveness.mir
    llvm/trunk/test/CodeGen/X86/ipra-inline-asm.ll
    llvm/trunk/test/CodeGen/X86/ipra-reg-alias.ll
    llvm/trunk/test/CodeGen/X86/ipra-reg-usage.ll
    llvm/trunk/test/CodeGen/X86/lea-3.ll
    llvm/trunk/test/CodeGen/X86/lea-opt-cse3.ll
    llvm/trunk/test/CodeGen/X86/lea-opt-with-debug.mir
    llvm/trunk/test/CodeGen/X86/lea32-schedule.ll
    llvm/trunk/test/CodeGen/X86/leaFixup32.mir
    llvm/trunk/test/CodeGen/X86/leaFixup64.mir
    llvm/trunk/test/CodeGen/X86/loop-search.ll
    llvm/trunk/test/CodeGen/X86/lzcnt-schedule.ll
    llvm/trunk/test/CodeGen/X86/lzcnt-zext-cmp.ll
    llvm/trunk/test/CodeGen/X86/machine-combiner-int.ll
    llvm/trunk/test/CodeGen/X86/machine-copy-prop.mir
    llvm/trunk/test/CodeGen/X86/machine-cse.ll
    llvm/trunk/test/CodeGen/X86/machine-region-info.mir
    llvm/trunk/test/CodeGen/X86/masked_gather_scatter.ll
    llvm/trunk/test/CodeGen/X86/masked_memop.ll
    llvm/trunk/test/CodeGen/X86/misched-copy.ll
    llvm/trunk/test/CodeGen/X86/movmsk.ll
    llvm/trunk/test/CodeGen/X86/movtopush.mir
    llvm/trunk/test/CodeGen/X86/mul-constant-i16.ll
    llvm/trunk/test/CodeGen/X86/mul-constant-i32.ll
    llvm/trunk/test/CodeGen/X86/mul-constant-result.ll
    llvm/trunk/test/CodeGen/X86/negate-i1.ll
    llvm/trunk/test/CodeGen/X86/non-value-mem-operand.mir
    llvm/trunk/test/CodeGen/X86/oddshuffles.ll
    llvm/trunk/test/CodeGen/X86/or-lea.ll
    llvm/trunk/test/CodeGen/X86/patchpoint-verifiable.mir
    llvm/trunk/test/CodeGen/X86/peephole-recurrence.mir
    llvm/trunk/test/CodeGen/X86/pmul.ll
    llvm/trunk/test/CodeGen/X86/popcnt-schedule.ll
    llvm/trunk/test/CodeGen/X86/popcnt.ll
    llvm/trunk/test/CodeGen/X86/post-ra-sched-with-debug.mir
    llvm/trunk/test/CodeGen/X86/pr22970.ll
    llvm/trunk/test/CodeGen/X86/pr27681.mir
    llvm/trunk/test/CodeGen/X86/pr28173.ll
    llvm/trunk/test/CodeGen/X86/pr28560.ll
    llvm/trunk/test/CodeGen/X86/pr29061.ll
    llvm/trunk/test/CodeGen/X86/pr30430.ll
    llvm/trunk/test/CodeGen/X86/pr32282.ll
    llvm/trunk/test/CodeGen/X86/pr32284.ll
    llvm/trunk/test/CodeGen/X86/pr32329.ll
    llvm/trunk/test/CodeGen/X86/pr32345.ll
    llvm/trunk/test/CodeGen/X86/pr32484.ll
    llvm/trunk/test/CodeGen/X86/pr34592.ll
    llvm/trunk/test/CodeGen/X86/pr34653.ll
    llvm/trunk/test/CodeGen/X86/pr35765.ll
    llvm/trunk/test/CodeGen/X86/pre-coalesce.mir
    llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-extend.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-popcnt.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-shift.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-trunc.ll
    llvm/trunk/test/CodeGen/X86/promote-vec3.ll
    llvm/trunk/test/CodeGen/X86/psubus.ll
    llvm/trunk/test/CodeGen/X86/rdpid-schedule.ll
    llvm/trunk/test/CodeGen/X86/rdpid.ll
    llvm/trunk/test/CodeGen/X86/reduce-trunc-shl.ll
    llvm/trunk/test/CodeGen/X86/regalloc-advanced-split-cost.ll
    llvm/trunk/test/CodeGen/X86/remat-phys-dead.ll
    llvm/trunk/test/CodeGen/X86/sar_fold64.ll
    llvm/trunk/test/CodeGen/X86/scalar-fp-to-i64.ll
    llvm/trunk/test/CodeGen/X86/scalar_widen_div.ll
    llvm/trunk/test/CodeGen/X86/scavenger.mir
    llvm/trunk/test/CodeGen/X86/schedule-x86-64-shld.ll
    llvm/trunk/test/CodeGen/X86/schedule-x86_64.ll
    llvm/trunk/test/CodeGen/X86/select.ll
    llvm/trunk/test/CodeGen/X86/select_const.ll
    llvm/trunk/test/CodeGen/X86/setcc-lowering.ll
    llvm/trunk/test/CodeGen/X86/sext-i1.ll
    llvm/trunk/test/CodeGen/X86/shift-combine.ll
    llvm/trunk/test/CodeGen/X86/shift-double.ll
    llvm/trunk/test/CodeGen/X86/shrink-compare.ll
    llvm/trunk/test/CodeGen/X86/shrink_wrap_dbg_value.mir
    llvm/trunk/test/CodeGen/X86/shuffle-vs-trunc-256.ll
    llvm/trunk/test/CodeGen/X86/simple-register-allocation-read-undef.mir
    llvm/trunk/test/CodeGen/X86/sqrt-fastmath-mir.ll
    llvm/trunk/test/CodeGen/X86/sse2-schedule.ll
    llvm/trunk/test/CodeGen/X86/sse42-schedule.ll
    llvm/trunk/test/CodeGen/X86/subvector-broadcast.ll
    llvm/trunk/test/CodeGen/X86/swift-return.ll
    llvm/trunk/test/CodeGen/X86/switch-lower-peel-top-case.ll
    llvm/trunk/test/CodeGen/X86/tail-call-conditional.mir
    llvm/trunk/test/CodeGen/X86/tail-dup-debugloc.ll
    llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir
    llvm/trunk/test/CodeGen/X86/tail-merge-debugloc.ll
    llvm/trunk/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
    llvm/trunk/test/CodeGen/X86/tbm_patterns.ll
    llvm/trunk/test/CodeGen/X86/trunc-subvector.ll
    llvm/trunk/test/CodeGen/X86/umul-with-overflow.ll
    llvm/trunk/test/CodeGen/X86/unreachable-mbb-undef-phi.mir
    llvm/trunk/test/CodeGen/X86/update-terminator-debugloc.ll
    llvm/trunk/test/CodeGen/X86/update-terminator.mir
    llvm/trunk/test/CodeGen/X86/urem-i8-constant.ll
    llvm/trunk/test/CodeGen/X86/urem-power-of-two.ll
    llvm/trunk/test/CodeGen/X86/var-permute-256.ll
    llvm/trunk/test/CodeGen/X86/vec_cmp_sint-128.ll
    llvm/trunk/test/CodeGen/X86/vec_cmp_uint-128.ll
    llvm/trunk/test/CodeGen/X86/vec_fp_to_int.ll
    llvm/trunk/test/CodeGen/X86/vec_ins_extract-1.ll
    llvm/trunk/test/CodeGen/X86/vec_insert-4.ll
    llvm/trunk/test/CodeGen/X86/vec_insert-5.ll
    llvm/trunk/test/CodeGen/X86/vec_insert-8.ll
    llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll
    llvm/trunk/test/CodeGen/X86/vec_int_to_fp.ll
    llvm/trunk/test/CodeGen/X86/vec_minmax_sint.ll
    llvm/trunk/test/CodeGen/X86/vec_minmax_uint.ll
    llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll
    llvm/trunk/test/CodeGen/X86/vector-bitreverse.ll
    llvm/trunk/test/CodeGen/X86/vector-compare-all_of.ll
    llvm/trunk/test/CodeGen/X86/vector-compare-any_of.ll
    llvm/trunk/test/CodeGen/X86/vector-compare-results.ll
    llvm/trunk/test/CodeGen/X86/vector-extend-inreg.ll
    llvm/trunk/test/CodeGen/X86/vector-half-conversions.ll
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-128.ll
    llvm/trunk/test/CodeGen/X86/vector-lzcnt-256.ll
    llvm/trunk/test/CodeGen/X86/vector-popcnt-128.ll
    llvm/trunk/test/CodeGen/X86/vector-popcnt-256.ll
    llvm/trunk/test/CodeGen/X86/vector-rotate-128.ll
    llvm/trunk/test/CodeGen/X86/vector-rotate-256.ll
    llvm/trunk/test/CodeGen/X86/vector-sext.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v16.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-512-v8.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-avx512.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-v1.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-variable-256.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-math.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-packus.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-ssat.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc-usat.ll
    llvm/trunk/test/CodeGen/X86/vector-trunc.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-128.ll
    llvm/trunk/test/CodeGen/X86/vector-tzcnt-256.ll
    llvm/trunk/test/CodeGen/X86/verifier-phi-fail0.mir
    llvm/trunk/test/CodeGen/X86/verifier-phi.mir
    llvm/trunk/test/CodeGen/X86/virtual-registers-cleared-in-machine-functions-liveins.ll
    llvm/trunk/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
    llvm/trunk/test/CodeGen/X86/vselect-pcmp.ll
    llvm/trunk/test/CodeGen/X86/widen_bitops-0.ll
    llvm/trunk/test/CodeGen/X86/x86-64-baseptr.ll
    llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll
    llvm/trunk/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
    llvm/trunk/test/CodeGen/X86/xor-combine-debugloc.ll
    llvm/trunk/test/CodeGen/X86/xray-empty-firstmbb.mir
    llvm/trunk/test/CodeGen/X86/xray-empty-function.mir
    llvm/trunk/test/CodeGen/X86/xray-multiplerets-in-blocks.mir
    llvm/trunk/test/CodeGen/X86/zext-demanded.ll
    llvm/trunk/test/DebugInfo/ARM/PR16736.ll
    llvm/trunk/test/DebugInfo/ARM/sdag-split-arg.ll
    llvm/trunk/test/DebugInfo/ARM/sdag-split-arg1.ll
    llvm/trunk/test/DebugInfo/COFF/fpo-csrs.ll
    llvm/trunk/test/DebugInfo/COFF/local-variable-gap.ll
    llvm/trunk/test/DebugInfo/COFF/pieces.ll
    llvm/trunk/test/DebugInfo/COFF/register-variables.ll
    llvm/trunk/test/DebugInfo/MIR/AArch64/clobber-sp.mir
    llvm/trunk/test/DebugInfo/MIR/AArch64/implicit-def-dead-scope.mir
    llvm/trunk/test/DebugInfo/MIR/ARM/split-superreg-complex.mir
    llvm/trunk/test/DebugInfo/MIR/ARM/split-superreg-piece.mir
    llvm/trunk/test/DebugInfo/MIR/ARM/split-superreg.mir
    llvm/trunk/test/DebugInfo/MIR/Mips/last-inst-bundled.mir
    llvm/trunk/test/DebugInfo/MIR/X86/bit-piece-dh.mir
    llvm/trunk/test/DebugInfo/MIR/X86/empty-inline.mir
    llvm/trunk/test/DebugInfo/MIR/X86/kill-after-spill.mir
    llvm/trunk/test/DebugInfo/MIR/X86/live-debug-values-3preds.mir
    llvm/trunk/test/DebugInfo/MIR/X86/live-debug-values-spill.mir
    llvm/trunk/test/DebugInfo/MIR/X86/live-debug-values.mir
    llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
    llvm/trunk/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg.mir
    llvm/trunk/test/DebugInfo/MIR/X86/livedebugvalues-limit.mir
    llvm/trunk/test/DebugInfo/MIR/X86/mlicm-hoist.mir
    llvm/trunk/test/DebugInfo/MIR/X86/no-cfi-loc.mir
    llvm/trunk/test/DebugInfo/MIR/X86/regcoalescer.mir
    llvm/trunk/test/DebugInfo/MSP430/sdagsplit-1.ll
    llvm/trunk/test/DebugInfo/X86/bbjoin.ll
    llvm/trunk/test/DebugInfo/X86/dbg-addr-dse.ll
    llvm/trunk/test/DebugInfo/X86/dbg-addr.ll
    llvm/trunk/test/DebugInfo/X86/dbg-value-dag-combine.ll
    llvm/trunk/test/DebugInfo/X86/dbg-value-frame-index.ll
    llvm/trunk/test/DebugInfo/X86/dbg-value-regmask-clobber.ll
    llvm/trunk/test/DebugInfo/X86/dbg-value-transfer-order.ll
    llvm/trunk/test/DebugInfo/X86/debug-loc-asan.ll
    llvm/trunk/test/DebugInfo/X86/live-debug-values.ll
    llvm/trunk/test/DebugInfo/X86/live-debug-vars-dse.mir
    llvm/trunk/test/DebugInfo/X86/op_deref.ll
    llvm/trunk/test/DebugInfo/X86/pieces-4.ll
    llvm/trunk/test/DebugInfo/X86/pr34545.ll
    llvm/trunk/test/DebugInfo/X86/sdag-combine.ll
    llvm/trunk/test/DebugInfo/X86/sdag-salvage-add.ll
    llvm/trunk/test/DebugInfo/X86/sdag-split-arg.ll
    llvm/trunk/test/DebugInfo/X86/sdagsplit-1.ll
    llvm/trunk/test/DebugInfo/X86/spill-indirect-nrvo.ll
    llvm/trunk/test/DebugInfo/X86/spill-nontrivial-param.ll
    llvm/trunk/test/DebugInfo/X86/spill-nospill.ll
    llvm/trunk/test/DebugInfo/X86/vla.ll
    llvm/trunk/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
    llvm/trunk/test/Verifier/test_g_phi.mir
    llvm/trunk/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
    llvm/trunk/unittests/CodeGen/MachineInstrTest.cpp
    llvm/trunk/unittests/CodeGen/MachineOperandTest.cpp
    llvm/trunk/unittests/MI/LiveIntervalTest.cpp

Modified: llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRParser/MILexer.cpp Wed Jan 31 14:04:26 2018
@@ -410,17 +410,26 @@ static bool isRegisterChar(char C) {
   return isIdentifierChar(C) && C != '.';
 }
 
-static Cursor maybeLexRegister(Cursor C, MIToken &Token) {
-  if (C.peek() != '%')
+static Cursor maybeLexRegister(Cursor C, MIToken &Token,
+                               ErrorCallbackType ErrorCallback) {
+  if (C.peek() != '%' && C.peek() != '$')
     return None;
-  if (isdigit(C.peek(1)))
-    return lexVirtualRegister(C, Token);
+
+  if (C.peek() == '%') {
+    if (isdigit(C.peek(1)))
+      return lexVirtualRegister(C, Token);
+
+    // ErrorCallback(Token.location(), "Named vregs are not yet supported.");
+    return None;
+  }
+
+  assert(C.peek() == '$');
   auto Range = C;
-  C.advance(); // Skip '%'
+  C.advance(); // Skip '$'
   while (isRegisterChar(C.peek()))
     C.advance();
   Token.reset(MIToken::NamedRegister, Range.upto(C))
-      .setStringValue(Range.upto(C).drop_front(1)); // Drop the '%'
+      .setStringValue(Range.upto(C).drop_front(1)); // Drop the '$'
   return C;
 }
 
@@ -642,7 +651,7 @@ StringRef llvm::lexMIToken(StringRef Sou
     return R.remaining();
   if (Cursor R = maybeLexIRValue(C, Token, ErrorCallback))
     return R.remaining();
-  if (Cursor R = maybeLexRegister(C, Token))
+  if (Cursor R = maybeLexRegister(C, Token, ErrorCallback))
     return R.remaining();
   if (Cursor R = maybeLexGlobalValue(C, Token, ErrorCallback))
     return R.remaining();

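[Editor's note, not part of the committed diff: the lexer hunk above is the heart of the change. '%' followed by digits still lexes as a virtual register token, '$' now introduces a named physical register, and '%' followed by an identifier falls through (the commented-out ErrorCallback marks where the named-vreg diagnostic is expected to go once named vregs land). As an illustration, in a post-change MIR line such as

    %0:_(s32) = COPY $w0

the '%0' operand is lexed by lexVirtualRegister while '$w0' is lexed as a MIToken::NamedRegister with the '$' dropped from its string value. This example is assembled from the test updates below, not quoted from any single one.]
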
Modified: llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetRegisterInfo.cpp Wed Jan 31 14:04:26 2018
@@ -89,15 +89,15 @@ Printable printReg(unsigned Reg, const T
                    unsigned SubIdx) {
   return Printable([Reg, TRI, SubIdx](raw_ostream &OS) {
     if (!Reg)
-      OS << "%noreg";
+      OS << "$noreg";
     else if (TargetRegisterInfo::isStackSlot(Reg))
       OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
     else if (TargetRegisterInfo::isVirtualRegister(Reg))
       OS << '%' << TargetRegisterInfo::virtReg2Index(Reg);
     else if (!TRI)
-      OS << '%' << "physreg" << Reg;
+      OS << '$' << "physreg" << Reg;
     else if (Reg < TRI->getNumRegs()) {
-      OS << '%';
+      OS << '$';
       printLowerCase(TRI->getName(Reg), OS);
     } else
       llvm_unreachable("Register kind is unsupported.");

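[Editor's note: with the printReg change above, the sigil in MIR and debug output now encodes the register kind directly. A sketch of the resulting strings, derived from the branches of the hunk (the concrete register names are illustrative, assuming an AArch64 TargetRegisterInfo for the named case):

    Reg == 0 (no register)        ->  "$noreg"
    virtual register, index 5     ->  "%5"
    physical register w0, TRI set ->  "$w0"     (name printed lower-case)
    physical register 7, no TRI   ->  "$physreg7"

Stack slots keep the existing "SS#<index>" form, which this commit leaves untouched.]
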
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv-ios.ll Wed Jan 31 14:04:26 2018
@@ -13,9 +13,9 @@ target triple = "aarch64-apple-ios9.0"
 ; CHECK: [[F_ONE:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
 ; CHECK: [[TWO:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
 
-; CHECK: %w0 = COPY [[ANSWER]]
-; CHECK: %d0 = COPY [[D_ONE]]
-; CHECK: %x1 = COPY [[TWELVE]]
+; CHECK: $w0 = COPY [[ANSWER]]
+; CHECK: $d0 = COPY [[D_ONE]]
+; CHECK: $x1 = COPY [[TWELVE]]
 ; CHECK: G_STORE [[THREE]](s8), {{%[0-9]+}}(p0) :: (store 1 into stack, align 0)
 ; CHECK: G_STORE [[ONE]](s16), {{%[0-9]+}}(p0) :: (store 2 into stack + 8, align 0)
 ; CHECK: G_STORE [[FOUR]](s32), {{%[0-9]+}}(p0) :: (store 4 into stack + 16, align 0)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-callingconv.ll Wed Jan 31 14:04:26 2018
@@ -4,15 +4,15 @@ target datalayout = "e-m:o-i64:64-i128:1
 target triple = "aarch64-linux-gnu"
 
 ; CHECK-LABEL: name: args_i32
-; CHECK: %[[ARG0:[0-9]+]]:_(s32) = COPY %w0
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w1
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w2
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w3
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w4
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w5
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w6
-; CHECK: %{{[0-9]+}}:_(s32) = COPY %w7
-; CHECK: %w0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(s32) = COPY $w0
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w1
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w2
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w3
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w4
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w5
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w6
+; CHECK: %{{[0-9]+}}:_(s32) = COPY $w7
+; CHECK: $w0 = COPY %[[ARG0]]
 
 define i32 @args_i32(i32 %w0, i32 %w1, i32 %w2, i32 %w3,
                      i32 %w4, i32 %w5, i32 %w6, i32 %w7) {
@@ -20,15 +20,15 @@ define i32 @args_i32(i32 %w0, i32 %w1, i
 }
 
 ; CHECK-LABEL: name: args_i64
-; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY %x0
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x1
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x2
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x3
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x4
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x5
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x6
-; CHECK: %{{[0-9]+}}:_(s64) = COPY %x7
-; CHECK: %x0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY $x0
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x1
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x2
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x3
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x4
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x5
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x6
+; CHECK: %{{[0-9]+}}:_(s64) = COPY $x7
+; CHECK: $x0 = COPY %[[ARG0]]
 define i64 @args_i64(i64 %x0, i64 %x1, i64 %x2, i64 %x3,
                      i64 %x4, i64 %x5, i64 %x6, i64 %x7) {
   ret i64 %x0
@@ -36,23 +36,23 @@ define i64 @args_i64(i64 %x0, i64 %x1, i
 
 
 ; CHECK-LABEL: name: args_ptrs
-; CHECK: %[[ARG0:[0-9]+]]:_(p0) = COPY %x0
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x1
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x2
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x3
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x4
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x5
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x6
-; CHECK: %{{[0-9]+}}:_(p0) = COPY %x7
-; CHECK: %x0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(p0) = COPY $x0
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x1
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x2
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x3
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x4
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x5
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x6
+; CHECK: %{{[0-9]+}}:_(p0) = COPY $x7
+; CHECK: $x0 = COPY %[[ARG0]]
 define i8* @args_ptrs(i8* %x0, i16* %x1, <2 x i8>* %x2, {i8, i16, i32}* %x3,
                       [3 x float]* %x4, double* %x5, i8* %x6, i8* %x7) {
   ret i8* %x0
 }
 
 ; CHECK-LABEL: name: args_arr
-; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY %d0
-; CHECK: %d0 = COPY %[[ARG0]]
+; CHECK: %[[ARG0:[0-9]+]]:_(s64) = COPY $d0
+; CHECK: $d0 = COPY %[[ARG0]]
 define [1 x double] @args_arr([1 x double] %d0) {
   ret [1 x double] %d0
 }
@@ -67,16 +67,16 @@ define [1 x double] @args_arr([1 x doubl
 ; CHECK: [[F_ONE:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
 ; CHECK: [[TWO:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
 
-; CHECK: %w0 = COPY [[ANSWER]]
-; CHECK: %d0 = COPY [[D_ONE]]
-; CHECK: %x1 = COPY [[TWELVE]]
+; CHECK: $w0 = COPY [[ANSWER]]
+; CHECK: $d0 = COPY [[D_ONE]]
+; CHECK: $x1 = COPY [[TWELVE]]
 ; CHECK: [[THREE_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[THREE]]
-; CHECK: %w2 = COPY [[THREE_TMP]](s32)
+; CHECK: $w2 = COPY [[THREE_TMP]](s32)
 ; CHECK: [[ONE_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[ONE]]
-; CHECK: %w3 = COPY [[ONE_TMP]](s32)
-; CHECK: %w4 = COPY [[FOUR]](s32)
-; CHECK: %s1 = COPY [[F_ONE]](s32)
-; CHECK: %d2 = COPY [[TWO]](s64)
+; CHECK: $w3 = COPY [[ONE_TMP]](s32)
+; CHECK: $w4 = COPY [[FOUR]](s32)
+; CHECK: $s1 = COPY [[F_ONE]](s32)
+; CHECK: $d2 = COPY [[TWO]](s64)
 declare void @varargs(i32, double, i64, ...)
 define void @test_varargs() {
   call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i8 3, i16 1, i32 4, float 1.0, double 2.0)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll Wed Jan 31 14:04:26 2018
@@ -7,22 +7,22 @@ target triple = "aarch64--"
 
 ; Tests for add.
 ; CHECK-LABEL: name: addi64
-; CHECK:      [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK:      [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_ADD [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @addi64(i64 %arg1, i64 %arg2) {
   %res = add i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: muli64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_MUL [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @muli64(i64 %arg1, i64 %arg2) {
   %res = mul i64 %arg1, %arg2
   ret i64 %res
@@ -107,7 +107,7 @@ end:
 ; CHECK-NEXT: successors: %[[TRUE:bb.[0-9]+]](0x40000000),
 ; CHECK:                  %[[FALSE:bb.[0-9]+]](0x40000000)
 ;
-; CHECK: [[ADDR:%.*]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%.*]]:_(p0) = COPY $x0
 ;
 ; Check that we emit the correct branch.
 ; CHECK: [[TST:%.*]]:_(s1) = G_LOAD [[ADDR]](p0)
@@ -135,7 +135,7 @@ false:
 ;
 ; CHECK: bb.{{[a-zA-Z0-9.]+}}:
 ; CHECK-NEXT: successors: %[[BB_CASE100:bb.[0-9]+]](0x40000000), %[[BB_NOTCASE100_CHECKNEXT:bb.[0-9]+]](0x40000000)
-; CHECK: %0:_(s32) = COPY %w0
+; CHECK: %0:_(s32) = COPY $w0
 ; CHECK: %[[reg100:[0-9]+]]:_(s32) = G_CONSTANT i32 100
 ; CHECK: %[[reg200:[0-9]+]]:_(s32) = G_CONSTANT i32 200
 ; CHECK: %[[reg0:[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -171,8 +171,8 @@ false:
 ;
 ; CHECK: [[BB_RET]].{{[a-zA-Z0-9.]+}}:
 ; CHECK-NEXT: %[[regret:[0-9]+]]:_(s32) = G_PHI %[[regretdefault]](s32), %[[BB_DEFAULT]], %[[regretc100]](s32), %[[BB_CASE100]]
-; CHECK:  %w0 = COPY %[[regret]](s32)
-; CHECK:  RET_ReallyLR implicit %w0
+; CHECK:  $w0 = COPY %[[regret]](s32)
+; CHECK:  RET_ReallyLR implicit $w0
 ;
 define i32 @switch(i32 %argc) {
 entry:
@@ -289,22 +289,22 @@ L2:
 
 ; Tests for or.
 ; CHECK-LABEL: name: ori64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_OR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @ori64(i64 %arg1, i64 %arg2) {
   %res = or i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: ori32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_OR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @ori32(i32 %arg1, i32 %arg2) {
   %res = or i32 %arg1, %arg2
   ret i32 %res
@@ -312,22 +312,22 @@ define i32 @ori32(i32 %arg1, i32 %arg2)
 
 ; Tests for xor.
 ; CHECK-LABEL: name: xori64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_XOR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @xori64(i64 %arg1, i64 %arg2) {
   %res = xor i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: xori32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_XOR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @xori32(i32 %arg1, i32 %arg2) {
   %res = xor i32 %arg1, %arg2
   ret i32 %res
@@ -335,22 +335,22 @@ define i32 @xori32(i32 %arg1, i32 %arg2)
 
 ; Tests for and.
 ; CHECK-LABEL: name: andi64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_AND [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @andi64(i64 %arg1, i64 %arg2) {
   %res = and i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: andi32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_AND [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @andi32(i32 %arg1, i32 %arg2) {
   %res = and i32 %arg1, %arg2
   ret i32 %res
@@ -358,58 +358,58 @@ define i32 @andi32(i32 %arg1, i32 %arg2)
 
 ; Tests for sub.
 ; CHECK-LABEL: name: subi64
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s64) = G_SUB [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %x0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %x0
+; CHECK-NEXT: $x0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $x0
 define i64 @subi64(i64 %arg1, i64 %arg2) {
   %res = sub i64 %arg1, %arg2
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: subi32
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SUB [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @subi32(i32 %arg1, i32 %arg2) {
   %res = sub i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: ptrtoint
-; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[ARG1]]
-; CHECK: %x0 = COPY [[RES]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64 @ptrtoint(i64* %a) {
   %val = ptrtoint i64* %a to i64
   ret i64 %val
 }
 
 ; CHECK-LABEL: name: inttoptr
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_INTTOPTR [[ARG1]]
-; CHECK: %x0 = COPY [[RES]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64* @inttoptr(i64 %a) {
   %val = inttoptr i64 %a to i64*
   ret i64* %val
 }
 
 ; CHECK-LABEL: name: trivial_bitcast
-; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: %x0 = COPY [[ARG1]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: $x0 = COPY [[ARG1]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64* @trivial_bitcast(i8* %a) {
   %val = bitcast i8* %a to i64*
   ret i64* %val
 }
 
 ; CHECK-LABEL: name: trivial_bitcast_with_copy
-; CHECK:     [[A:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK:     [[A:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK:     G_BR %[[CAST:bb\.[0-9]+]]
 
 ; CHECK: [[END:bb\.[0-9]+]].{{[a-zA-Z0-9.]+}}:
@@ -429,11 +429,11 @@ cast:
 }
 
 ; CHECK-LABEL: name: bitcast
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[RES1:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[ARG1]]
 ; CHECK: [[RES2:%[0-9]+]]:_(s64) = G_BITCAST [[RES1]]
-; CHECK: %x0 = COPY [[RES2]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[RES2]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64 @bitcast(i64 %a) {
   %res1 = bitcast i64 %a to <2 x i32>
   %res2 = bitcast <2 x i32> %res1 to i64
@@ -441,7 +441,7 @@ define i64 @bitcast(i64 %a) {
 }
 
 ; CHECK-LABEL: name: trunc
-; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[ARG1:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_LOAD
 ; CHECK: [[RES1:%[0-9]+]]:_(s8) = G_TRUNC [[ARG1]]
 ; CHECK: [[RES2:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[VEC]]
@@ -454,15 +454,15 @@ define void @trunc(i64 %a) {
 }
 
 ; CHECK-LABEL: name: load
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY %x1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 16)
 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42, addrspace 42)
 ; CHECK: [[SUM2:%.*]]:_(s64) = G_ADD [[VAL1]], [[VAL2]]
 ; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load 8 from %ir.addr)
 ; CHECK: [[SUM3:%[0-9]+]]:_(s64) = G_ADD [[SUM2]], [[VAL3]]
-; CHECK: %x0 = COPY [[SUM3]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: $x0 = COPY [[SUM3]]
+; CHECK: RET_ReallyLR implicit $x0
 define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
   %val1 = load i64, i64* %addr, align 16
 
@@ -475,10 +475,10 @@ define i64 @load(i64* %addr, i64 addrspa
 }
 
 ; CHECK-LABEL: name: store
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY %x1
-; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY %x3
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
+; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY $x3
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 16)
 ; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42, addrspace 42)
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store 8 into %ir.addr)
@@ -492,8 +492,8 @@ define void @store(i64* %addr, i64 addrs
 }
 
 ; CHECK-LABEL: name: intrinsics
-; CHECK: [[CUR:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[BITS:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[CUR:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[BITS:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[CREG:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTRINSIC intrinsic(@llvm.returnaddress), [[CREG]]
 ; CHECK: [[PTR_VEC:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.ptr.vec
@@ -522,7 +522,7 @@ define void @intrinsics(i32 %cur, i32 %b
 ; CHECK:     [[RES2:%[0-9]+]]:_(s32) = G_LOAD
 
 ; CHECK:     [[RES:%[0-9]+]]:_(s32) = G_PHI [[RES1]](s32), %[[TRUE]], [[RES2]](s32), %[[FALSE]]
-; CHECK:     %w0 = COPY [[RES]]
+; CHECK:     $w0 = COPY [[RES]]
 define i32 @test_phi(i32* %addr1, i32* %addr2, i1 %tst) {
   br i1 %tst, label %true, label %false
 
@@ -551,14 +551,14 @@ define void @unreachable(i32 %a) {
   ; It's important that constants are after argument passing, but before the
   ; rest of the entry block.
 ; CHECK-LABEL: name: constant_int
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 
 ; CHECK: bb.{{[0-9]+}}.{{[a-zA-Z0-9.]+}}:
 ; CHECK: [[SUM1:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
 ; CHECK: [[SUM2:%[0-9]+]]:_(s32) = G_ADD [[IN]], [[ONE]]
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ADD [[SUM1]], [[SUM2]]
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 
 define i32 @constant_int(i32 %in) {
   br label %next
@@ -581,7 +581,7 @@ define i32 @constant_int_start() {
 
 ; CHECK-LABEL: name: test_undef
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
-; CHECK: %w0 = COPY [[UNDEF]]
+; CHECK: $w0 = COPY [[UNDEF]]
 define i32 @test_undef() {
   ret i32 undef
 }
@@ -589,7 +589,7 @@ define i32 @test_undef() {
 ; CHECK-LABEL: name: test_constant_inttoptr
 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ONE]]
-; CHECK: %x0 = COPY [[PTR]]
+; CHECK: $x0 = COPY [[PTR]]
 define i8* @test_constant_inttoptr() {
   ret i8* inttoptr(i64 1 to i8*)
 }
@@ -598,35 +598,35 @@ define i8* @test_constant_inttoptr() {
   ; functions, so reuse the "i64 1" from above.
 ; CHECK-LABEL: name: test_reused_constant
 ; CHECK: [[ONE:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-; CHECK: %x0 = COPY [[ONE]]
+; CHECK: $x0 = COPY [[ONE]]
 define i64 @test_reused_constant() {
   ret i64 1
 }
 
 ; CHECK-LABEL: name: test_sext
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_SEXT [[IN]]
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_sext(i32 %in) {
   %res = sext i32 %in to i64
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: test_zext
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ZEXT [[IN]]
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_zext(i32 %in) {
   %res = zext i32 %in to i64
   ret i64 %res
 }
 
 ; CHECK-LABEL: name: test_shl
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SHL [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_shl(i32 %arg1, i32 %arg2) {
   %res = shl i32 %arg1, %arg2
   ret i32 %res
@@ -634,66 +634,66 @@ define i32 @test_shl(i32 %arg1, i32 %arg
 
 
 ; CHECK-LABEL: name: test_lshr
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_LSHR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_lshr(i32 %arg1, i32 %arg2) {
   %res = lshr i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_ashr
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_ASHR [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_ashr(i32 %arg1, i32 %arg2) {
   %res = ashr i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_sdiv
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SDIV [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_sdiv(i32 %arg1, i32 %arg2) {
   %res = sdiv i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_udiv
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UDIV [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_udiv(i32 %arg1, i32 %arg2) {
   %res = udiv i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_srem
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_SREM [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_srem(i32 %arg1, i32 %arg2) {
   %res = srem i32 %arg1, %arg2
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_urem
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_UREM [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %w0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %w0
+; CHECK-NEXT: $w0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $w0
 define i32 @test_urem(i32 %arg1, i32 %arg2) {
   %res = urem i32 %arg1, %arg2
   ret i32 %res
@@ -701,13 +701,13 @@ define i32 @test_urem(i32 %arg1, i32 %ar
 
 ; CHECK-LABEL: name: test_constant_null
 ; CHECK: [[NULL:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
-; CHECK: %x0 = COPY [[NULL]]
+; CHECK: $x0 = COPY [[NULL]]
 define i8* @test_constant_null() {
   ret i8* null
 }
 
 ; CHECK-LABEL: name: test_struct_memops
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from  %ir.addr, align 4)
 ; CHECK: G_STORE [[VAL]](s64), [[ADDR]](p0) :: (store 8 into  %ir.addr, align 4)
 define void @test_struct_memops({ i8, i32 }* %addr) {
@@ -717,7 +717,7 @@ define void @test_struct_memops({ i8, i3
 }
 
 ; CHECK-LABEL: name: test_i1_memops
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load 1 from  %ir.addr)
 ; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store 1 into  %ir.addr)
 define void @test_i1_memops(i1* %addr) {
@@ -727,9 +727,9 @@ define void @test_i1_memops(i1* %addr) {
 }
 
 ; CHECK-LABEL: name: int_comparison
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[LHS]](s32), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
 define void @int_comparison(i32 %a, i32 %b, i1* %addr) {
@@ -739,9 +739,9 @@ define void @int_comparison(i32 %a, i32
 }
 
 ; CHECK-LABEL: name: ptr_comparison
-; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LHS]](p0), [[RHS]]
 ; CHECK: G_STORE [[TST]](s1), [[ADDR]](p0)
 define void @ptr_comparison(i8* %a, i8* %b, i1* %addr) {
@@ -751,64 +751,64 @@ define void @ptr_comparison(i8* %a, i8*
 }
 
 ; CHECK-LABEL: name: test_fadd
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FADD [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fadd(float %arg1, float %arg2) {
   %res = fadd float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_fsub
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FSUB [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fsub(float %arg1, float %arg2) {
   %res = fsub float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_fmul
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FMUL [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fmul(float %arg1, float %arg2) {
   %res = fmul float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_fdiv
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FDIV [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_fdiv(float %arg1, float %arg2) {
   %res = fdiv float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_frem
-; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[ARG1:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK-NEXT: [[ARG2:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_FREM [[ARG1]], [[ARG2]]
-; CHECK-NEXT: %s0 = COPY [[RES]]
-; CHECK-NEXT: RET_ReallyLR implicit %s0
+; CHECK-NEXT: $s0 = COPY [[RES]]
+; CHECK-NEXT: RET_ReallyLR implicit $s0
 define float @test_frem(float %arg1, float %arg2) {
   %res = frem float %arg1, %arg2
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_sadd_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -822,9 +822,9 @@ define void @test_sadd_overflow(i32 %lhs
 }
 
 ; CHECK-LABEL: name: test_uadd_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[ZERO:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDE [[LHS]], [[RHS]], [[ZERO]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
@@ -839,9 +839,9 @@ define void @test_uadd_overflow(i32 %lhs
 }
 
 ; CHECK-LABEL: name: test_ssub_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -855,9 +855,9 @@ define void @test_ssub_overflow(i32 %lhs
 }
 
 ; CHECK-LABEL: name: test_usub_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[ZERO:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBE [[LHS]], [[RHS]], [[ZERO]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
@@ -872,9 +872,9 @@ define void @test_usub_overflow(i32 %lhs
 }
 
 ; CHECK-LABEL: name: test_smul_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -888,9 +888,9 @@ define void @test_smul_overflow(i32 %lhs
 }
 
 ; CHECK-LABEL: name: test_umul_overflow
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
 ; CHECK: [[TMP:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
 ; CHECK: [[TMP1:%[0-9]+]]:_(s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
@@ -906,7 +906,7 @@ define void @test_umul_overflow(i32 %lhs
 ; CHECK-LABEL: name: test_extractvalue
 ; CHECK: [[STRUCT:%[0-9]+]]:_(s128) = G_LOAD
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT [[STRUCT]](s128), 64
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 %struct.nested = type {i8, { i8, i32 }, i32}
 define i32 @test_extractvalue(%struct.nested* %addr) {
   %struct = load %struct.nested, %struct.nested* %addr
@@ -926,7 +926,7 @@ define void @test_extractvalue_agg(%stru
 }
 
 ; CHECK-LABEL: name: test_insertvalue
-; CHECK: [[VAL:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[VAL:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[STRUCT:%[0-9]+]]:_(s128) = G_LOAD
 ; CHECK: [[NEWSTRUCT:%[0-9]+]]:_(s128) = G_INSERT [[STRUCT]], [[VAL]](s32), 64
 ; CHECK: G_STORE [[NEWSTRUCT]](s128),
@@ -939,20 +939,20 @@ define void @test_insertvalue(%struct.ne
 
 define [1 x i64] @test_trivial_insert([1 x i64] %s, i64 %val) {
 ; CHECK-LABEL: name: test_trivial_insert
-; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[VAL:%[0-9]+]]:_(s64) = COPY %x1
+; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[VAL:%[0-9]+]]:_(s64) = COPY $x1
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = COPY [[VAL]](s64)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
   %res = insertvalue [1 x i64] %s, i64 %val, 0
   ret [1 x i64] %res
 }
 
 define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
 ; CHECK-LABEL: name: test_trivial_insert_ptr
-; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK: [[STRUCT:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[VAL:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_PTRTOINT [[VAL]](p0)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
   %res = insertvalue [1 x i8*] %s, i8* %val, 0
   ret [1 x i8*] %res
 }
@@ -971,48 +971,48 @@ define void @test_insertvalue_agg(%struc
 }
 
 ; CHECK-LABEL: name: test_select
-; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %w1
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %w2
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $w1
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w2
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
-; CHECK: %w0 = COPY [[RES]]
+; CHECK: $w0 = COPY [[RES]]
 define i32 @test_select(i1 %tst, i32 %lhs, i32 %rhs) {
   %res = select i1 %tst, i32 %lhs, i32 %rhs
   ret i32 %res
 }
 
 ; CHECK-LABEL: name: test_select_ptr
-; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
-; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHS:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[RHS:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[RES:%[0-9]+]]:_(p0) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i8* @test_select_ptr(i1 %tst, i8* %lhs, i8* %rhs) {
   %res = select i1 %tst, i8* %lhs, i8* %rhs
   ret i8* %res
 }
 
 ; CHECK-LABEL: name: test_select_vec
-; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[TST_C:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_TRUNC [[TST_C]]
-; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY %q0
-; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY %q1
+; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](s1), [[LHS]], [[RHS]]
-; CHECK: %q0 = COPY [[RES]]
+; CHECK: $q0 = COPY [[RES]]
 define <4 x i32> @test_select_vec(i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs) {
   %res = select i1 %tst, <4 x i32> %lhs, <4 x i32> %rhs
   ret <4 x i32> %res
 }
 
 ; CHECK-LABEL: name: test_vselect_vec
-; CHECK: [[TST32:%[0-9]+]]:_(<4 x s32>) = COPY %q0
-; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY %q1
-; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY %q2
+; CHECK: [[TST32:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+; CHECK: [[LHS:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+; CHECK: [[RHS:%[0-9]+]]:_(<4 x s32>) = COPY $q2
 ; CHECK: [[TST:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[TST32]](<4 x s32>)
 ; CHECK: [[RES:%[0-9]+]]:_(<4 x s32>) = G_SELECT [[TST]](<4 x s1>), [[LHS]], [[RHS]]
-; CHECK: %q0 = COPY [[RES]]
+; CHECK: $q0 = COPY [[RES]]
 define <4 x i32> @test_vselect_vec(<4 x i32> %tst32, <4 x i32> %lhs, <4 x i32> %rhs) {
   %tst = trunc <4 x i32> %tst32 to <4 x i1>
   %res = select <4 x i1> %tst, <4 x i32> %lhs, <4 x i32> %rhs
@@ -1020,10 +1020,10 @@ define <4 x i32> @test_vselect_vec(<4 x
 }
 
 ; CHECK-LABEL: name: test_fptosi
-; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOSI [[FP]](s32)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_fptosi(float* %fp.addr) {
   %fp = load float, float* %fp.addr
   %res = fptosi float %fp to i64
@@ -1031,10 +1031,10 @@ define i64 @test_fptosi(float* %fp.addr)
 }
 
 ; CHECK-LABEL: name: test_fptoui
-; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[FPADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[FP:%[0-9]+]]:_(s32) = G_LOAD [[FPADDR]](p0)
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPTOUI [[FP]](s32)
-; CHECK: %x0 = COPY [[RES]]
+; CHECK: $x0 = COPY [[RES]]
 define i64 @test_fptoui(float* %fp.addr) {
   %fp = load float, float* %fp.addr
   %res = fptoui float %fp to i64
@@ -1042,8 +1042,8 @@ define i64 @test_fptoui(float* %fp.addr)
 }
 
 ; CHECK-LABEL: name: test_sitofp
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_SITOFP [[IN]](s32)
 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
 define void @test_sitofp(double* %addr, i32 %in) {
@@ -1053,8 +1053,8 @@ define void @test_sitofp(double* %addr,
 }
 
 ; CHECK-LABEL: name: test_uitofp
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[FP:%[0-9]+]]:_(s64) = G_UITOFP [[IN]](s32)
 ; CHECK: G_STORE [[FP]](s64), [[ADDR]](p0)
 define void @test_uitofp(double* %addr, i32 %in) {
@@ -1064,25 +1064,25 @@ define void @test_uitofp(double* %addr,
 }
 
 ; CHECK-LABEL: name: test_fpext
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FPEXT [[IN]](s32)
-; CHECK: %d0 = COPY [[RES]]
+; CHECK: $d0 = COPY [[RES]]
 define double @test_fpext(float %in) {
   %res = fpext float %in to double
   ret double %res
 }
 
 ; CHECK-LABEL: name: test_fptrunc
-; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY %d0
+; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY $d0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPTRUNC [[IN]](s64)
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
 define float @test_fptrunc(double %in) {
   %res = fptrunc double %in to float
   ret float %res
 }
 
 ; CHECK-LABEL: name: test_constant_float
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[TMP:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.500000e+00
 ; CHECK: G_STORE [[TMP]](s32), [[ADDR]](p0)
 define void @test_constant_float(float* %addr) {
@@ -1091,9 +1091,9 @@ define void @test_constant_float(float*
 }
 
 ; CHECK-LABEL: name: float_comparison
-; CHECK: [[LHSADDR:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[RHSADDR:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[LHSADDR:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[RHSADDR:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[BOOLADDR:%[0-9]+]]:_(p0) = COPY $x2
 ; CHECK: [[LHS:%[0-9]+]]:_(s32) = G_LOAD [[LHSADDR]](p0)
 ; CHECK: [[RHS:%[0-9]+]]:_(s32) = G_LOAD [[RHSADDR]](p0)
 ; CHECK: [[TST:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[LHS]](s32), [[RHS]]
@@ -1124,7 +1124,7 @@ define i1 @trivial_float_comparison(doub
 define i32* @test_global() {
 ; CHECK-LABEL: name: test_global
 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var{{$}}
-; CHECK: %x0 = COPY [[TMP]](p0)
+; CHECK: $x0 = COPY [[TMP]](p0)
 
   ret i32* @var
 }
@@ -1133,7 +1133,7 @@ define i32* @test_global() {
 define i32 addrspace(42)* @test_global_addrspace() {
 ; CHECK-LABEL: name: test_global
 ; CHECK: [[TMP:%[0-9]+]]:_(p42) = G_GLOBAL_VALUE @var1{{$}}
-; CHECK: %x0 = COPY [[TMP]](p42)
+; CHECK: $x0 = COPY [[TMP]](p42)
 
   ret i32 addrspace(42)* @var1
 }
@@ -1142,7 +1142,7 @@ define i32 addrspace(42)* @test_global_a
 define void()* @test_global_func() {
 ; CHECK-LABEL: name: test_global_func
 ; CHECK: [[TMP:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @allocai64{{$}}
-; CHECK: %x0 = COPY [[TMP]](p0)
+; CHECK: $x0 = COPY [[TMP]](p0)
 
   ret void()* @allocai64
 }
@@ -1150,13 +1150,13 @@ define void()* @test_global_func() {
 declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
 define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
 ; CHECK-LABEL: name: test_memcpy
-; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: %x0 = COPY [[DST]]
-; CHECK: %x1 = COPY [[SRC]]
-; CHECK: %x2 = COPY [[SIZE]]
-; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: $x0 = COPY [[DST]]
+; CHECK: $x1 = COPY [[SRC]]
+; CHECK: $x2 = COPY [[SIZE]]
+; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
@@ -1164,13 +1164,13 @@ define void @test_memcpy(i8* %dst, i8* %
 declare void @llvm.memmove.p0i8.p0i8.i64(i8*, i8*, i64, i1)
 define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
 ; CHECK-LABEL: name: test_memmove
-; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY %x1
-; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: %x0 = COPY [[DST]]
-; CHECK: %x1 = COPY [[SRC]]
-; CHECK: %x2 = COPY [[SIZE]]
-; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: $x0 = COPY [[DST]]
+; CHECK: $x1 = COPY [[SRC]]
+; CHECK: $x2 = COPY [[SIZE]]
+; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
@@ -1178,15 +1178,15 @@ define void @test_memmove(i8* %dst, i8*
 declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)
 define void @test_memset(i8* %dst, i8 %val, i64 %size) {
 ; CHECK-LABEL: name: test_memset
-; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
-; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: %x0 = COPY [[DST]]
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: $x0 = COPY [[DST]]
 ; CHECK: [[SRC_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[SRC]]
-; CHECK: %w1 = COPY [[SRC_TMP]]
-; CHECK: %x2 = COPY [[SIZE]]
-; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %w1, implicit %x2
+; CHECK: $w1 = COPY [[SRC_TMP]]
+; CHECK: $x2 = COPY [[SIZE]]
+; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2
   call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
   ret void
 }
@@ -1195,8 +1195,8 @@ declare i64 @llvm.objectsize.i64(i8*, i1
 declare i32 @llvm.objectsize.i32(i8*, i1)
 define void @test_objectsize(i8* %addr0, i8* %addr1) {
 ; CHECK-LABEL: name: test_objectsize
-; CHECK: [[ADDR0:%[0-9]+]]:_(p0) = COPY %x0
-; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK: [[ADDR0:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[ADDR1:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: {{%[0-9]+}}:_(s64) = G_CONSTANT i64 -1
 ; CHECK: {{%[0-9]+}}:_(s64) = G_CONSTANT i64 0
 ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 -1
@@ -1210,7 +1210,7 @@ define void @test_objectsize(i8* %addr0,
 
 define void @test_large_const(i128* %addr) {
 ; CHECK-LABEL: name: test_large_const
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s128) = G_CONSTANT i128 42
 ; CHECK: G_STORE [[VAL]](s128), [[ADDR]](p0)
   store i128 42, i128* %addr
@@ -1245,7 +1245,7 @@ define void @test_va_end(i8* %list) {
 
 define void @test_va_arg(i8* %list) {
 ; CHECK-LABEL: test_va_arg
-; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: G_VAARG [[LIST]](p0), 8
 ; CHECK: G_VAARG [[LIST]](p0), 1
 ; CHECK: G_VAARG [[LIST]](p0), 16
@@ -1259,10 +1259,10 @@ define void @test_va_arg(i8* %list) {
 declare float @llvm.pow.f32(float, float)
 define float @test_pow_intrin(float %l, float %r) {
 ; CHECK-LABEL: name: test_pow_intrin
-; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY %s1
+; CHECK: [[LHS:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $s1
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FPOW [[LHS]], [[RHS]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.pow.f32(float %l, float %r)
   ret float %res
 }
@@ -1270,11 +1270,11 @@ define float @test_pow_intrin(float %l,
 declare float @llvm.fma.f32(float, float, float)
 define float @test_fma_intrin(float %a, float %b, float %c) {
 ; CHECK-LABEL: name: test_fma_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: [[B:%[0-9]+]]:_(s32) = COPY %s1
-; CHECK: [[C:%[0-9]+]]:_(s32) = COPY %s2
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK: [[B:%[0-9]+]]:_(s32) = COPY $s1
+; CHECK: [[C:%[0-9]+]]:_(s32) = COPY $s2
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FMA [[A]], [[B]], [[C]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.fma.f32(float %a, float %b, float %c)
   ret float %res
 }
@@ -1282,9 +1282,9 @@ define float @test_fma_intrin(float %a,
 declare float @llvm.exp.f32(float)
 define float @test_exp_intrin(float %a) {
 ; CHECK-LABEL: name: test_exp_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.exp.f32(float %a)
   ret float %res
 }
@@ -1292,9 +1292,9 @@ define float @test_exp_intrin(float %a)
 declare float @llvm.exp2.f32(float)
 define float @test_exp2_intrin(float %a) {
 ; CHECK-LABEL: name: test_exp2_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FEXP2 [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.exp2.f32(float %a)
   ret float %res
 }
@@ -1302,9 +1302,9 @@ define float @test_exp2_intrin(float %a)
 declare float @llvm.log.f32(float)
 define float @test_log_intrin(float %a) {
 ; CHECK-LABEL: name: test_log_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.log.f32(float %a)
   ret float %res
 }
@@ -1312,9 +1312,9 @@ define float @test_log_intrin(float %a)
 declare float @llvm.log2.f32(float)
 define float @test_log2_intrin(float %a) {
 ; CHECK-LABEL: name: test_log2_intrin
-; CHECK: [[A:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[A:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FLOG2 [[A]]
-; CHECK: %s0 = COPY [[RES]]
+; CHECK: $s0 = COPY [[RES]]
   %res = call float @llvm.log2.f32(float %a)
   ret float %res
 }
@@ -1331,7 +1331,7 @@ define void @test_lifetime_intrin() {
 
 define void @test_load_store_atomics(i8* %addr) {
 ; CHECK-LABEL: name: test_load_store_atomics
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
 ; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
 ; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
@@ -1352,18 +1352,18 @@ define void @test_load_store_atomics(i8*
 
 define float @test_fneg_f32(float %x) {
 ; CHECK-LABEL: name: test_fneg_f32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %s0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $s0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_FNEG [[ARG]]
-; CHECK: %s0 = COPY [[RES]](s32)
+; CHECK: $s0 = COPY [[RES]](s32)
   %neg = fsub float -0.000000e+00, %x
   ret float %neg
 }
 
 define double @test_fneg_f64(double %x) {
 ; CHECK-LABEL: name: test_fneg_f64
-; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(s64) = COPY $d0
 ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_FNEG [[ARG]]
-; CHECK: %d0 = COPY [[RES]](s64)
+; CHECK: $d0 = COPY [[RES]](s64)
   %neg = fsub double -0.000000e+00, %x
   ret double %neg
 }
@@ -1379,31 +1379,31 @@ define void @test_trivial_inlineasm() {
 
 define <2 x i32> @test_insertelement(<2 x i32> %vec, i32 %elt, i32 %idx){
 ; CHECK-LABEL: name: test_insertelement
-; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY %d0
-; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[RES:%[0-9]+]]:_(<2 x s32>) = G_INSERT_VECTOR_ELT [[VEC]], [[ELT]](s32), [[IDX]](s32)
-; CHECK: %d0 = COPY [[RES]](<2 x s32>)
+; CHECK: $d0 = COPY [[RES]](<2 x s32>)
   %res = insertelement <2 x i32> %vec, i32 %elt, i32 %idx
   ret <2 x i32> %res
 }
 
 define i32 @test_extractelement(<2 x i32> %vec, i32 %idx) {
 ; CHECK-LABEL: name: test_extractelement
-; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY %d0
-; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+; CHECK: [[IDX:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[VEC]](<2 x s32>), [[IDX]](s32)
-; CHECK: %w0 = COPY [[RES]](s32)
+; CHECK: $w0 = COPY [[RES]](s32)
   %res = extractelement <2 x i32> %vec, i32 %idx
   ret i32 %res
 }
 
 define i32 @test_singleelementvector(i32 %elt){
 ; CHECK-LABEL: name: test_singleelementvector
-; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ELT:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK-NOT: G_INSERT_VECTOR_ELT
 ; CHECK-NOT: G_EXTRACT_VECTOR_ELT
-; CHECK: %w0 = COPY [[ELT]](s32)
+; CHECK: $w0 = COPY [[ELT]](s32)
   %vec = insertelement <1 x i32> undef, i32 %elt, i32 0
   %res = extractelement <1 x i32> %vec, i32 0
   ret i32 %res
@@ -1413,7 +1413,7 @@ define <2 x i32> @test_constantaggzerove
 ; CHECK-LABEL: name: test_constantaggzerovector_v2i32
 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   ret <2 x i32> zeroinitializer
 }
 
@@ -1421,7 +1421,7 @@ define <2 x float> @test_constantaggzero
 ; CHECK-LABEL: name: test_constantaggzerovector_v2f32
 ; CHECK: [[ZERO:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[ZERO]](s32), [[ZERO]](s32)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   ret <2 x float> zeroinitializer
 }
 
@@ -1439,7 +1439,7 @@ define <2 x i32> @test_constantdatavecto
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   ret <2 x i32> <i32 1, i32 2>
 }
 
@@ -1461,7 +1461,7 @@ define <4 x i32> @test_constantdatavecto
 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
 ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32), [[C3]](s32), [[C4]](s32)
-; CHECK: %q0 = COPY [[VEC]](<4 x s32>)
+; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
   ret <4 x i32> <i32 1, i32 2, i32 3, i32 4>
 }
 
@@ -1470,13 +1470,13 @@ define <2 x double> @test_constantdatave
 ; CHECK: [[FC1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
 ; CHECK: [[FC2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s64>) = G_MERGE_VALUES [[FC1]](s64), [[FC2]](s64)
-; CHECK: %q0 = COPY [[VEC]](<2 x s64>)
+; CHECK: $q0 = COPY [[VEC]](<2 x s64>)
   ret <2 x double> <double 1.0, double 2.0>
 }
 
 define i32 @test_constantaggzerovector_v1s32(i32 %arg){
 ; CHECK-LABEL: name: test_constantaggzerovector_v1s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-NOT: G_MERGE_VALUES
 ; CHECK: G_ADD [[ARG]], [[C0]]
@@ -1488,7 +1488,7 @@ define i32 @test_constantaggzerovector_v
 
 define i32 @test_constantdatavector_v1s32(i32 %arg){
 ; CHECK-LABEL: name: test_constantdatavector_v1s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-NOT: G_MERGE_VALUES
 ; CHECK: G_ADD [[ARG]], [[C1]]
@@ -1501,21 +1501,21 @@ define i32 @test_constantdatavector_v1s3
 declare ghccc float @different_call_conv_target(float %x)
 define float @test_different_call_conv_target(float %x) {
 ; CHECK-LABEL: name: test_different_call_conv
-; CHECK: [[X:%[0-9]+]]:_(s32) = COPY %s0
-; CHECK: %s8 = COPY [[X]]
-; CHECK: BL @different_call_conv_target, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s8, implicit-def %s0
+; CHECK: [[X:%[0-9]+]]:_(s32) = COPY $s0
+; CHECK: $s8 = COPY [[X]]
+; CHECK: BL @different_call_conv_target, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s8, implicit-def $s0
   %res = call ghccc float @different_call_conv_target(float %x)
   ret float %res
 }
 
 define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
 ; CHECK-LABEL: name: test_shufflevector_s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[ARG:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   %vec = insertelement <1 x i32> undef, i32 %arg, i32 0
   %res = shufflevector <1 x i32> %vec, <1 x i32> undef, <2 x i32> zeroinitializer
   ret <2 x i32> %res
@@ -1523,11 +1523,11 @@ define <2 x i32> @test_shufflevector_s32
 
 define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_s32
-; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[C1]](s32)
-; CHECK: %w0 = COPY [[RES]](s32)
+; CHECK: $w0 = COPY [[RES]](s32)
   %vec = shufflevector <2 x i32> %arg, <2 x i32> undef, <1 x i32> <i32 1>
   %res = extractelement <1 x i32> %vec, i32 0
   ret i32 %res
@@ -1535,20 +1535,20 @@ define i32 @test_shufflevector_v2s32_s32
 
 define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   %res = shufflevector <2 x i32> %arg, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
   ret <2 x i32> %res
 }
 
 define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
-; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY %d0
+; CHECK: [[ARG:%[0-9]+]]:_(<2 x s32>) = COPY $d0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<2 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-DAG: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -1562,28 +1562,28 @@ define i32 @test_shufflevector_v2s32_v3s
 
 define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg2) {
 ; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
-; CHECK: [[ARG1:%[0-9]+]]:_(<2 x s32>) = COPY %d0
-; CHECK: [[ARG2:%[0-9]+]]:_(<2 x s32>) = COPY %d1
+; CHECK: [[ARG1:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+; CHECK: [[ARG2:%[0-9]+]]:_(<2 x s32>) = COPY $d1
 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
 ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
 ; CHECK: [[MASK:%[0-9]+]]:_(<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C1]](s32), [[C2]](s32), [[C3]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[ARG1]](<2 x s32>), [[ARG2]], [[MASK]](<4 x s32>)
-; CHECK: %q0 = COPY [[VEC]](<4 x s32>)
+; CHECK: $q0 = COPY [[VEC]](<4 x s32>)
   %res = shufflevector <2 x i32> %arg1, <2 x i32> %arg2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %res
 }
 
 define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
 ; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
-; CHECK: [[ARG:%[0-9]+]]:_(<4 x s32>) = COPY %q0
+; CHECK: [[ARG:%[0-9]+]]:_(<4 x s32>) = COPY $q0
 ; CHECK-DAG: [[UNDEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
 ; CHECK-DAG: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
 ; CHECK-DAG: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
 ; CHECK-DAG: [[MASK:%[0-9]+]]:_(<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](<4 x s32>), [[UNDEF]], [[MASK]](<2 x s32>)
-; CHECK: %d0 = COPY [[VEC]](<2 x s32>)
+; CHECK: $d0 = COPY [[VEC]](<2 x s32>)
   %res = shufflevector <4 x i32> %arg, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
   ret <2 x i32> %res
 }
@@ -1591,8 +1591,8 @@ define <2 x i32> @test_shufflevector_v4s
 
 define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2) {
 ; CHECK-LABEL: name: test_shufflevector_v8s8_v16s8
-; CHECK: [[ARG1:%[0-9]+]]:_(<8 x s8>) = COPY %d0
-; CHECK: [[ARG2:%[0-9]+]]:_(<8 x s8>) = COPY %d1
+; CHECK: [[ARG1:%[0-9]+]]:_(<8 x s8>) = COPY $d0
+; CHECK: [[ARG2:%[0-9]+]]:_(<8 x s8>) = COPY $d1
 ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
 ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
 ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -1611,7 +1611,7 @@ define <16 x i8> @test_shufflevector_v8s
 ; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
 ; CHECK: [[MASK:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C8]](s32), [[C1]](s32), [[C9]](s32), [[C2]](s32), [[C10]](s32), [[C3]](s32), [[C11]](s32), [[C4]](s32), [[C12]](s32), [[C5]](s32), [[C13]](s32), [[C6]](s32), [[C14]](s32), [[C7]](s32), [[C15]](s32)
 ; CHECK: [[VEC:%[0-9]+]]:_(<16 x s8>) = G_SHUFFLE_VECTOR [[ARG1]](<8 x s8>), [[ARG2]], [[MASK]](<16 x s32>)
-; CHECK: %q0 = COPY [[VEC]](<16 x s8>)
+; CHECK: $q0 = COPY [[VEC]](<16 x s8>)
   %res = shufflevector <8 x i8> %arg1, <8 x i8> %arg2, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
   ret <16 x i8> %res
 }
@@ -1620,14 +1620,14 @@ define <16 x i8> @test_shufflevector_v8s
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
 ; CHECK: [[F:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
 ; CHECK: [[M:%[0-9]+]]:_(<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
-; CHECK: %d0 = COPY [[M]](<4 x s16>)
+; CHECK: $d0 = COPY [[M]](<4 x s16>)
 define <4 x half> @test_constant_vector() {
   ret <4 x half> <half undef, half undef, half undef, half 0xH3C00>
 }
 
 define i32 @test_target_mem_intrinsic(i32* %addr) {
 ; CHECK-LABEL: name: test_target_mem_intrinsic
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load 4 from %ir.addr)
 ; CHECK: G_TRUNC [[VAL]](s64)
   %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/arm64-regbankselect.mir Wed Jan 31 14:04:26 2018
@@ -122,10 +122,10 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: defaultMapping
     ; CHECK:      %1:gpr(s32) = G_ADD %0
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
 ...
 
@@ -140,11 +140,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %d0
+    liveins: $d0
     ; CHECK-LABEL: name: defaultMappingVector
-    ; CHECK:      %0:fpr(<2 x s32>) = COPY %d0
+    ; CHECK:      %0:fpr(<2 x s32>) = COPY $d0
     ; CHECK:      %1:fpr(<2 x s32>) = G_ADD %0
-    %0(<2 x s32>) = COPY %d0
+    %0(<2 x s32>) = COPY $d0
     %1(<2 x s32>) = G_ADD %0, %0
 ...
 
@@ -160,14 +160,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %s0, %x0
+    liveins: $s0, $x0
     ; CHECK-LABEL: name: defaultMapping1Repair
-    ; CHECK:           %0:fpr(s32) = COPY %s0
-    ; CHECK-NEXT:      %1:gpr(s32) = COPY %w0
+    ; CHECK:           %0:fpr(s32) = COPY $s0
+    ; CHECK-NEXT:      %1:gpr(s32) = COPY $w0
     ; CHECK-NEXT:      %3:gpr(s32) = COPY %0
     ; CHECK-NEXT:      %2:gpr(s32) = G_ADD %3, %1
-    %0(s32) = COPY %s0
-    %1(s32) = COPY %w0
+    %0(s32) = COPY $s0
+    %1(s32) = COPY $w0
     %2(s32) = G_ADD %0, %1
 ...
 
@@ -179,13 +179,13 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %s0, %x0
+    liveins: $s0, $x0
     ; CHECK-LABEL: name: defaultMapping2Repairs
-    ; CHECK:           %0:fpr(s32) = COPY %s0
+    ; CHECK:           %0:fpr(s32) = COPY $s0
     ; CHECK-NEXT:      %2:gpr(s32) = COPY %0
     ; CHECK-NEXT:      %3:gpr(s32) = COPY %0
     ; CHECK-NEXT:      %1:gpr(s32) = G_ADD %2, %3
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_ADD %0, %0
 ...
 
@@ -201,12 +201,12 @@ registers:
   - { id: 1, class: fpr }
 body: |
   bb.0.entry:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: defaultMappingDefRepair
-    ; CHECK:           %0:gpr(s32) = COPY %w0
+    ; CHECK:           %0:gpr(s32) = COPY $w0
     ; CHECK-NEXT:      %2:gpr(s32) = G_ADD %0, %0
     ; CHECK-NEXT:      %1:fpr(s32) = COPY %2
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
 ...
 
@@ -231,12 +231,12 @@ registers:
 body: |
   bb.0.entry:
     successors: %bb.2.end, %bb.1.then
-    liveins: %x0, %x1, %w2
+    liveins: $x0, $x1, $w2
 
-    %0 = LDRWui killed %x0, 0 :: (load 4 from %ir.src)
+    %0 = LDRWui killed $x0, 0 :: (load 4 from %ir.src)
     %5(s32) = COPY %0
-    %1(p0) = COPY %x1
-    %2 = COPY %w2
+    %1(p0) = COPY $x1
+    %2 = COPY $w2
     TBNZW killed %2, 0, %bb.2.end
 
   bb.1.then:
@@ -259,14 +259,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %w0, %s0
+    liveins: $w0, $s0
     ; CHECK-LABEL: name: defaultMappingUseRepairPhysReg
-    ; CHECK:           %0:gpr(s32) = COPY %w0
-    ; CHECK-NEXT:      %1:fpr(s32) = COPY %s0
+    ; CHECK:           %0:gpr(s32) = COPY $w0
+    ; CHECK-NEXT:      %1:fpr(s32) = COPY $s0
     ; CHECK-NEXT:      %3:gpr(s32) = COPY %1
     ; CHECK-NEXT:      %2:gpr(s32) = G_ADD %0, %3
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %s0
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $s0
     %2(s32) = G_ADD %0, %1
 ...
 
@@ -279,14 +279,14 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0.entry:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: defaultMappingDefRepairPhysReg
-    ; CHECK:           %0:gpr(s32) = COPY %w0
+    ; CHECK:           %0:gpr(s32) = COPY $w0
     ; CHECK-NEXT:      %1:gpr(s32) = G_ADD %0, %0
-    ; CHECK-NEXT:      %s0 = COPY %1
-    %0(s32) = COPY %w0
+    ; CHECK-NEXT:      $s0 = COPY %1
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
-    %s0 = COPY %1
+    $s0 = COPY %1
 ...
 
 ---
@@ -300,9 +300,9 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1
-    ; CHECK: %0:gpr(<2 x s32>) = COPY %x0
-    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY %x1
+    liveins: $x0, $x1
+    ; CHECK: %0:gpr(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY $x1
 
     ; Fast mode tries to reuse the source of the copy for the destination.
     ; Now, the default mapping says that %0 and %1 need to be in FPR.
@@ -314,8 +314,8 @@ body: |
 
     ; Greedy mode remapped the instruction on the GPR bank.
     ; GREEDY-NEXT: %2:gpr(<2 x s32>) = G_OR %0, %1
-    %0(<2 x s32>) = COPY %x0
-    %1(<2 x s32>) = COPY %x1
+    %0(<2 x s32>) = COPY $x0
+    %1(<2 x s32>) = COPY $x1
     %2(<2 x s32>) = G_OR %0, %1
 ...
 
@@ -331,11 +331,11 @@ registers:
   - { id: 2, class: fpr }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
     ; CHECK-LABEL: name: greedyMappingOrWithConstraints
 
-    ; CHECK: %0:gpr(<2 x s32>) = COPY %x0
-    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY %x1
+    ; CHECK: %0:gpr(<2 x s32>) = COPY $x0
+    ; CHECK-NEXT: %1:gpr(<2 x s32>) = COPY $x1
 
     ; Fast mode tries to reuse the source of the copy for the destination.
     ; Now, the default mapping says that %0 and %1 need to be in FPR.
@@ -349,8 +349,8 @@ body: |
     ; GREEDY-NEXT: %3:gpr(<2 x s32>) = G_OR %0, %1
    ; We need to keep %2 in FPR because we do not know anything about it.
     ; GREEDY-NEXT: %2:fpr(<2 x s32>) = COPY %3
-    %0(<2 x s32>) = COPY %x0
-    %1(<2 x s32>) = COPY %x1
+    %0(<2 x s32>) = COPY $x0
+    %1(<2 x s32>) = COPY $x1
     %2(<2 x s32>) = G_OR %0, %1
 ...
 
@@ -366,17 +366,17 @@ registers:
   - { id: 1, class: gpr64 }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    ; CHECK: %0:gpr64 = COPY %x0
+    ; CHECK: %0:gpr64 = COPY $x0
     ; CHECK-NEXT: %1:gpr64 = ADDXrr %0, %0
-    ; CHECK-NEXT: %x0 = COPY %1
-    ; CHECK-NEXT: RET_ReallyLR implicit %x0
+    ; CHECK-NEXT: $x0 = COPY %1
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
 
-    %0 = COPY %x0
+    %0 = COPY $x0
     %1 = ADDXrr %0, %0
-    %x0 = COPY %1
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %1
+    RET_ReallyLR implicit $x0
 ...
 
 ---
@@ -404,13 +404,13 @@ registers:
   - { id: 1, class: _ }
 
 # CHECK:  body:
-# CHECK:    %0:gpr(s32) = COPY %w0
+# CHECK:    %0:gpr(s32) = COPY $w0
 # CHECK:    %1:gpr(s32) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_BITCAST %0
 ...
 
@@ -427,13 +427,13 @@ registers:
   - { id: 1, class: _ }
 
 # CHECK:  body:
-# CHECK:    %0:fpr(<2 x s16>) = COPY %s0
+# CHECK:    %0:fpr(<2 x s16>) = COPY $s0
 # CHECK:    %1:fpr(<2 x s16>) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
-    %0(<2 x s16>) = COPY %s0
+    %0(<2 x s16>) = COPY $s0
     %1(<2 x s16>) = G_BITCAST %0
 ...
 
@@ -451,14 +451,14 @@ registers:
   - { id: 1, class: _ }
 
 # CHECK:  body:
-# CHECK:    %0:gpr(s32) = COPY %w0
+# CHECK:    %0:gpr(s32) = COPY $w0
 # FAST:     %1:fpr(<2 x s16>) = G_BITCAST %0
 # GREEDY:   %1:gpr(<2 x s16>) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(<2 x s16>) = G_BITCAST %0
 ...
 
@@ -470,14 +470,14 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 # CHECK:  body:
-# CHECK:    %0:fpr(<2 x s16>) = COPY %s0
+# CHECK:    %0:fpr(<2 x s16>) = COPY $s0
 # FAST:     %1:gpr(s32) = G_BITCAST %0
 # GREEDY:   %1:fpr(s32) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
-    %0(<2 x s16>) = COPY %s0
+    %0(<2 x s16>) = COPY $s0
     %1(s32) = G_BITCAST %0
 ...
 
@@ -489,13 +489,13 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 # CHECK:  body:
-# CHECK:    %0:gpr(s64) = COPY %x0
+# CHECK:    %0:gpr(s64) = COPY $x0
 # CHECK:    %1:gpr(s64) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(s64) = G_BITCAST %0
 ...
 
@@ -507,13 +507,13 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 # CHECK:  body:
-# CHECK:    %0:fpr(<2 x s32>) = COPY %d0
+# CHECK:    %0:fpr(<2 x s32>) = COPY $d0
 # CHECK:    %1:fpr(<2 x s32>) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
-    %0(<2 x s32>) = COPY %d0
+    %0(<2 x s32>) = COPY $d0
     %1(<2 x s32>) = G_BITCAST %0
 ...
 
@@ -525,14 +525,14 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 # CHECK:  body:
-# CHECK:    %0:gpr(s64) = COPY %x0
+# CHECK:    %0:gpr(s64) = COPY $x0
 # FAST:     %1:fpr(<2 x s32>) = G_BITCAST %0
 # GREEDY:   %1:gpr(<2 x s32>) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(<2 x s32>) = G_BITCAST %0
 ...
 
@@ -544,14 +544,14 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
 # CHECK:  body:
-# CHECK:    %0:fpr(<2 x s32>) = COPY %d0
+# CHECK:    %0:fpr(<2 x s32>) = COPY $d0
 # FAST:     %1:gpr(s64) = G_BITCAST %0
 # GREEDY:   %1:fpr(s64) = G_BITCAST %0
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
-    %0(<2 x s32>) = COPY %d0
+    %0(<2 x s32>) = COPY $d0
     %1(s64) = G_BITCAST %0
 ...
 
@@ -569,13 +569,13 @@ registers:
 # CHECK: %2:fpr(<2 x s64>) = G_BITCAST %3(s128)
 body:             |
   bb.1:
-    liveins: %x0, %x1
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    liveins: $x0, $x1
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %3(s128) = G_MERGE_VALUES %0(s64), %1(s64)
     %2(<2 x s64>) = G_BITCAST %3(s128)
-    %q0 = COPY %2(<2 x s64>)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
 
 ...
 
@@ -598,14 +598,14 @@ registers:
 # CHECK-NEXT: %2:fpr(<2 x s64>) = G_BITCAST %4(s128)
 body:             |
   bb.1:
-    liveins: %x0, %x1
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    liveins: $x0, $x1
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %3(s128) = G_MERGE_VALUES %0(s64), %1(s64)
     %4(s128) = COPY %3(s128)
     %2(<2 x s64>) = G_BITCAST %4(s128)
-    %q0 = COPY %2(<2 x s64>)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
 
 ...
 
@@ -630,11 +630,11 @@ registers:
 # CHECK: %1:fpr(s128) = COPY %0
 body:             |
   bb.1:
-    liveins: %x0
-    %0 = LDRQui killed %x0, 0
+    liveins: $x0
+    %0 = LDRQui killed $x0, 0
     %1(s128) = COPY %0
-    %q0 = COPY %1(s128)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %1(s128)
+    RET_ReallyLR implicit $q0
 
 ...
 
@@ -654,15 +654,15 @@ registers:
 # CHECK: registers:
 # CHECK:  - { id: 0, class: gpr, preferred-register: '' }
 # CHECK:  - { id: 1, class: gpr, preferred-register: '' }
-# CHECK: %0:gpr(s32) = COPY %w0
+# CHECK: %0:gpr(s32) = COPY $w0
 # CHECK-NEXT: %1:gpr(s16) = G_TRUNC %0(s32)
 body:             |
   bb.1:
-    liveins: %w0
-    %0(s32) = COPY %w0
+    liveins: $w0
+    %0(s32) = COPY $w0
     %1(s16) = G_TRUNC %0(s32)
-    %h0 = COPY %1(s16)
-    RET_ReallyLR implicit %h0
+    $h0 = COPY %1(s16)
+    RET_ReallyLR implicit $h0
 
 ...
 
@@ -682,8 +682,8 @@ registers:
   - { id: 4, class: _ }
   - { id: 5, class: _ }
# No repairing should be necessary in either mode.
-# CHECK:         %0:gpr(s64) = COPY %x0
-# CHECK-NEXT:    %1:gpr(p0) = COPY %x1
+# CHECK:         %0:gpr(s64) = COPY $x0
+# CHECK-NEXT:    %1:gpr(p0) = COPY $x1
 # FAST-NEXT:     %2:fpr(<2 x s32>) = G_BITCAST %0(s64)
 # FAST-NEXT:     %3:fpr(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
 # FAST-NEXT:     %4:fpr(<2 x s32>) = G_OR %2, %3
@@ -691,20 +691,20 @@ registers:
 # GREEDY-NEXT:   %3:gpr(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
 # GREEDY-NEXT:   %4:gpr(<2 x s32>) = G_OR %2, %3
 # CHECK-NEXT:    %5:gpr(s64) = G_BITCAST %4(<2 x s32>)
-# CHECK-NEXT:    %x0 = COPY %5(s64)
-# CHECK-NEXT:    RET_ReallyLR implicit %x0
+# CHECK-NEXT:    $x0 = COPY %5(s64)
+# CHECK-NEXT:    RET_ReallyLR implicit $x0
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %0(s64) = COPY %x0
-    %1(p0) = COPY %x1
+    %0(s64) = COPY $x0
+    %1(p0) = COPY $x1
     %2(<2 x s32>) = G_BITCAST %0(s64)
     %3(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
     %4(<2 x s32>) = G_OR %2, %3
     %5(s64) = G_BITCAST %4(<2 x s32>)
-    %x0 = COPY %5(s64)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %5(s64)
+    RET_ReallyLR implicit $x0
 
 ...
 
@@ -728,25 +728,25 @@ registers:
   - { id: 3, class: _ }
 
# No repairing should be necessary in either mode.
-# CHECK:         %0:gpr(s64) = COPY %x0
-# CHECK-NEXT:    %1:gpr(p0) = COPY %x1
+# CHECK:         %0:gpr(s64) = COPY $x0
+# CHECK-NEXT:    %1:gpr(p0) = COPY $x1
 # CHECK-NEXT:    %2:fpr(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
# %0 has been mapped to GPR, so we need to repair to match FPR.
 # CHECK-NEXT:    %4:fpr(s64) = COPY %0
 # CHECK-NEXT:    %3:fpr(s64) = G_FADD %4, %2
-# CHECK-NEXT:    %x0 = COPY %3(s64)
-# CHECK-NEXT:    RET_ReallyLR implicit %x0
+# CHECK-NEXT:    $x0 = COPY %3(s64)
+# CHECK-NEXT:    RET_ReallyLR implicit $x0
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %0(s64) = COPY %x0
-    %1(p0) = COPY %x1
+    %0(s64) = COPY $x0
+    %1(p0) = COPY $x1
     %2(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
     %3(s64) = G_FADD %0, %2
-    %x0 = COPY %3(s64)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3(s64)
+    RET_ReallyLR implicit $x0
 
 ...
 
@@ -768,8 +768,8 @@ registers:
   - { id: 1, class: _ }
   - { id: 2, class: _ }
 
-# CHECK:         %0:gpr(s64) = COPY %x0
-# CHECK-NEXT:    %1:gpr(p0) = COPY %x1
+# CHECK:         %0:gpr(s64) = COPY $x0
+# CHECK-NEXT:    %1:gpr(p0) = COPY $x1
# %0 has been mapped to GPR, so we need to repair to match FPR.
 # CHECK-NEXT:    %3:fpr(s64) = COPY %0
 # CHECK-NEXT:    %4:fpr(s64) = COPY %0
@@ -779,10 +779,10 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %0(s64) = COPY %x0
-    %1(p0) = COPY %x1
+    %0(s64) = COPY $x0
+    %1(p0) = COPY $x1
     %2(s64) = G_FADD %0, %0
     G_STORE %2(s64), %1(p0) :: (store 8 into %ir.addr)
     RET_ReallyLR
@@ -804,23 +804,23 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# CHECK:         %1:gpr(s32) = COPY %w0
+# CHECK:         %1:gpr(s32) = COPY $w0
 # CHECK-NEXT:    %0:gpr(s16) = G_TRUNC %1
# %0 has been mapped to GPR, so we need to repair to match FPR.
 # CHECK-NEXT:    %3:fpr(s16) = COPY %0
 # CHECK-NEXT:    %2:fpr(s32) = G_FPEXT %3
-# CHECK-NEXT:    %s0 = COPY %2
+# CHECK-NEXT:    $s0 = COPY %2
 # CHECK-NEXT:    RET_ReallyLR
 
 body:             |
   bb.1:
-    liveins: %w0
+    liveins: $w0
 
-    %1(s32) = COPY %w0
+    %1(s32) = COPY $w0
     %0(s16) = G_TRUNC %1(s32)
     %2(s32) = G_FPEXT %0(s16)
-    %s0 = COPY %2(s32)
-    RET_ReallyLR implicit %s0
+    $s0 = COPY %2(s32)
+    RET_ReallyLR implicit $s0
 
 ...
 
@@ -839,23 +839,23 @@ registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
   - { id: 2, class: _ }
-# CHECK:         %1:gpr(s32) = COPY %w0
+# CHECK:         %1:gpr(s32) = COPY $w0
 # CHECK-NEXT:    %0:gpr(s16) = G_TRUNC %1
# %0 has been mapped to GPR, so we need to repair to match FPR.
 # CHECK-NEXT:    %3:fpr(s16) = COPY %0
 # CHECK-NEXT:    %2:fpr(s64) = G_FPEXT %3
-# CHECK-NEXT:    %d0 = COPY %2
+# CHECK-NEXT:    $d0 = COPY %2
 # CHECK-NEXT:    RET_ReallyLR
 
 body:             |
   bb.1:
-    liveins: %w0
+    liveins: $w0
 
-    %1(s32) = COPY %w0
+    %1(s32) = COPY $w0
     %0(s16) = G_TRUNC %1(s32)
     %2(s64) = G_FPEXT %0(s16)
-    %d0 = COPY %2(s64)
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %2(s64)
+    RET_ReallyLR implicit $d0
 
 ...
 
@@ -872,20 +872,20 @@ legalized:       true
 registers:
   - { id: 0, class: _ }
   - { id: 1, class: _ }
-# CHECK:         %0:gpr(s32) = COPY %w0
+# CHECK:         %0:gpr(s32) = COPY $w0
# %0 has been mapped to GPR, so we need to repair to match FPR.
 # CHECK-NEXT:    %2:fpr(s32) = COPY %0
 # CHECK-NEXT:    %1:fpr(s64) = G_FPEXT %2
-# CHECK-NEXT:    %d0 = COPY %1
+# CHECK-NEXT:    $d0 = COPY %1
 # CHECK-NEXT:    RET_ReallyLR
 body:             |
   bb.1:
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s64) = G_FPEXT %0(s32)
-    %d0 = COPY %1(s64)
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %1(s64)
+    RET_ReallyLR implicit $d0
 
 ...
 
@@ -894,8 +894,8 @@ body:             |
 # CHECK-LABEL: name: passFp16
 # CHECK: registers:
 # CHECK:  - { id: 0, class: fpr, preferred-register: '' }
-# CHECK:  %0:fpr(s16) = COPY %h0
-# CHECK-NEXT: %h0 = COPY %0(s16)
+# CHECK:  %0:fpr(s16) = COPY $h0
+# CHECK-NEXT: $h0 = COPY %0(s16)
 name:            passFp16
 alignment:       2
 legalized:       true
@@ -903,11 +903,11 @@ registers:
   - { id: 0, class: _ }
 body:             |
   bb.1.entry:
-    liveins: %h0
+    liveins: $h0
 
-    %0(s16) = COPY %h0
-    %h0 = COPY %0(s16)
-    RET_ReallyLR implicit %h0
+    %0(s16) = COPY $h0
+    $h0 = COPY %0(s16)
+    RET_ReallyLR implicit $h0
 
 ...
 ---
@@ -919,7 +919,7 @@ body:             |
 # CHECK:  - { id: 1, class: gpr, preferred-register: '' }
 # CHECK:  - { id: 2, class: fpr, preferred-register: '' }
 #
-# CHECK:  %0:fpr(s16) = COPY %h0
+# CHECK:  %0:fpr(s16) = COPY $h0
 # CHECK-NEXT: %1:gpr(p0) = G_FRAME_INDEX %stack.0.p.addr
 # If we didn't look through the copy for %0, the default mapping
 # would have been on GPR and we would have to insert a copy to move
@@ -929,7 +929,7 @@ body:             |
 # would have been on GPR and we would have to insert a copy to move
 # the value to FPR (h0).
 # CHECK-NEXT: %2:fpr(s16) = G_LOAD %1(p0) :: (load 2 from %ir.p.addr)
-# CHECK-NEXT: %h0 = COPY %2(s16)
+# CHECK-NEXT: $h0 = COPY %2(s16)
 name:            passFp16ViaAllocas
 alignment:       2
 legalized:       true
@@ -944,13 +944,13 @@ stack:
   - { id: 0, name: p.addr, size: 2, alignment: 2, stack-id: 0 }
 body:             |
   bb.1.entry:
-    liveins: %h0
+    liveins: $h0
 
-    %0(s16) = COPY %h0
+    %0(s16) = COPY $h0
     %1(p0) = G_FRAME_INDEX %stack.0.p.addr
     G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p.addr)
     %2(s16) = G_LOAD %1(p0) :: (load 2 from %ir.p.addr)
-    %h0 = COPY %2(s16)
-    RET_ReallyLR implicit %h0
+    $h0 = COPY %2(s16)
+    RET_ReallyLR implicit $h0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator-ios.ll Wed Jan 31 14:04:26 2018
@@ -11,7 +11,7 @@
 ; CHECK: [[RHS:%[0-9]+]]:_(s8) = G_LOAD [[RHS_ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[STACK8]], align 0)
 ; CHECK: [[SUM:%[0-9]+]]:_(s8) = G_ADD [[LHS]], [[RHS]]
 ; CHECK: [[SUM32:%[0-9]+]]:_(s32) = G_SEXT [[SUM]](s8)
-; CHECK: %w0 = COPY [[SUM32]](s32)
+; CHECK: $w0 = COPY [[SUM32]](s32)
 define signext i8 @test_stack_slots([8 x i64], i8 signext %lhs, i8 signext %rhs) {
   %sum = add i8 %lhs, %rhs
   ret i8 %sum
@@ -20,11 +20,11 @@ define signext i8 @test_stack_slots([8 x
 ; CHECK-LABEL: name: test_call_stack
 ; CHECK: [[C42:%[0-9]+]]:_(s8) = G_CONSTANT i8 42
 ; CHECK: [[C12:%[0-9]+]]:_(s8) = G_CONSTANT i8 12
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
 ; CHECK: G_STORE [[C42]](s8), [[C42_LOC]](p0) :: (store 1 into stack, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
 ; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C12_OFFS]](s64)
 ; CHECK: G_STORE [[C12]](s8), [[C12_LOC]](p0) :: (store 1 into stack + 1, align 0)
@@ -35,9 +35,9 @@ define void @test_call_stack() {
 }
 
 ; CHECK-LABEL: name: test_128bit_struct
-; CHECK: %x0 = COPY
-; CHECK: %x1 = COPY
-; CHECK: %x2 = COPY
+; CHECK: $x0 = COPY
+; CHECK: $x1 = COPY
+; CHECK: $x2 = COPY
 ; CHECK: BL @take_128bit_struct
 define void @test_128bit_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -46,9 +46,9 @@ define void @test_128bit_struct([2 x i64
 }
 
 ; CHECK-LABEL: name: take_128bit_struct
-; CHECK: {{%.*}}:_(p0) = COPY %x0
-; CHECK: {{%.*}}:_(s64) = COPY %x1
-; CHECK: {{%.*}}:_(s64) = COPY %x2
+; CHECK: {{%.*}}:_(p0) = COPY $x0
+; CHECK: {{%.*}}:_(s64) = COPY $x1
+; CHECK: {{%.*}}:_(s64) = COPY $x2
 define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
   store [2 x i64] %in, [2 x i64]* %ptr
   ret void
@@ -59,12 +59,12 @@ define void @take_128bit_struct([2 x i64
 ; CHECK: [[LO:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 0
 ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 64
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[LO]](s64), [[ADDR]](p0) :: (store 8 into stack, align 0)
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[HI]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 0)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/call-translator.ll Wed Jan 31 14:04:26 2018
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
 
 ; CHECK-LABEL: name: test_trivial_call
-; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp
-; CHECK: BL @trivial_callee, csr_aarch64_aapcs, implicit-def %lr
-; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; CHECK: BL @trivial_callee, csr_aarch64_aapcs, implicit-def $lr
+; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 declare void @trivial_callee()
 define void @test_trivial_call() {
   call void @trivial_callee()
@@ -11,10 +11,10 @@ define void @test_trivial_call() {
 }
 
 ; CHECK-LABEL: name: test_simple_return
-; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit-def %x0
-; CHECK: [[RES:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: %x0 = COPY [[RES]]
-; CHECK: RET_ReallyLR implicit %x0
+; CHECK: BL @simple_return_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit-def $x0
+; CHECK: [[RES:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: $x0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $x0
 declare i64 @simple_return_callee()
 define i64 @test_simple_return() {
   %res = call i64 @simple_return_callee()
@@ -22,9 +22,9 @@ define i64 @test_simple_return() {
 }
 
 ; CHECK-LABEL: name: test_simple_arg
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
-; CHECK: %w0 = COPY [[IN]]
-; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
+; CHECK: $w0 = COPY [[IN]]
+; CHECK: BL @simple_arg_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 ; CHECK: RET_ReallyLR
 declare void @simple_arg_callee(i32 %in)
 define void @test_simple_arg(i32 %in) {
@@ -36,8 +36,8 @@ define void @test_simple_arg(i32 %in) {
 ; CHECK: registers:
 ; Make sure the register feeding the indirect call is properly constrained.
 ; CHECK: - { id: [[FUNC:[0-9]+]], class: gpr64, preferred-register: '' }
-; CHECK: %[[FUNC]]:gpr64(p0) = COPY %x0
-; CHECK: BLR %[[FUNC]](p0), csr_aarch64_aapcs, implicit-def %lr, implicit %sp
+; CHECK: %[[FUNC]]:gpr64(p0) = COPY $x0
+; CHECK: BLR %[[FUNC]](p0), csr_aarch64_aapcs, implicit-def $lr, implicit $sp
 ; CHECK: RET_ReallyLR
 define void @test_indirect_call(void()* %func) {
   call void %func()
@@ -45,11 +45,11 @@ define void @test_indirect_call(void()*
 }
 
 ; CHECK-LABEL: name: test_multiple_args
-; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[IN:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[ANSWER:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
-; CHECK: %w0 = COPY [[ANSWER]]
-; CHECK: %x1 = COPY [[IN]]
-; CHECK: BL @multiple_args_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit %x1
+; CHECK: $w0 = COPY [[ANSWER]]
+; CHECK: $x1 = COPY [[IN]]
+; CHECK: BL @multiple_args_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $x1
 ; CHECK: RET_ReallyLR
 declare void @multiple_args_callee(i32, i64)
 define void @test_multiple_args(i64 %in) {
@@ -59,11 +59,11 @@ define void @test_multiple_args(i64 %in)
 
 
 ; CHECK-LABEL: name: test_struct_formal
-; CHECK: [[DBL:%[0-9]+]]:_(s64) = COPY %d0
-; CHECK: [[I64:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[I8_C:%[0-9]+]]:_(s32) = COPY %w1
+; CHECK: [[DBL:%[0-9]+]]:_(s64) = COPY $d0
+; CHECK: [[I64:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[I8_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[I8:%[0-9]+]]:_(s8) = G_TRUNC [[I8_C]]
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x2
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
 
 ; CHECK: [[UNDEF:%[0-9]+]]:_(s192) = G_IMPLICIT_DEF
 ; CHECK: [[ARG0:%[0-9]+]]:_(s192) = G_INSERT [[UNDEF]], [[DBL]](s64), 0
@@ -80,17 +80,17 @@ define void @test_struct_formal({double,
 
 
 ; CHECK-LABEL: name: test_struct_return
-; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[VAL:%[0-9]+]]:_(s192) = G_LOAD [[ADDR]](p0)
 
 ; CHECK: [[DBL:%[0-9]+]]:_(s64) = G_EXTRACT [[VAL]](s192), 0
 ; CHECK: [[I64:%[0-9]+]]:_(s64) = G_EXTRACT [[VAL]](s192), 64
 ; CHECK: [[I32:%[0-9]+]]:_(s32) = G_EXTRACT [[VAL]](s192), 128
 
-; CHECK: %d0 = COPY [[DBL]](s64)
-; CHECK: %x0 = COPY [[I64]](s64)
-; CHECK: %w1 = COPY [[I32]](s32)
-; CHECK: RET_ReallyLR implicit %d0, implicit %x0, implicit %w1
+; CHECK: $d0 = COPY [[DBL]](s64)
+; CHECK: $x0 = COPY [[I64]](s64)
+; CHECK: $w1 = COPY [[I32]](s32)
+; CHECK: RET_ReallyLR implicit $d0, implicit $x0, implicit $w1
 define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
   %val = load {double, i64, i32}, {double, i64, i32}* %addr
   ret {double, i64, i32} %val
@@ -105,15 +105,15 @@ define {double, i64, i32} @test_struct_r
 ; CHECK: [[E2:%[0-9]+]]:_(s64) = G_EXTRACT [[ARG]](s256), 128
 ; CHECK: [[E3:%[0-9]+]]:_(s64) = G_EXTRACT [[ARG]](s256), 192
 
-; CHECK: %x0 = COPY [[E0]](s64)
-; CHECK: %x1 = COPY [[E1]](s64)
-; CHECK: %x2 = COPY [[E2]](s64)
-; CHECK: %x3 = COPY [[E3]](s64)
-; CHECK: BL @arr_callee, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit %x1, implicit %x2, implicit %x3, implicit-def %x0, implicit-def %x1, implicit-def %x2, implicit-def %x3
-; CHECK: [[E0:%[0-9]+]]:_(s64) = COPY %x0
-; CHECK: [[E1:%[0-9]+]]:_(s64) = COPY %x1
-; CHECK: [[E2:%[0-9]+]]:_(s64) = COPY %x2
-; CHECK: [[E3:%[0-9]+]]:_(s64) = COPY %x3
+; CHECK: $x0 = COPY [[E0]](s64)
+; CHECK: $x1 = COPY [[E1]](s64)
+; CHECK: $x2 = COPY [[E2]](s64)
+; CHECK: $x3 = COPY [[E3]](s64)
+; CHECK: BL @arr_callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit-def $x0, implicit-def $x1, implicit-def $x2, implicit-def $x3
+; CHECK: [[E0:%[0-9]+]]:_(s64) = COPY $x0
+; CHECK: [[E1:%[0-9]+]]:_(s64) = COPY $x1
+; CHECK: [[E2:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: [[E3:%[0-9]+]]:_(s64) = COPY $x3
 ; CHECK: [[RES:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[E0]](s64), [[E1]](s64), [[E2]](s64), [[E3]](s64)
 ; CHECK: G_EXTRACT [[RES]](s256), 64
 declare [4 x i64] @arr_callee([4 x i64])
@@ -128,14 +128,14 @@ define i64 @test_arr_call([4 x i64]* %ad
 ; CHECK-LABEL: name: test_abi_exts_call
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_LOAD
 ; CHECK: [[VAL_TMP:%[0-9]+]]:_(s32) = G_ANYEXT [[VAL]]
-; CHECK: %w0 = COPY [[VAL_TMP]]
-; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: $w0 = COPY [[VAL_TMP]]
+; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_SEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[SVAL]](s32)
-; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: $w0 = COPY [[SVAL]](s32)
+; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 ; CHECK: [[ZVAL:%[0-9]+]]:_(s32) = G_ZEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[ZVAL]](s32)
-; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0
+; CHECK: $w0 = COPY [[ZVAL]](s32)
+; CHECK: BL @take_char, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0
 declare void @take_char(i8)
 define void @test_abi_exts_call(i8* %addr) {
   %val = load i8, i8* %addr
@@ -148,8 +148,8 @@ define void @test_abi_exts_call(i8* %add
 ; CHECK-LABEL: name: test_abi_sext_ret
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_LOAD
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_SEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[SVAL]](s32)
-; CHECK: RET_ReallyLR implicit %w0
+; CHECK: $w0 = COPY [[SVAL]](s32)
+; CHECK: RET_ReallyLR implicit $w0
 define signext i8 @test_abi_sext_ret(i8* %addr) {
   %val = load i8, i8* %addr
   ret i8 %val
@@ -158,8 +158,8 @@ define signext i8 @test_abi_sext_ret(i8*
 ; CHECK-LABEL: name: test_abi_zext_ret
 ; CHECK: [[VAL:%[0-9]+]]:_(s8) = G_LOAD
 ; CHECK: [[SVAL:%[0-9]+]]:_(s32) = G_ZEXT [[VAL]](s8)
-; CHECK: %w0 = COPY [[SVAL]](s32)
-; CHECK: RET_ReallyLR implicit %w0
+; CHECK: $w0 = COPY [[SVAL]](s32)
+; CHECK: RET_ReallyLR implicit $w0
 define zeroext i8 @test_abi_zext_ret(i8* %addr) {
   %val = load i8, i8* %addr
   ret i8 %val
@@ -188,21 +188,21 @@ define void @test_stack_slots([8 x i64],
 ; CHECK: [[C42:%[0-9]+]]:_(s64) = G_CONSTANT i64 42
 ; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
 ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
-; CHECK: ADJCALLSTACKDOWN 24, 0, implicit-def %sp, implicit %sp
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C42_OFFS]](s64)
 ; CHECK: G_STORE [[C42]](s64), [[C42_LOC]](p0) :: (store 8 into stack, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[C12_OFFS]](s64)
 ; CHECK: G_STORE [[C12]](s64), [[C12_LOC]](p0) :: (store 8 into stack + 8, align 0)
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[PTR_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
 ; CHECK: [[PTR_LOC:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[PTR_OFFS]](s64)
 ; CHECK: G_STORE [[PTR]](p0), [[PTR_LOC]](p0) :: (store 8 into stack + 16, align 0)
 ; CHECK: BL @test_stack_slots
-; CHECK: ADJCALLSTACKUP 24, 0, implicit-def %sp, implicit %sp
+; CHECK: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
 define void @test_call_stack() {
   call void @test_stack_slots([8 x i64] undef, i64 42, i64 12, i64* null)
   ret void
@@ -219,9 +219,9 @@ define void @test_mem_i1([8 x i64], i1 %
 }
 
 ; CHECK-LABEL: name: test_128bit_struct
-; CHECK: %x0 = COPY
-; CHECK: %x1 = COPY
-; CHECK: %x2 = COPY
+; CHECK: $x0 = COPY
+; CHECK: $x1 = COPY
+; CHECK: $x2 = COPY
 ; CHECK: BL @take_128bit_struct
 define void @test_128bit_struct([2 x i64]* %ptr) {
   %struct = load [2 x i64], [2 x i64]* %ptr
@@ -230,9 +230,9 @@ define void @test_128bit_struct([2 x i64
 }
 
 ; CHECK-LABEL: name: take_128bit_struct
-; CHECK: {{%.*}}:_(p0) = COPY %x0
-; CHECK: {{%.*}}:_(s64) = COPY %x1
-; CHECK: {{%.*}}:_(s64) = COPY %x2
+; CHECK: {{%.*}}:_(p0) = COPY $x0
+; CHECK: {{%.*}}:_(s64) = COPY $x1
+; CHECK: {{%.*}}:_(s64) = COPY $x2
 define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
   store [2 x i64] %in, [2 x i64]* %ptr
   ret void
@@ -243,12 +243,12 @@ define void @take_128bit_struct([2 x i64
 ; CHECK: [[LO:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 0
 ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_EXTRACT [[STRUCT]](s128), 64
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[LO]](s64), [[ADDR]](p0) :: (store 8 into stack, align 0)
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFF]]
 ; CHECK: G_STORE [[HI]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 0)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/debug-insts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/debug-insts.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/debug-insts.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/debug-insts.ll Wed Jan 31 14:04:26 2018
@@ -6,7 +6,7 @@
 ; CHECK:    - { id: {{.*}}, name: in.addr, type: default, offset: 0, size: {{.*}}, alignment: {{.*}},
 ; CHECK-NEXT: callee-saved-register: '', callee-saved-restored: true,
 ; CHECK-NEXT: di-variable: '!11', di-expression: '!DIExpression()',
-; CHECK: DBG_VALUE debug-use %0(s32), debug-use %noreg, !11, !DIExpression(), debug-location !12
+; CHECK: DBG_VALUE debug-use %0(s32), debug-use $noreg, !11, !DIExpression(), debug-location !12
 define void @debug_declare(i32 %in) #0 !dbg !7 {
 entry:
   %in.addr = alloca i32, align 4
@@ -17,7 +17,7 @@ entry:
 }
 
 ; CHECK-LABEL: name: debug_declare_vla
-; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use %noreg, !14, !DIExpression(), debug-location !15
+; CHECK: DBG_VALUE debug-use %{{[0-9]+}}(p0), debug-use $noreg, !14, !DIExpression(), debug-location !15
 define void @debug_declare_vla(i32 %in) #0 !dbg !13 {
 entry:
   %vla.addr = alloca i32, i32 %in
@@ -26,19 +26,19 @@ entry:
 }
 
 ; CHECK-LABEL: name: debug_value
-; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[IN:%[0-9]+]]:_(s32) = COPY $w0
 define void @debug_value(i32 %in) #0 !dbg !16 {
   %addr = alloca i32
-; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use %noreg, !17, !DIExpression(), debug-location !18
+; CHECK: DBG_VALUE debug-use [[IN]](s32), debug-use $noreg, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32 %in, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   store i32 %in, i32* %addr
-; CHECK: DBG_VALUE debug-use %1(p0), debug-use %noreg, !17, !DIExpression(DW_OP_deref), debug-location !18
+; CHECK: DBG_VALUE debug-use %1(p0), debug-use $noreg, !17, !DIExpression(DW_OP_deref), debug-location !18
   call void @llvm.dbg.value(metadata i32* %addr, i64 0, metadata !17, metadata !DIExpression(DW_OP_deref)), !dbg !18
 ; CHECK: DBG_VALUE 123, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32 123, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
 ; CHECK: DBG_VALUE float 1.000000e+00, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata float 1.000000e+00, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
-; CHECK: DBG_VALUE %noreg, 0, !17, !DIExpression(), debug-location !18
+; CHECK: DBG_VALUE $noreg, 0, !17, !DIExpression(), debug-location !18
   call void @llvm.dbg.value(metadata i32* null, i64 0, metadata !17, metadata !DIExpression()), !dbg !18
   ret void
 }

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/dynamic-alloca.ll Wed Jan 31 14:04:26 2018
@@ -1,47 +1,47 @@
 ; RUN: llc -mtriple=aarch64 -global-isel %s -o - -stop-after=irtranslator | FileCheck %s
 
 ; CHECK-LABEL: name: test_simple_alloca
-; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TYPE_SIZE:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
 ; CHECK: [[NUMELTS_64:%[0-9]+]]:_(s64) = G_ZEXT [[NUMELTS]](s32)
 ; CHECK: [[NUMBYTES:%[0-9]+]]:_(s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
-; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
 ; CHECK: [[ALIGNED_ALLOC:%[0-9]+]]:_(p0) = G_PTR_MASK [[ALLOC]], 4
-; CHECK: %sp = COPY [[ALIGNED_ALLOC]]
+; CHECK: $sp = COPY [[ALIGNED_ALLOC]]
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = COPY [[ALIGNED_ALLOC]]
-; CHECK: %x0 = COPY [[ALLOC]]
+; CHECK: $x0 = COPY [[ALLOC]]
 define i8* @test_simple_alloca(i32 %numelts) {
   %addr = alloca i8, i32 %numelts
   ret i8* %addr
 }
 
 ; CHECK-LABEL: name: test_aligned_alloca
-; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TYPE_SIZE:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
 ; CHECK: [[NUMELTS_64:%[0-9]+]]:_(s64) = G_ZEXT [[NUMELTS]](s32)
 ; CHECK: [[NUMBYTES:%[0-9]+]]:_(s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
-; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
 ; CHECK: [[ALIGNED_ALLOC:%[0-9]+]]:_(p0) = G_PTR_MASK [[ALLOC]], 5
-; CHECK: %sp = COPY [[ALIGNED_ALLOC]]
+; CHECK: $sp = COPY [[ALIGNED_ALLOC]]
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = COPY [[ALIGNED_ALLOC]]
-; CHECK: %x0 = COPY [[ALLOC]]
+; CHECK: $x0 = COPY [[ALLOC]]
 define i8* @test_aligned_alloca(i32 %numelts) {
   %addr = alloca i8, i32 %numelts, align 32
   ret i8* %addr
 }
 
 ; CHECK-LABEL: name: test_natural_alloca
-; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY %w0
+; CHECK: [[NUMELTS:%[0-9]+]]:_(s32) = COPY $w0
 ; CHECK: [[TYPE_SIZE:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
 ; CHECK: [[NUMELTS_64:%[0-9]+]]:_(s64) = G_ZEXT [[NUMELTS]](s32)
 ; CHECK: [[NUMBYTES:%[0-9]+]]:_(s64) = G_MUL [[NUMELTS_64]], [[TYPE_SIZE]]
-; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP_TMP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[ALLOC:%[0-9]+]]:_(p0) = G_GEP [[SP_TMP]], [[NUMBYTES]]
-; CHECK: %sp = COPY [[ALLOC]]
+; CHECK: $sp = COPY [[ALLOC]]
 ; CHECK: [[ALLOC_TMP:%[0-9]+]]:_(p0) = COPY [[ALLOC]]
-; CHECK: %x0 = COPY [[ALLOC_TMP]]
+; CHECK: $x0 = COPY [[ALLOC_TMP]]
 define i128* @test_natural_alloca(i32 %numelts) {
   %addr = alloca i128, i32 %numelts
   ret i128* %addr

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/fp128-legalize-crash-pr35690.mir Wed Jan 31 14:04:26 2018
@@ -28,17 +28,17 @@ stack:
       di-variable: '', di-expression: '', di-location: '' }
 body:             |
   bb.1.entry:
-    liveins: %q0
+    liveins: $q0
 
     ; This test just checks we don't crash on G_FNEG of FP128 types. Expect to fall
     ; back until support is added for fp128.
     ; CHECK: ret
-    %0:_(s128) = COPY %q0
+    %0:_(s128) = COPY $q0
     %1:_(p0) = G_FRAME_INDEX %stack.0.a.addr
     G_STORE %0(s128), %1(p0) :: (store 16 into %ir.a.addr)
     %2:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.a.addr)
     %3:_(s128) = G_FNEG %2
-    %q0 = COPY %3(s128)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %3(s128)
+    RET_ReallyLR implicit $q0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-bitcast.ll Wed Jan 31 14:04:26 2018
@@ -24,7 +24,7 @@ define i32 @test_bitcast_invalid_vreg()
 
 ; At this point we mapped 46 values. The 'i32 100' constant will grow the map.
 ; CHECK:  %46:_(s32) = G_CONSTANT i32 100
-; CHECK:  %w0 = COPY %46(s32)
+; CHECK:  $w0 = COPY %46(s32)
   %res = bitcast i32 100 to i32
   ret i32 %res
 }

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll Wed Jan 31 14:04:26 2018
@@ -11,24 +11,24 @@ declare i32 @llvm.eh.typeid.for(i8*)
 ; CHECK-NEXT:   bb.1 (%ir-block.0):
 ; CHECK:     successors: %[[GOOD:bb.[0-9]+]]{{.*}}%[[BAD:bb.[0-9]+]]
 ; CHECK:     EH_LABEL
-; CHECK:     %w0 = COPY
-; CHECK:     BL @foo, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %w0, implicit-def %w0
-; CHECK:     {{%[0-9]+}}:_(s32) = COPY %w0
+; CHECK:     $w0 = COPY
+; CHECK:     BL @foo, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit-def $w0
+; CHECK:     {{%[0-9]+}}:_(s32) = COPY $w0
 ; CHECK:     EH_LABEL
 ; CHECK:     G_BR %[[GOOD]]
 
 ; CHECK:   [[BAD]].{{[a-z]+}} (landing-pad):
 ; CHECK:     EH_LABEL
 ; CHECK:     [[UNDEF:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
-; CHECK:     [[PTR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK:     [[PTR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK:     [[VAL_WITH_PTR:%[0-9]+]]:_(s128) = G_INSERT [[UNDEF]], [[PTR]](p0), 0
-; CHECK:     [[SEL_PTR:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK:     [[SEL_PTR:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK:     [[SEL:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]]
 ; CHECK:     [[PTR_SEL:%[0-9]+]]:_(s128) = G_INSERT [[VAL_WITH_PTR]], [[SEL]](s32), 64
 ; CHECK:     [[PTR_RET:%[0-9]+]]:_(s64) = G_EXTRACT [[PTR_SEL]](s128), 0
 ; CHECK:     [[SEL_RET:%[0-9]+]]:_(s32) = G_EXTRACT [[PTR_SEL]](s128), 64
-; CHECK:     %x0 = COPY [[PTR_RET]]
-; CHECK:     %w1 = COPY [[SEL_RET]]
+; CHECK:     $x0 = COPY [[PTR_RET]]
+; CHECK:     $w1 = COPY [[SEL_RET]]
 
 ; CHECK:   [[GOOD]].{{[a-z]+}}:
 ; CHECK:     [[SEL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -49,7 +49,7 @@ continue:
 }
 
 ; CHECK-LABEL: name: test_invoke_indirect
-; CHECK: [[CALLEE:%[0-9]+]]:gpr64(p0) = COPY %x0
+; CHECK: [[CALLEE:%[0-9]+]]:gpr64(p0) = COPY $x0
 ; CHECK: BLR [[CALLEE]]
 define void @test_invoke_indirect(void()* %callee) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
   invoke void %callee() to label %continue unwind label %broken
@@ -68,14 +68,14 @@ continue:
 ; CHECK: [[ANSWER:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
 ; CHECK: [[ONE:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.0
 
-; CHECK: %x0 = COPY [[NULL]]
+; CHECK: $x0 = COPY [[NULL]]
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
 ; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFFSET]](s64)
 ; CHECK: G_STORE [[ANSWER]](s32), [[SLOT]]
 
-; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY %sp
+; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
 ; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; CHECK: [[SLOT:%[0-9]+]]:_(p0) = G_GEP [[SP]], [[OFFSET]](s64)
 ; CHECK: G_STORE [[ONE]](s32), [[SLOT]]

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-add.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-add.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-add.mir Wed Jan 31 14:04:26 2018
@@ -30,29 +30,29 @@
 name:            test_scalar_add_big
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_add_big
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY %x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY %x3
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[C]](s32)
     ; CHECK: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY]], [[COPY2]], [[TRUNC]]
     ; CHECK: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDE1]]
-    ; CHECK: %x0 = COPY [[UADDE]](s64)
-    ; CHECK: %x1 = COPY [[UADDE2]](s64)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(s64) = COPY %x2
-    %3:_(s64) = COPY %x3
+    ; CHECK: $x0 = COPY [[UADDE]](s64)
+    ; CHECK: $x1 = COPY [[UADDE2]](s64)
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %3:_(s64) = COPY $x3
     %4:_(s128) = G_MERGE_VALUES %0, %1
     %5:_(s128) = G_MERGE_VALUES %2, %3
     %6:_(s128) = G_ADD %4, %5
     %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6
-    %x0 = COPY %7
-    %x1 = COPY %8
+    $x0 = COPY %7
+    $x1 = COPY %8
 ...
 
 ---
@@ -70,7 +70,7 @@ registers:
   - { id: 9, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_scalar_add_big_nonpow2
     ; CHECK-NOT: G_MERGE_VALUES
     ; CHECK-NOT: G_UNMERGE_VALUES
@@ -81,71 +81,71 @@ body: |
     ; CHECK: [[RES_HI:%[0-9]+]]:_(s64), {{%.*}}(s1) = G_UADDE %2, %3, [[CARRY2]]
     ; CHECK-NOT: G_MERGE_VALUES
     ; CHECK-NOT: G_UNMERGE_VALUES
-    ; CHECK: %x0 = COPY [[RES_LO]]
-    ; CHECK: %x1 = COPY [[RES_MI]]
-    ; CHECK: %x2 = COPY [[RES_HI]]
-
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
-    %2(s64) = COPY %x2
-    %3(s64) = COPY %x3
+    ; CHECK: $x0 = COPY [[RES_LO]]
+    ; CHECK: $x1 = COPY [[RES_MI]]
+    ; CHECK: $x2 = COPY [[RES_HI]]
+
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
+    %2(s64) = COPY $x2
+    %3(s64) = COPY $x3
     %4(s192) = G_MERGE_VALUES %0, %1, %2
     %5(s192) = G_MERGE_VALUES %1, %2, %3
     %6(s192) = G_ADD %4, %5
     %7(s64), %8(s64), %9(s64) = G_UNMERGE_VALUES %6
-    %x0 = COPY %7
-    %x1 = COPY %8
-    %x2 = COPY %9
+    $x0 = COPY %7
+    $x1 = COPY %8
+    $x2 = COPY %9
 ...
 
 ---
 name:            test_scalar_add_small
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_add_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD]](s32)
-    ; CHECK: %x0 = COPY [[ANYEXT]](s64)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s8) = G_TRUNC %0
     %3:_(s8) = G_TRUNC %1
     %4:_(s8) = G_ADD %2, %3
     %5:_(s64) = G_ANYEXT %4
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...
 
 ---
 name:            test_vector_add
 body: |
   bb.0.entry:
-    liveins: %q0, %q1, %q2, %q3
+    liveins: $q0, $q1, $q2, $q3
 
     ; CHECK-LABEL: name: test_vector_add
-    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY %q0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY %q1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY %q2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY %q3
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $q2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
     ; CHECK: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY]], [[COPY2]]
     ; CHECK: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[COPY1]], [[COPY3]]
-    ; CHECK: %q0 = COPY [[ADD]](<2 x s64>)
-    ; CHECK: %q1 = COPY [[ADD1]](<2 x s64>)
-    %0:_(<2 x s64>) = COPY %q0
-    %1:_(<2 x s64>) = COPY %q1
-    %2:_(<2 x s64>) = COPY %q2
-    %3:_(<2 x s64>) = COPY %q3
+    ; CHECK: $q0 = COPY [[ADD]](<2 x s64>)
+    ; CHECK: $q1 = COPY [[ADD1]](<2 x s64>)
+    %0:_(<2 x s64>) = COPY $q0
+    %1:_(<2 x s64>) = COPY $q1
+    %2:_(<2 x s64>) = COPY $q2
+    %3:_(<2 x s64>) = COPY $q3
     %4:_(<4 x s64>) = G_MERGE_VALUES %0, %1
     %5:_(<4 x s64>) = G_MERGE_VALUES %2, %3
     %6:_(<4 x s64>) = G_ADD %4, %5
     %7:_(<2 x s64>), %8:_(<2 x s64>) = G_UNMERGE_VALUES %6
-    %q0 = COPY %7
-    %q1 = COPY %8
+    $q0 = COPY %7
+    $q1 = COPY %8
 ...
 ---
 name:            test_vector_add_nonpow2
@@ -162,7 +162,7 @@ registers:
   - { id: 9, class: _ }
 body: |
   bb.0.entry:
-    liveins: %q0, %q1, %q2, %q3
+    liveins: $q0, $q1, $q2, $q3
     ; CHECK-LABEL: name: test_vector_add_nonpow2
     ; CHECK-NOT: G_EXTRACT
     ; CHECK-NOT: G_SEQUENCE
@@ -171,19 +171,19 @@ body: |
     ; CHECK: [[RES_HI:%[0-9]+]]:_(<2 x s64>) = G_ADD %2, %3
     ; CHECK-NOT: G_EXTRACT
     ; CHECK-NOT: G_SEQUENCE
-    ; CHECK: %q0 = COPY [[RES_LO]]
-    ; CHECK: %q1 = COPY [[RES_MI]]
-    ; CHECK: %q2 = COPY [[RES_HI]]
-
-    %0(<2 x s64>) = COPY %q0
-    %1(<2 x s64>) = COPY %q1
-    %2(<2 x s64>) = COPY %q2
-    %3(<2 x s64>) = COPY %q3
+    ; CHECK: $q0 = COPY [[RES_LO]]
+    ; CHECK: $q1 = COPY [[RES_MI]]
+    ; CHECK: $q2 = COPY [[RES_HI]]
+
+    %0(<2 x s64>) = COPY $q0
+    %1(<2 x s64>) = COPY $q1
+    %2(<2 x s64>) = COPY $q2
+    %3(<2 x s64>) = COPY $q3
     %4(<6 x s64>) = G_MERGE_VALUES %0, %1, %2
     %5(<6 x s64>) = G_MERGE_VALUES %1, %2, %3
     %6(<6 x s64>) = G_ADD %4, %5
     %7(<2 x s64>), %8(<2 x s64>), %9(<2 x s64>) = G_UNMERGE_VALUES %6
-    %q0 = COPY %7
-    %q1 = COPY %8
-    %q2 = COPY %9
+    $q0 = COPY %7
+    $q1 = COPY %8
+    $q2 = COPY %9
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-and.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-and.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-and.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-and.mir Wed Jan 31 14:04:26 2018
@@ -22,25 +22,25 @@ registers:
   - { id: 6, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_and_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[AND]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[COPY3]](s64)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[COPY3]](s64)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
     %4(s8) = G_AND %2, %3
     %6(s32) = G_ANYEXT %4
-    %w0 = COPY %6
+    $w0 = COPY %6
     %5(s64) = G_ANYEXT %2
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-atomicrmw.mir Wed Jan 31 14:04:26 2018
@@ -14,72 +14,72 @@
 name:            cmpxchg_i8
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CST2:%[0-9]+]]:_(s8) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s8) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST2]] :: (load store monotonic 1 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s8) = G_CONSTANT i8 1
     %2:_(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 1 on %ir.addr)
     %3:_(s32) = G_ANYEXT %2
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
 name:            cmpxchg_i16
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CST2:%[0-9]+]]:_(s16) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s16) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST2]] :: (load store monotonic 2 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]]
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s16) = G_CONSTANT i16 1
     %2:_(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 2 on %ir.addr)
     %3:_(s32) = G_ANYEXT %2
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
 name:            cmpxchg_i32
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST]] :: (load store monotonic 4 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 1
     %2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 4 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
 name:            cmpxchg_i64
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[CST]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 1
     %2:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir Wed Jan 31 14:04:26 2018
@@ -30,13 +30,13 @@ registers:
   - { id: 14, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_icmp
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sge), [[COPY]](s64), [[COPY1]]
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
@@ -45,27 +45,27 @@ body: |
     ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C1]]
     ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND]](s32), [[AND1]]
     ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
-    ; CHECK: %w0 = COPY [[COPY3]](s32)
+    ; CHECK: $w0 = COPY [[COPY3]](s32)
     ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64)
     ; CHECK: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[INTTOPTR]](p0), [[INTTOPTR]]
     ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP2]](s32)
-    ; CHECK: %w0 = COPY [[COPY4]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY4]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x0
 
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
 
     %4(s1) = G_ICMP intpred(sge), %0, %1
     %11(s32) = G_ANYEXT %4
-    %w0 = COPY %11
+    $w0 = COPY %11
 
     %8(s1) = G_ICMP intpred(ult), %2, %3
     %12(s32) = G_ANYEXT %8
-    %w0 = COPY %12
+    $w0 = COPY %12
 
     %9(p0) = G_INTTOPTR %0(s64)
     %10(s1) = G_ICMP intpred(eq), %9(p0), %9(p0)
     %14(s32) = G_ANYEXT %10
-    %w0 = COPY %14
+    $w0 = COPY %14
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg-with-success.mir Wed Jan 31 14:04:26 2018
@@ -13,24 +13,24 @@ name:            cmpxchg_i32
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
     ; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s32), [[CMP]]
     ; CHECK: [[SRES32:%[0-9]+]]:_(s32) = COPY [[SRES]]
     ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[RES]], [[SRES32]]
-    ; CHECK: %w0 = COPY [[MUL]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[MUL]]
+    %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(s32) = G_CONSTANT i32 1
     %3:_(s32), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
     %5:_(s32) = G_ANYEXT %4
     %6:_(s32) = G_MUL %3, %5
-    %w0 = COPY %6(s32)
+    $w0 = COPY %6(s32)
 ...
 
 ---
@@ -38,22 +38,22 @@ name:            cmpxchg_i64
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
     ; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s64), [[CMP]]
     ; CHECK: [[SRES64:%[0-9]+]]:_(s64) = G_ANYEXT [[SRES]]
     ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[RES]], [[SRES64]]
-    ; CHECK: %x0 = COPY [[MUL]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[MUL]]
+    %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 0
     %2:_(s64) = G_CONSTANT i64 1
     %3:_(s64), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
     %5:_(s64) = G_ANYEXT %4
     %6:_(s64) = G_MUL %3, %5
-    %x0 = COPY %6(s64)
+    $x0 = COPY %6(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-cmpxchg.mir Wed Jan 31 14:04:26 2018
@@ -14,82 +14,82 @@
 name:            cmpxchg_i8
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CMPT:%[0-9]+]]:_(s8) = G_TRUNC [[CMP]]
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CSTT:%[0-9]+]]:_(s8) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s8) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMPT]], [[CSTT]] :: (load store monotonic 1 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]](s8)
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s8) = G_CONSTANT i8 0
     %2:_(s8) = G_CONSTANT i8 1
     %3:_(s8) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 1 on %ir.addr)
     %4:_(s32) = G_ANYEXT %3
-    %w0 = COPY %4(s32)
+    $w0 = COPY %4(s32)
 ...
 
 ---
 name:            cmpxchg_i16
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CMPT:%[0-9]+]]:_(s16) = G_TRUNC [[CMP]]
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[CSTT:%[0-9]+]]:_(s16) = G_TRUNC [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:_(s16) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMPT]], [[CSTT]] :: (load store monotonic 2 on %ir.addr)
     ; CHECK: [[RES2:%[0-9]+]]:_(s32) = G_ANYEXT [[RES]](s16)
-    ; CHECK: %w0 = COPY [[RES2]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES2]]
+    %0:_(p0) = COPY $x0
     %1:_(s16) = G_CONSTANT i16 0
     %2:_(s16) = G_CONSTANT i16 1
     %3:_(s16) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 2 on %ir.addr)
     %4:_(s32) = G_ANYEXT %3
-    %w0 = COPY %4(s32)
+    $w0 = COPY %4(s32)
 ...
 
 ---
 name:            cmpxchg_i32
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 4 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(s32) = G_CONSTANT i32 1
     %3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 4 on %ir.addr)
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
 name:            cmpxchg_i64
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:_(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:_(p0) = COPY $x0
     %1:_(s64) = G_CONSTANT i64 0
     %2:_(s64) = G_CONSTANT i64 1
     %3:_(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %3(s64)
+    $x0 = COPY %3(s64)
 ...
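
Beyond the sigil swap, the CHECK lines in the two atomic test files above pin
down how sub-word atomics are legalized: the i8/i16 immediates are
materialized as s32 G_CONSTANTs and truncated, the atomic operation itself
stays at the narrow width, and its result is any-extended to s32 before the
return copy. Distilled from the cmpxchg_i8 case (vreg numbers illustrative):

    %0:_(p0) = COPY $x0
    %1:_(s32) = G_CONSTANT i32 0
    %2:_(s8) = G_TRUNC %1
    %3:_(s32) = G_CONSTANT i32 1
    %4:_(s8) = G_TRUNC %3
    %5:_(s8) = G_ATOMIC_CMPXCHG %0, %2, %4 :: (load store monotonic 1 on %ir.addr)
    %6:_(s32) = G_ANYEXT %5(s8)
    $w0 = COPY %6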

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir Wed Jan 31 14:04:26 2018
@@ -14,78 +14,78 @@
 name:            test_combines_2
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; Here the types don't match.
     ; CHECK-LABEL: name: test_combines_2
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
     ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ADD]](s32)
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s1) = G_EXTRACT [[MV]](s64), 0
     ; CHECK: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[MV]](s64), 0
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0
     %2:_(s64) = G_MERGE_VALUES %0, %1
     %3:_(s1) = G_EXTRACT %2, 0
     %5:_(s32) = G_ANYEXT %3
-    %w0 = COPY %5
+    $w0 = COPY %5
     %4:_(s64) = G_EXTRACT %2, 0
-    %x0 = COPY %4
+    $x0 = COPY %4
 ...
 
 ---
 name:            test_combines_3
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: test_combines_3
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
     ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0
     %2:_(s64) = G_MERGE_VALUES %0, %1
     %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2
     %5:_(s32) = G_ADD %3, %4
-    %w0 = COPY %5
+    $w0 = COPY %5
 ...
 
 ---
 name:            test_combines_4
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: test_combines_4
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY1]]
-    %0:_(s64) = COPY %x0
+    %0:_(s64) = COPY $x0
 
     %1:_(s128) = G_MERGE_VALUES %0, %0
     %2:_(s64) = G_EXTRACT %1, 0
     %3:_(s64) = G_ADD %2, %2
-    %w0 = COPY %3
+    $w0 = COPY %3
 ...
 
 ---
 name:            test_combines_5
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: test_combines_5
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
     ; CHECK: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[ADD]]
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
 
     %1:_(s32) = G_ADD %0, %0
     %2:_(s64) = G_MERGE_VALUES %0, %1
     %3:_(s32), %4:_(s32) = G_UNMERGE_VALUES %2
     %5:_(s32) = G_ADD %3, %4
-    %w0 = COPY %5
+    $w0 = COPY %5
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-constant.mir Wed Jan 31 14:04:26 2018
@@ -31,34 +31,34 @@ body: |
     ; CHECK-LABEL: name: test_constant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; CHECK: %w0 = COPY [[COPY]](s32)
+    ; CHECK: $w0 = COPY [[COPY]](s32)
     ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK: %w0 = COPY [[C3]](s32)
+    ; CHECK: $w0 = COPY [[C3]](s32)
     ; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK: %x0 = COPY [[C4]](s64)
+    ; CHECK: $x0 = COPY [[C4]](s64)
     ; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK: %x0 = COPY [[C5]](s64)
+    ; CHECK: $x0 = COPY [[C5]](s64)
     %0(s1) = G_CONSTANT i1 0
     %6:_(s32) = G_ANYEXT %0
-    %w0 = COPY %6
+    $w0 = COPY %6
     %1(s8) = G_CONSTANT i8 42
     %7:_(s32) = G_ANYEXT %1
-    %w0 = COPY %7
+    $w0 = COPY %7
     %2(s16) = G_CONSTANT i16 65535
     %8:_(s32) = G_ANYEXT %2
-    %w0 = COPY %8
+    $w0 = COPY %8
     %3(s32) = G_CONSTANT i32 -1
-    %w0 = COPY %3
+    $w0 = COPY %3
     %4(s64) = G_CONSTANT i64 1
-    %x0 = COPY %4
+    $x0 = COPY %4
     %5(s64) = G_CONSTANT i64 0
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...
 
 ---
@@ -72,20 +72,20 @@ body: |
 
     ; CHECK-LABEL: name: test_fconstant
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
-    ; CHECK: %w0 = COPY [[C]](s32)
+    ; CHECK: $w0 = COPY [[C]](s32)
     ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
-    ; CHECK: %x0 = COPY [[C1]](s64)
+    ; CHECK: $x0 = COPY [[C1]](s64)
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
     ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[C2]](s32)
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC]](s16)
-    ; CHECK: %w0 = COPY [[ANYEXT]](s32)
+    ; CHECK: $w0 = COPY [[ANYEXT]](s32)
     %0(s32) = G_FCONSTANT float 1.0
-    %w0 = COPY %0
+    $w0 = COPY %0
     %1(s64) = G_FCONSTANT double 2.0
-    %x0 = COPY %1
+    $x0 = COPY %1
     %2(s16) = G_FCONSTANT half 0.0
     %3:_(s32) = G_ANYEXT %2
-    %w0 = COPY %3
+    $w0 = COPY %3
 ...
 
 ---
@@ -98,8 +98,8 @@ body: |
     ; CHECK-LABEL: name: test_global
     ; CHECK: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var
     ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[GV]](p0)
-    ; CHECK: %x0 = COPY [[PTRTOINT]](s64)
+    ; CHECK: $x0 = COPY [[PTRTOINT]](s64)
     %0(p0) = G_GLOBAL_VALUE @var
     %1:_(s64) = G_PTRTOINT %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-div.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-div.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-div.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-div.mir Wed Jan 31 14:04:26 2018
@@ -21,10 +21,10 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_div
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]]
@@ -35,7 +35,7 @@ body: |
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]]
     ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]]
@@ -44,20 +44,20 @@ body: |
     ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]]
     ; CHECK: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[AND]], [[AND1]]
     ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
-    ; CHECK: %w0 = COPY [[COPY3]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $w0 = COPY [[COPY3]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
 
 
     %4(s8) = G_SDIV %2, %3
     %6:_(s32) = G_ANYEXT %4
-    %w0 = COPY %6
+    $w0 = COPY %6
 
 
     %5(s8) = G_UDIV %2, %3
     %7:_(s32) = G_ANYEXT %5
-    %w0 = COPY %7
+    $w0 = COPY %7
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll Wed Jan 31 14:04:26 2018
@@ -15,10 +15,10 @@ declare void @_Unwind_Resume(i8*)
 ; CHECK:   [[LP]].{{[a-z]+}} (landing-pad):
 ; CHECK:     EH_LABEL
 
-; CHECK:     [[PTR:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK:     [[PTR:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK:     [[STRUCT_PTR:%[0-9]+]]:_(s64) = G_PTRTOINT [[PTR]](p0)
 
-; CHECK:     [[SEL_PTR:%[0-9]+]]:_(p0) = COPY %x1
+; CHECK:     [[SEL_PTR:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK:     [[SEL:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]]
 ; CHECK:     [[STRUCT_SEL:%[0-9]+]]:_(s64) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 0
 

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ext.mir Wed Jan 31 14:04:26 2018
@@ -34,110 +34,110 @@ registers:
   - { id: 18, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_ext
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC]](s32)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC1]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC1]](s32)
     ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC2]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC2]](s32)
     ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC3]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC3]](s32)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[COPY1]](s64)
+    ; CHECK: $x0 = COPY [[COPY1]](s64)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
     ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[C]]
-    ; CHECK: %x0 = COPY [[AND]](s64)
+    ; CHECK: $x0 = COPY [[AND]](s64)
     ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[COPY3]](s64)
+    ; CHECK: $x0 = COPY [[COPY3]](s64)
     ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
     ; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
     ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C1]]
     ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]]
-    ; CHECK: %x0 = COPY [[ASHR]](s64)
+    ; CHECK: $x0 = COPY [[ASHR]](s64)
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[C2]]
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C2]]
-    ; CHECK: %w0 = COPY [[ASHR1]](s32)
+    ; CHECK: $w0 = COPY [[ASHR1]](s32)
     ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C3]]
-    ; CHECK: %w0 = COPY [[AND1]](s32)
+    ; CHECK: $w0 = COPY [[AND1]](s32)
     ; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC6]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC6]](s32)
     ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C4]]
-    ; CHECK: %w0 = COPY [[AND2]](s32)
+    ; CHECK: $w0 = COPY [[AND2]](s32)
     ; CHECK: [[TRUNC8:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC8]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC8]](s32)
     ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[TRUNC9:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[TRUNC9]], [[C5]]
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C5]]
-    ; CHECK: %w0 = COPY [[ASHR2]](s32)
+    ; CHECK: $w0 = COPY [[ASHR2]](s32)
     ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[TRUNC10:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC3]]4(s32)
     ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]]1, [[TRUNC3]]2
     ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[TRUNC3]]3(s32)
-    ; CHECK: %w0 = COPY [[COPY6]](s32)
+    ; CHECK: $w0 = COPY [[COPY6]](s32)
     ; CHECK: [[TRUNC11:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC11]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC11]](s32)
     ; CHECK: [[TRUNC12:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[TRUNC12]](s32)
+    ; CHECK: $w0 = COPY [[TRUNC12]](s32)
     ; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[TRUNC12]](s32)
-    ; CHECK: %x0 = COPY [[FPEXT]](s64)
-    %0(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[FPEXT]](s64)
+    %0(s64) = COPY $x0
 
     %1(s1) = G_TRUNC %0
     %19:_(s32) = G_ANYEXT %1
-    %w0 = COPY %19
+    $w0 = COPY %19
     %2(s8) = G_TRUNC %0
     %20:_(s32) = G_ANYEXT %2
-    %w0 = COPY %20
+    $w0 = COPY %20
     %3(s16) = G_TRUNC %0
     %21:_(s32) = G_ANYEXT %3
-    %w0 = COPY %21
+    $w0 = COPY %21
     %4(s32) = G_TRUNC %0
-    %w0 = COPY %4
+    $w0 = COPY %4
 
     %5(s64) = G_ANYEXT %1
-    %x0 = COPY %5
+    $x0 = COPY %5
     %6(s64) = G_ZEXT %2
-    %x0 = COPY %6
+    $x0 = COPY %6
     %7(s64) = G_ANYEXT %3
-    %x0 = COPY %7
+    $x0 = COPY %7
     %8(s64) = G_SEXT %4
-    %x0 = COPY %8
+    $x0 = COPY %8
 
     %9(s32) = G_SEXT %1
-    %w0 = COPY %9
+    $w0 = COPY %9
     %10(s32) = G_ZEXT %2
-    %w0 = COPY %10
+    $w0 = COPY %10
     %11(s32) = G_ANYEXT %3
-    %w0 = COPY %11
+    $w0 = COPY %11
 
     %12(s32) = G_ZEXT %1
-    %w0 = COPY %12
+    $w0 = COPY %12
     %13(s32) = G_ANYEXT %2
-    %w0 = COPY %13
+    $w0 = COPY %13
     %14(s32) = G_SEXT %3
-    %w0 = COPY %14
+    $w0 = COPY %14
 
     %15(s8) = G_ZEXT %1
     %22:_(s32) = G_ANYEXT %15
-    %w0 = COPY %22
+    $w0 = COPY %22
     %16(s16) = G_ANYEXT %2
     %23:_(s32) = G_ANYEXT %16
-    %w0 = COPY %23
+    $w0 = COPY %23
 
     %17(s32) = G_TRUNC  %0
-    %w0 = COPY %17
+    $w0 = COPY %17
     %18(s64) = G_FPEXT %17
-    %x0 = COPY %18
+    $x0 = COPY %18
 ...
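
One legalization idiom worth calling out in legalize-ext.mir: G_SEXT from a
narrow type is expanded into a shift pair. The s32-in-s64 case from the CHECK
lines above, sketched with illustrative vreg numbers:

    %0:_(s64) = COPY $x0
    %1:_(s64) = G_CONSTANT i64 32
    %2:_(s64) = G_SHL %0, %1   ; move the 32-bit payload into the high half
    %3:_(s64) = G_ASHR %2, %1  ; shift back arithmetically, replicating the sign bit
    $x0 = COPY %3(s64)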

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir Wed Jan 31 14:04:26 2018
@@ -5,13 +5,13 @@
 name:            test_extracts_1
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; Low part of the extraction takes the low register in its entirety, so the
     ; value stored is forwarded directly from the first load.
 
     ; CHECK-LABEL: name: test_extracts_1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x2
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
     ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 16)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
@@ -19,9 +19,9 @@ body: |
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[LOAD]](s64)
     ; CHECK: G_STORE [[COPY1]](s64), [[COPY]](p0) :: (store 8)
     ; CHECK: RET_ReallyLR
-    %0:_(s64) = COPY %x0
-    %1:_(s32) = COPY %w1
-    %2:_(p0) = COPY %x2
+    %0:_(s64) = COPY $x0
+    %1:_(s32) = COPY $w1
+    %2:_(p0) = COPY $x2
     %3:_(s128) = G_LOAD %2(p0) :: (load 16)
     %4:_(s64) = G_EXTRACT %3(s128), 0
     G_STORE %4(s64), %2(p0) :: (store 8)
@@ -32,11 +32,11 @@ body: |
 name:            test_extracts_2
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
      ; Low extraction takes the whole low register. High extraction is real.
     ; CHECK-LABEL: name: test_extracts_2
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x2
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x2
     ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 16)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
@@ -47,9 +47,9 @@ body: |
     ; CHECK: G_STORE [[COPY1]](s64), [[COPY]](p0) :: (store 8)
     ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 4)
     ; CHECK: RET_ReallyLR
-    %0:_(s64) = COPY %x0
-    %1:_(s32) = COPY %w1
-    %2:_(p0) = COPY %x2
+    %0:_(s64) = COPY $x0
+    %1:_(s32) = COPY $w1
+    %2:_(p0) = COPY $x2
     %3:_(s128) = G_LOAD %2(p0) :: (load 16)
     %4:_(s64) = G_EXTRACT %3(s128), 0
     %5:_(s32) = G_EXTRACT %3(s128), 64
@@ -62,22 +62,22 @@ body: |
 name:            test_extracts_3
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
 
     ; CHECK-LABEL: name: test_extracts_3
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](s64), 32
     ; CHECK: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](s64), 0
     ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[EXTRACT]](s32), [[EXTRACT1]](s32)
-    ; CHECK: %x0 = COPY [[MV]](s64)
+    ; CHECK: $x0 = COPY [[MV]](s64)
     ; CHECK: RET_ReallyLR
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s128) = G_MERGE_VALUES %0, %1
     %3:_(s64) = G_EXTRACT %2, 32
-    %x0 = COPY %3
+    $x0 = COPY %3
     RET_ReallyLR
 ...
 
@@ -85,19 +85,19 @@ body: |
 name:            test_extracts_4
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
 
     ; CHECK-LABEL: name: test_extracts_4
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](s64), 32
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[EXTRACT]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
     ; CHECK: RET_ReallyLR
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s128) = G_MERGE_VALUES %0, %1
     %3:_(s32) = G_EXTRACT %2, 32
-    %w0 = COPY %3
+    $w0 = COPY %3
     RET_ReallyLR
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fcmp.mir Wed Jan 31 14:04:26 2018
@@ -23,25 +23,25 @@ registers:
   - { id: 7, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_icmp
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[FCMP:%[0-9]+]]:_(s32) = G_FCMP floatpred(oge), [[COPY]](s64), [[COPY1]]
-    ; CHECK: %w0 = COPY [[FCMP]](s32)
+    ; CHECK: $w0 = COPY [[FCMP]](s32)
     ; CHECK: [[FCMP1:%[0-9]+]]:_(s32) = G_FCMP floatpred(uno), [[TRUNC]](s32), [[TRUNC1]]
-    ; CHECK: %w0 = COPY [[FCMP1]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[FCMP1]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x0
 
     %2(s32) = G_TRUNC %0
     %3(s32) = G_TRUNC %1
 
     %4(s32) = G_FCMP floatpred(oge), %0, %1
-    %w0 = COPY %4
+    $w0 = COPY %4
 
     %5(s32) = G_FCMP floatpred(uno), %2, %3
-    %w0 = COPY %5
+    $w0 = COPY %5
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fneg.mir Wed Jan 31 14:04:26 2018
@@ -20,15 +20,15 @@ registers:
   - { id: 1, class: _ }
 body:             |
   bb.1:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fneg_f32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $s0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float -0.000000e+00
     ; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[C]], [[COPY]]
-    ; CHECK: %s0 = COPY [[FSUB]](s32)
-    %0(s32) = COPY %s0
+    ; CHECK: $s0 = COPY [[FSUB]](s32)
+    %0(s32) = COPY $s0
     %1(s32) = G_FNEG %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 ---
 name:            test_fneg_f64
@@ -37,13 +37,13 @@ registers:
   - { id: 1, class: _ }
 body:             |
   bb.1:
-    liveins: %d0
+    liveins: $d0
     ; CHECK-LABEL: name: test_fneg_f64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $d0
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double -0.000000e+00
     ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[C]], [[COPY]]
-    ; CHECK: %d0 = COPY [[FSUB]](s64)
-    %0(s64) = COPY %d0
+    ; CHECK: $d0 = COPY [[FSUB]](s64)
+    %0(s64) = COPY $d0
     %1(s64) = G_FNEG %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
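
legalize-fneg.mir likewise documents the lowering of G_FNEG: it becomes a
G_FSUB from negative zero. The f32 case from the CHECK lines, sketched:

    %0:_(s32) = COPY $s0
    %1:_(s32) = G_FCONSTANT float -0.000000e+00
    %2:_(s32) = G_FSUB %1, %0   ; -0.0 - x computes the negation
    $s0 = COPY %2(s32)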

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-fptoi.mir Wed Jan 31 14:04:26 2018
@@ -29,112 +29,112 @@
 name:            test_fptosi_s32_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s32_s32
     ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
-    ; CHECK: %w0 = COPY [[FPTOSI]](s32)
+    ; CHECK: $w0 = COPY [[FPTOSI]](s32)
     %0:_(s32) = G_IMPLICIT_DEF
     %1:_(s32) = G_FPTOSI %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_fptoui_s32_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s32_s32
     ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[DEF]](s32)
-    ; CHECK: %w0 = COPY [[FPTOUI]](s32)
+    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
     %0:_(s32) = G_IMPLICIT_DEF
     %1:_(s32) = G_FPTOUI %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_fptosi_s32_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_fptosi_s32_s64
     ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[DEF]](s32)
-    ; CHECK: %w0 = COPY [[FPTOSI]](s32)
+    ; CHECK: $w0 = COPY [[FPTOSI]](s32)
     %0:_(s32) = G_IMPLICIT_DEF
     %1:_(s32) = G_FPTOSI %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_fptoui_s32_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_fptoui_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64)
-    ; CHECK: %w0 = COPY [[FPTOUI]](s32)
-    %0:_(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[FPTOUI]](s32)
+    %0:_(s64) = COPY $x0
     %1:_(s32) = G_FPTOUI %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_fptosi_s64_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s32)
-    ; CHECK: %x0 = COPY [[FPTOSI]](s64)
-    %0:_(s32) = COPY %w0
+    ; CHECK: $x0 = COPY [[FPTOSI]](s64)
+    %0:_(s32) = COPY $w0
     %1:_(s64) = G_FPTOSI %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 ---
 name:            test_fptoui_s64_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s32)
-    ; CHECK: %x0 = COPY [[FPTOUI]](s64)
-    %0:_(s32) = COPY %w0
+    ; CHECK: $x0 = COPY [[FPTOUI]](s64)
+    %0:_(s32) = COPY $w0
     %1:_(s64) = G_FPTOUI %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 ---
 name:            test_fptosi_s64_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_fptosi_s64_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s64) = G_FPTOSI [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[FPTOSI]](s64)
-    %0:_(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[FPTOSI]](s64)
+    %0:_(s64) = COPY $x0
     %1:_(s64) = G_FPTOSI %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 ---
 name:            test_fptoui_s64_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_fptoui_s64_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s64) = G_FPTOUI [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[FPTOUI]](s64)
-    %0:_(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[FPTOUI]](s64)
+    %0:_(s64) = COPY $x0
     %1:_(s64) = G_FPTOUI %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 
@@ -143,93 +143,93 @@ body: |
 name:            test_fptosi_s1_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s1_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[FPTOSI]](s32)
-    ; CHECK: %x0 = COPY [[TRUNC]](s1)
-    %0:_(s32) = COPY %w0
+    ; CHECK: $x0 = COPY [[TRUNC]](s1)
+    %0:_(s32) = COPY $w0
     %1:_(s1) = G_FPTOSI %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 ---
 name:            test_fptoui_s1_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s1_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOUI]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
-    %0:_(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
+    %0:_(s32) = COPY $w0
     %1:_(s1) = G_FPTOUI %0
     %2:_(s32) = G_ANYEXT %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_fptosi_s8_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_fptosi_s8_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s64)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOSI]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
-    %0:_(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
+    %0:_(s64) = COPY $x0
     %1:_(s8) = G_FPTOSI %0
     %2:_(s32) = G_ANYEXT %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_fptoui_s8_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_fptoui_s8_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s64)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOUI]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
-    %0:_(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
+    %0:_(s64) = COPY $x0
     %1:_(s8) = G_FPTOUI %0
     %2:_(s32) = G_ANYEXT %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_fptosi_s16_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptosi_s16_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY]](s32)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOSI]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
-    %0:_(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
+    %0:_(s32) = COPY $w0
     %1:_(s16) = G_FPTOSI %0
     %2:_(s32) = G_ANYEXT %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_fptoui_s16_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_fptoui_s16_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY]](s32)
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[FPTOUI]](s32)
-    ; CHECK: %w0 = COPY [[COPY1]](s32)
-    %0:_(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY1]](s32)
+    %0:_(s32) = COPY $w0
     %1:_(s16) = G_FPTOUI %0
     %2:_(s32) = G_ANYEXT %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-gep.mir Wed Jan 31 14:04:26 2018
@@ -19,20 +19,20 @@ registers:
   - { id: 3, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_gep_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 56
     ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[COPY1]](s64)
     ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY2]], [[C]]
     ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]]
     ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[ASHR]](s64)
-    ; CHECK: %x0 = COPY [[GEP]](p0)
-    %0(p0) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[GEP]](p0)
+    %0(p0) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %1
     %3(p0) = G_GEP %0, %2(s8)
-    %x0 = COPY %3
+    $x0 = COPY %3
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-ignore-non-generic.mir Wed Jan 31 14:04:26 2018
@@ -14,13 +14,13 @@ registers:
   - { id: 0, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: test_copy
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: %x0 = COPY [[COPY]](s64)
-    %0(s64) = COPY %x0
-    %x0 = COPY %0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: $x0 = COPY [[COPY]](s64)
+    %0(s64) = COPY $x0
+    $x0 = COPY %0
 ...
 
 ---

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir Wed Jan 31 14:04:26 2018
@@ -16,7 +16,7 @@
 name:            test_inserts_1
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
       ; Low part of insertion wipes out the old register entirely, so %0 gets
       ; forwarded to the G_STORE. Hi part is unchanged so (split) G_LOAD gets
@@ -26,9 +26,9 @@ body: |
     ; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD
     ; CHECK: G_STORE %0(s64)
     ; CHECK: G_STORE [[HI]]
-    %0:_(s64) = COPY %x0
-    %1:_(s32) = COPY %w1
-    %2:_(p0) = COPY %x2
+    %0:_(s64) = COPY $x0
+    %1:_(s32) = COPY $w1
+    %2:_(p0) = COPY $x2
     %3:_(s128) = G_LOAD %2(p0) :: (load 16)
     %4:_(s128) = G_INSERT %3(s128), %0(s64), 0
     G_STORE %4(s128), %2(p0) :: (store 16)
@@ -39,7 +39,7 @@ body: |
 name:            test_inserts_2
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
       ; Low insertion wipes out the old register entirely, so %0 gets forwarded
       ; to the G_STORE again. Second insertion is real.
@@ -49,9 +49,9 @@ body: |
     ; CHECK: [[NEWHI:%[0-9]+]]:_(s64) = G_INSERT [[HI]], %1(s32), 0
     ; CHECK: G_STORE %0(s64)
     ; CHECK: G_STORE [[NEWHI]]
-    %0:_(s64) = COPY %x0
-    %1:_(s32) = COPY %w1
-    %2:_(p0) = COPY %x2
+    %0:_(s64) = COPY $x0
+    %1:_(s32) = COPY $w1
+    %2:_(p0) = COPY $x2
     %3:_(s128) = G_LOAD %2(p0) :: (load 16)
     %4:_(s128) = G_INSERT %3(s128), %0(s64), 0
     %5:_(s128) = G_INSERT %4(s128), %1(s32), 64
@@ -63,7 +63,7 @@ body: |
 name:            test_inserts_3
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
       ; I'm not entirely convinced inserting a p0 into an s64 is valid, but it's
       ; certainly better than the alternative of directly forwarding the value
@@ -74,9 +74,9 @@ body: |
     ; CHECK: [[NEWLO:%[0-9]+]]:_(s64) = G_PTRTOINT %0(p0)
     ; CHECK: G_STORE [[NEWLO]](s64)
     ; CHECK: G_STORE [[HI]]
-    %0:_(p0) = COPY %x0
-    %1:_(s32) = COPY %w1
-    %2:_(p0) = COPY %x2
+    %0:_(p0) = COPY $x0
+    %1:_(s32) = COPY $w1
+    %2:_(p0) = COPY $x2
     %3:_(s128) = G_LOAD %2(p0) :: (load 16)
     %4:_(s128) = G_INSERT %3(s128), %0(p0), 0
     G_STORE %4(s128), %2(p0) :: (store 16)
@@ -87,18 +87,18 @@ body: |
 name:            test_inserts_4
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
       ; A narrow insert gets surrounded by a G_ANYEXT/G_TRUNC pair.
     ; CHECK-LABEL: name: test_inserts_4
     ; CHECK: [[VALEXT:%[0-9]+]]:_(s32) = COPY %2(s32)
     ; CHECK: [[VAL:%[0-9]+]]:_(s32) = G_INSERT [[VALEXT]], %1(s1), 0
     ; CHECK: %5:_(s8) = G_TRUNC [[VAL]](s32)
-    %4:_(s32) = COPY %w0
+    %4:_(s32) = COPY $w0
     %0:_(s1) = G_TRUNC %4
-    %5:_(s32) = COPY %w1
+    %5:_(s32) = COPY $w1
     %1:_(s8) = G_TRUNC %5
-    %2:_(p0) = COPY %x2
+    %2:_(p0) = COPY $x2
     %3:_(s8) = G_INSERT %1(s8), %0(s1), 0
     G_STORE %3(s8), %2(p0) :: (store 1)
     RET_ReallyLR
@@ -108,7 +108,7 @@ body: |
 name:            test_inserts_5
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
 
     ; CHECK-LABEL: name: test_inserts_5
@@ -117,13 +117,13 @@ body: |
     ; CHECK: [[INS_HI:%[0-9]+]]:_(s32) = G_EXTRACT %2(s64), 32
     ; CHECK: [[VAL_HI:%[0-9]+]]:_(s64) = G_INSERT %1, [[INS_HI]](s32), 0
     ; CHECK: %4:_(s128) = G_MERGE_VALUES [[VAL_LO]](s64), [[VAL_HI]](s64)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(s64) = COPY %x2
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
     %3:_(s128) = G_MERGE_VALUES %0, %1
     %4:_(s128) = G_INSERT %3, %2, 32
     %5:_(s64) = G_TRUNC %4
-    %x0 = COPY %5
+    $x0 = COPY %5
     RET_ReallyLR
 ...
 
@@ -131,19 +131,19 @@ body: |
 name:            test_inserts_6
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
 
     ; CHECK-LABEL: name: test_inserts_6
     ; CHECK: [[VAL_LO:%[0-9]+]]:_(s64) = G_INSERT %0, %2(s32), 32
     ; CHECK: %4:_(s128) = G_MERGE_VALUES [[VAL_LO]](s64), %1(s64)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(s32) = COPY %w2
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s32) = COPY $w2
     %3:_(s128) = G_MERGE_VALUES %0, %1
     %4:_(s128) = G_INSERT %3, %2, 32
     %5:_(s64) = G_TRUNC %4
-    %x0 = COPY %5
+    $x0 = COPY %5
     RET_ReallyLR
 ...
 
@@ -151,19 +151,19 @@ body: |
 name:            test_inserts_nonpow2
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
 
     ; CHECK-LABEL: name: test_inserts_nonpow2
-    ; CHECK: [[C:%[0-9]+]]:_(s64) = COPY %x3
-    ; CHECK: %x0 = COPY [[C]]
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(s64) = COPY %x2
-    %3:_(s64) = COPY %x3
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = COPY $x3
+    ; CHECK: $x0 = COPY [[C]]
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %3:_(s64) = COPY $x3
     %4:_(s192) = G_MERGE_VALUES %0, %1, %2
     %5:_(s192) = G_INSERT %4, %3, 0
     %6:_(s64), %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %5
-    %x0 = COPY %6
+    $x0 = COPY %6
     RET_ReallyLR
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir Wed Jan 31 14:04:26 2018
@@ -29,104 +29,104 @@
 name:            test_sitofp_s32_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s32_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s32) = G_SITOFP %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_uitofp_s32_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_uitofp_s32_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s32) = G_UITOFP %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_sitofp_s32_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_sitofp_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY]](s64)
-    %0:_(s64) = COPY %x0
+    %0:_(s64) = COPY $x0
     %1:_(s32) = G_SITOFP %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_uitofp_s32_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_uitofp_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY]](s64)
-    %0:_(s64) = COPY %x0
+    %0:_(s64) = COPY $x0
     %1:_(s32) = G_UITOFP %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_sitofp_s64_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s64) = G_SITOFP %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
 name:            test_uitofp_s64_s32
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_uitofp_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[COPY]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s64) = G_UITOFP %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 ---
 name:            test_sitofp_s64_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_sitofp_s64_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[COPY]](s64)
-    %0:_(s64) = COPY %x0
+    %0:_(s64) = COPY $x0
     %1:_(s64) = G_SITOFP %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 ---
 name:            test_uitofp_s64_s64
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_uitofp_s64_s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[COPY]](s64)
-    %0:_(s64) = COPY %x0
+    %0:_(s64) = COPY $x0
     %1:_(s64) = G_UITOFP %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...
 
 
@@ -134,103 +134,103 @@ body: |
 name:            test_sitofp_s32_s1
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s32_s1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s1) = G_TRUNC %0
     %2:_(s32) = G_SITOFP %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_uitofp_s32_s1
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_uitofp_s32_s1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[AND]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s1) = G_TRUNC %0
     %2:_(s32) = G_UITOFP %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_sitofp_s64_s8
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s64_s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s8) = G_TRUNC %0
     %2:_(s64) = G_SITOFP %1
-    %x0 = COPY %2
+    $x0 = COPY %2
 ...
 
 ---
 name:            test_uitofp_s64_s8
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_uitofp_s64_s8
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s64) = G_UITOFP [[AND]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s8) = G_TRUNC %0
     %2:_(s64) = G_UITOFP %1
-    %x0 = COPY %2
+    $x0 = COPY %2
 ...
 
 ---
 name:            test_sitofp_s32_s16
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s32_s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
     ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
     ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s16) = G_TRUNC %0
     %2:_(s32) = G_SITOFP %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
 
 ---
 name:            test_uitofp_s32_s16
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_uitofp_s32_s16
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C]]
     ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[AND]](s32)
-    %0:_(s32) = COPY %w0
+    %0:_(s32) = COPY $w0
     %1:_(s16) = G_TRUNC %0
     %2:_(s32) = G_UITOFP %1
-    %w0 = COPY %2
+    $w0 = COPY %2
 ...
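
Only the sigils change in these itofp tests; the legalization itself is untouched. Narrow unsigned sources are widened with an AND mask (1 for s1, 255 = 2^8 - 1 for s8, 65535 = 2^16 - 1 for s16), and narrow signed sources with a shift-left/arithmetic-shift-right pair whose shift amount is 32 minus the source width (31 for s1, 24 for s8, 16 for s16). Assembled from the CHECK lines above, the unsigned s8-to-s64 case now reads roughly:

    %0:_(s32) = COPY $w0
    %1:_(s32) = G_CONSTANT i32 255
    %2:_(s32) = G_AND %0, %1
    %3:_(s64) = G_UITOFP %2(s32)
    $x0 = COPY %3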

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir Wed Jan 31 14:04:26 2018
@@ -27,40 +27,40 @@ registers:
   - { id: 8, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_load
-    %0(p0) = COPY %x0
+    %0(p0) = COPY $x0
 
     %1(s1) = G_LOAD %0 :: (load 1 from %ir.addr)
     %9:_(s32) = G_ANYEXT %1
-    %w0 = COPY %9
+    $w0 = COPY %9
 
     ; CHECK: %2:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
     %2(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
     %10:_(s32) = G_ANYEXT %2
-    %w0 = COPY %10
+    $w0 = COPY %10
 
     ; CHECK: %3:_(s16) = G_LOAD %0(p0) :: (load 2 from %ir.addr)
     %3(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
     %11:_(s32) = G_ANYEXT %3
-    %w0 = COPY %11
+    $w0 = COPY %11
 
     ; CHECK: %4:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.addr)
     %4(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
-    %w0 = COPY %4
+    $w0 = COPY %4
 
     ; CHECK: %5:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
     %5(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
-    %x0 = COPY %5
+    $x0 = COPY %5
 
     %6(p0) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
     %12:_(s64) = G_PTRTOINT %6
-    %x0 = COPY %12
+    $x0 = COPY %12
 
     ; CHECK: %7:_(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
     %7(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
     %13:_(s64) = G_BITCAST %7
-    %x0 = COPY %13
+    $x0 = COPY %13
 
     ; CHECK: [[LOAD0:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 16 from %ir.addr)
     ; CHECK: [[OFFSET1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
@@ -69,7 +69,7 @@ body: |
     ; CHECK: %8:_(s128) = G_MERGE_VALUES [[LOAD0]](s64), [[LOAD1]](s64)
     %8(s128) = G_LOAD %0(p0) :: (load 16 from %ir.addr)
     %14:_(s64) = G_TRUNC %8
-    %x0 = COPY %14
+    $x0 = COPY %14
 ...
 
 ---
@@ -85,11 +85,11 @@ registers:
   - { id: 7, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_store
 
-    %0(p0) = COPY %x0
-    %1(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %1(s32) = COPY $w1
 
     ; CHECK: [[C1:%.*]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[B:%.*]]:_(s32) = COPY %1(s32)
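
One detail worth calling out in the load/store hunks: memory operands are not registers, so IR names inside them keep their '%'. Only register operands are re-sigiled, e.g.:

    ; '%ir.addr' names an IR value inside a memory operand and is unchanged;
    ; '$x0' is the physical destination register.
    %5(s64) = G_LOAD %0 :: (load 8 from %ir.addr)
    $x0 = COPY %5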

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir Wed Jan 31 14:04:26 2018
@@ -26,5 +26,5 @@ body: |
     %2(s8) = G_MERGE_VALUES %1(s4), %1(s4)
     %3(s8) = COPY %2(s8)
     %4(s64) = G_ANYEXT %3(s8)
-    %x0 = COPY %4(s64)
+    $x0 = COPY %4(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-mul.mir Wed Jan 31 14:04:26 2018
@@ -27,23 +27,23 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_mul_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[MUL]](s32)
-    ; CHECK: %x0 = COPY [[ANYEXT]](s64)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
     %4(s8) = G_MUL %2, %3
     %5(s64) = G_ANYEXT %4
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...
 
 
@@ -51,25 +51,25 @@ body: |
 name:            test_smul_overflow
 body: |
   bb.0:
-    liveins: %x0, %x1, %w2, %w3
+    liveins: $x0, $x1, $w2, $w3
 
     ; CHECK-LABEL: name: test_smul_overflow
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
     ; CHECK: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]]
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
     ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]]
     ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]]
-    ; CHECK: %x0 = COPY [[MUL]](s64)
+    ; CHECK: $x0 = COPY [[MUL]](s64)
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s64), %3:_(s1) = G_SMULO %0, %1
-    %x0 = COPY %2
+    $x0 = COPY %2
     %4:_(s32) = G_ANYEXT %3
-    %w0 = COPY %4
+    $w0 = COPY %4
 
 ...
 
@@ -78,23 +78,23 @@ body: |
 name:            test_umul_overflow
 body: |
   bb.0:
-    liveins: %x0, %x1, %w2, %w3
+    liveins: $x0, $x1, $w2, $w3
 
     ; CHECK-LABEL: name: test_umul_overflow
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
     ; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[COPY]], [[COPY1]]
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
-    ; CHECK: %x0 = COPY [[MUL]](s64)
+    ; CHECK: $x0 = COPY [[MUL]](s64)
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s64), %3:_(s1) = G_UMULO %0, %1
-    %x0 = COPY %2
+    $x0 = COPY %2
     %4:_(s32) = G_ANYEXT %3
-    %w0 = COPY %4
+    $w0 = COPY %4
 
 ...
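
An aside on what the overflow tests verify: G_SMULO is expanded into a plain G_MUL for the result plus an overflow check that compares G_SMULH (the high 64 bits of the full product) against the low half's sign bit replicated by G_ASHR with 63 (the sign-bit index of an s64); overflow is signalled when they differ. The unsigned variant only has to compare G_UMULH against zero. The signed check, quoted from the hunk above:

    ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
    ; CHECK: [[SMULH:%[0-9]+]]:_(s64) = G_SMULH [[COPY]], [[COPY1]]
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
    ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MUL]], [[C]]
    ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SMULH]](s64), [[ASHR]]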

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-nonpowerof2eltsvec.mir Wed Jan 31 14:04:26 2018
@@ -19,16 +19,16 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_legalize_merge_v3s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[MV:%[0-9]+]]:_(<3 x s64>) = G_MERGE_VALUES [[COPY]](s64), [[COPY]](s64), [[COPY]](s64)
     ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s64>) = COPY [[MV]](<3 x s64>)
     ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64), [[UV2:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<3 x s64>)
-    ; CHECK: %x0 = COPY [[UV]](s64)
-    %0(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[UV]](s64)
+    %0(s64) = COPY $x0
     %1(<3 x s64>) = G_MERGE_VALUES %0(s64), %0(s64), %0(s64)
     %2(<3 x s64>) = COPY %1(<3 x s64>)
     %3(s64), %4(s64), %5(s64) = G_UNMERGE_VALUES %2(<3 x s64>)
-    %x0 = COPY %3(s64)
+    $x0 = COPY %3(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-or.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-or.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-or.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-or.mir Wed Jan 31 14:04:26 2018
@@ -5,51 +5,51 @@
 name:            test_scalar_or_small
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_scalar_or_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[OR]](s32)
-    ; CHECK: %x0 = COPY [[TRUNC2]](s8)
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[TRUNC2]](s8)
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
     %2:_(s8) = G_TRUNC %0
     %3:_(s8) = G_TRUNC %1
     %4:_(s8) = G_OR %2, %3
-    %x0 = COPY %4
+    $x0 = COPY %4
 ...
 
 ---
 name:            test_big_scalar_power_of_2
 body: |
   bb.0:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; We have a temporary G_MERGE_VALUES in the legalizer that gets
     ; cleaned up with the G_UNMERGE_VALUES, so we end up directly
     ; copying the results of the G_OR ops.
 
     ; CHECK-LABEL: name: test_big_scalar_power_of_2
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY %x2
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY %x3
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[COPY]], [[COPY2]]
     ; CHECK: [[OR1:%[0-9]+]]:_(s64) = G_OR [[COPY1]], [[COPY3]]
-    ; CHECK: %x0 = COPY [[OR]](s64)
-    ; CHECK: %x1 = COPY [[OR1]](s64)
-    ; CHECK: RET_ReallyLR implicit %x0, implicit %x1
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(s64) = COPY %x2
-    %3:_(s64) = COPY %x3
+    ; CHECK: $x0 = COPY [[OR]](s64)
+    ; CHECK: $x1 = COPY [[OR1]](s64)
+    ; CHECK: RET_ReallyLR implicit $x0, implicit $x1
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(s64) = COPY $x2
+    %3:_(s64) = COPY $x3
     %4:_(s128) = G_MERGE_VALUES %0, %1
     %5:_(s128) = G_MERGE_VALUES %2, %3
     %6:_(s128) = G_OR %4, %5
     %7:_(s64), %8:_(s64) = G_UNMERGE_VALUES %6
-    %x0 = COPY %7
-    %x1 = COPY %8
-    RET_ReallyLR implicit %x0, implicit %x1
+    $x0 = COPY %7
+    $x1 = COPY %8
+    RET_ReallyLR implicit $x0, implicit $x1
 ...
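
A reading aid for the CHECK lines: capture patterns such as [[COPY:%[0-9]+]] still begin with '%' because they match virtual register definitions, whose sigil is unchanged; only literal physical registers in the expected output pick up the '$'. Side by side:

    ; the '%[0-9]+' capture matches a virtual register and keeps its '%':
    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
    ; the literal physical destination is now written with '$':
    ; CHECK: $x0 = COPY [[OR]](s64)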

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir Wed Jan 31 14:04:26 2018
@@ -66,8 +66,8 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: %w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+  ; CHECK:   liveins: $w0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
@@ -89,17 +89,17 @@ body:             |
   ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
   ; CHECK:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
-  ; CHECK:   %w0 = COPY [[AND]](s32)
-  ; CHECK:   RET_ReallyLR implicit %w0
+  ; CHECK:   $w0 = COPY [[AND]](s32)
+  ; CHECK:   RET_ReallyLR implicit $w0
   bb.0:
    ; Test that we insert legalization artifacts(Truncs here) into the correct BBs
    ; while legalizing the G_PHI to s16.
 
 
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %w0
+    liveins: $w0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 0
     %3(s32) = G_CONSTANT i32 1
     %6(s32) = G_CONSTANT i32 2
@@ -123,8 +123,8 @@ body:             |
   bb.3:
     %9(s1) = G_PHI %5(s1), %bb.1, %8(s1), %bb.2
     %10(s32) = G_ZEXT %9(s1)
-    %w0 = COPY %10(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %10(s32)
+    RET_ReallyLR implicit $w0
 
 ...
 ---
@@ -147,10 +147,10 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi_ptr
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: %w2, %x0, %x1
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY %x1
-  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY %w2
+  ; CHECK:   liveins: $w2, $x0, $x1
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
   ; CHECK:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32)
   ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
   ; CHECK:   G_BR %bb.2
@@ -158,16 +158,16 @@ body:             |
   ; CHECK:   successors: %bb.2(0x80000000)
   ; CHECK: bb.2:
   ; CHECK:   [[PHI:%[0-9]+]]:_(p0) = G_PHI [[COPY]](p0), %bb.0, [[COPY1]](p0), %bb.1
-  ; CHECK:   %x0 = COPY [[PHI]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[PHI]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   bb.1:
 
     successors: %bb.2, %bb.3
-    liveins: %w2, %x0, %x1
+    liveins: $w2, $x0, $x1
 
-    %0(p0) = COPY %x0
-    %1(p0) = COPY %x1
-    %4(s32) = COPY %w2
+    %0(p0) = COPY $x0
+    %1(p0) = COPY $x1
+    %4(s32) = COPY $w2
     %2(s1) = G_TRUNC %4(s32)
     G_BRCOND %2(s1), %bb.2
     G_BR %bb.3
@@ -177,8 +177,8 @@ body:             |
 
   bb.3:
     %3(p0) = G_PHI %0(p0), %bb.1, %1(p0), %bb.2
-    %x0 = COPY %3(p0)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3(p0)
+    RET_ReallyLR implicit $x0
 
 ...
 ---
@@ -206,8 +206,8 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi_empty
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: %w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+  ; CHECK:   liveins: $w0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
   ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -229,17 +229,17 @@ body:             |
   ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
   ; CHECK:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
-  ; CHECK:   %w0 = COPY [[AND]](s32)
-  ; CHECK:   RET_ReallyLR implicit %w0
+  ; CHECK:   $w0 = COPY [[AND]](s32)
+  ; CHECK:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %w0
+    liveins: $w0
    ; Test that we properly legalize a phi with a predecessor that's empty
 
 
 
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 0
     %3(s32) = G_CONSTANT i32 3
     %6(s32) = G_CONSTANT i32 1
@@ -263,8 +263,8 @@ body:             |
   bb.3:
     %9(s1) = G_PHI %8(s1), %bb.1, %5(s1), %bb.2
     %10(s32) = G_ZEXT %9(s1)
-    %w0 = COPY %10(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %10(s32)
+    RET_ReallyLR implicit $w0
 
 ...
 ---
@@ -289,8 +289,8 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi_loop
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: %w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+  ; CHECK:   liveins: $w0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32)
@@ -312,14 +312,14 @@ body:             |
   ; CHECK:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
   ; CHECK:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C3]]
-  ; CHECK:   %w0 = COPY [[AND1]](s32)
-  ; CHECK:   RET_ReallyLR implicit %w0
+  ; CHECK:   $w0 = COPY [[AND1]](s32)
+  ; CHECK:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x80000000)
-    liveins: %w0
+    liveins: $w0
    ; Test that we properly legalize a phi that uses a value from the same BB
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %2(s8) = G_CONSTANT i8 1
     %7(s8) = G_CONSTANT i8 0
 
@@ -334,8 +334,8 @@ body:             |
 
   bb.3:
     %6(s32) = G_ZEXT %3(s8)
-    %w0 = COPY %6(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %6(s32)
+    RET_ReallyLR implicit $w0
 
 ...
 ---
@@ -357,8 +357,8 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi_cycle
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: %w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+  ; CHECK:   liveins: $w0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
   ; CHECK: bb.1:
@@ -373,15 +373,15 @@ body:             |
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s16) = COPY [[PHI]](s16)
   ; CHECK:   G_BRCOND [[TRUNC2]](s1), %bb.1
   ; CHECK: bb.2:
-  ; CHECK:   %w0 = COPY [[AND]](s32)
-  ; CHECK:   RET_ReallyLR implicit %w0
+  ; CHECK:   $w0 = COPY [[AND]](s32)
+  ; CHECK:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x80000000)
-    liveins: %w0
+    liveins: $w0
    ; Test that we properly legalize a phi that uses itself
 
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %4(s8) = G_CONSTANT i8 0
 
   bb.1:
@@ -393,8 +393,8 @@ body:             |
     G_BRCOND %3(s1), %bb.1
 
   bb.3:
-    %w0 = COPY %2(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
 
 ...
 ---
@@ -426,8 +426,8 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi_same_bb
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: %w0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+  ; CHECK:   liveins: $w0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
   ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -457,18 +457,18 @@ body:             |
   ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
   ; CHECK:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C5]]
   ; CHECK:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[C]]1, [[C]]2
-  ; CHECK:   %w0 = COPY [[C]]3(s32)
-  ; CHECK:   RET_ReallyLR implicit %w0
+  ; CHECK:   $w0 = COPY [[C]]3(s32)
+  ; CHECK:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    liveins: %w0
+    liveins: $w0
    ; Make sure that we correctly insert the new legalized G_PHI at the
    ; correct location (ie make sure G_PHIs are the first insts in the BB).
 
 
 
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 0
     %3(s32) = G_CONSTANT i32 3
     %6(s32) = G_CONSTANT i32 1
@@ -496,8 +496,8 @@ body:             |
     %11(s32) = G_ZEXT %9(s8)
     %12(s32) = G_ZEXT %10(s8)
     %13(s32) = G_ADD %11, %12
-    %w0 = COPY %13(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %13(s32)
+    RET_ReallyLR implicit $w0
 
 ...
 ---
@@ -530,8 +530,8 @@ body:             |
   ; CHECK-LABEL: name: legalize_phi_diff_bb
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: %w0, %w1
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY %w0
+  ; CHECK:   liveins: $w0, $w1
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s32) = COPY $w0
   ; CHECK:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
   ; CHECK:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -563,18 +563,18 @@ body:             |
   ; CHECK:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
   ; CHECK:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[C]]8, [[C]]7
-  ; CHECK:   %w0 = COPY [[AND1]](s32)
-  ; CHECK:   RET_ReallyLR implicit %w0
+  ; CHECK:   $w0 = COPY [[AND1]](s32)
+  ; CHECK:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x40000000), %bb.3(0x40000000)
-    liveins: %w0, %w1
+    liveins: $w0, $w1
    ; Make sure that we correctly legalize PHIs sharing common defs
    ; in different BBs.
 
 
 
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_CONSTANT i32 0
     %4(s32) = G_CONSTANT i32 3
     %9(s32) = G_CONSTANT i32 1
@@ -599,7 +599,7 @@ body:             |
   bb.3:
     %13(s8) = G_PHI %7(s8), %bb.1, %6(s8), %bb.0
     %14(s32) = G_ZEXT %13(s8)
-    %w0 = COPY %14(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %14(s32)
+    RET_ReallyLR implicit $w0
 
 ...
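
The phi tests also show what does not change: basic-block references keep their own '%bb.N' syntax in successor lists, G_PHI operands, and G_BR targets, since those are labels rather than registers. A typical post-patch block header:

    bb.0:
      successors: %bb.1(0x40000000), %bb.2(0x40000000)
      liveins: $w0
      %0(s32) = COPY $w0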

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-pow.mir Wed Jan 31 14:04:26 2018
@@ -13,28 +13,28 @@
 name:            test_pow
 body: |
   bb.0.entry:
-    liveins: %d0, %d1, %s2, %s3
+    liveins: $d0, $d1, $s2, $s3
 
     ; CHECK-LABEL: name: test_pow
     ; CHECK: hasCalls: true
 
-    %0:_(s64) = COPY %d0
-    %1:_(s64) = COPY %d1
-    %2:_(s32) = COPY %s2
-    %3:_(s32) = COPY %s3
+    %0:_(s64) = COPY $d0
+    %1:_(s64) = COPY $d1
+    %2:_(s32) = COPY $s2
+    %3:_(s32) = COPY $s3
 
-    ; CHECK: %d0 = COPY %0
-    ; CHECK: %d1 = COPY %1
-    ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0
-    ; CHECK: %4:_(s64) = COPY %d0
+    ; CHECK: $d0 = COPY %0
+    ; CHECK: $d1 = COPY %1
+    ; CHECK: BL &pow, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
+    ; CHECK: %4:_(s64) = COPY $d0
     %4:_(s64) = G_FPOW %0, %1
-    %x0 = COPY %4
+    $x0 = COPY %4
 
-    ; CHECK: %s0 = COPY %2
-    ; CHECK: %s1 = COPY %3
-    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0
-    ; CHECK: %5:_(s32) = COPY %s0
+    ; CHECK: $s0 = COPY %2
+    ; CHECK: $s1 = COPY %3
+    ; CHECK: BL &powf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK: %5:_(s32) = COPY $s0
     %5:_(s32) = G_FPOW %2, %3
-    %w0 = COPY %5
+    $w0 = COPY %5
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-rem.mir Wed Jan 31 14:04:26 2018
@@ -30,19 +30,19 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_urem_64
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[COPY]], [[COPY1]]
     ; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[UDIV]], [[COPY1]]
     ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[MUL]]
-    ; CHECK: %x0 = COPY [[SUB]](s64)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[SUB]](s64)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_UREM %0, %1
-    %x0 = COPY %2
+    $x0 = COPY %2
 
 
 ...
@@ -56,23 +56,23 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_srem_32
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[SDIV]], [[TRUNC1]]
     ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[MUL]]
-    ; CHECK: %w0 = COPY [[SUB]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $w0 = COPY [[SUB]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %3(s32) = G_TRUNC %0
     %4(s32) = G_TRUNC %1
     %5(s32) = G_SREM %3, %4
-    %w0 = COPY %5
+    $w0 = COPY %5
 
 ...
 ---
@@ -85,12 +85,12 @@ registers:
   - { id: 8, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
 
     ; CHECK-LABEL: name: test_srem_8
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]]
@@ -107,14 +107,14 @@ body: |
     ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[MUL]](s32)
     ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[COPY3]]
     ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
-    ; CHECK: %w0 = COPY [[COPY4]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $w0 = COPY [[COPY4]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %6(s8) = G_TRUNC %0
     %7(s8) = G_TRUNC %1
     %8(s8) = G_SREM %6, %7
     %9:_(s32) = G_ANYEXT %8
-    %w0 = COPY %9
+    $w0 = COPY %9
 ...
 ---
 name:            test_frem
@@ -127,33 +127,33 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_frem
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp
-    ; CHECK: %d0 = COPY [[COPY]](s64)
-    ; CHECK: %d1 = COPY [[COPY1]](s64)
-    ; CHECK: BL &fmod, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %d0, implicit %d1, implicit-def %d0
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY %d0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp
-    ; CHECK: %x0 = COPY [[COPY2]](s64)
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: $d0 = COPY [[COPY]](s64)
+    ; CHECK: $d1 = COPY [[COPY1]](s64)
+    ; CHECK: BL &fmod, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: $x0 = COPY [[COPY2]](s64)
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def %sp, implicit %sp
-    ; CHECK: %s0 = COPY [[TRUNC]](s32)
-    ; CHECK: %s1 = COPY [[TRUNC1]](s32)
-    ; CHECK: BL &fmodf, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %s0, implicit %s1, implicit-def %s0
-    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY %s0
-    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def %sp, implicit %sp
-    ; CHECK: %w0 = COPY [[COPY3]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: $s0 = COPY [[TRUNC]](s32)
+    ; CHECK: $s1 = COPY [[TRUNC1]](s32)
+    ; CHECK: BL &fmodf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $s0, implicit $s1, implicit-def $s0
+    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $s0
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: $w0 = COPY [[COPY3]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_FREM %0, %1
-    %x0 = COPY %2
+    $x0 = COPY %2
 
     %3(s32) = G_TRUNC %0
     %4(s32) = G_TRUNC %1
     %5(s32) = G_FREM %3, %4
-    %w0 = COPY %5
+    $w0 = COPY %5
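
The frem (and pow) libcall tests carry the densest physical-register traffic and are a good sanity check on the rename: argument registers, the returned register, and the implicit $lr/$sp operands on the BL all take the new sigil. The fmod call sequence from the hunk above, post-patch:

    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
    ; CHECK: $d0 = COPY [[COPY]](s64)
    ; CHECK: $d1 = COPY [[COPY1]](s64)
    ; CHECK: BL &fmod, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $d0, implicit $d1, implicit-def $d0
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $d0
    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp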

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir Wed Jan 31 14:04:26 2018
@@ -22,10 +22,10 @@ registers:
   - { id: 6, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
     ; CHECK-LABEL: name: test_shift
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]]
@@ -36,7 +36,7 @@ body: |
     ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]]
     ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[ASHR1]]
     ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR2]](s32)
-    ; CHECK: %w0 = COPY [[COPY2]](s32)
+    ; CHECK: $w0 = COPY [[COPY2]](s32)
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]]
@@ -45,27 +45,27 @@ body: |
     ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]]
     ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[AND1]]
     ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
-    ; CHECK: %w0 = COPY [[COPY3]](s32)
+    ; CHECK: $w0 = COPY [[COPY3]](s32)
     ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY1]]0, [[COPY1]]1
     ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY1]]2(s32)
-    ; CHECK: %w0 = COPY [[COPY4]](s32)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $w0 = COPY [[COPY4]](s32)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
 
     %4(s8) = G_ASHR %2, %3
     %7:_(s32) = G_ANYEXT %4
-    %w0 = COPY %7
+    $w0 = COPY %7
 
 
     %5(s8) = G_LSHR %2, %3
     %8:_(s32) = G_ANYEXT %5
-    %w0 = COPY %8
+    $w0 = COPY %8
 
     %6(s8) = G_SHL %2, %3
     %9:_(s32) = G_ANYEXT %6
-    %w0 = COPY %9
+    $w0 = COPY %9
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir Wed Jan 31 14:04:26 2018
@@ -45,46 +45,46 @@ body: |
   ; CHECK-LABEL: name: test_simple
   ; CHECK: bb.0.{{[a-zA-Z0-9]+}}:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
   ; CHECK:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64)
   ; CHECK:   [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[INTTOPTR]](p0)
-  ; CHECK:   %x0 = COPY [[PTRTOINT]](s64)
+  ; CHECK:   $x0 = COPY [[PTRTOINT]](s64)
   ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
   ; CHECK: bb.1.{{[a-zA-Z0-9]+}}:
   ; CHECK:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC2]], [[TRUNC3]]
   ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
-  ; CHECK:   %w0 = COPY [[COPY1]](s32)
+  ; CHECK:   $w0 = COPY [[COPY1]](s32)
   ; CHECK:   [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC4]], [[TRUNC5]]
   ; CHECK:   [[COPY2:%[0-9]+]]:_(s32) = COPY [[SELECT1]](s32)
-  ; CHECK:   %w0 = COPY [[COPY2]](s32)
+  ; CHECK:   $w0 = COPY [[COPY2]](s32)
   ; CHECK:   [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK:   [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC6]], [[TRUNC7]]
   ; CHECK:   [[COPY3:%[0-9]+]]:_(s32) = COPY [[SELECT2]](s32)
-  ; CHECK:   %w0 = COPY [[COPY3]](s32)
+  ; CHECK:   $w0 = COPY [[COPY3]](s32)
   ; CHECK:   [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC1]], [[TRUNC1]]
   ; CHECK:   [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY]]
-  ; CHECK:   %x0 = COPY [[SELECT4]](s64)
+  ; CHECK:   $x0 = COPY [[SELECT4]](s64)
   ; CHECK:   [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]](s64)
   ; CHECK:   [[BITCAST1:%[0-9]+]]:_(s64) = G_BITCAST [[BITCAST]](<2 x s32>)
-  ; CHECK:   %x0 = COPY [[BITCAST1]](s64)
+  ; CHECK:   $x0 = COPY [[BITCAST1]](s64)
   ; CHECK:   [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[SELECT3]](s32)
-  ; CHECK:   %w0 = COPY [[BITCAST2]](s32)
+  ; CHECK:   $w0 = COPY [[BITCAST2]](s32)
   ; CHECK:   [[BITCAST3:%[0-9]+]]:_(<4 x s8>) = G_BITCAST [[COPY]](s64)
   ; CHECK:   [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST3]](<4 x s8>)
-  ; CHECK:   %w0 = COPY [[BITCAST4]](s32)
+  ; CHECK:   $w0 = COPY [[BITCAST4]](s32)
   ; CHECK:   [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY]](s64)
   ; CHECK:   [[BITCAST6:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST5]](<2 x s16>)
-  ; CHECK:   %w0 = COPY [[BITCAST6]](s32)
+  ; CHECK:   $w0 = COPY [[BITCAST6]](s32)
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
-    %0(s64) = COPY %x0
+    liveins: $x0, $x1, $x2, $x3
+    %0(s64) = COPY $x0
 
     %1(s1) = G_TRUNC %0
     %2(s8) = G_TRUNC %0
@@ -93,7 +93,7 @@ body: |
 
     %5(p0) = G_INTTOPTR %0
     %6(s64) = G_PTRTOINT %5
-    %x0 = COPY %6
+    $x0 = COPY %6
 
     G_BRCOND %1, %bb.1
 
@@ -101,31 +101,31 @@ body: |
 
     %7(s1) = G_SELECT %1, %1, %1
     %21:_(s32) = G_ANYEXT %7
-    %w0 = COPY %21
+    $w0 = COPY %21
 
     %8(s8) = G_SELECT %1, %2, %2
     %20:_(s32) = G_ANYEXT %8
-    %w0 = COPY %20
+    $w0 = COPY %20
 
     %9(s16) = G_SELECT %1, %3, %3
     %19:_(s32) = G_ANYEXT %9
-    %w0 = COPY %19
+    $w0 = COPY %19
 
     %10(s32) = G_SELECT %1, %4, %4
     %11(s64) = G_SELECT %1, %0, %0
-    %x0 = COPY %11
+    $x0 = COPY %11
 
     %12(<2 x s32>) = G_BITCAST %0
     %13(s64) = G_BITCAST %12
-    %x0 = COPY %13
+    $x0 = COPY %13
     %14(s32) = G_BITCAST %10
-    %w0 = COPY %14
+    $w0 = COPY %14
     %15(<4 x s8>) = G_BITCAST %0
     %17:_(s32) = G_BITCAST %15
-    %w0 = COPY %17
+    $w0 = COPY %17
     %16(<2 x s16>) = G_BITCAST %0
     %18:_(s32) = G_BITCAST %16
-    %w0 = COPY %18
+    $w0 = COPY %18
 ...
 
 ---
@@ -138,22 +138,22 @@ registers:
   - { id: 3, class: _}
 body:             |
   bb.1:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
     ; This is legal and shouldn't be changed.
     ; CHECK-LABEL: name: bitcast128
-    ; CHECK: liveins: %x0, %x1
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
     ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[MV]](s128)
-    ; CHECK: %q0 = COPY [[BITCAST]](<2 x s64>)
-    ; CHECK: RET_ReallyLR implicit %q0
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $q0 = COPY [[BITCAST]](<2 x s64>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %3(s128) = G_MERGE_VALUES %0(s64), %1(s64)
     %2(<2 x s64>) = G_BITCAST %3(s128)
-    %q0 = COPY %2(<2 x s64>)
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %2(<2 x s64>)
+    RET_ReallyLR implicit $q0
 
 ...
 ---
@@ -166,19 +166,19 @@ registers:
   - { id: 3, class: _}
 body:             |
   bb.1:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: testExtOfCopyOfTrunc
-    ; CHECK: liveins: %x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[COPY1]](s64)
-    ; CHECK: RET_ReallyLR implicit %x0
-    %0(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[COPY1]](s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0(s64) = COPY $x0
     %1(s1) = G_TRUNC %0
     %2(s1) = COPY %1
     %3(s64) = G_ANYEXT %2
-    %x0 = COPY %3
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3
+    RET_ReallyLR implicit $x0
 
 ...
 ---
@@ -191,19 +191,19 @@ registers:
   - { id: 3, class: _}
 body:             |
   bb.1:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: testExtOf2CopyOfTrunc
-    ; CHECK: liveins: %x0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
-    ; CHECK: %x0 = COPY [[COPY1]](s64)
-    ; CHECK: RET_ReallyLR implicit %x0
-    %0(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[COPY1]](s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0(s64) = COPY $x0
     %1(s1) = G_TRUNC %0
     %2(s1) = COPY %1
     %4:_(s1) = COPY %2
     %3(s64) = G_ANYEXT %4
-    %x0 = COPY %3
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3
+    RET_ReallyLR implicit $x0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir Wed Jan 31 14:04:26 2018
@@ -21,21 +21,21 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_sub_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SUB]](s32)
-    ; CHECK: %x0 = COPY [[ANYEXT]](s64)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
     %4(s8) = G_SUB %2, %3
     %5(s64) = G_ANYEXT %4
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir Wed Jan 31 14:04:26 2018
@@ -14,5 +14,5 @@ body: |
     ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[DEF]](s64), [[DEF1]](s64)
     %0:_(s128) = G_IMPLICIT_DEF
     %1:_(s64) = G_TRUNC %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir Wed Jan 31 14:04:26 2018
@@ -23,6 +23,6 @@ body: |
     ; CHECK: unable to legalize instruction: {{.*}} G_UNMERGE_VALUES
     %1(s4), %2(s4)= G_UNMERGE_VALUES %0(s8)
     %3(s64) = G_ANYEXT %1(s4)
-    %x0 = COPY %3(s64)
+    $x0 = COPY %3(s64)
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-vaarg.mir Wed Jan 31 14:04:26 2018
@@ -12,7 +12,7 @@ name:            test_vaarg
 body: |
   bb.0:
     ; CHECK-LABEL: name: test_vaarg
-    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
     ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8)
     ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[LOAD]], [[C]](s64)
@@ -28,7 +28,7 @@ body: |
     ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[PTR_MASK]], [[C3]](s64)
     ; CHECK: G_STORE [[GEP3]](p0), [[COPY]](p0) :: (store 8)
-    %0:_(p0) = COPY %x0
+    %0:_(p0) = COPY $x0
 
     %1:_(s8) = G_VAARG %0(p0), 1
 

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-xor.mir Wed Jan 31 14:04:26 2018
@@ -21,21 +21,21 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
     ; CHECK-LABEL: name: test_scalar_xor_small
-    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[TRUNC]], [[TRUNC1]]
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
-    ; CHECK: %x0 = COPY [[ANYEXT]](s64)
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ANYEXT]](s64)
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s8) = G_TRUNC %0
     %3(s8) = G_TRUNC %1
     %4(s8) = G_XOR %2, %3
     %5(s64) = G_ANYEXT %4
-    %x0 = COPY %5
+    $x0 = COPY %5
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer-in-O0-pipeline.mir Wed Jan 31 14:04:26 2018
@@ -75,10 +75,10 @@ registers:
 # CHECK-NEXT: G_FADD %0, %2
 body:             |
   bb.0 (%ir-block.0):
-    liveins: %s0, %w0
+    liveins: $s0, $w0
 
-    %0(s32) = COPY %s0
-    %6(s32) = COPY %w0
+    %0(s32) = COPY $s0
+    %6(s32) = COPY $w0
     %1(s1) = G_TRUNC %6
     %4(s32) = G_FCONSTANT float 1.000000e+00
     %5(s32) = G_FCONSTANT float 2.000000e+00
@@ -93,7 +93,7 @@ body:             |
   bb.3.end:
     %2(s32) = PHI %4(s32), %bb.1, %5(s32), %bb.2
     %3(s32) = G_FADD %0, %2
-    %s0 = COPY %3(s32)
-    RET_ReallyLR implicit %s0
+    $s0 = COPY %3(s32)
+    RET_ReallyLR implicit $s0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/localizer.mir Wed Jan 31 14:04:26 2018
@@ -274,8 +274,8 @@ body:             |
   ; CHECK-LABEL: name: non_local_label
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: %s0
-  ; CHECK:   [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+  ; CHECK:   liveins: $s0
+  ; CHECK:   [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
   ; CHECK:   [[C:%[0-9]+]]:fpr(s32) = G_FCONSTANT float 1.000000e+00
   ; CHECK: bb.1:
   ; CHECK:   successors: %bb.1(0x80000000)
@@ -288,10 +288,10 @@ body:             |
   ; The newly created reg should be on the same regbank/regclass as its origin.
 
   bb.0:
-    liveins: %s0
+    liveins: $s0
     successors: %bb.1
 
-    %0:fpr(s32) = COPY %s0
+    %0:fpr(s32) = COPY $s0
     %1:fpr(s32) = G_FCONSTANT float 1.0
 
   bb.1:
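
In the machine-cse tests below, the CHECK patterns capture the whole register name in a FileCheck variable; the sigil sits outside the capture, so '%[[RET:[wx][0-9]+]]' becomes '$[[RET:[wx][0-9]+]]' while the regex body (a w- or x-register name) is untouched:

    ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
    ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]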

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/machine-cse-mid-pipeline.mir Wed Jan 31 14:04:26 2018
@@ -9,15 +9,15 @@ body:             |
   ; CHECK:      %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT: %[[TWO:[0-9]+]]:_(s32) = G_ADD %[[ONE]], %[[ONE]]
   ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
-  ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
-  ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]]
+  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
+  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
   bb.0:
     %0:_(s32) = G_CONSTANT i32 1
     %1:_(s32) = G_ADD %0, %0
     %2:_(s32) = G_ADD %0, %0
     %3:_(s32) = G_ADD %1, %2
-    %w0 = COPY %3(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            regbankselected
@@ -29,15 +29,15 @@ body:             |
   ; CHECK:      %[[ONE:[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]]
   ; CHECK-NEXT: %[[SUM:[0-9]+]]:gpr(s32) = G_ADD %[[TWO]], %[[TWO]]
-  ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
-  ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]]
+  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
+  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
   bb.0:
     %0:gpr(s32) = G_CONSTANT i32 1
     %1:gpr(s32) = G_ADD %0, %0
     %2:gpr(s32) = G_ADD %0, %0
     %3:gpr(s32) = G_ADD %1, %2
-    %w0 = COPY %3(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            legalized
@@ -49,15 +49,15 @@ body:             |
   ; CHECK:      %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]]
   ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
-  ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
-  ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]]
+  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
+  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
   bb.0:
     %0:_(s32) = G_CONSTANT i32 1
     %1:_(s32) = G_ADD %0, %0
     %2:gpr(s32) = G_ADD %0, %0
     %3:_(s32) = G_ADD %1, %2
-    %w0 = COPY %3(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            legalized_sym
@@ -69,15 +69,15 @@ body:             |
   ; CHECK:      %[[ONE:[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT: %[[TWO:[0-9]+]]:gpr(s32) = G_ADD %[[ONE]], %[[ONE]]
   ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s32) = G_ADD %[[TWO]], %[[TWO]]
-  ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
-  ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]]
+  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s32)
+  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
   bb.0:
     %0:_(s32) = G_CONSTANT i32 1
     %1:gpr(s32) = G_ADD %0, %0
     %2:_(s32) = G_ADD %0, %0
     %3:_(s32) = G_ADD %1, %2
-    %w0 = COPY %3(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            int_extensions
@@ -93,8 +93,8 @@ body:             |
   ; CHECK-NEXT: %[[S16_Z64:[0-9]+]]:_(s64) = G_ZEXT %[[S16]](s16)
   ; CHECK-NEXT: %[[S32_Z64:[0-9]+]]:_(s64) = G_ZEXT %[[S32]](s32)
   ; CHECK-NEXT: %[[SUM:[0-9]+]]:_(s64) = G_ADD %[[S16_Z64]], %[[S32_Z64]]
-  ; CHECK-NEXT: %[[RET:[wx][0-9]+]] = COPY %[[SUM]](s64)
-  ; CHECK-NEXT: RET_ReallyLR implicit %[[RET]]
+  ; CHECK-NEXT: $[[RET:[wx][0-9]+]] = COPY %[[SUM]](s64)
+  ; CHECK-NEXT: RET_ReallyLR implicit $[[RET]]
   bb.0.entry:
     %0:_(s8) = G_CONSTANT i8 1
     %1:_(s16) = G_SEXT %0(s8)
@@ -102,8 +102,8 @@ body:             |
     %3:_(s64) = G_ZEXT %1(s16)
     %4:_(s64) = G_ZEXT %2(s32)
     %5:_(s64) = G_ADD %3, %4
-    %x0 = COPY %5(s64)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %5(s64)
+    RET_ReallyLR implicit $x0
 ...
 ---
 name:            generic
@@ -115,13 +115,13 @@ body:             |
   ; CHECK:      %[[SG:[0-9]+]]:_(s32) = G_ADD %{{[0-9]+}}, %{{[0-9]+}}
   ; CHECK-NEXT: %{{[0-9]+}}:_(s32) = G_ADD %[[SG]], %[[SG]]
   bb.0:
-    %0:_(s32) = COPY %w0
-    %1:_(s32) = COPY %w1
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
     %2:_(s32) = G_ADD %0, %1
     %3:_(s32) = COPY %2(s32)
     %4:_(s32) = G_ADD %3, %3
-    %w0 = COPY %4(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            generic_to_concrete_copy
@@ -134,13 +134,13 @@ body:             |
   ; CHECK-NEXT: %[[S2:[0-9]+]]:gpr32 = COPY %[[S1]](s32)
   ; CHECK-NEXT: %{{[0-9]+}}:gpr32 = ADDWrr %[[S2]], %[[S2]]
   bb.0:
-    %0:_(s32) = COPY %w0
-    %1:_(s32) = COPY %w1
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
     %2:_(s32) = G_ADD %0, %1
     %3:gpr32 = COPY %2(s32)
     %4:gpr32 = ADDWrr %3, %3
-    %w0 = COPY %4
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %4
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            concrete_to_generic_copy
@@ -153,13 +153,13 @@ body:             |
   ; CHECK-NEXT: %[[S2:[0-9]+]]:_(s32) = COPY %[[S1]]
   ; CHECK-NEXT: %{{[0-9]+}}:_(s32) = G_ADD %[[S2]], %[[S2]]
   bb.0:
-    %0:gpr32 = COPY %w0
-    %1:gpr32 = COPY %w1
+    %0:gpr32 = COPY $w0
+    %1:gpr32 = COPY $w1
     %2:gpr32 = ADDWrr %0, %1
     %3:_(s32) = COPY %2
     %4:_(s32) = G_ADD %3, %3
-    %w0 = COPY %4(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
 ...
 ---
 name:            concrete
@@ -171,11 +171,11 @@ body:             |
   ; CHECK:      %[[SC:[0-9]+]]:gpr32 = ADDWrr %{{[0-9]+}}, %{{[0-9]+}}
   ; CHECK-NEXT: %{{[0-9]+}}:gpr32 = ADDWrr %[[SC]], %[[SC]]
   bb.0:
-    %0:gpr32 = COPY %w0
-    %1:gpr32 = COPY %w1
+    %0:gpr32 = COPY $w0
+    %1:gpr32 = COPY $w1
     %2:gpr32 = ADDWrr %0, %1
     %3:gpr32 = COPY %2
     %4:gpr32 = ADDWrr %3, %3
-    %w0 = COPY %4
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %4
+    RET_ReallyLR implicit $w0
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/no-regclass.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/no-regclass.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/no-regclass.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/no-regclass.mir Wed Jan 31 14:04:26 2018
@@ -19,13 +19,13 @@ tracksRegLiveness: true
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: unused_reg
-    ; CHECK: liveins: %w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0
-    ; CHECK: %w0 = COPY [[COPY]]
-    %0:gpr(s32) = COPY %w0
+    ; CHECK: liveins: $w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK: $w0 = COPY [[COPY]]
+    %0:gpr(s32) = COPY $w0
     %1:gpr(s64) = G_MERGE_VALUES %0(s32), %0(s32)
     %2:gpr(s32), %3:gpr(s32) = G_UNMERGE_VALUES %1(s64)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/reg-bank-128bit.mir Wed Jan 31 14:04:26 2018
@@ -5,7 +5,7 @@ legalized:       true
 registers:
 body: |
   bb.0.entry:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
     ; CHECK-LABEL: name: test_large_merge
     ; CHECK: registers:
@@ -13,10 +13,10 @@ body: |
     ; CHECK:       - { id: 1, class: gpr
     ; CHECK:       - { id: 2, class: gpr
     ; CHECK:       - { id: 3, class: fpr
-    %0:_(s64) = COPY %x0
-    %1:_(s64) = COPY %x1
-    %2:_(p0) = COPY %x2
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %2:_(p0) = COPY $x2
     %3:_(s128) = G_MERGE_VALUES %0, %1
     %4:_(s64) = G_TRUNC %3
-    %d0 = COPY %4
+    $d0 = COPY %4
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-dbg-value.mir Wed Jan 31 14:04:26 2018
@@ -34,11 +34,11 @@ legalized:       true
 # CHECK-NEXT:  - { id: 0, class: gpr, preferred-register: '' }
 body: |
   bb.0:
-    liveins: %w0
-    %0:_(s32) = COPY %w0
-    ; CHECK: DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
-    DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
+    liveins: $w0
+    %0:_(s32) = COPY $w0
+    ; CHECK: DBG_VALUE debug-use %0(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9
+    DBG_VALUE debug-use %0(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9
 
-    ; CHECK: DBG_VALUE %noreg, 0, !7, !DIExpression(), debug-location !9
-    DBG_VALUE %noreg, 0, !7, !DIExpression(), debug-location !9
+    ; CHECK: DBG_VALUE $noreg, 0, !7, !DIExpression(), debug-location !9
+    DBG_VALUE $noreg, 0, !7, !DIExpression(), debug-location !9
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir Wed Jan 31 14:04:26 2018
@@ -80,11 +80,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_add_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ADD %0, %0
 ...
 
@@ -96,11 +96,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_add_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[ADD:%[0-9]+]]:fpr(<4 x s32>) = G_ADD [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_ADD %0, %0
 ...
 
@@ -112,11 +112,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sub_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[SUB:%[0-9]+]]:gpr(s32) = G_SUB [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_SUB %0, %0
 ...
 
@@ -128,11 +128,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_sub_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[SUB:%[0-9]+]]:fpr(<4 x s32>) = G_SUB [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_SUB %0, %0
 ...
 
@@ -144,11 +144,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_mul_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[MUL:%[0-9]+]]:gpr(s32) = G_MUL [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_MUL %0, %0
 ...
 
@@ -160,11 +160,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_mul_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[MUL:%[0-9]+]]:fpr(<4 x s32>) = G_MUL [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_MUL %0, %0
 ...
 
@@ -176,11 +176,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_and_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_AND %0, %0
 ...
 
@@ -192,11 +192,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_and_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[AND:%[0-9]+]]:fpr(<4 x s32>) = G_AND [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_AND %0, %0
 ...
 
@@ -208,11 +208,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_or_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[OR:%[0-9]+]]:gpr(s32) = G_OR [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_OR %0, %0
 ...
 
@@ -224,11 +224,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_or_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[OR:%[0-9]+]]:fpr(<4 x s32>) = G_OR [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_OR %0, %0
 ...
 
@@ -240,11 +240,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_xor_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[XOR:%[0-9]+]]:gpr(s32) = G_XOR [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_XOR %0, %0
 ...
 
@@ -256,11 +256,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_xor_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[XOR:%[0-9]+]]:fpr(<4 x s32>) = G_XOR [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_XOR %0, %0
 ...
 
@@ -272,11 +272,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_shl_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_SHL %0, %0
 ...
 
@@ -288,11 +288,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %q0
+    liveins: $q0
     ; CHECK-LABEL: name: test_shl_v4s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY %q0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(<4 x s32>) = COPY $q0
     ; CHECK: [[SHL:%[0-9]+]]:fpr(<4 x s32>) = G_SHL [[COPY]], [[COPY]]
-    %0(<4 x s32>) = COPY %q0
+    %0(<4 x s32>) = COPY $q0
     %1(<4 x s32>) = G_SHL %0, %0
 ...
 
@@ -304,11 +304,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_lshr_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_LSHR %0, %0
 ...
 
@@ -320,11 +320,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_ashr_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ASHR %0, %0
 ...
 
@@ -336,11 +336,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sdiv_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[SDIV:%[0-9]+]]:gpr(s32) = G_SDIV [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_SDIV %0, %0
 ...
 
@@ -352,11 +352,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_udiv_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[UDIV:%[0-9]+]]:gpr(s32) = G_UDIV [[COPY]], [[COPY]]
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_UDIV %0, %0
 ...
 
@@ -368,11 +368,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_anyext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[ANYEXT:%[0-9]+]]:gpr(s64) = G_ANYEXT [[COPY]](s32)
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s64) = G_ANYEXT %0
 ...
 
@@ -384,11 +384,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[SEXT:%[0-9]+]]:gpr(s64) = G_SEXT [[COPY]](s32)
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s64) = G_SEXT %0
 ...
 
@@ -400,11 +400,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_zext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[ZEXT:%[0-9]+]]:gpr(s64) = G_ZEXT [[COPY]](s32)
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s64) = G_ZEXT %0
 ...
 
@@ -416,11 +416,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_trunc_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
     ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s32) = G_TRUNC [[COPY]](s64)
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(s32) = G_TRUNC %0
 ...
 
@@ -457,12 +457,12 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_icmp_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](s32), [[COPY]]
     ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ICMP intpred(ne), %0, %0
     %2(s1) = G_TRUNC %1(s32)
 ...
@@ -476,12 +476,12 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_icmp_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
     ; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[COPY]](p0), [[COPY]]
     ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-    %0(p0) = COPY %x0
+    %0(p0) = COPY $x0
     %1(s32) = G_ICMP intpred(ne), %0, %0
     %2(s1) = G_TRUNC %1(s32)
 ...
@@ -508,11 +508,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_ptrtoint_s64_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
     ; CHECK: [[PTRTOINT:%[0-9]+]]:gpr(s64) = G_PTRTOINT [[COPY]](p0)
-    %0(p0) = COPY %x0
+    %0(p0) = COPY $x0
     %1(s64) = G_PTRTOINT %0
 ...
 
@@ -524,11 +524,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_inttoptr_p0_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
     ; CHECK: [[INTTOPTR:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[COPY]](s64)
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(p0) = G_INTTOPTR %0
 ...
 
@@ -540,11 +540,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_load_s32_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
     ; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4)
-    %0(p0) = COPY %x0
+    %0(p0) = COPY $x0
     %1(s32) = G_LOAD %0 :: (load 4)
 ...
 
@@ -556,13 +556,13 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
     ; CHECK-LABEL: name: test_store_s32_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1
     ; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4)
-    %0(p0) = COPY %x0
-    %1(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %1(s32) = COPY $w1
     G_STORE %1, %0 :: (store 4)
 ...
 
@@ -574,11 +574,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fadd_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FADD:%[0-9]+]]:fpr(s32) = G_FADD [[COPY]], [[COPY]]
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_FADD %0, %0
 ...
 
@@ -590,11 +590,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fsub_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FSUB:%[0-9]+]]:fpr(s32) = G_FSUB [[COPY]], [[COPY]]
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_FSUB %0, %0
 ...
 
@@ -606,11 +606,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fmul_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FMUL:%[0-9]+]]:fpr(s32) = G_FMUL [[COPY]], [[COPY]]
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_FMUL %0, %0
 ...
 
@@ -622,11 +622,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fdiv_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FDIV:%[0-9]+]]:fpr(s32) = G_FDIV [[COPY]], [[COPY]]
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_FDIV %0, %0
 ...
 
@@ -638,11 +638,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fpext_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FPEXT:%[0-9]+]]:fpr(s64) = G_FPEXT [[COPY]](s32)
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s64) = G_FPEXT %0
 ...
 
@@ -654,11 +654,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %d0
+    liveins: $d0
     ; CHECK-LABEL: name: test_fptrunc_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
     ; CHECK: [[FPTRUNC:%[0-9]+]]:fpr(s32) = G_FPTRUNC [[COPY]](s64)
-    %0(s64) = COPY %d0
+    %0(s64) = COPY $d0
     %1(s32) = G_FPTRUNC %0
 ...
 
@@ -683,12 +683,12 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fcmp_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FCMP:%[0-9]+]]:gpr(s32) = G_FCMP floatpred(olt), [[COPY]](s32), [[COPY]]
     ; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[FCMP]](s32)
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_FCMP floatpred(olt), %0, %0
     %2(s1) = G_TRUNC %1(s32)
 ...
@@ -701,11 +701,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_sitofp_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
     ; CHECK: [[SITOFP:%[0-9]+]]:fpr(s64) = G_SITOFP [[COPY]](s32)
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s64) = G_SITOFP %0
 ...
 
@@ -717,11 +717,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: test_uitofp_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
     ; CHECK: [[UITOFP:%[0-9]+]]:fpr(s32) = G_UITOFP [[COPY]](s64)
-    %0(s64) = COPY %x0
+    %0(s64) = COPY $x0
     %1(s32) = G_UITOFP %0
 ...
 
@@ -733,11 +733,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %s0
+    liveins: $s0
     ; CHECK-LABEL: name: test_fptosi_s64_s32
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK: [[FPTOSI:%[0-9]+]]:gpr(s64) = G_FPTOSI [[COPY]](s32)
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s64) = G_FPTOSI %0
 ...
 
@@ -749,11 +749,11 @@ registers:
   - { id: 1, class: _ }
 body: |
   bb.0:
-    liveins: %d0
+    liveins: $d0
     ; CHECK-LABEL: name: test_fptoui_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr(s64) = COPY $d0
     ; CHECK: [[FPTOUI:%[0-9]+]]:gpr(s32) = G_FPTOUI [[COPY]](s64)
-    %0(s64) = COPY %d0
+    %0(s64) = COPY $d0
     %1(s32) = G_FPTOUI %0
 ...
 
@@ -772,10 +772,10 @@ body:             |
   ; CHECK-LABEL: name: test_gphi_ptr
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; CHECK:   liveins: %w2, %x0, %x1
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr(p0) = COPY %x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY %x1
-  ; CHECK:   [[COPY2:%[0-9]+]]:gpr(s32) = COPY %w2
+  ; CHECK:   liveins: $w2, $x0, $x1
+  ; CHECK:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
+  ; CHECK:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1
+  ; CHECK:   [[COPY2:%[0-9]+]]:gpr(s32) = COPY $w2
   ; CHECK:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY2]](s32)
   ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
   ; CHECK:   G_BR %bb.2
@@ -783,15 +783,15 @@ body:             |
   ; CHECK:   successors: %bb.2(0x80000000)
   ; CHECK: bb.2:
   ; CHECK:   [[PHI:%[0-9]+]]:gpr(p0) = G_PHI [[COPY]](p0), %bb.0, [[COPY1]](p0), %bb.1
-  ; CHECK:   %x0 = COPY [[PHI]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[PHI]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: %w2, %x0, %x1
+    liveins: $w2, $x0, $x1
 
-    %0(p0) = COPY %x0
-    %1(p0) = COPY %x1
-    %4(s32) = COPY %w2
+    %0(p0) = COPY $x0
+    %1(p0) = COPY $x1
+    %4(s32) = COPY $w2
     %2(s1) = G_TRUNC %4(s32)
     G_BRCOND %2(s1), %bb.1
     G_BR %bb.2
@@ -802,7 +802,7 @@ body:             |
 
   bb.2:
     %3(p0) = G_PHI %0(p0), %bb.0, %1(p0), %bb.1
-    %x0 = COPY %3(p0)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3(p0)
+    RET_ReallyLR implicit $x0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/regbankselect-reg_sequence.mir Wed Jan 31 14:04:26 2018
@@ -18,8 +18,8 @@ registers:
   - { id: 0, class: dd }
 body: |
   bb.0:
-    liveins: %d0, %d1
+    liveins: $d0, $d1
 
-    %0 = REG_SEQUENCE %d0, %subreg.dsub0, %d1, %subreg.dsub1
+    %0 = REG_SEQUENCE $d0, %subreg.dsub0, $d1, %subreg.dsub1
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir Wed Jan 31 14:04:26 2018
@@ -25,17 +25,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_xchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr64 = SWPX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 ---
 name:            atomicrmw_add_i64
@@ -44,17 +44,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_add_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr64 = LDADDX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 1
     %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 ---
 name:            atomicrmw_add_i32
@@ -63,17 +63,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_add_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -83,17 +83,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_sub_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -103,18 +103,18 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_and_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[CST]]
+    ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[CST]]
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDCLRAW [[CST2]], [[COPY]] :: (load store acquire 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -124,17 +124,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_or_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSETLW [[CST]], [[COPY]] :: (load store release 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -144,17 +144,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_xor_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -164,17 +164,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_min_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -184,17 +184,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_max_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -204,17 +204,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_umin_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -224,15 +224,15 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: atomicrmw_umax_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 1
     %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-binop.mir Wed Jan 31 14:04:26 2018
@@ -70,17 +70,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: add_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[ADDWrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[ADDWrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_ADD %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -96,17 +96,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: add_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[ADDXrr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ADDXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_ADD %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -121,16 +121,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: add_imm_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
     ; CHECK: [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 1, 0
-    ; CHECK: %w0 = COPY [[ADDWri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[ADDWri]]
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 1
     %2(s32) = G_ADD %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -145,16 +145,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: add_imm_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 1, 0
-    ; CHECK: %x0 = COPY [[ADDXri]]
-    %0(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[ADDXri]]
+    %0(s64) = COPY $x0
     %1(s64) = G_CONSTANT i32 1
     %2(s64) = G_ADD %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -171,22 +171,22 @@ body:             |
   ; CHECK-LABEL: name: add_imm_s32_gpr_bb
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32sp = COPY %w0
+  ; CHECK:   [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
   ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 1, 0
-  ; CHECK:   %w0 = COPY [[ADDWri]]
+  ; CHECK:   $w0 = COPY [[ADDWri]]
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
     successors: %bb.1
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 1
     G_BR %bb.1
 
   bb.1:
     %2(s32) = G_ADD %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -202,17 +202,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: sub_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
-    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def %nzcv
-    ; CHECK: %w0 = COPY [[SUBSWrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK: $w0 = COPY [[SUBSWrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_SUB %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -228,17 +228,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: sub_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def %nzcv
-    ; CHECK: %x0 = COPY [[SUBSXrr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
+    ; CHECK: $x0 = COPY [[SUBSXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_SUB %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -254,17 +254,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: or_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[ORRWrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[ORRWrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_OR %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -280,17 +280,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: or_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[ORRXrr:%[0-9]+]]:gpr64 = ORRXrr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[ORRXrr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ORRXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_OR %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -308,17 +308,17 @@ registers:
 # on 64-bit width vector.
 body:             |
   bb.0:
-    liveins: %d0, %d1
+    liveins: $d0, $d1
 
     ; CHECK-LABEL: name: or_v2s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: [[ORRv8i8_:%[0-9]+]]:fpr64 = ORRv8i8 [[COPY]], [[COPY1]]
-    ; CHECK: %d0 = COPY [[ORRv8i8_]]
-      %0(<2 x s32>) = COPY %d0
-      %1(<2 x s32>) = COPY %d1
+    ; CHECK: $d0 = COPY [[ORRv8i8_]]
+      %0(<2 x s32>) = COPY $d0
+      %1(<2 x s32>) = COPY $d1
       %2(<2 x s32>) = G_OR %0, %1
-      %d0 = COPY %2(<2 x s32>)
+      $d0 = COPY %2(<2 x s32>)
 ...
 
 ---
@@ -334,17 +334,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: and_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32 = ANDWrr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[ANDWrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[ANDWrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_AND %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -360,17 +360,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: and_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[ANDXrr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ANDXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_AND %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -386,17 +386,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: shl_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[LSLVWr:%[0-9]+]]:gpr32 = LSLVWr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[LSLVWr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[LSLVWr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_SHL %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -412,17 +412,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: shl_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[LSLVXr:%[0-9]+]]:gpr64 = LSLVXr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[LSLVXr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[LSLVXr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_SHL %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -438,17 +438,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: lshr_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[LSRVWr:%[0-9]+]]:gpr32 = LSRVWr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[LSRVWr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[LSRVWr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_LSHR %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -464,17 +464,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: lshr_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[LSRVXr:%[0-9]+]]:gpr64 = LSRVXr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[LSRVXr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[LSRVXr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_LSHR %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -490,17 +490,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: ashr_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[ASRVWr:%[0-9]+]]:gpr32 = ASRVWr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[ASRVWr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[ASRVWr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_ASHR %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -516,17 +516,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: ashr_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[ASRVXr:%[0-9]+]]:gpr64 = ASRVXr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[ASRVXr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[ASRVXr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_ASHR %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -543,17 +543,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: mul_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
-    ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[COPY1]], %wzr
-    ; CHECK: %w0 = COPY [[MADDWrrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK: [[MADDWrrr:%[0-9]+]]:gpr32 = MADDWrrr [[COPY]], [[COPY1]], $wzr
+    ; CHECK: $w0 = COPY [[MADDWrrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_MUL %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -569,17 +569,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: mul_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
-    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[COPY]], [[COPY1]], %xzr
-    ; CHECK: %x0 = COPY [[MADDXrrr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[COPY]], [[COPY1]], $xzr
+    ; CHECK: $x0 = COPY [[MADDXrrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_MUL %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -591,21 +591,21 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: mulh_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[SMULHrr:%[0-9]+]]:gpr64 = SMULHrr [[COPY]], [[COPY1]]
     ; CHECK: [[UMULHrr:%[0-9]+]]:gpr64 = UMULHrr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[SMULHrr]]
-    ; CHECK: %x0 = COPY [[UMULHrr]]
-    %0:gpr(s64) = COPY %x0
-    %1:gpr(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[SMULHrr]]
+    ; CHECK: $x0 = COPY [[UMULHrr]]
+    %0:gpr(s64) = COPY $x0
+    %1:gpr(s64) = COPY $x1
     %2:gpr(s64) = G_SMULH %0, %1
     %3:gpr(s64) = G_UMULH %0, %1
-    %x0 = COPY %2(s64)
-    %x0 = COPY %3(s64)
+    $x0 = COPY %2(s64)
+    $x0 = COPY %3(s64)
 ...
 
 ---
@@ -621,17 +621,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: sdiv_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[SDIVWr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[SDIVWr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_SDIV %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -647,17 +647,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: sdiv_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[SDIVXr:%[0-9]+]]:gpr64 = SDIVXr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[SDIVXr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[SDIVXr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_SDIV %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -673,17 +673,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: udiv_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[UDIVWr:%[0-9]+]]:gpr32 = UDIVWr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[UDIVWr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[UDIVWr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_UDIV %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -699,17 +699,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: udiv_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[UDIVXr:%[0-9]+]]:gpr64 = UDIVXr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[UDIVXr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[UDIVXr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_UDIV %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -725,17 +725,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0, %s1
+    liveins: $s0, $s1
 
     ; CHECK-LABEL: name: fadd_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK: [[FADDSrr:%[0-9]+]]:fpr32 = FADDSrr [[COPY]], [[COPY1]]
-    ; CHECK: %s0 = COPY [[FADDSrr]]
-    %0(s32) = COPY %s0
-    %1(s32) = COPY %s1
+    ; CHECK: $s0 = COPY [[FADDSrr]]
+    %0(s32) = COPY $s0
+    %1(s32) = COPY $s1
     %2(s32) = G_FADD %0, %1
-    %s0 = COPY %2(s32)
+    $s0 = COPY %2(s32)
 ...
 
 ---
@@ -750,17 +750,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0, %d1
+    liveins: $d0, $d1
 
     ; CHECK-LABEL: name: fadd_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: [[FADDDrr:%[0-9]+]]:fpr64 = FADDDrr [[COPY]], [[COPY1]]
-    ; CHECK: %d0 = COPY [[FADDDrr]]
-    %0(s64) = COPY %d0
-    %1(s64) = COPY %d1
+    ; CHECK: $d0 = COPY [[FADDDrr]]
+    %0(s64) = COPY $d0
+    %1(s64) = COPY $d1
     %2(s64) = G_FADD %0, %1
-    %d0 = COPY %2(s64)
+    $d0 = COPY %2(s64)
 ...
 
 ---
@@ -775,17 +775,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0, %s1
+    liveins: $s0, $s1
 
     ; CHECK-LABEL: name: fsub_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK: [[FSUBSrr:%[0-9]+]]:fpr32 = FSUBSrr [[COPY]], [[COPY1]]
-    ; CHECK: %s0 = COPY [[FSUBSrr]]
-    %0(s32) = COPY %s0
-    %1(s32) = COPY %s1
+    ; CHECK: $s0 = COPY [[FSUBSrr]]
+    %0(s32) = COPY $s0
+    %1(s32) = COPY $s1
     %2(s32) = G_FSUB %0, %1
-    %s0 = COPY %2(s32)
+    $s0 = COPY %2(s32)
 ...
 
 ---
@@ -800,17 +800,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0, %d1
+    liveins: $d0, $d1
 
     ; CHECK-LABEL: name: fsub_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: [[FSUBDrr:%[0-9]+]]:fpr64 = FSUBDrr [[COPY]], [[COPY1]]
-    ; CHECK: %d0 = COPY [[FSUBDrr]]
-    %0(s64) = COPY %d0
-    %1(s64) = COPY %d1
+    ; CHECK: $d0 = COPY [[FSUBDrr]]
+    %0(s64) = COPY $d0
+    %1(s64) = COPY $d1
     %2(s64) = G_FSUB %0, %1
-    %d0 = COPY %2(s64)
+    $d0 = COPY %2(s64)
 ...
 
 ---
@@ -825,17 +825,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0, %s1
+    liveins: $s0, $s1
 
     ; CHECK-LABEL: name: fmul_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK: [[FMULSrr:%[0-9]+]]:fpr32 = FMULSrr [[COPY]], [[COPY1]]
-    ; CHECK: %s0 = COPY [[FMULSrr]]
-    %0(s32) = COPY %s0
-    %1(s32) = COPY %s1
+    ; CHECK: $s0 = COPY [[FMULSrr]]
+    %0(s32) = COPY $s0
+    %1(s32) = COPY $s1
     %2(s32) = G_FMUL %0, %1
-    %s0 = COPY %2(s32)
+    $s0 = COPY %2(s32)
 ...
 
 ---
@@ -850,17 +850,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0, %d1
+    liveins: $d0, $d1
 
     ; CHECK-LABEL: name: fmul_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: [[FMULDrr:%[0-9]+]]:fpr64 = FMULDrr [[COPY]], [[COPY1]]
-    ; CHECK: %d0 = COPY [[FMULDrr]]
-    %0(s64) = COPY %d0
-    %1(s64) = COPY %d1
+    ; CHECK: $d0 = COPY [[FMULDrr]]
+    %0(s64) = COPY $d0
+    %1(s64) = COPY $d1
     %2(s64) = G_FMUL %0, %1
-    %d0 = COPY %2(s64)
+    $d0 = COPY %2(s64)
 ...
 
 ---
@@ -875,17 +875,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0, %s1
+    liveins: $s0, $s1
 
     ; CHECK-LABEL: name: fdiv_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK: [[FDIVSrr:%[0-9]+]]:fpr32 = FDIVSrr [[COPY]], [[COPY1]]
-    ; CHECK: %s0 = COPY [[FDIVSrr]]
-    %0(s32) = COPY %s0
-    %1(s32) = COPY %s1
+    ; CHECK: $s0 = COPY [[FDIVSrr]]
+    %0(s32) = COPY $s0
+    %1(s32) = COPY $s1
     %2(s32) = G_FDIV %0, %1
-    %s0 = COPY %2(s32)
+    $s0 = COPY %2(s32)
 ...
 
 ---
@@ -900,15 +900,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0, %d1
+    liveins: $d0, $d1
 
     ; CHECK-LABEL: name: fdiv_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: [[FDIVDrr:%[0-9]+]]:fpr64 = FDIVDrr [[COPY]], [[COPY1]]
-    ; CHECK: %d0 = COPY [[FDIVDrr]]
-    %0(s64) = COPY %d0
-    %1(s64) = COPY %d1
+    ; CHECK: $d0 = COPY [[FDIVDrr]]
+    %0(s64) = COPY $d0
+    %1(s64) = COPY $d1
     %2(s64) = G_FDIV %0, %1
-    %d0 = COPY %2(s64)
+    $d0 = COPY %2(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast-bigendian.mir Wed Jan 31 14:04:26 2018
@@ -6,13 +6,13 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: bitcast_v2f32_to_s64
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $x0
     ; CHECK: [[REV:%[0-9]+]]:fpr64 = REV64v2i32 [[COPY]]
-    ; CHECK: %x0 = COPY [[REV]]
-    %0:fpr(<2 x s32>) = COPY %x0
+    ; CHECK: $x0 = COPY [[REV]]
+    %0:fpr(<2 x s32>) = COPY $x0
     %1:fpr(s64) = G_BITCAST %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
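
As a minimal sketch of the convention this commit establishes (the vreg
and register names below are illustrative, not taken from any one test):

    ; before r323922: physical registers share the '%' sigil with vregs
    %0(s64) = COPY %x0
    %x0 = COPY %0(s64)

    ; after r323922: physical registers use '$'; '%' is reserved for vregs
    %0(s64) = COPY $x0
    $x0 = COPY %0(s64)

Named special registers and liveins lists follow the same rule, e.g.
%noreg becomes $noreg and 'liveins: %x0' becomes 'liveins: $x0'.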

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bitcast.mir Wed Jan 31 14:04:26 2018
@@ -26,14 +26,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: bitcast_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0
-    ; CHECK: %w0 = COPY [[COPY]]
-    %0(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
+    ; CHECK: $w0 = COPY [[COPY]]
+    %0(s32) = COPY $w0
     %1(s32) = G_BITCAST %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -47,14 +47,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: bitcast_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
-    ; CHECK: %s0 = COPY [[COPY]]
-    %0(s32) = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK: $s0 = COPY [[COPY]]
+    %0(s32) = COPY $s0
     %1(s32) = G_BITCAST %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -68,15 +68,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: bitcast_s32_gpr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
     ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK: %s0 = COPY [[COPY1]]
-    %0(s32) = COPY %w0
+    ; CHECK: $s0 = COPY [[COPY1]]
+    %0(s32) = COPY $w0
     %1(s32) = G_BITCAST %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -90,15 +90,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: bitcast_s32_fpr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
-    ; CHECK: %w0 = COPY [[COPY1]]
-    %0(s32) = COPY %s0
+    ; CHECK: $w0 = COPY [[COPY1]]
+    %0(s32) = COPY $s0
     %1(s32) = G_BITCAST %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -112,14 +112,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: bitcast_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %x0
-    ; CHECK: %x0 = COPY [[COPY]]
-    %0(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK: $x0 = COPY [[COPY]]
+    %0(s64) = COPY $x0
     %1(s64) = G_BITCAST %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -133,14 +133,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: %d0 = COPY [[COPY]]
-    %0(s64) = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: $d0 = COPY [[COPY]]
+    %0(s64) = COPY $d0
     %1(s64) = G_BITCAST %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -153,15 +153,15 @@ registers:
   - { id: 1, class: fpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: bitcast_s64_gpr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[COPY]]
-    ; CHECK: %d0 = COPY [[COPY1]]
-    %0(s64) = COPY %x0
+    ; CHECK: $d0 = COPY [[COPY1]]
+    %0(s64) = COPY $x0
     %1(s64) = G_BITCAST %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -175,15 +175,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_fpr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
-    ; CHECK: %x0 = COPY [[COPY1]]
-    %0(s64) = COPY %d0
+    ; CHECK: $x0 = COPY [[COPY1]]
+    %0(s64) = COPY $d0
     %1(s64) = G_BITCAST %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -197,14 +197,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_v2f32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: %x0 = COPY [[COPY]]
-    %0(s64) = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: $x0 = COPY [[COPY]]
+    %0(s64) = COPY $d0
     %1(<2 x s32>) = G_BITCAST %0
-    %x0 = COPY %1(<2 x s32>)
+    $x0 = COPY %1(<2 x s32>)
 ...
 
 ---
@@ -218,12 +218,12 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: bitcast_s64_v8i8_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
-    ; CHECK: %x0 = COPY [[COPY]]
-    %0(s64) = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
+    ; CHECK: $x0 = COPY [[COPY]]
+    %0(s64) = COPY $d0
     %1(<8 x s8>) = G_BITCAST %0
-    %x0 = COPY %1(<8 x s8>)
+    $x0 = COPY %1(<8 x s8>)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-br.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-br.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-br.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-br.mir Wed Jan 31 14:04:26 2018
@@ -42,7 +42,7 @@ registers:
 body:             |
   bb.0:
     successors: %bb.0, %bb.1
-    %1(s32) = COPY %w0
+    %1(s32) = COPY $w0
     %0(s1) = G_TRUNC %1
     G_BRCOND %0(s1), %bb.1
     G_BR %bb.0
@@ -61,12 +61,12 @@ registers:
 
 # CHECK:  body:
 # CHECK:   bb.0:
-# CHECK:    %0:gpr64 = COPY %x0
+# CHECK:    %0:gpr64 = COPY $x0
 # CHECK:    BR %0
 body:             |
   bb.0:
     successors: %bb.0, %bb.1
-    %0(p0) = COPY %x0
+    %0(p0) = COPY $x0
     G_BRINDIRECT %0(p0)
 
   bb.1:

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bswap.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bswap.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bswap.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-bswap.mir Wed Jan 31 14:04:26 2018
@@ -19,15 +19,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: bswap_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[REVWr:%[0-9]+]]:gpr32 = REVWr [[COPY]]
-    ; CHECK: %w0 = COPY [[REVWr]]
-    %0(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[REVWr]]
+    %0(s32) = COPY $w0
     %1(s32) = G_BSWAP %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
@@ -41,13 +41,13 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: bswap_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[REVXr:%[0-9]+]]:gpr64 = REVXr [[COPY]]
-    ; CHECK: %x0 = COPY [[REVXr]]
-    %0(s64) = COPY %x0
+    ; CHECK: $x0 = COPY [[REVXr]]
+    %0(s64) = COPY $x0
     %1(s64) = G_BSWAP %0
-    %x0 = COPY %1
+    $x0 = COPY %1
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cbz.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cbz.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cbz.mir Wed Jan 31 14:04:26 2018
@@ -15,15 +15,15 @@ regBankSelected: true
 
 # CHECK:  body:
 # CHECK:   bb.0:
-# CHECK:    %0:gpr32 = COPY %w0
+# CHECK:    %0:gpr32 = COPY $w0
 # CHECK:    CBZW %0, %bb.1
 # CHECK:    B %bb.0
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     successors: %bb.0, %bb.1
 
-    %0:gpr(s32) = COPY %w0
+    %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_ICMP intpred(eq), %0, %1
     %3:gpr(s1) = G_TRUNC %2(s32)
@@ -41,15 +41,15 @@ regBankSelected: true
 
 # CHECK:  body:
 # CHECK:   bb.0:
-# CHECK:    %0:gpr64 = COPY %x0
+# CHECK:    %0:gpr64 = COPY $x0
 # CHECK:    CBZX %0, %bb.1
 # CHECK:    B %bb.0
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     successors: %bb.0, %bb.1
 
-    %0:gpr(s64) = COPY %x0
+    %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s32) = G_ICMP intpred(eq), %0, %1
     %3:gpr(s1) = G_TRUNC %2(s32)
@@ -67,15 +67,15 @@ regBankSelected: true
 
 # CHECK:  body:
 # CHECK:   bb.0:
-# CHECK:    %0:gpr32 = COPY %w0
+# CHECK:    %0:gpr32 = COPY $w0
 # CHECK:    CBNZW %0, %bb.1
 # CHECK:    B %bb.0
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     successors: %bb.0, %bb.1
 
-    %0:gpr(s32) = COPY %w0
+    %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_ICMP intpred(ne), %0, %1
     %3:gpr(s1) = G_TRUNC %2(s32)
@@ -93,15 +93,15 @@ regBankSelected: true
 
 # CHECK:  body:
 # CHECK:   bb.0:
-# CHECK:    %0:gpr64 = COPY %x0
+# CHECK:    %0:gpr64 = COPY $x0
 # CHECK:    CBNZX %0, %bb.1
 # CHECK:    B %bb.0
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     successors: %bb.0, %bb.1
 
-    %0:gpr(s64) = COPY %x0
+    %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s32) = G_ICMP intpred(ne), %0, %1
     %3:gpr(s1) = G_TRUNC %2(s32)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir Wed Jan 31 14:04:26 2018
@@ -15,19 +15,19 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:gpr32 = MOVi32imm 0
     ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr32 = CASW [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %w0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_CONSTANT i32 1
     %3:gpr(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
@@ -37,17 +37,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: cmpxchg_i64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[CMP:%[0-9]+]]:gpr64 = MOVi64imm 0
     ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
     ; CHECK: [[RES:%[0-9]+]]:gpr64 = CASX [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
-    ; CHECK: %x0 = COPY [[RES]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[RES]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s64) = G_CONSTANT i64 1
     %3:gpr(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
-    %x0 = COPY %3(s64)
+    $x0 = COPY %3(s64)
 ...
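
Note that only physical-register operands switch sigils in hunks like the
one above. References into other MIR namespaces keep '%': %ir.addr in
memory operands, %stack.N frame indices, %bb.N block labels, and
%subreg.sub_32 subregister indices are all left untouched by this commit.
A minimal illustration (names illustrative):

    %0:gpr(p0) = COPY $x0                               ; physreg: '$'
    %1:gpr(s32) = G_LOAD %0 :: (load 4 from %ir.addr)   ; IR name: '%'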

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-constant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-constant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-constant.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-constant.mir Wed Jan 31 14:04:26 2018
@@ -24,9 +24,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s32
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 42
-    ; CHECK: %w0 = COPY [[MOVi32imm]]
+    ; CHECK: $w0 = COPY [[MOVi32imm]]
     %0(s32) = G_CONSTANT i32 42
-    %w0 = COPY %0(s32)
+    $w0 = COPY %0(s32)
 ...
 
 ---
@@ -40,9 +40,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: const_s64
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1234567890123
-    ; CHECK: %x0 = COPY [[MOVi64imm]]
+    ; CHECK: $x0 = COPY [[MOVi64imm]]
     %0(s64) = G_CONSTANT i64 1234567890123
-    %x0 = COPY %0(s64)
+    $x0 = COPY %0(s64)
 ...
 
 ---
@@ -57,9 +57,9 @@ body:             |
     ; CHECK-LABEL: name: fconst_s32
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1080033280
     ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY [[MOVi32imm]]
-    ; CHECK: %s0 = COPY [[COPY]]
+    ; CHECK: $s0 = COPY [[COPY]]
     %0(s32) = G_FCONSTANT float 3.5
-    %s0 = COPY %0(s32)
+    $s0 = COPY %0(s32)
 ...
 
 ---
@@ -74,9 +74,9 @@ body:             |
     ; CHECK-LABEL: name: fconst_s64
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 4607182418800017408
     ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY [[MOVi64imm]]
-    ; CHECK: %d0 = COPY [[COPY]]
+    ; CHECK: $d0 = COPY [[COPY]]
     %0(s64) = G_FCONSTANT double 1.0
-    %d0 = COPY %0(s64)
+    $d0 = COPY %0(s64)
 ...
 
 ---
@@ -90,9 +90,9 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fconst_s32_0
     ; CHECK: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
-    ; CHECK: %s0 = COPY [[FMOVS0_]]
+    ; CHECK: $s0 = COPY [[FMOVS0_]]
     %0(s32) = G_FCONSTANT float 0.0
-    %s0 = COPY %0(s32)
+    $s0 = COPY %0(s32)
 ...
 
 ---
@@ -106,7 +106,7 @@ body:             |
   bb.0:
     ; CHECK-LABEL: name: fconst_s64_0
     ; CHECK: [[FMOVD0_:%[0-9]+]]:fpr64 = FMOVD0
-    ; CHECK: %x0 = COPY [[FMOVD0_]]
+    ; CHECK: $x0 = COPY [[FMOVD0_]]
     %0(s64) = G_FCONSTANT double 0.0
-    %x0 = COPY %0(s64)
+    $x0 = COPY %0(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-dbg-value.mir Wed Jan 31 14:04:26 2018
@@ -41,16 +41,16 @@ legalized:       true
 regBankSelected: true
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_dbg_value
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[COPY]], [[COPY]]
-    ; CHECK: %w0 = COPY [[ADDWrr]]
-    ; CHECK: DBG_VALUE debug-use [[ADDWrr]], debug-use %noreg, !7, !DIExpression(), debug-location !9
-    %0:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[ADDWrr]]
+    ; CHECK: DBG_VALUE debug-use [[ADDWrr]], debug-use $noreg, !7, !DIExpression(), debug-location !9
+    %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_ADD %0, %0
-    %w0 = COPY %1(s32)
-    DBG_VALUE debug-use %1(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
+    $w0 = COPY %1(s32)
+    DBG_VALUE debug-use %1(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9
 ...
 
 ---
@@ -59,10 +59,10 @@ legalized:       true
 regBankSelected: true
 body: |
   bb.0:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_dbg_value_dead
     ; CHECK-NOT: COPY
-    ; CHECK: DBG_VALUE debug-use %noreg, debug-use %noreg, !7, !DIExpression(), debug-location !9
-    %0:gpr(s32) = COPY %w0
-    DBG_VALUE debug-use %0(s32), debug-use %noreg, !7, !DIExpression(), debug-location !9
+    ; CHECK: DBG_VALUE debug-use $noreg, debug-use $noreg, !7, !DIExpression(), debug-location !9
+    %0:gpr(s32) = COPY $w0
+    DBG_VALUE debug-use %0(s32), debug-use $noreg, !7, !DIExpression(), debug-location !9
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fma.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fma.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fma.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fma.mir Wed Jan 31 14:04:26 2018
@@ -20,17 +20,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1, %w2
+    liveins: $w0, $w1, $w2
 
     ; CHECK-LABEL: name: FMADDSrrr_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %w1
-    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY %w2
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $w1
+    ; CHECK: [[COPY2:%[0-9]+]]:fpr32 = COPY $w2
     ; CHECK: [[FMADDSrrr:%[0-9]+]]:fpr32 = FMADDSrrr [[COPY]], [[COPY1]], [[COPY2]]
-    ; CHECK: %w0 = COPY [[FMADDSrrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
-    %2(s32) = COPY %w2
+    ; CHECK: $w0 = COPY [[FMADDSrrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
+    %2(s32) = COPY $w2
     %3(s32) = G_FMA %0, %1, %2
-    %w0 = COPY %3
+    $w0 = COPY %3
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-fp-casts.mir Wed Jan 31 14:04:26 2018
@@ -44,15 +44,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: fptrunc_s16_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[FCVTHSr:%[0-9]+]]:fpr16 = FCVTHSr [[COPY]]
-    ; CHECK: %h0 = COPY [[FCVTHSr]]
-    %0(s32) = COPY %s0
+    ; CHECK: $h0 = COPY [[FCVTHSr]]
+    %0(s32) = COPY $s0
     %1(s16) = G_FPTRUNC %0
-    %h0 = COPY %1(s16)
+    $h0 = COPY %1(s16)
 ...
 
 ---
@@ -66,15 +66,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fptrunc_s16_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[FCVTHDr:%[0-9]+]]:fpr16 = FCVTHDr [[COPY]]
-    ; CHECK: %h0 = COPY [[FCVTHDr]]
-    %0(s64) = COPY %d0
+    ; CHECK: $h0 = COPY [[FCVTHDr]]
+    %0(s64) = COPY $d0
     %1(s16) = G_FPTRUNC %0
-    %h0 = COPY %1(s16)
+    $h0 = COPY %1(s16)
 ...
 
 ---
@@ -88,15 +88,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fptrunc_s32_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[FCVTSDr:%[0-9]+]]:fpr32 = FCVTSDr [[COPY]]
-    ; CHECK: %s0 = COPY [[FCVTSDr]]
-    %0(s64) = COPY %d0
+    ; CHECK: $s0 = COPY [[FCVTSDr]]
+    %0(s64) = COPY $d0
     %1(s32) = G_FPTRUNC %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -110,15 +110,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %h0
+    liveins: $h0
 
     ; CHECK-LABEL: name: fpext_s32_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY %h0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
     ; CHECK: [[FCVTSHr:%[0-9]+]]:fpr32 = FCVTSHr [[COPY]]
-    ; CHECK: %s0 = COPY [[FCVTSHr]]
-    %0(s16) = COPY %h0
+    ; CHECK: $s0 = COPY [[FCVTSHr]]
+    %0(s16) = COPY $h0
     %1(s32) = G_FPEXT %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -132,15 +132,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %h0
+    liveins: $h0
 
     ; CHECK-LABEL: name: fpext_s64_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY %h0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr16 = COPY $h0
     ; CHECK: [[FCVTDHr:%[0-9]+]]:fpr64 = FCVTDHr [[COPY]]
-    ; CHECK: %d0 = COPY [[FCVTDHr]]
-    %0(s16) = COPY %h0
+    ; CHECK: $d0 = COPY [[FCVTDHr]]
+    %0(s16) = COPY $h0
     %1(s64) = G_FPEXT %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -154,15 +154,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fpext_s64_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[FCVTDSr:%[0-9]+]]:fpr64 = FCVTDSr [[COPY]]
-    ; CHECK: %d0 = COPY [[FCVTDSr]]
-    %0(s32) = COPY %s0
+    ; CHECK: $d0 = COPY [[FCVTDSr]]
+    %0(s32) = COPY $s0
     %1(s64) = G_FPEXT %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -176,15 +176,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sitofp_s32_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SCVTFUWSri:%[0-9]+]]:fpr32 = SCVTFUWSri [[COPY]]
-    ; CHECK: %s0 = COPY [[SCVTFUWSri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $s0 = COPY [[SCVTFUWSri]]
+    %0(s32) = COPY $w0
     %1(s32) = G_SITOFP %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -198,15 +198,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: sitofp_s32_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[SCVTFUXSri:%[0-9]+]]:fpr32 = SCVTFUXSri [[COPY]]
-    ; CHECK: %s0 = COPY [[SCVTFUXSri]]
-    %0(s64) = COPY %x0
+    ; CHECK: $s0 = COPY [[SCVTFUXSri]]
+    %0(s64) = COPY $x0
     %1(s32) = G_SITOFP %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -220,15 +220,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sitofp_s64_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SCVTFUWDri:%[0-9]+]]:fpr64 = SCVTFUWDri [[COPY]]
-    ; CHECK: %d0 = COPY [[SCVTFUWDri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $d0 = COPY [[SCVTFUWDri]]
+    %0(s32) = COPY $w0
     %1(s64) = G_SITOFP %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -242,15 +242,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: sitofp_s64_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[SCVTFUXDri:%[0-9]+]]:fpr64 = SCVTFUXDri [[COPY]]
-    ; CHECK: %d0 = COPY [[SCVTFUXDri]]
-    %0(s64) = COPY %x0
+    ; CHECK: $d0 = COPY [[SCVTFUXDri]]
+    %0(s64) = COPY $x0
     %1(s64) = G_SITOFP %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -264,15 +264,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: uitofp_s32_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[UCVTFUWSri:%[0-9]+]]:fpr32 = UCVTFUWSri [[COPY]]
-    ; CHECK: %s0 = COPY [[UCVTFUWSri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $s0 = COPY [[UCVTFUWSri]]
+    %0(s32) = COPY $w0
     %1(s32) = G_UITOFP %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -286,15 +286,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: uitofp_s32_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[UCVTFUXSri:%[0-9]+]]:fpr32 = UCVTFUXSri [[COPY]]
-    ; CHECK: %s0 = COPY [[UCVTFUXSri]]
-    %0(s64) = COPY %x0
+    ; CHECK: $s0 = COPY [[UCVTFUXSri]]
+    %0(s64) = COPY $x0
     %1(s32) = G_UITOFP %0
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -308,15 +308,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: uitofp_s64_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[UCVTFUWDri:%[0-9]+]]:fpr64 = UCVTFUWDri [[COPY]]
-    ; CHECK: %d0 = COPY [[UCVTFUWDri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $d0 = COPY [[UCVTFUWDri]]
+    %0(s32) = COPY $w0
     %1(s64) = G_UITOFP %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -330,15 +330,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: uitofp_s64_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[UCVTFUXDri:%[0-9]+]]:fpr64 = UCVTFUXDri [[COPY]]
-    ; CHECK: %d0 = COPY [[UCVTFUXDri]]
-    %0(s64) = COPY %x0
+    ; CHECK: $d0 = COPY [[UCVTFUXDri]]
+    %0(s64) = COPY $x0
     %1(s64) = G_UITOFP %0
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -352,15 +352,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: fptosi_s32_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[FCVTZSUWSr:%[0-9]+]]:gpr32 = FCVTZSUWSr [[COPY]]
-    ; CHECK: %w0 = COPY [[FCVTZSUWSr]]
-    %0(s32) = COPY %s0
+    ; CHECK: $w0 = COPY [[FCVTZSUWSr]]
+    %0(s32) = COPY $s0
     %1(s32) = G_FPTOSI %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -374,15 +374,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fptosi_s32_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[FCVTZSUWDr:%[0-9]+]]:gpr32 = FCVTZSUWDr [[COPY]]
-    ; CHECK: %w0 = COPY [[FCVTZSUWDr]]
-    %0(s64) = COPY %d0
+    ; CHECK: $w0 = COPY [[FCVTZSUWDr]]
+    %0(s64) = COPY $d0
     %1(s32) = G_FPTOSI %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -396,15 +396,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: fptosi_s64_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[FCVTZSUXSr:%[0-9]+]]:gpr64 = FCVTZSUXSr [[COPY]]
-    ; CHECK: %x0 = COPY [[FCVTZSUXSr]]
-    %0(s32) = COPY %s0
+    ; CHECK: $x0 = COPY [[FCVTZSUXSr]]
+    %0(s32) = COPY $s0
     %1(s64) = G_FPTOSI %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -418,15 +418,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fptosi_s64_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[FCVTZSUXDr:%[0-9]+]]:gpr64 = FCVTZSUXDr [[COPY]]
-    ; CHECK: %x0 = COPY [[FCVTZSUXDr]]
-    %0(s64) = COPY %d0
+    ; CHECK: $x0 = COPY [[FCVTZSUXDr]]
+    %0(s64) = COPY $d0
     %1(s64) = G_FPTOSI %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -440,15 +440,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: fptoui_s32_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[FCVTZUUWSr:%[0-9]+]]:gpr32 = FCVTZUUWSr [[COPY]]
-    ; CHECK: %w0 = COPY [[FCVTZUUWSr]]
-    %0(s32) = COPY %s0
+    ; CHECK: $w0 = COPY [[FCVTZUUWSr]]
+    %0(s32) = COPY $s0
     %1(s32) = G_FPTOUI %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -462,15 +462,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fptoui_s32_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[FCVTZUUWDr:%[0-9]+]]:gpr32 = FCVTZUUWDr [[COPY]]
-    ; CHECK: %w0 = COPY [[FCVTZUUWDr]]
-    %0(s64) = COPY %d0
+    ; CHECK: $w0 = COPY [[FCVTZUUWDr]]
+    %0(s64) = COPY $d0
     %1(s32) = G_FPTOUI %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -484,15 +484,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0
+    liveins: $s0
 
     ; CHECK-LABEL: name: fptoui_s64_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY %s0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK: [[FCVTZUUXSr:%[0-9]+]]:gpr64 = FCVTZUUXSr [[COPY]]
-    ; CHECK: %x0 = COPY [[FCVTZUUXSr]]
-    %0(s32) = COPY %s0
+    ; CHECK: $x0 = COPY [[FCVTZUUXSr]]
+    %0(s32) = COPY $s0
     %1(s64) = G_FPTOUI %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -506,13 +506,13 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: fptoui_s64_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[FCVTZUUXDr:%[0-9]+]]:gpr64 = FCVTZUUXDr [[COPY]]
-    ; CHECK: %x0 = COPY [[FCVTZUUXDr]]
-    %0(s64) = COPY %d0
+    ; CHECK: $x0 = COPY [[FCVTZUUXDr]]
+    %0(s64) = COPY $d0
     %1(s64) = G_FPTOUI %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-gv-cmodel-large.mir Wed Jan 31 14:04:26 2018
@@ -39,12 +39,12 @@ body:             |
     ; CHECK: [[MOVKXi4:%[0-9]+]]:gpr64 = MOVKXi [[MOVKXi3]], target-flags(aarch64-g2, aarch64-nc) @foo2, 32
     ; CHECK: [[MOVKXi5:%[0-9]+]]:gpr64 = MOVKXi [[MOVKXi4]], target-flags(aarch64-g3) @foo2, 48
     ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY [[MOVKXi5]]
-    ; CHECK: STRWui %wzr, %stack.0.retval, 0 :: (store 4 into %ir.retval)
+    ; CHECK: STRWui $wzr, %stack.0.retval, 0 :: (store 4 into %ir.retval)
     ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0)`)
     ; CHECK: [[LDRWui1:%[0-9]+]]:gpr32 = LDRWui [[COPY1]], 0 :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0)`)
     ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[LDRWui]], [[LDRWui1]]
-    ; CHECK: %w0 = COPY [[ADDWrr]]
-    ; CHECK: RET_ReallyLR implicit %w0
+    ; CHECK: $w0 = COPY [[ADDWrr]]
+    ; CHECK: RET_ReallyLR implicit $w0
     %1:gpr(s32) = G_CONSTANT i32 0
     %4:gpr(p0) = G_GLOBAL_VALUE @foo1
     %3:gpr(p0) = COPY %4(p0)
@@ -55,7 +55,7 @@ body:             |
     %2:gpr(s32) = G_LOAD %3(p0) :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo1, i64 0, i64 0)`)
     %5:gpr(s32) = G_LOAD %6(p0) :: (load 4 from `i32* getelementptr inbounds ([1073741824 x i32], [1073741824 x i32]* @foo2, i64 0, i64 0)`)
     %8:gpr(s32) = G_ADD %2, %5
-    %w0 = COPY %8(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %8(s32)
+    RET_ReallyLR implicit $w0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-imm.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-imm.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-imm.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-imm.mir Wed Jan 31 14:04:26 2018
@@ -20,13 +20,13 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: imm_s32_gpr
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -1234
-    ; CHECK: %w0 = COPY [[MOVi32imm]]
+    ; CHECK: $w0 = COPY [[MOVi32imm]]
     %0(s32) = G_CONSTANT i32 -1234
-    %w0 = COPY %0(s32)
+    $w0 = COPY %0(s32)
 ...
 
 ---
@@ -40,11 +40,11 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: imm_s64_gpr
     ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1234
-    ; CHECK: %x0 = COPY [[MOVi64imm]]
+    ; CHECK: $x0 = COPY [[MOVi64imm]]
     %0(s64) = G_CONSTANT i64 1234
-    %x0 = COPY %0(s64)
+    $x0 = COPY %0(s64)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-implicit-def.mir Wed Jan 31 14:04:26 2018
@@ -20,8 +20,8 @@ body:             |
     ; CHECK-LABEL: name: implicit_def
     ; CHECK: [[DEF:%[0-9]+]]:gpr32 = IMPLICIT_DEF
     ; CHECK: [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr [[DEF]], [[DEF]]
-    ; CHECK: %w0 = COPY [[ADDWrr]]
+    ; CHECK: $w0 = COPY [[ADDWrr]]
     %0(s32) = G_IMPLICIT_DEF
     %1(s32) = G_ADD %0, %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-insert-extract.mir Wed Jan 31 14:04:26 2018
@@ -8,9 +8,9 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    %0:gpr(s32) = COPY %w0
+    %0:gpr(s32) = COPY $w0
 
     %1:gpr(s64) = G_IMPLICIT_DEF
 
@@ -23,8 +23,8 @@ body:             |
     ; CHECK: %3:gpr64 = BFMXri %1, [[TMP]], 51, 31
     %3:gpr(s64) = G_INSERT %1, %0, 13
 
-    %x0 = COPY %2
-    %x1 = COPY %3
+    $x0 = COPY %2
+    $x1 = COPY %3
 ...
 
 
@@ -36,9 +36,9 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
-    %0:gpr(s64) = COPY %x0
+    %0:gpr(s64) = COPY $x0
 
     ; CHECK:  body:
     ; CHECK: [[TMP:%[0-9]+]]:gpr64 = UBFMXri %0, 0, 31
@@ -49,6 +49,6 @@ body:             |
     ; CHECK: %2:gpr32 = COPY [[TMP]].sub_32
     %2:gpr(s32) = G_EXTRACT %0, 13
 
-    %w0 = COPY %1
-    %w1 = COPY %2
+    $w0 = COPY %1
+    $w1 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ext.mir Wed Jan 31 14:04:26 2018
@@ -29,15 +29,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: anyext_s64_from_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w0
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
-    ; CHECK: %x0 = COPY [[SUBREG_TO_REG]]
-    %0(s32) = COPY %w0
+    ; CHECK: $x0 = COPY [[SUBREG_TO_REG]]
+    %0(s32) = COPY $w0
     %1(s64) = G_ANYEXT %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -51,16 +51,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: anyext_s32_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s32) = G_ANYEXT %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -74,16 +74,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: zext_s64_from_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
     ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 0, 31
-    ; CHECK: %x0 = COPY [[UBFMXri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $x0 = COPY [[UBFMXri]]
+    %0(s32) = COPY $w0
     %1(s64) = G_ZEXT %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -97,16 +97,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: zext_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
-    ; CHECK: %w0 = COPY [[UBFMWri]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[UBFMWri]]
+    %2:gpr(s32) = COPY $w0
     %0(s16) = G_TRUNC %2
     %1(s32) = G_ZEXT %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
@@ -120,16 +120,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: zext_s32_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 15
-    ; CHECK: %w0 = COPY [[UBFMWri]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[UBFMWri]]
+    %2:gpr(s32) = COPY $w0
     %0(s16) = G_TRUNC %2
     %1(s32) = G_ZEXT %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -143,18 +143,18 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: zext_s16_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 0, 7
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[UBFMWri]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s16) = G_ZEXT %0
     %3:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...
 
 ---
@@ -168,16 +168,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sext_s64_from_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
     ; CHECK: [[SBFMXri:%[0-9]+]]:gpr64 = SBFMXri [[SUBREG_TO_REG]], 0, 31
-    ; CHECK: %x0 = COPY [[SBFMXri]]
-    %0(s32) = COPY %w0
+    ; CHECK: $x0 = COPY [[SBFMXri]]
+    %0(s32) = COPY $w0
     %1(s64) = G_SEXT %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -191,16 +191,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sext_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 15
-    ; CHECK: %w0 = COPY [[SBFMWri]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[SBFMWri]]
+    %2:gpr(s32) = COPY $w0
     %0(s16) = G_TRUNC %2
     %1(s32) = G_SEXT %0
-    %w0 = COPY %1
+    $w0 = COPY %1
 ...
 
 ---
@@ -214,16 +214,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sext_s32_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7
-    ; CHECK: %w0 = COPY [[SBFMWri]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[SBFMWri]]
+    %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s32) = G_SEXT %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -237,16 +237,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sext_s16_from_s8
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 0, 7
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[SBFMWri]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %2:gpr(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %2:gpr(s32) = COPY $w0
     %0(s8) = G_TRUNC %2
     %1(s16) = G_SEXT %0
     %3:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir Wed Jan 31 14:04:26 2018
@@ -22,13 +22,13 @@ registers:
   - { id: 1, class: gpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: inttoptr_p0_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY %x0
-    ; CHECK: %x0 = COPY [[COPY]]
-    %0(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64all = COPY $x0
+    ; CHECK: $x0 = COPY [[COPY]]
+    %0(s64) = COPY $x0
     %1(p0) = G_INTTOPTR %0
-    %x0 = COPY %1(p0)
+    $x0 = COPY %1(p0)
 ...
 
 ---
@@ -41,13 +41,13 @@ registers:
   - { id: 1, class: gpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: ptrtoint_s64_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: %x0 = COPY [[COPY]]
-    %0(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: $x0 = COPY [[COPY]]
+    %0(p0) = COPY $x0
     %1(s64) = G_PTRTOINT %0
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -60,14 +60,14 @@ registers:
   - { id: 1, class: gpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: ptrtoint_s32_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
-    ; CHECK: %w0 = COPY [[COPY1]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY1]]
+    %0(p0) = COPY $x0
     %1(s32) = G_PTRTOINT %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -80,16 +80,16 @@ registers:
   - { id: 1, class: gpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: ptrtoint_s16_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %0(p0) = COPY $x0
     %1(s16) = G_PTRTOINT %0
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -102,16 +102,16 @@ registers:
   - { id: 1, class: gpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: ptrtoint_s8_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %0(p0) = COPY $x0
     %1(s8) = G_PTRTOINT %0
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -124,14 +124,14 @@ registers:
   - { id: 1, class: gpr }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     ; CHECK-LABEL: name: ptrtoint_s1_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %0(p0) = COPY $x0
     %1(s1) = G_PTRTOINT %0
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-hint.mir Wed Jan 31 14:04:26 2018
@@ -22,7 +22,7 @@ registers:
 # CHECK:    HINT 1
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     %0(s32) = G_CONSTANT i32 1
     G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.hint), %0

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-aarch64-sdiv.mir Wed Jan 31 14:04:26 2018
@@ -21,15 +21,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: sdiv_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[SDIVWr:%[0-9]+]]:gpr32 = SDIVWr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[SDIVWr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[SDIVWr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.sdiv.i32), %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-intrinsic-crypto-aesmc.mir Wed Jan 31 14:04:26 2018
@@ -10,17 +10,17 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %q0, %q1
+    liveins: $q0, $q1
 
     ; CHECK-LABEL: name: aesmc_aese
-    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY %q0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY %q1
+    ; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
     ; CHECK: [[T0:%[0-9]+]]:fpr128 = AESErr [[COPY]], [[COPY1]]
     ; CHECK: [[T1:%[0-9]+]]:fpr128 = AESMCrrTied [[T0]]
-    ; CHECK: %q0 = COPY [[T1]]
-    %0:fpr(<16 x s8>) = COPY %q0
-    %1:fpr(<16 x s8>) = COPY %q1
+    ; CHECK: $q0 = COPY [[T1]]
+    %0:fpr(<16 x s8>) = COPY $q0
+    %1:fpr(<16 x s8>) = COPY $q1
     %2:fpr(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.aarch64.crypto.aese), %0, %1
     %3:fpr(<16 x s8>) = G_INTRINSIC intrinsic(@llvm.aarch64.crypto.aesmc), %2
-    %q0 = COPY %3(<16 x s8>)
+    $q0 = COPY %3(<16 x s8>)
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-load.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-load.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-load.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-load.mir Wed Jan 31 14:04:26 2018
@@ -47,15 +47,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 0 :: (load 8 from %ir.addr)
-    ; CHECK: %x0 = COPY [[LDRXui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[LDRXui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_LOAD  %0 :: (load 8 from %ir.addr)
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -69,15 +69,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 0 :: (load 4 from %ir.addr)
-    ; CHECK: %w0 = COPY [[LDRWui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[LDRWui]]
+    %0(p0) = COPY $x0
     %1(s32) = G_LOAD  %0 :: (load 4 from %ir.addr)
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -91,16 +91,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s16_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr)
-    ; CHECK: %w0 = COPY [[LDRHHui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[LDRHHui]]
+    %0(p0) = COPY $x0
     %1(s16) = G_LOAD  %0 :: (load 2 from %ir.addr)
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -114,16 +114,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s8_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1 from %ir.addr)
-    ; CHECK: %w0 = COPY [[LDRBBui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[LDRBBui]]
+    %0(p0) = COPY $x0
     %1(s8) = G_LOAD  %0 :: (load 1 from %ir.addr)
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -140,14 +140,14 @@ stack:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_fi_s64_gpr
     ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui %stack.0.ptr0, 0 :: (load 8)
-    ; CHECK: %x0 = COPY [[LDRXui]]
+    ; CHECK: $x0 = COPY [[LDRXui]]
     %0(p0) = G_FRAME_INDEX %stack.0.ptr0
     %1(s64) = G_LOAD %0 :: (load 8)
-    %x0 = COPY %1(s64)
+    $x0 = COPY %1(s64)
 ...
 
 ---
@@ -163,17 +163,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_128_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[COPY]], 16 :: (load 8 from %ir.addr)
-    ; CHECK: %x0 = COPY [[LDRXui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $x0 = COPY [[LDRXui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 128
     %2(p0) = G_GEP %0, %1
     %3(s64) = G_LOAD %2 :: (load 8 from %ir.addr)
-    %x0 = COPY %3
+    $x0 = COPY %3
 ...
 
 ---
@@ -189,17 +189,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_512_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRWui:%[0-9]+]]:gpr32 = LDRWui [[COPY]], 128 :: (load 4 from %ir.addr)
-    ; CHECK: %w0 = COPY [[LDRWui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[LDRWui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 512
     %2(p0) = G_GEP %0, %1
     %3(s32) = G_LOAD %2 :: (load 4 from %ir.addr)
-    %w0 = COPY %3
+    $w0 = COPY %3
 ...
 
 ---
@@ -215,18 +215,18 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_64_s16_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRHHui:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 32 :: (load 2 from %ir.addr)
-    ; CHECK: %w0 = COPY [[LDRHHui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[LDRHHui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 64
     %2(p0) = G_GEP %0, %1
     %3(s16) = G_LOAD %2 :: (load 2 from %ir.addr)
     %4:gpr(s32) = G_ANYEXT %3
-    %w0 = COPY %4
+    $w0 = COPY %4
 ...
 
 ---
@@ -242,18 +242,18 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_1_s8_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 1 :: (load 1 from %ir.addr)
-    ; CHECK: %w0 = COPY [[LDRBBui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[LDRBBui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 1
     %2(p0) = G_GEP %0, %1
     %3(s8) = G_LOAD %2 :: (load 1 from %ir.addr)
     %4:gpr(s32) = G_ANYEXT %3
-    %w0 = COPY %4
+    $w0 = COPY %4
 ...
 
 ---
@@ -267,15 +267,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 0 :: (load 8 from %ir.addr)
-    ; CHECK: %d0 = COPY [[LDRDui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $d0 = COPY [[LDRDui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_LOAD  %0 :: (load 8 from %ir.addr)
-    %d0 = COPY %1(s64)
+    $d0 = COPY %1(s64)
 ...
 
 ---
@@ -289,15 +289,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRSui:%[0-9]+]]:fpr32 = LDRSui [[COPY]], 0 :: (load 4 from %ir.addr)
-    ; CHECK: %s0 = COPY [[LDRSui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $s0 = COPY [[LDRSui]]
+    %0(p0) = COPY $x0
     %1(s32) = G_LOAD  %0 :: (load 4 from %ir.addr)
-    %s0 = COPY %1(s32)
+    $s0 = COPY %1(s32)
 ...
 
 ---
@@ -311,15 +311,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRHui:%[0-9]+]]:fpr16 = LDRHui [[COPY]], 0 :: (load 2 from %ir.addr)
-    ; CHECK: %h0 = COPY [[LDRHui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $h0 = COPY [[LDRHui]]
+    %0(p0) = COPY $x0
     %1(s16) = G_LOAD  %0 :: (load 2 from %ir.addr)
-    %h0 = COPY %1(s16)
+    $h0 = COPY %1(s16)
 ...
 
 ---
@@ -333,15 +333,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_s8_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRBui:%[0-9]+]]:fpr8 = LDRBui [[COPY]], 0 :: (load 1 from %ir.addr)
-    ; CHECK: %b0 = COPY [[LDRBui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $b0 = COPY [[LDRBui]]
+    %0(p0) = COPY $x0
     %1(s8) = G_LOAD  %0 :: (load 1 from %ir.addr)
-    %b0 = COPY %1(s8)
+    $b0 = COPY %1(s8)
 ...
 
 ---
@@ -357,17 +357,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_8_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 1 :: (load 8 from %ir.addr)
-    ; CHECK: %d0 = COPY [[LDRDui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $d0 = COPY [[LDRDui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 8
     %2(p0) = G_GEP %0, %1
     %3(s64) = G_LOAD %2 :: (load 8 from %ir.addr)
-    %d0 = COPY %3
+    $d0 = COPY %3
 ...
 
 ---
@@ -383,17 +383,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_16_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRSui:%[0-9]+]]:fpr32 = LDRSui [[COPY]], 4 :: (load 4 from %ir.addr)
-    ; CHECK: %s0 = COPY [[LDRSui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $s0 = COPY [[LDRSui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 16
     %2(p0) = G_GEP %0, %1
     %3(s32) = G_LOAD %2 :: (load 4 from %ir.addr)
-    %s0 = COPY %3
+    $s0 = COPY %3
 ...
 
 ---
@@ -409,17 +409,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_64_s16_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRHui:%[0-9]+]]:fpr16 = LDRHui [[COPY]], 32 :: (load 2 from %ir.addr)
-    ; CHECK: %h0 = COPY [[LDRHui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $h0 = COPY [[LDRHui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 64
     %2(p0) = G_GEP %0, %1
     %3(s16) = G_LOAD %2 :: (load 2 from %ir.addr)
-    %h0 = COPY %3
+    $h0 = COPY %3
 ...
 
 ---
@@ -435,17 +435,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_gep_32_s8_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRBui:%[0-9]+]]:fpr8 = LDRBui [[COPY]], 32 :: (load 1 from %ir.addr)
-    ; CHECK: %b0 = COPY [[LDRBui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $b0 = COPY [[LDRBui]]
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 32
     %2(p0) = G_GEP %0, %1
     %3(s8) = G_LOAD %2 :: (load 1 from %ir.addr)
-    %b0 = COPY %3
+    $b0 = COPY %3
 ...
 ---
 name:            load_v2s32
@@ -458,15 +458,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: load_v2s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[LDRDui:%[0-9]+]]:fpr64 = LDRDui [[COPY]], 0 :: (load 8 from %ir.addr)
-    ; CHECK: %d0 = COPY [[LDRDui]]
-    %0(p0) = COPY %x0
+    ; CHECK: $d0 = COPY [[LDRDui]]
+    %0(p0) = COPY $x0
     %1(<2 x s32>) = G_LOAD %0 :: (load 8 from %ir.addr)
-    %d0 = COPY %1(<2 x s32>)
+    $d0 = COPY %1(<2 x s32>)
 ...
 ---
 name:            sextload_s32_from_s16
@@ -475,16 +475,16 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: sextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRSHWui [[COPY]], 0 :: (load 2 from %ir.addr)
-    ; CHECK: %w0 = COPY [[T0]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[T0]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
     %2:gpr(s32) = G_SEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -494,16 +494,16 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: zextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr)
-    ; CHECK: %w0 = COPY [[T0]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[T0]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
     %2:gpr(s32) = G_ZEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -513,14 +513,14 @@ regBankSelected: true
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: aextload_s32_from_s16
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[T0:%[0-9]+]]:gpr32 = LDRHHui [[COPY]], 0 :: (load 2 from %ir.addr)
-    ; CHECK: %w0 = COPY [[T0]]
-    %0:gpr(p0) = COPY %x0
+    ; CHECK: $w0 = COPY [[T0]]
+    %0:gpr(p0) = COPY $x0
     %1:gpr(s16) = G_LOAD %0 :: (load 2 from %ir.addr)
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
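
A side note for readers comparing the G_GEP constants with the selected LDR*ui
offsets in the checks above: the unsigned immediate is scaled by the access
size. A minimal sketch of that relationship (illustrative only, not an LLVM
API):

    #include <cassert>
    // The encoded LDR*ui/STR*ui immediate is the byte offset divided by the
    // access size, e.g. the G_CONSTANT i64 512 with a 4-byte load above is
    // checked as "LDRWui [[COPY]], 128".
    unsigned scaledOffset(unsigned byteOffset, unsigned accessSize) {
      assert(byteOffset % accessSize == 0 && "offset must be a multiple");
      return byteOffset / accessSize;
    }
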

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-mul.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-mul.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-mul.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-mul.mir Wed Jan 31 14:04:26 2018
@@ -13,22 +13,22 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; Make sure InstructionSelector is able to match a pattern
     ; with an SDNodeXForm, trunc_imm.
     ; def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
     ;             (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
     ; CHECK-LABEL: name: mul_i64_sext_imm32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 3
-    ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY]], [[MOVi32imm]], %xzr
-    ; CHECK: %x0 = COPY [[SMADDLrrr]]
-    %0:gpr(s32) = COPY %w0
+    ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY]], [[MOVi32imm]], $xzr
+    ; CHECK: $x0 = COPY [[SMADDLrrr]]
+    %0:gpr(s32) = COPY $w0
     %1:gpr(s64) = G_SEXT %0(s32)
     %2:gpr(s64) = G_CONSTANT i64 3
     %3:gpr(s64) = G_MUL %1, %2
-    %x0 = COPY %3(s64)
+    $x0 = COPY %3(s64)
 ...
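
For context on the pattern matched above: SMADDLrrr computes a 64-bit
multiply-add from 32-bit sources, so with $xzr as the addend it implements
mul(sext %w0, 3) directly. A rough C model, assumptions noted in comments:

    #include <cstdint>
    // Rough model of SMADDLrrr Wn, Wm, Xa: Xd = Xa + sext(Wn) * sext(Wm).
    // With Xa = $xzr (always 0), this is just the sign-extending multiply,
    // which is why the selector feeds the truncated immediate through
    // MOVi32imm and uses the zero register as the accumulator.
    int64_t smaddl(int32_t wn, int32_t wm, int64_t xa) {
      return xa + (int64_t)wn * (int64_t)wm;
    }
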
 
 

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-muladd.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-muladd.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-muladd.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-muladd.mir Wed Jan 31 14:04:26 2018
@@ -23,21 +23,21 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1, %w2
+    liveins: $x0, $w1, $w2
 
     ; CHECK-LABEL: name: SMADDLrrr_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY %w2
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $w2
     ; CHECK: [[SMADDLrrr:%[0-9]+]]:gpr64 = SMADDLrrr [[COPY1]], [[COPY2]], [[COPY]]
-    ; CHECK: %x0 = COPY [[SMADDLrrr]]
-    %0(s64) = COPY %x0
-    %1(s32) = COPY %w1
-    %2(s32) = COPY %w2
+    ; CHECK: $x0 = COPY [[SMADDLrrr]]
+    %0(s64) = COPY $x0
+    %1(s32) = COPY $w1
+    %2(s32) = COPY $w2
     %3(s64) = G_SEXT %1
     %4(s64) = G_SEXT %2
     %5(s64) = G_MUL %3, %4
     %6(s64) = G_ADD %0, %5
-    %x0 = COPY %6
+    $x0 = COPY %6
 ...
 

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-neon-vcvtfxu2fp.mir Wed Jan 31 14:04:26 2018
@@ -20,14 +20,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %d0
+    liveins: $d0
 
     ; CHECK-LABEL: name: vcvtfxu2fp_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY %d0
+    ; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK: [[UCVTFd:%[0-9]+]]:fpr64 = UCVTFd [[COPY]], 12
-    ; CHECK: %d1 = COPY [[UCVTFd]]
-    %0(s64) = COPY %d0
+    ; CHECK: $d1 = COPY [[UCVTFd]]
+    %0(s64) = COPY $d0
     %1(s32) = G_CONSTANT i32 12
     %2(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.vcvtfxu2fp.f64), %0, %1
-    %d1 = COPY %2(s64)
+    $d1 = COPY %2(s64)
 ...
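
A brief gloss on the checked "UCVTFd [[COPY]], 12": the second operand is a
fixed-point fbits count, so the conversion divides by 2^12 after the
integer-to-FP convert. A sketch of my reading of the semantics, not taken
from the test:

    #include <cstdint>
    // Approximate semantics of UCVTF with #fbits: interpret the 64-bit
    // pattern as an unsigned fixed-point number with `fbits` fraction bits.
    double ucvtf_fixed(uint64_t x, unsigned fbits) {
      return (double)x / (double)(1ULL << fbits); // fbits == 12 above
    }
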

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-phi.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-phi.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-phi.mir Wed Jan 31 14:04:26 2018
@@ -51,11 +51,11 @@ liveins:
 body:             |
   bb.1.entry:
     successors: %bb.2.case1(0x40000000), %bb.3.case2(0x40000000)
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: test_phi
     ; CHECK: [[RES:%.*]]:gpr32 = PHI
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 0
     %3(s32) = G_CONSTANT i32 1
     %5(s32) = G_CONSTANT i32 2
@@ -77,8 +77,8 @@ body:             |
 
   bb.4.return:
     %7(s32) = G_PHI %4(s32), %bb.2.case1, %6(s32), %bb.3.case2
-    %w0 = COPY %7(s32)
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %7(s32)
+    RET_ReallyLR implicit $w0
 
 ...
 
@@ -101,12 +101,12 @@ liveins:
 body:             |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: %w2, %x0, %x1
+    liveins: $w2, $x0, $x1
     ; CHECK-LABEL: name: test_phi_ptr
 
-    %0(p0) = COPY %x0
-    %1(p0) = COPY %x1
-    %6:gpr(s32) = COPY %w2
+    %0(p0) = COPY $x0
+    %1(p0) = COPY $x1
+    %6:gpr(s32) = COPY $w2
     %2(s1) = G_TRUNC %6
     G_BRCOND %2(s1), %bb.1
     G_BR %bb.2
@@ -118,7 +118,7 @@ body:             |
   bb.2:
     ; CHECK: %{{[0-9]+}}:gpr64 = PHI %{{[0-9]+}}, %bb.0, %{{[0-9]+}}, %bb.1
     %3(p0) = G_PHI %0(p0), %bb.0, %1(p0), %bb.1
-    %x0 = COPY %3(p0)
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %3(p0)
+    RET_ReallyLR implicit $x0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-pr32733.mir Wed Jan 31 14:04:26 2018
@@ -50,17 +50,17 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.1.entry:
-    liveins: %w0
+    liveins: $w0
     ; CHECK-LABEL: name: main
-    ; CHECK: liveins: %w0
+    ; CHECK: liveins: $w0
     ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[EONWrr:%[0-9]+]]:gpr32 = EONWrr [[COPY]], [[MOVi32imm]]
-    ; CHECK: %w0 = COPY [[EONWrr]]
+    ; CHECK: $w0 = COPY [[EONWrr]]
     %0(s32) = G_CONSTANT i32 -1
     %3(s32) = G_CONSTANT i32 1
-    %1(s32) = COPY %w0
+    %1(s32) = COPY $w0
     %2(s32) = G_XOR %1, %0
     %4(s32) = G_XOR %2, %3
-    %w0 = COPY %4(s32)
+    $w0 = COPY %4(s32)
 ...
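
The fold being checked here leans on an identity worth spelling out:
(x ^ -1) ^ 1 == x ^ ~1, which is exactly EON. A small model:

    #include <cstdint>
    // EONWrr a, b computes a ^ ~b, so the two chained G_XORs above,
    // (x ^ -1) ^ 1, collapse to eon(x, 1) == x ^ 0xFFFFFFFE.
    uint32_t eon(uint32_t a, uint32_t b) {
      return a ^ ~b;
    }
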

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-store.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-store.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-store.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-store.mir Wed Jan 31 14:04:26 2018
@@ -42,14 +42,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: store_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: STRXui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s64) = COPY %x1
+    %0(p0) = COPY $x0
+    %1(s64) = COPY $x1
     G_STORE  %1, %0 :: (store 8 into %ir.addr)
 
 ...
@@ -65,14 +65,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: STRWui [[COPY1]], [[COPY]], 0 :: (store 4 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %1(s32) = COPY $w1
     G_STORE  %1, %0 :: (store 4 into %ir.addr)
 
 ...
@@ -88,14 +88,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_s16_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: STRHHui [[COPY1]], [[COPY]], 0 :: (store 2 into %ir.addr)
-    %0(p0) = COPY %x0
-    %2:gpr(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %2:gpr(s32) = COPY $w1
     %1(s16) = G_TRUNC %2
     G_STORE  %1, %0 :: (store 2 into %ir.addr)
 
@@ -112,14 +112,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_s8_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: STRBBui [[COPY1]], [[COPY]], 0 :: (store 1 into %ir.addr)
-    %0(p0) = COPY %x0
-    %2:gpr(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %2:gpr(s32) = COPY $w1
     %1(s8) = G_TRUNC %2
     G_STORE  %1, %0 :: (store 1 into %ir.addr)
 
@@ -136,12 +136,12 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: store_zero_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: STRXui %xzr, [[COPY]], 0 :: (store 8 into %ir.addr)
-    %0(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: STRXui $xzr, [[COPY]], 0 :: (store 8 into %ir.addr)
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 0
     G_STORE  %1, %0 :: (store 8 into %ir.addr)
 
@@ -158,12 +158,12 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: store_zero_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: STRWui %wzr, [[COPY]], 0 :: (store 4 into %ir.addr)
-    %0(p0) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: STRWui $wzr, [[COPY]], 0 :: (store 4 into %ir.addr)
+    %0(p0) = COPY $x0
     %1(s32) = G_CONSTANT i32 0
     G_STORE  %1, %0 :: (store 4 into %ir.addr)
 
@@ -183,12 +183,12 @@ stack:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: store_fi_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: STRXui [[COPY]], %stack.0.ptr0, 0 :: (store 8)
-    %0(p0) = COPY %x0
+    %0(p0) = COPY $x0
     %1(p0) = G_FRAME_INDEX %stack.0.ptr0
     G_STORE  %0, %1 :: (store 8)
 ...
@@ -206,14 +206,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: store_gep_128_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: STRXui [[COPY1]], [[COPY]], 16 :: (store 8 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s64) = COPY %x1
+    %0(p0) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_CONSTANT i64 128
     %3(p0) = G_GEP %0, %2
     G_STORE %1, %3 :: (store 8 into %ir.addr)
@@ -232,14 +232,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_gep_512_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: STRWui [[COPY1]], [[COPY]], 128 :: (store 4 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %1(s32) = COPY $w1
     %2(s64) = G_CONSTANT i64 512
     %3(p0) = G_GEP %0, %2
     G_STORE %1, %3 :: (store 4 into %ir.addr)
@@ -258,14 +258,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_gep_64_s16_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: STRHHui [[COPY1]], [[COPY]], 32 :: (store 2 into %ir.addr)
-    %0(p0) = COPY %x0
-    %4:gpr(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %4:gpr(s32) = COPY $w1
     %1(s16) = G_TRUNC %4
     %2(s64) = G_CONSTANT i64 64
     %3(p0) = G_GEP %0, %2
@@ -285,14 +285,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %w1
+    liveins: $x0, $w1
 
     ; CHECK-LABEL: name: store_gep_1_s8_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: STRBBui [[COPY1]], [[COPY]], 1 :: (store 1 into %ir.addr)
-    %0(p0) = COPY %x0
-    %4:gpr(s32) = COPY %w1
+    %0(p0) = COPY $x0
+    %4:gpr(s32) = COPY $w1
     %1(s8) = G_TRUNC %4
     %2(s64) = G_CONSTANT i64 1
     %3(p0) = G_GEP %0, %2
@@ -310,14 +310,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %d1
+    liveins: $x0, $d1
 
     ; CHECK-LABEL: name: store_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s64) = COPY %d1
+    %0(p0) = COPY $x0
+    %1(s64) = COPY $d1
     G_STORE  %1, %0 :: (store 8 into %ir.addr)
 
 ...
@@ -333,14 +333,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %s1
+    liveins: $x0, $s1
 
     ; CHECK-LABEL: name: store_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK: STRSui [[COPY1]], [[COPY]], 0 :: (store 4 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s32) = COPY %s1
+    %0(p0) = COPY $x0
+    %1(s32) = COPY $s1
     G_STORE  %1, %0 :: (store 4 into %ir.addr)
 
 ...
@@ -358,14 +358,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %d1
+    liveins: $x0, $d1
 
     ; CHECK-LABEL: name: store_gep_8_s64_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: STRDui [[COPY1]], [[COPY]], 1 :: (store 8 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s64) = COPY %d1
+    %0(p0) = COPY $x0
+    %1(s64) = COPY $d1
     %2(s64) = G_CONSTANT i64 8
     %3(p0) = G_GEP %0, %2
     G_STORE %1, %3 :: (store 8 into %ir.addr)
@@ -384,14 +384,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %s1
+    liveins: $x0, $s1
 
     ; CHECK-LABEL: name: store_gep_8_s32_fpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY %s1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
     ; CHECK: STRSui [[COPY1]], [[COPY]], 2 :: (store 4 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(s32) = COPY %s1
+    %0(p0) = COPY $x0
+    %1(s32) = COPY $s1
     %2(s64) = G_CONSTANT i64 8
     %3(p0) = G_GEP %0, %2
     G_STORE %1, %3 :: (store 4 into %ir.addr)
@@ -407,14 +407,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %d1
+    liveins: $x0, $d1
 
     ; CHECK-LABEL: name: store_v2s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY %d1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
     ; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
-    %0(p0) = COPY %x0
-    %1(<2 x s32>) = COPY %d1
+    %0(p0) = COPY $x0
+    %1(<2 x s32>) = COPY $d1
     G_STORE  %1, %0 :: (store 8 into %ir.addr)
 
 ...
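
One detail in the store_zero_* checks above: a G_CONSTANT 0 source selects
the architectural zero register rather than a materializing move. Roughly:

    #include <cstdint>
    // Storing a constant zero needs no MOVi32imm: reads of $wzr/$xzr always
    // yield 0, so the selector emits STRWui $wzr / STRXui $xzr directly.
    void store_zero(uint32_t *addr) {
      *addr = 0; // checked above as: STRWui $wzr, [[COPY]], 0
    }
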

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-trunc.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-trunc.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-trunc.mir Wed Jan 31 14:04:26 2018
@@ -20,15 +20,15 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: trunc_s32_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32sp = COPY [[COPY]].sub_32
-    ; CHECK: %w0 = COPY [[COPY1]]
-    %0(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY1]]
+    %0(s64) = COPY $x0
     %1(s32) = G_TRUNC %0
-    %w0 = COPY %1(s32)
+    $w0 = COPY %1(s32)
 ...
 
 ---
@@ -42,17 +42,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: trunc_s8_s64
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %0(s64) = COPY %x0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %0(s64) = COPY $x0
     %1(s8) = G_TRUNC %0
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -66,14 +66,14 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: trunc_s1_s32
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY]]
-    ; CHECK: %w0 = COPY [[COPY2]]
-    %0(s32) = COPY %w0
+    ; CHECK: $w0 = COPY [[COPY2]]
+    %0(s32) = COPY $w0
     %1(s1) = G_TRUNC %0
     %2:gpr(s32) = G_ANYEXT %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
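
The trunc_* checks above make a simple point: a 64-to-32-bit G_TRUNC costs no
instruction, only a sub_32 subregister COPY. In C terms:

    #include <cstdint>
    // G_TRUNC from s64 to s32 is selected as a COPY of the .sub_32
    // subregister, i.e. the low half is simply renamed, not computed.
    uint32_t trunc_s64_to_s32(uint64_t x) {
      return (uint32_t)x;
    }
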

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-xor.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-xor.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-xor.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-xor.mir Wed Jan 31 14:04:26 2018
@@ -26,17 +26,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
 
     ; CHECK-LABEL: name: xor_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY %w1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK: [[EORWrr:%[0-9]+]]:gpr32 = EORWrr [[COPY]], [[COPY1]]
-    ; CHECK: %w0 = COPY [[EORWrr]]
-    %0(s32) = COPY %w0
-    %1(s32) = COPY %w1
+    ; CHECK: $w0 = COPY [[EORWrr]]
+    %0(s32) = COPY $w0
+    %1(s32) = COPY $w1
     %2(s32) = G_XOR %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -52,17 +52,17 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
     ; CHECK-LABEL: name: xor_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY %x1
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK: [[EORXrr:%[0-9]+]]:gpr64 = EORXrr [[COPY]], [[COPY1]]
-    ; CHECK: %x0 = COPY [[EORXrr]]
-    %0(s64) = COPY %x0
-    %1(s64) = COPY %x1
+    ; CHECK: $x0 = COPY [[EORXrr]]
+    %0(s64) = COPY $x0
+    %1(s64) = COPY $x1
     %2(s64) = G_XOR %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -79,16 +79,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
     ; CHECK-LABEL: name: xor_constant_n1_s32_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-    ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[COPY]]
-    ; CHECK: %w0 = COPY [[ORNWrr]]
-    %0(s32) = COPY %w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
+    ; CHECK: $w0 = COPY [[ORNWrr]]
+    %0(s32) = COPY $w0
     %1(s32) = G_CONSTANT i32 -1
     %2(s32) = G_XOR %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
 
 ---
@@ -104,16 +104,16 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
 
     ; CHECK-LABEL: name: xor_constant_n1_s64_gpr
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY %x0
-    ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr %xzr, [[COPY]]
-    ; CHECK: %x0 = COPY [[ORNXrr]]
-    %0(s64) = COPY %x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[ORNXrr:%[0-9]+]]:gpr64 = ORNXrr $xzr, [[COPY]]
+    ; CHECK: $x0 = COPY [[ORNXrr]]
+    %0(s64) = COPY $x0
     %1(s64) = G_CONSTANT i64 -1
     %2(s64) = G_XOR %0, %1
-    %x0 = COPY %2(s64)
+    $x0 = COPY %2(s64)
 ...
 
 ---
@@ -134,16 +134,16 @@ body:             |
   ; CHECK:   successors: %bb.1(0x80000000)
   ; CHECK:   B %bb.1
   ; CHECK: bb.1:
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32 = COPY %w0
-  ; CHECK:   [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[COPY]]
-  ; CHECK:   %w0 = COPY [[ORNWrr]]
+  ; CHECK:   [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+  ; CHECK:   [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]
+  ; CHECK:   $w0 = COPY [[ORNWrr]]
   bb.0:
-    liveins: %w0, %w1
+    liveins: $w0, $w1
     successors: %bb.1
     %1(s32) = G_CONSTANT i32 -1
     G_BR %bb.1
   bb.1:
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %2(s32) = G_XOR %0, %1
-    %w0 = COPY %2(s32)
+    $w0 = COPY %2(s32)
 ...
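
The xor_constant_n1_* checks rely on ORN with the zero register: ORN computes
a | ~b, so ORN(wzr, x) is ~x, i.e. x ^ -1. A model:

    #include <cstdint>
    // ORNWrr a, b == a | ~b; with a == $wzr this is just bitwise NOT,
    // which is how x ^ -1 is selected above without a separate constant.
    uint32_t orn(uint32_t a, uint32_t b) {
      return a | ~b;
    }
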

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/select.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select.mir Wed Jan 31 14:04:26 2018
@@ -47,7 +47,7 @@ stack:
 body:             |
   bb.0:
     %0(p0) = G_FRAME_INDEX %stack.0.ptr0
-    %x0 = COPY %0(p0)
+    $x0 = COPY %0(p0)
 ...
 
 ---
@@ -65,11 +65,11 @@ registers:
 # CHECK: %2:gpr64 = ADDXrr %0, %1
 body:             |
   bb.0:
-      liveins: %x0
-    %0(p0) = COPY %x0
+      liveins: $x0
+    %0(p0) = COPY $x0
     %1(s64) = G_CONSTANT i64 42
     %2(p0) = G_GEP %0, %1(s64)
-    %x0 = COPY %2(p0)
+    $x0 = COPY %2(p0)
 ...
 
 ---
@@ -82,10 +82,10 @@ regBankSelected: true
 # CHECK: %1:gpr64sp = ANDXri %0, 8060
 body:             |
   bb.0:
-      liveins: %x0
-    %0:gpr(p0) = COPY %x0
+      liveins: $x0
+    %0:gpr(p0) = COPY $x0
     %1:gpr(p0) = G_PTR_MASK %0, 3
-    %x0 = COPY %1(p0)
+    $x0 = COPY %1(p0)
 ...
 
 ---
@@ -104,7 +104,7 @@ registers:
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @var_local
-    %x0 = COPY %0(p0)
+    $x0 = COPY %0(p0)
 ...
 
 ---
@@ -122,7 +122,7 @@ registers:
 body:             |
   bb.0:
     %0(p0) = G_GLOBAL_VALUE @var_got
-    %x0 = COPY %0(p0)
+    $x0 = COPY %0(p0)
 ...
 
 ---
@@ -153,36 +153,36 @@ registers:
   - { id: 11, class: gpr }
 
 # CHECK:  body:
-# CHECK:    %wzr = SUBSWrr %0, %0, implicit-def %nzcv
-# CHECK:    %1:gpr32 = CSINCWr %wzr, %wzr, 1, implicit %nzcv
+# CHECK:    $wzr = SUBSWrr %0, %0, implicit-def $nzcv
+# CHECK:    %1:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
 
-# CHECK:    %xzr = SUBSXrr %2, %2, implicit-def %nzcv
-# CHECK:    %3:gpr32 = CSINCWr %wzr, %wzr, 3, implicit %nzcv
+# CHECK:    $xzr = SUBSXrr %2, %2, implicit-def $nzcv
+# CHECK:    %3:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
 
-# CHECK:    %xzr = SUBSXrr %4, %4, implicit-def %nzcv
-# CHECK:    %5:gpr32 = CSINCWr %wzr, %wzr, 0, implicit %nzcv
+# CHECK:    $xzr = SUBSXrr %4, %4, implicit-def $nzcv
+# CHECK:    %5:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
 
 body:             |
   bb.0:
-    liveins: %w0, %x0
+    liveins: $w0, $x0
 
-    %0(s32) = COPY %w0
+    %0(s32) = COPY $w0
     %1(s32) = G_ICMP intpred(eq), %0, %0
     %6(s1) = G_TRUNC %1(s32)
     %9(s32) = G_ANYEXT %6
-    %w0 = COPY %9(s32)
+    $w0 = COPY %9(s32)
 
-    %2(s64) = COPY %x0
+    %2(s64) = COPY $x0
     %3(s32) = G_ICMP intpred(uge), %2, %2
     %7(s1) = G_TRUNC %3(s32)
     %10(s32) = G_ANYEXT %7
-    %w0 = COPY %10(s32)
+    $w0 = COPY %10(s32)
 
-    %4(p0) = COPY %x0
+    %4(p0) = COPY $x0
     %5(s32) = G_ICMP intpred(ne), %4, %4
     %8(s1) = G_TRUNC %5(s32)
     %11(s32) = G_ANYEXT %8
-    %w0 = COPY %11(s32)
+    $w0 = COPY %11(s32)
 ...
 
 ---
@@ -209,29 +209,29 @@ registers:
   - { id: 7, class: gpr }
 
 # CHECK:  body:
-# CHECK:    FCMPSrr %0, %0, implicit-def %nzcv
-# CHECK:    [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr %wzr, %wzr, 5, implicit %nzcv
-# CHECK:    [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr %wzr, %wzr, 13, implicit %nzcv
+# CHECK:    FCMPSrr %0, %0, implicit-def $nzcv
+# CHECK:    [[TST_MI:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
+# CHECK:    [[TST_GT:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
 # CHECK:    %1:gpr32 = ORRWrr [[TST_MI]], [[TST_GT]]
 
-# CHECK:    FCMPDrr %2, %2, implicit-def %nzcv
-# CHECK:    %3:gpr32 = CSINCWr %wzr, %wzr, 4, implicit %nzcv
+# CHECK:    FCMPDrr %2, %2, implicit-def $nzcv
+# CHECK:    %3:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
 
 body:             |
   bb.0:
-    liveins: %w0, %x0
+    liveins: $w0, $x0
 
-    %0(s32) = COPY %s0
+    %0(s32) = COPY $s0
     %1(s32) = G_FCMP floatpred(one), %0, %0
     %4(s1) = G_TRUNC %1(s32)
     %6(s32) = G_ANYEXT %4
-    %w0 = COPY %6(s32)
+    $w0 = COPY %6(s32)
 
-    %2(s64) = COPY %d0
+    %2(s64) = COPY $d0
     %3(s32) = G_FCMP floatpred(uge), %2, %2
     %5(s1) = G_TRUNC %3(s32)
     %7(s32) = G_ANYEXT %5
-    %w0 = COPY %7(s32)
+    $w0 = COPY %7(s32)
 
 ...
 
@@ -257,10 +257,10 @@ registers:
 
 body:             |
   bb.0:
-    liveins: %s0, %w0
+    liveins: $s0, $w0
     successors: %bb.1
-    %0(s32) = COPY %s0
-    %3:gpr(s32) = COPY %w0
+    %0(s32) = COPY $s0
+    %3:gpr(s32) = COPY $w0
     %1(s1) = G_TRUNC %3
 
   bb.1:
@@ -269,8 +269,8 @@ body:             |
     G_BRCOND %1, %bb.1
 
   bb.2:
-    %s0 = COPY %2
-    RET_ReallyLR implicit %s0
+    $s0 = COPY %2
+    RET_ReallyLR implicit $s0
 ...
 
 ---
@@ -304,30 +304,30 @@ registers:
   - { id: 9, class: gpr }
 
 # CHECK:  body:
-# CHECK:      %wzr = ANDSWri %10, 0, implicit-def %nzcv
-# CHECK:      %3:gpr32 = CSELWr %1, %2, 1, implicit %nzcv
-# CHECK:      %wzr = ANDSWri %10, 0, implicit-def %nzcv
-# CHECK:      %6:gpr64 = CSELXr %4, %5, 1, implicit %nzcv
-# CHECK:      %wzr = ANDSWri %10, 0, implicit-def %nzcv
-# CHECK:      %9:gpr64 = CSELXr %7, %8, 1, implicit %nzcv
+# CHECK:      $wzr = ANDSWri %10, 0, implicit-def $nzcv
+# CHECK:      %3:gpr32 = CSELWr %1, %2, 1, implicit $nzcv
+# CHECK:      $wzr = ANDSWri %10, 0, implicit-def $nzcv
+# CHECK:      %6:gpr64 = CSELXr %4, %5, 1, implicit $nzcv
+# CHECK:      $wzr = ANDSWri %10, 0, implicit-def $nzcv
+# CHECK:      %9:gpr64 = CSELXr %7, %8, 1, implicit $nzcv
 body:             |
   bb.0:
-    liveins: %w0, %w1, %w2
-    %10:gpr(s32) = COPY %w0
+    liveins: $w0, $w1, $w2
+    %10:gpr(s32) = COPY $w0
     %0(s1) = G_TRUNC %10
 
-    %1(s32) = COPY %w1
-    %2(s32) = COPY %w2
+    %1(s32) = COPY $w1
+    %2(s32) = COPY $w2
     %3(s32) = G_SELECT %0, %1, %2
-    %w0 = COPY %3(s32)
+    $w0 = COPY %3(s32)
 
-    %4(s64) = COPY %x0
-    %5(s64) = COPY %x1
+    %4(s64) = COPY $x0
+    %5(s64) = COPY $x1
     %6(s64) = G_SELECT %0, %4, %5
-    %x0 = COPY %6(s64)
+    $x0 = COPY %6(s64)
 
-    %7(p0) = COPY %x0
-    %8(p0) = COPY %x1
+    %7(p0) = COPY $x0
+    %8(p0) = COPY $x1
     %9(p0) = G_SELECT %0, %7, %8
-    %x0 = COPY %9(p0)
+    $x0 = COPY %9(p0)
 ...
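
For the G_ICMP/G_FCMP checks above, the boolean result is materialized by
CSINC on the zero register after SUBS/FCMP set $nzcv. A model of the CSINC
semantics, with the condition-code encoding elided:

    #include <cstdint>
    // CSINCWr rn, rm, cond: Rd = cond ? Rn : Rm + 1. With both operands
    // $wzr this yields 1 exactly when `cond` does not hold, which is why
    // the checked condition codes are the inverse of the compare predicate.
    uint32_t csinc(uint32_t rn, uint32_t rm, bool cond) {
      return cond ? rn : rm + 1;
    }
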

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/translate-gep.ll Wed Jan 31 14:04:26 2018
@@ -5,7 +5,7 @@
 
 define i8*  @translate_element_size1(i64 %arg) {
 ; CHECK-LABEL: name: translate_element_size1
-; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = COPY %x0
+; CHECK: [[OFFSET:%[0-9]+]]:_(s64) = COPY $x0
 ; CHECK: [[BASE:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
 ; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[BASE]], [[OFFSET]]
   %tmp = getelementptr i8, i8* null, i64 %arg
@@ -16,12 +16,12 @@ define %type* @first_offset_const(%type*
 
   ; CHECK-LABEL: name: first_offset_const
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: %x0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+  ; CHECK:   liveins: $x0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
   ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
-  ; CHECK:   %x0 = COPY [[GEP]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[GEP]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i32 1
   ret %type* %res
 }
@@ -30,11 +30,11 @@ define %type* @first_offset_trivial(%typ
 
   ; CHECK-LABEL: name: first_offset_trivial
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: %x0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+  ; CHECK:   liveins: $x0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK:   [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
-  ; CHECK:   %x0 = COPY [[COPY1]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[COPY1]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i32 0
   ret %type* %res
 }
@@ -43,15 +43,15 @@ define %type* @first_offset_variable(%ty
 
   ; CHECK-LABEL: name: first_offset_variable
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: %x0, %x1
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+  ; CHECK:   liveins: $x0, $x1
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
   ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]]
   ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
   ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
-  ; CHECK:   %x0 = COPY [[COPY2]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[COPY2]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i64 %idx
   ret %type* %res
 }
@@ -60,16 +60,16 @@ define %type* @first_offset_ext(%type* %
 
   ; CHECK-LABEL: name: first_offset_ext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: %w1, %x0
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY %w1
+  ; CHECK:   liveins: $w1, $x0
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
   ; CHECK:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
   ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[SEXT]]
   ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
   ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP]](p0)
-  ; CHECK:   %x0 = COPY [[COPY2]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[COPY2]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type, %type* %addr, i32 %idx
   ret %type* %res
 }
@@ -79,17 +79,17 @@ define i32* @const_then_var(%type1* %add
 
   ; CHECK-LABEL: name: const_then_var
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: %x0, %x1
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+  ; CHECK:   liveins: $x0, $x1
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 272
   ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s64)
   ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C1]], [[COPY1]]
   ; CHECK:   [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[MUL]](s64)
   ; CHECK:   [[COPY2:%[0-9]+]]:_(p0) = COPY [[GEP1]](p0)
-  ; CHECK:   %x0 = COPY [[COPY2]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[COPY2]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type1, %type1* %addr, i32 4, i32 1, i64 %idx
   ret i32* %res
 }
@@ -98,16 +98,16 @@ define i32* @var_then_const(%type1* %add
 
   ; CHECK-LABEL: name: var_then_const
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: %x0, %x1
-  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY %x0
-  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY %x1
+  ; CHECK:   liveins: $x0, $x1
+  ; CHECK:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
   ; CHECK:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
   ; CHECK:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
   ; CHECK:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[C]], [[COPY1]]
   ; CHECK:   [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[MUL]](s64)
   ; CHECK:   [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[GEP]], [[C1]](s64)
-  ; CHECK:   %x0 = COPY [[GEP1]](p0)
-  ; CHECK:   RET_ReallyLR implicit %x0
+  ; CHECK:   $x0 = COPY [[GEP1]](p0)
+  ; CHECK:   RET_ReallyLR implicit $x0
   %res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2
   ret i32* %res
 }
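
The const_then_var/var_then_const checks above decompose a multi-index
getelementptr into one folded constant plus one scaled variable term. For
const_then_var the resulting address works out as follows (constants taken
straight from the CHECK lines, not recomputed from the elided %type1):

    #include <cstdint>
    // Mirrors the checked MIR for const_then_var: G_GEP by the folded
    // constant 272, then G_GEP by the G_MUL of 4 and the index.
    uint64_t const_then_var_addr(uint64_t addr, uint64_t idx) {
      return addr + 272 + 4 * idx;
    }
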

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/varargs-ios-translator.ll Wed Jan 31 14:04:26 2018
@@ -6,7 +6,7 @@ define void @test_varargs_sentinel(i8* %
 ; CHECK: fixedStack:
 ; CHECK:   - { id: [[VARARGS_SLOT:[0-9]+]], type: default, offset: 8
 ; CHECK: body:
-; CHECK:   [[LIST:%[0-9]+]]:gpr64sp = COPY %x0
+; CHECK:   [[LIST:%[0-9]+]]:gpr64sp = COPY $x0
 ; CHECK:   [[VARARGS_AREA:%[0-9]+]]:gpr64common = ADDXri %fixed-stack.[[VARARGS_SLOT]], 0, 0
 ; CHECK:   STRXui [[VARARGS_AREA]], [[LIST]], 0 :: (store 8 into %ir.list, align 0)
   call void @llvm.va_start(i8* %list)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/vastart.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/vastart.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/vastart.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/vastart.ll Wed Jan 31 14:04:26 2018
@@ -5,7 +5,7 @@
 declare void @llvm.va_start(i8*)
 define void @test_va_start(i8* %list) {
 ; CHECK-LABEL: name: test_va_start
-; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY %x0
+; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK-IOS: G_VASTART [[LIST]](p0) :: (store 8 into %ir.list, align 0)
 ; CHECK-LINUX: G_VASTART [[LIST]](p0) :: (store 32 into %ir.list, align 0)
   call void @llvm.va_start(i8* %list)

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir Wed Jan 31 14:04:26 2018
@@ -17,6 +17,6 @@ registers:
   - { id: 0, class: _ }
 body: |
   bb.0:
-   liveins: %x0
-   %0(s64) = COPY %x0
+   liveins: $x0
+   %0(s64) = COPY $x0
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/verify-selected.mir Wed Jan 31 14:04:26 2018
@@ -18,8 +18,8 @@ registers:
   - { id: 2, class: gpr }
 body: |
   bb.0:
-   liveins: %x0
-   %0 = COPY %x0
+   liveins: $x0
+   %0 = COPY $x0
 
    ; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
    ; CHECK: instruction: %1:gpr64 = G_ADD
@@ -28,5 +28,5 @@ body: |
    ; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function ***
    ; CHECK: instruction: %2:gpr(s64) = COPY
    ; CHECK: operand 0: %2
-   %2(s64) = COPY %x0
+   %2(s64) = COPY $x0
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-combine-fmul-fsub.mir Wed Jan 31 14:04:26 2018
@@ -12,13 +12,13 @@ registers:
   - { id: 4, class: fpr64 }
 body:             |
   bb.0.entry:
-    %2:fpr64 = COPY %d2
-    %1:fpr64 = COPY %d1
-    %0:fpr64 = COPY %d0
+    %2:fpr64 = COPY $d2
+    %1:fpr64 = COPY $d1
+    %0:fpr64 = COPY $d0
     %3:fpr64 = FMULv2f32 %0, %1
     %4:fpr64 = FSUBv2f32 killed %3, %2
-    %d0 = COPY %4
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %4
+    RET_ReallyLR implicit $d0
 
 ...
 # UNPROFITABLE-LABEL: name: f1_2s
@@ -38,13 +38,13 @@ registers:
   - { id: 4, class: fpr128 }
 body:             |
   bb.0.entry:
-    %2:fpr128 = COPY %q2
-    %1:fpr128 = COPY %q1
-    %0:fpr128 = COPY %q0
+    %2:fpr128 = COPY $q2
+    %1:fpr128 = COPY $q1
+    %0:fpr128 = COPY $q0
     %3:fpr128 = FMULv4f32 %0, %1
     %4:fpr128 = FSUBv4f32 killed %3, %2
-    %q0 = COPY %4
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %4
+    RET_ReallyLR implicit $q0
 
 ...
 # UNPROFITABLE-LABEL: name: f1_4s
@@ -64,13 +64,13 @@ registers:
   - { id: 4, class: fpr128 }
 body:             |
   bb.0.entry:
-    %2:fpr128 = COPY %q2
-    %1:fpr128 = COPY %q1
-    %0:fpr128 = COPY %q0
+    %2:fpr128 = COPY $q2
+    %1:fpr128 = COPY $q1
+    %0:fpr128 = COPY $q0
     %3:fpr128 = FMULv2f64 %0, %1
     %4:fpr128 = FSUBv2f64 killed %3, %2
-    %q0 = COPY %4
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %4
+    RET_ReallyLR implicit $q0
 
 ...
 # UNPROFITABLE-LABEL: name: f1_2d
@@ -92,15 +92,15 @@ registers:
   - { id: 6, class: fpr64 }
 body:             |
   bb.0.entry:
-    %3:fpr64 = COPY %q3
-    %2:fpr64 = COPY %q2
-    %1:fpr64 = COPY %q1
-    %0:fpr64 = COPY %q0
+    %3:fpr64 = COPY $q3
+    %2:fpr64 = COPY $q2
+    %1:fpr64 = COPY $q1
+    %0:fpr64 = COPY $q0
     %4:fpr64 = FMULv2f32 %0, %1
     %5:fpr64 = FMULv2f32 %2, %3
     %6:fpr64 = FSUBv2f32 killed %4, %5
-    %q0 = COPY %6
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %6
+    RET_ReallyLR implicit $q0
 
 ...
 # ALL-LABEL: name: f1_both_fmul_2s
@@ -118,15 +118,15 @@ registers:
   - { id: 6, class: fpr128 }
 body:             |
   bb.0.entry:
-    %3:fpr128 = COPY %q3
-    %2:fpr128 = COPY %q2
-    %1:fpr128 = COPY %q1
-    %0:fpr128 = COPY %q0
+    %3:fpr128 = COPY $q3
+    %2:fpr128 = COPY $q2
+    %1:fpr128 = COPY $q1
+    %0:fpr128 = COPY $q0
     %4:fpr128 = FMULv4f32 %0, %1
     %5:fpr128 = FMULv4f32 %2, %3
     %6:fpr128 = FSUBv4f32 killed %4, %5
-    %q0 = COPY %6
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %6
+    RET_ReallyLR implicit $q0
 
 ...
 # ALL-LABEL: name: f1_both_fmul_4s
@@ -144,15 +144,15 @@ registers:
   - { id: 6, class: fpr128 }
 body:             |
   bb.0.entry:
-    %3:fpr128 = COPY %q3
-    %2:fpr128 = COPY %q2
-    %1:fpr128 = COPY %q1
-    %0:fpr128 = COPY %q0
+    %3:fpr128 = COPY $q3
+    %2:fpr128 = COPY $q2
+    %1:fpr128 = COPY $q1
+    %0:fpr128 = COPY $q0
     %4:fpr128 = FMULv2f64 %0, %1
     %5:fpr128 = FMULv2f64 %2, %3
     %6:fpr128 = FSUBv2f64 killed %4, %5
-    %q0 = COPY %6
-    RET_ReallyLR implicit %q0
+    $q0 = COPY %6
+    RET_ReallyLR implicit $q0
 
 ...
 # ALL-LABEL: name: f1_both_fmul_2d

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-csldst-mmo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-csldst-mmo.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-csldst-mmo.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-csldst-mmo.ll Wed Jan 31 14:04:26 2018
@@ -10,8 +10,8 @@
 ;
 ; CHECK: Before post-MI-sched:
 ; CHECK-LABEL: # Machine code for function test1:
-; CHECK: SU(2):   STRWui %wzr
-; CHECK: SU(3):   %x21, %x20 = frame-destroy LDPXi %sp, 2
+; CHECK: SU(2):   STRWui $wzr
+; CHECK: SU(3):   $x21, $x20 = frame-destroy LDPXi $sp, 2
 ; CHECK:  Predecessors:
 ; CHECK-NEXT:   SU(0): Out
 ; CHECK-NEXT:   SU(0): Out

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir Wed Jan 31 14:04:26 2018
@@ -1,115 +1,115 @@
 # RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass aarch64-ldst-opt  -verify-machineinstrs  -o - %s | FileCheck %s
 ---
 # CHECK-LABEL: name: test_LDURSi_post
-# CHECK: LDRSpost %x0, -4
+# CHECK: LDRSpost $x0, -4
 name: test_LDURSi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %s0 = LDURSi %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $s0 = LDURSi $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_LDURDi_post
-# CHECK: LDRDpost %x0, -4
+# CHECK: LDRDpost $x0, -4
 name: test_LDURDi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %d0 = LDURDi %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $d0 = LDURDi $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_LDURQi_post
-# CHECK: LDRQpost %x0, -4
+# CHECK: LDRQpost $x0, -4
 name: test_LDURQi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %q0 = LDURQi  %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $q0 = LDURQi  $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_LDURWi_post
-# CHECK: LDRWpost %x0, -4
+# CHECK: LDRWpost $x0, -4
 name: test_LDURWi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %w1 = LDURWi %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $w1 = LDURWi $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_LDURXi_post
-# CHECK: %x1 = LDRXpost %x0, -4
+# CHECK: $x1 = LDRXpost $x0, -4
 name: test_LDURXi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %x1 = LDURXi %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $x1 = LDURXi $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_STURSi_post
-# CHECK: STRSpost %s0, %x0, -4
+# CHECK: STRSpost $s0, $x0, -4
 name: test_STURSi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %s0 = FMOVS0
-    STURSi %s0, %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $s0 = FMOVS0
+    STURSi $s0, $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_STURDi_post
-# CHECK: STRDpost %d0, %x0, -4
+# CHECK: STRDpost $d0, $x0, -4
 name: test_STURDi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %d0 = FMOVD0
-    STURDi %d0, %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $d0 = FMOVD0
+    STURDi $d0, $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_STURQi_post
-# CHECK: STRQpost %q0, %x0, -4
+# CHECK: STRQpost $q0, $x0, -4
 name: test_STURQi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    %q0 = MOVIv4i32 0, 0
-    STURQi %q0, %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    $q0 = MOVIv4i32 0, 0
+    STURQi $q0, $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_STURWi_post
-# CHECK: STRWpost %wzr, %x0, -4
+# CHECK: STRWpost $wzr, $x0, -4
 name: test_STURWi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    STURWi %wzr, %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    STURWi $wzr, $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
 # CHECK-LABEL: name: test_STURXi_post
-# CHECK: STRXpost %xzr, %x0, -4
+# CHECK: STRXpost $xzr, $x0, -4
 name: test_STURXi_post
 body: |
   bb.0.entry:
-    liveins: %x0
+    liveins: $x0
 
-    STURXi %xzr, %x0, 0
-    %x0 = SUBXri %x0, 4, 0
-    RET_ReallyLR implicit %x0
+    STURXi $xzr, $x0, 0
+    $x0 = SUBXri $x0, 4, 0
+    RET_ReallyLR implicit $x0
 ...
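
A gloss on the pattern every test in this file checks: aarch64-ldst-opt folds
an unscaled memory access at offset 0 plus a SUBXri base update into a single
post-indexed access with a negative offset. Roughly, in C (an illustration of
the shape, not the pass's code):

    #include <cstring>
    // test_LDURSi_post shape: load through the base, then decrement the
    // base by 4 bytes; the pass merges both into LDRSpost $x0, -4, which
    // loads and writes the updated base back in one instruction.
    float load_then_post_decrement(char *&base) {
      float v;
      std::memcpy(&v, base, sizeof v); // LDURSi $x0, 0
      base -= 4;                       // SUBXri $x0, 4, 0
      return v;                        // merged: $s0 = LDRSpost $x0, -4
    }
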

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll Wed Jan 31 14:04:26 2018
@@ -9,11 +9,11 @@
 ; CHECK:   Successors:
 ; CHECK-NEXT:    SU(5): Data Latency=4 Reg=%2
 ; CHECK-NEXT:    SU(4): Ord  Latency=0
-; CHECK: SU(3):   STRWui %wzr, %0:gpr64common, 0; mem:ST4[%ptr1]
+; CHECK: SU(3):   STRWui $wzr, %0:gpr64common, 0; mem:ST4[%ptr1]
 ; CHECK:   Successors:
 ; CHECK: SU(4): Ord  Latency=0
-; CHECK: SU(4):   STRWui %wzr, %1:gpr64common, 0; mem:ST4[%ptr2]
-; CHECK: SU(5):   %w0 = COPY %2
+; CHECK: SU(4):   STRWui $wzr, %1:gpr64common, 0; mem:ST4[%ptr2]
+; CHECK: SU(5):   $w0 = COPY %2
 ; CHECK: ** ScheduleDAGMI::schedule picking next node
 define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
 entry:

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-multimmo.ll Wed Jan 31 14:04:26 2018
@@ -8,11 +8,11 @@
 ; Check that no scheduling dependencies are created between the paired loads and the store during post-RA MI scheduling.
 ;
 ; CHECK-LABEL: # Machine code for function foo:
-; CHECK: SU(2):   renamable %w{{[0-9]+}}, renamable %w{{[0-9]+}} = LDPWi
+; CHECK: SU(2):   renamable $w{{[0-9]+}}, renamable $w{{[0-9]+}} = LDPWi
 ; CHECK: Successors:
 ; CHECK-NOT: ch SU(4)
 ; CHECK: SU(3)
-; CHECK: SU(4):   STRWui %wzr, renamable %x{{[0-9]+}}
+; CHECK: SU(4):   STRWui $wzr, renamable $x{{[0-9]+}}
 define i32 @foo() {
 entry:
   %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-regress-opt-cmp.mir Wed Jan 31 14:04:26 2018
@@ -1,6 +1,6 @@
 # RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s
 # CHECK: %1:gpr32common = ANDWri {{.*}}
-# CHECK-NEXT: %wzr = SUBSWri {{.*}}
+# CHECK-NEXT: $wzr = SUBSWri {{.*}}
 --- |
   define i32 @test01() nounwind {
   entry:
@@ -27,15 +27,15 @@ body:             |
 
     %0 = MOVi32imm 1
     %1 = ANDWri killed %1, 15
-    %wzr = SUBSWri killed %1, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.2.if.end, implicit %nzcv
+    $wzr = SUBSWri killed %1, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.2.if.end, implicit $nzcv
 
   bb.1.if.then:
-    %w0 = MOVi32imm 1
-    RET_ReallyLR implicit %w0
+    $w0 = MOVi32imm 1
+    RET_ReallyLR implicit $w0
 
   bb.2.if.end:
-    %w0 = MOVi32imm 0
-    RET_ReallyLR implicit %w0
+    $w0 = MOVi32imm 0
+    RET_ReallyLR implicit $w0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/ccmp-successor-probs.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ccmp-successor-probs.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ccmp-successor-probs.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/ccmp-successor-probs.mir Wed Jan 31 14:04:26 2018
@@ -6,7 +6,7 @@
 # CHECK-LABEL: name: aarch64-ccmp-successor-probs
 # CHECK:       bb.0:
 # CHECK-NEXT:    successors: %bb.2(0x04000000), %bb.3(0x7c000000)
-# CHECK:         CCMPXr %5, %4, 0, 10, implicit-def %nzcv, implicit %nzcv
+# CHECK:         CCMPXr %5, %4, 0, 10, implicit-def $nzcv, implicit $nzcv
 #
 name: aarch64-ccmp-successor-probs
 registers:
@@ -22,21 +22,21 @@ body : |
   bb.0:
     successors: %bb.1(0x7e000000), %bb.2(0x02000000)
 
-    %0 = LDRXui killed %x0, 69
-    %1 = COPY %xzr
-    %2 = SUBSXrr %1, %0, implicit-def dead %nzcv
-    %3 = SUBSXri %x1, 1, 0, implicit-def dead %nzcv
+    %0 = LDRXui killed $x0, 69
+    %1 = COPY $xzr
+    %2 = SUBSXrr %1, %0, implicit-def dead $nzcv
+    %3 = SUBSXri $x1, 1, 0, implicit-def dead $nzcv
     %4 = COPY %0
     %5 = COPY %3
-    %6 = SUBSXrr %x1, killed %2, implicit-def %nzcv
-    Bcc 11, %bb.2, implicit %nzcv
+    %6 = SUBSXrr $x1, killed %2, implicit-def $nzcv
+    Bcc 11, %bb.2, implicit $nzcv
     B %bb.1
 
   bb.1:
     successors: %bb.2(0x02082082), %bb.3(0x7df7df7e)
 
-    %7 = SUBSXrr %5, %4, implicit-def %nzcv
-    Bcc 12, %bb.2, implicit %nzcv
+    %7 = SUBSXrr %5, %4, implicit-def $nzcv
+    Bcc 12, %bb.2, implicit $nzcv
     B %bb.3
 
   bb.2:

Modified: llvm/trunk/test/CodeGen/AArch64/cfi_restore.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/cfi_restore.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/cfi_restore.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/cfi_restore.mir Wed Jan 31 14:04:26 2018
@@ -9,29 +9,29 @@ frameInfo:
   hasCalls:        true
 stack:
   - { id: 0, type: spill-slot, offset: -8, size: 8, alignment: 8, stack-id: 0,
-      callee-saved-register: '%lr' }
+      callee-saved-register: '$lr' }
   - { id: 1, type: spill-slot, offset: -16, size: 8, alignment: 8, stack-id: 0,
-      callee-saved-register: '%fp' }
+      callee-saved-register: '$fp' }
 body:             |
   bb.0:
-    liveins: %fp, %lr
+    liveins: $fp, $lr
 
-    %sp = frame-setup SUBXri %sp, 16, 0
-    frame-setup STRXui killed %fp, %sp, 0 :: (store 8 into %stack.1)
-    frame-setup CFI_INSTRUCTION offset %w29, -16
+    $sp = frame-setup SUBXri $sp, 16, 0
+    frame-setup STRXui killed $fp, $sp, 0 :: (store 8 into %stack.1)
+    frame-setup CFI_INSTRUCTION offset $w29, -16
     ; CHECK: .cfi_offset w29, -16
-    frame-setup STRXui killed %lr, %sp, 1 :: (store 8 into %stack.0)
-    frame-setup CFI_INSTRUCTION offset %w30, -8
+    frame-setup STRXui killed $lr, $sp, 1 :: (store 8 into %stack.0)
+    frame-setup CFI_INSTRUCTION offset $w30, -8
     ; CHECK: .cfi_offset w30, -8
-    %fp = frame-setup ADDXri %sp, 0, 0
-    frame-setup CFI_INSTRUCTION def_cfa %w29, 16
-    %lr = LDRXui %sp, 1 :: (load 8 from %stack.0)
-    CFI_INSTRUCTION restore %w30
+    $fp = frame-setup ADDXri $sp, 0, 0
+    frame-setup CFI_INSTRUCTION def_cfa $w29, 16
+    $lr = LDRXui $sp, 1 :: (load 8 from %stack.0)
+    CFI_INSTRUCTION restore $w30
     ; CHECK: .cfi_restore w30
-    %fp = LDRXui %sp, 0 :: (load 8 from %stack.1)
-    CFI_INSTRUCTION restore %w29
+    $fp = LDRXui $sp, 0 :: (load 8 from %stack.1)
+    CFI_INSTRUCTION restore $w29
     ; CHECK: .cfi_restore w29
-    %sp = ADDXri %sp, 16, 0
+    $sp = ADDXri $sp, 16, 0
     RET_ReallyLR
     ; CHECK: .cfi_endproc
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/falkor-hwpf-fix.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/falkor-hwpf-fix.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/falkor-hwpf-fix.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/falkor-hwpf-fix.mir Wed Jan 31 14:04:26 2018
@@ -3,147 +3,147 @@
 # Verify that the tag collision between the loads is resolved for various load opcodes.
 
 # CHECK-LABEL: name: hwpf1
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDRWui %[[BASE]], 0
-# CHECK: LDRWui %x1, 1
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDRWui $[[BASE]], 0
+# CHECK: LDRWui $x1, 1
 name:            hwpf1
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    %w2 = LDRWui %x1, 0 :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 1
+    $w2 = LDRWui $x1, 0 :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 1
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpf2
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1i64 %q2, 0, %[[BASE]]
-# CHECK: LDRWui %x1, 0
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1i64 $q2, 0, $[[BASE]]
+# CHECK: LDRWui $x1, 0
 name:            hwpf2
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %q2
+    liveins: $w0, $x1, $q2
 
-    %q2 = LD1i64 %q2, 0, %x1 :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 0
+    $q2 = LD1i64 $q2, 0, $x1 :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 0
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpf3
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1i8 %q2, 0, %[[BASE]]
-# CHECK: LDRWui %x1, 0
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1i8 $q2, 0, $[[BASE]]
+# CHECK: LDRWui $x1, 0
 name:            hwpf3
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %q2
+    liveins: $w0, $x1, $q2
 
-    %q2 = LD1i8 %q2, 0, %x1 :: ("aarch64-strided-access" load 4)
-    %w0 = LDRWui %x1, 0
+    $q2 = LD1i8 $q2, 0, $x1 :: ("aarch64-strided-access" load 4)
+    $w0 = LDRWui $x1, 0
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpf4
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1Onev1d %[[BASE]]
-# CHECK: LDRWui %x1, 0
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1Onev1d $[[BASE]]
+# CHECK: LDRWui $x1, 0
 name:            hwpf4
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    %d2 = LD1Onev1d %x1 :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 0
+    $d2 = LD1Onev1d $x1 :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 0
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpf5
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1Twov1d %[[BASE]]
-# CHECK: LDRWui %x1, 0
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1Twov1d $[[BASE]]
+# CHECK: LDRWui $x1, 0
 name:            hwpf5
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    %d2_d3 = LD1Twov1d %x1 :: ("aarch64-strided-access" load 4)
-    %w0 = LDRWui %x1, 0
+    $d2_d3 = LD1Twov1d $x1 :: ("aarch64-strided-access" load 4)
+    $w0 = LDRWui $x1, 0
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpf6
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDPQi %[[BASE]]
-# CHECK: LDRWui %x1, 3
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDPQi $[[BASE]]
+# CHECK: LDRWui $x1, 3
 name:            hwpf6
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    %q2, %q3 = LDPQi %x1, 3 :: ("aarch64-strided-access" load 4)
-    %w0 = LDRWui %x1, 3
+    $q2, $q3 = LDPQi $x1, 3 :: ("aarch64-strided-access" load 4)
+    $w0 = LDRWui $x1, 3
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpf7
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDPXi %[[BASE]]
-# CHECK: LDRWui %x1, 2
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDPXi $[[BASE]]
+# CHECK: LDRWui $x1, 2
 name:            hwpf7
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    %x2, %x3 = LDPXi %x1, 3 :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 2
+    $x2, $x3 = LDPXi $x1, 3 :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 2
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
@@ -153,154 +153,154 @@ body: |
 # for post-increment addressing for various load opcodes.
 
 # CHECK-LABEL: name: hwpfinc1
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDRWpost %[[BASE]], 0
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x1, 1
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDRWpost $[[BASE]], 0
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x1, 1
 name:            hwpfinc1
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    %x1, %w2 = LDRWpost %x1, 0 :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 1
+    $x1, $w2 = LDRWpost $x1, 0 :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 1
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpfinc2
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1i64_POST %q2, 0, %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x1, 1
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1i64_POST $q2, 0, $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x1, 1
 name:            hwpfinc2
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %q2
+    liveins: $w0, $x1, $q2
 
-    %x1, %q2 = LD1i64_POST %q2, 0, %x1, %x1 :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 132
+    $x1, $q2 = LD1i64_POST $q2, 0, $x1, $x1 :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 132
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpfinc3
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1i8_POST %q2, 0, %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x1, 132
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1i8_POST $q2, 0, $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x1, 132
 name:            hwpfinc3
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %q2
+    liveins: $w0, $x1, $q2
 
-    %x1, %q2 = LD1i8_POST %q2, 0, %x1, %x1 :: ("aarch64-strided-access" load 4)
-    %w0 = LDRWui %x1, 132
+    $x1, $q2 = LD1i8_POST $q2, 0, $x1, $x1 :: ("aarch64-strided-access" load 4)
+    $w0 = LDRWui $x1, 132
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpfinc4
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD1Rv1d_POST %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x1, 252
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD1Rv1d_POST $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x1, 252
 name:            hwpfinc4
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %q2
+    liveins: $w0, $x1, $q2
 
-    %x1, %d2 = LD1Rv1d_POST %x1, %xzr :: ("aarch64-strided-access" load 4)
-    %w2 = LDRWui %x1, 252
+    $x1, $d2 = LD1Rv1d_POST $x1, $xzr :: ("aarch64-strided-access" load 4)
+    $w2 = LDRWui $x1, 252
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpfinc5
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LD3Threev2s_POST %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWroX %x17, %x0
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LD3Threev2s_POST $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWroX $x17, $x0
 name:            hwpfinc5
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17, %q2
+    liveins: $w0, $x1, $x17, $q2
 
-    %x1, %d2_d3_d4 = LD3Threev2s_POST %x1, %x0 :: ("aarch64-strided-access" load 4)
-    %w0 = LDRWroX %x17, %x0, 0, 0
+    $x1, $d2_d3_d4 = LD3Threev2s_POST $x1, $x0 :: ("aarch64-strided-access" load 4)
+    $w0 = LDRWroX $x17, $x0, 0, 0
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpfinc6
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDPDpost %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x17, 2
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDPDpost $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x17, 2
 name:            hwpfinc6
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17, %q2
+    liveins: $w0, $x1, $x17, $q2
 
-    %x1, %d2, %d3 = LDPDpost %x1, 3 :: ("aarch64-strided-access" load 4)
-    %w16 = LDRWui %x17, 2
+    $x1, $d2, $d3 = LDPDpost $x1, 3 :: ("aarch64-strided-access" load 4)
+    $w16 = LDRWui $x17, 2
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
 ...
 ---
 # CHECK-LABEL: name: hwpfinc7
-# CHECK: %[[BASE:[a-z0-9]+]] = ORRXrs %xzr, %x1, 0
-# CHECK: LDPXpost %[[BASE]]
-# CHECK: %x1 = ORRXrs %xzr, %[[BASE]], 0
-# CHECK: LDRWui %x17, 2
+# CHECK: $[[BASE:[a-z0-9]+]] = ORRXrs $xzr, $x1, 0
+# CHECK: LDPXpost $[[BASE]]
+# CHECK: $x1 = ORRXrs $xzr, $[[BASE]], 0
+# CHECK: LDRWui $x17, 2
 name:            hwpfinc7
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17, %q2
+    liveins: $w0, $x1, $x17, $q2
 
-    %x1, %x2, %x3 = LDPXpost %x1, 3 :: ("aarch64-strided-access" load 4)
-    %w18 = LDRWui %x17, 2
+    $x1, $x2, $x3 = LDPXpost $x1, 3 :: ("aarch64-strided-access" load 4)
+    $w18 = LDRWui $x17, 2
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
@@ -309,23 +309,23 @@ body: |
 # Check that we handle the case of a strided load with no HW prefetcher tag correctly.
 
 # CHECK-LABEL: name: hwpf_notagbug
-# CHECK-NOT: ORRXrs %xzr
-# CHECK: LDARW %x1
-# CHECK-NOT: ORRXrs %xzr
-# CHECK: LDRWui %x1
+# CHECK-NOT: ORRXrs $xzr
+# CHECK: LDARW $x1
+# CHECK-NOT: ORRXrs $xzr
+# CHECK: LDRWui $x1
 name:            hwpf_notagbug
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x1, %x17
+    liveins: $w0, $x1, $x17
 
-    %w1 = LDARW %x1 :: ("aarch64-strided-access" load 4)
-    %w1 = LDRWui %x1, 0 :: ("aarch64-strided-access" load 4)
-    %w17 = LDRWui %x17, 0 :: ("aarch64-strided-access" load 4)
-
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w1 = LDARW $x1 :: ("aarch64-strided-access" load 4)
+    $w1 = LDRWui $x1, 0 :: ("aarch64-strided-access" load 4)
+    $w17 = LDRWui $x17, 0 :: ("aarch64-strided-access" load 4)
+
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
@@ -334,21 +334,21 @@ body: |
 # Check that we treat sp-based loads as non-prefetching.
 
 # CHECK-LABEL: name: hwpf_spbase
-# CHECK-NOT: ORRXrs %xzr
-# CHECK: LDRWui %x15
-# CHECK: LDRWui %sp
+# CHECK-NOT: ORRXrs $xzr
+# CHECK: LDRWui $x15
+# CHECK: LDRWui $sp
 name:            hwpf_spbase
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w0, %x15
+    liveins: $w0, $x15
 
-    %w1 = LDRWui %x15, 0 :: ("aarch64-strided-access" load 4)
-    %w17 = LDRWui %sp, 0
+    $w1 = LDRWui $x15, 0 :: ("aarch64-strided-access" load 4)
+    $w17 = LDRWui $sp, 0
 
-    %w0 = SUBWri %w0, 1, 0
-    %wzr = SUBSWri %w0, 0, 0, implicit-def %nzcv
-    Bcc 9, %bb.0, implicit %nzcv
+    $w0 = SUBWri $w0, 1, 0
+    $wzr = SUBSWri $w0, 0, 0, implicit-def $nzcv
+    Bcc 9, %bb.0, implicit $nzcv
 
   bb.1:
     RET_ReallyLR
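
What the CHECK lines in these hwpf tests verify: when a tagged
("aarch64-strided-access") load and another load share a base register and
collide in the hardware prefetcher's tag, the fix copies the base through an
ORRXrs and rebases the tagged load. A sketch mirroring hwpf1 (the scratch
register $x2 is illustrative; the pass picks whatever register is free):

    ; before: both loads use base $x1 and their tags collide
    $w2 = LDRWui $x1, 0 :: ("aarch64-strided-access" load 4)
    $w2 = LDRWui $x1, 1
    ; after: the tagged load is rebased through a copy of $x1
    $x2 = ORRXrs $xzr, $x1, 0
    $w2 = LDRWui $x2, 0 :: ("aarch64-strided-access" load 4)
    $w2 = LDRWui $x1, 1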

Modified: llvm/trunk/test/CodeGen/AArch64/fast-regalloc-empty-bb-with-liveins.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/fast-regalloc-empty-bb-with-liveins.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/fast-regalloc-empty-bb-with-liveins.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/fast-regalloc-empty-bb-with-liveins.mir Wed Jan 31 14:04:26 2018
@@ -11,16 +11,16 @@ body:             |
   ; CHECK-LABEL: name: crashing
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   liveins: %x0, %x1
+  ; CHECK:   liveins: $x0, $x1
   ; CHECK: bb.1:
-  ; CHECK:   renamable %w0 = MOVi32imm -1
-  ; CHECK:   RET_ReallyLR implicit killed %w0
+  ; CHECK:   renamable $w0 = MOVi32imm -1
+  ; CHECK:   RET_ReallyLR implicit killed $w0
   bb.1:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
   bb.2:
     %0:gpr32 = MOVi32imm -1
-    %w0 = COPY %0
-    RET_ReallyLR implicit %w0
+    $w0 = COPY %0
+    RET_ReallyLR implicit $w0
 
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-opt-aa.mir Wed Jan 31 14:04:26 2018
@@ -14,17 +14,17 @@
 ...
 ---
 # CHECK-LABEL: name: ldr_str_aa
-# CHECK: %w8, %w9 = LDPWi %x1, 0
-# CHECK: STPWi %w8, %w9, %x0, 0
+# CHECK: $w8, $w9 = LDPWi $x1, 0
+# CHECK: STPWi $w8, $w9, $x0, 0
 name:            ldr_str_aa
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %w8 = LDRWui %x1, 0 :: (load 4 from %ir.y)
-    STRWui killed %w8, %x0, 0 :: (store 4 into %ir.x)
-    %w9 = LDRWui killed %x1, 1 :: (load 4 from %ir.arrayidx2)
-    STRWui killed %w9, killed %x0, 1 :: (store 4 into %ir.arrayidx3)
-    RET undef %lr
+    $w8 = LDRWui $x1, 0 :: (load 4 from %ir.y)
+    STRWui killed $w8, $x0, 0 :: (store 4 into %ir.x)
+    $w9 = LDRWui killed $x1, 1 :: (load 4 from %ir.arrayidx2)
+    STRWui killed $w9, killed $x0, 1 :: (store 4 into %ir.arrayidx3)
+    RET undef $lr
 

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-opt-zr-clobber.mir Wed Jan 31 14:04:26 2018
@@ -10,17 +10,17 @@
 # schedulers reordering instructions such that SUBS doesn't appear
 # between stores.
 # CHECK-LABEL: name: no-clobber-zr
-# CHECK: STPXi %xzr, %xzr, %x0, 0
+# CHECK: STPXi $xzr, $xzr, $x0, 0
 name: no-clobber-zr
 body: |
   bb.0:
-    liveins: %x0,  %x1
-    STRXui %xzr, %x0, 0 :: (store 8 into %ir.p)
-    dead %xzr = SUBSXri killed %x1, 0, 0, implicit-def %nzcv
-    %w8 = CSINCWr %wzr, %wzr, 1, implicit killed %nzcv
-    STRXui %xzr, killed %x0, 1 :: (store 8 into %ir.p)
-    %w0 = ORRWrs %wzr, killed %w8, 0
-    RET %lr, implicit %w0
+    liveins: $x0,  $x1
+    STRXui $xzr, $x0, 0 :: (store 8 into %ir.p)
+    dead $xzr = SUBSXri killed $x1, 0, 0, implicit-def $nzcv
+    $w8 = CSINCWr $wzr, $wzr, 1, implicit killed $nzcv
+    STRXui $xzr, killed $x0, 1 :: (store 8 into %ir.p)
+    $w0 = ORRWrs $wzr, killed $w8, 0
+    RET $lr, implicit $w0
 ...
 
 

Modified: llvm/trunk/test/CodeGen/AArch64/ldst-opt.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-opt.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-opt.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-opt.mir Wed Jan 31 14:04:26 2018
@@ -4,9 +4,9 @@ name: promote-load-from-store
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w1, %x0, %lr
+    liveins: $w1, $x0, $lr
 
-    STRWui killed %w1, %x0, 0 :: (store 4)
+    STRWui killed $w1, $x0, 0 :: (store 4)
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
@@ -27,22 +27,22 @@ body: |
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
-    %w0 = LDRHHui killed %x0, 1 :: (load 2)
-    RET %lr, implicit %w0
+    $w0 = LDRHHui killed $x0, 1 :: (load 2)
+    RET $lr, implicit $w0
 
 ...
 # Don't count transient instructions towards search limits.
 # CHECK-LABEL: name: promote-load-from-store
-# CHECK: STRWui %w1
-# CHECK: UBFMWri killed %w1
+# CHECK: STRWui $w1
+# CHECK: UBFMWri killed $w1
 ---
 name: store-pair
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w1, %x0, %lr
+    liveins: $w1, $x0, $lr
 
-    STRWui %w1, %x0, 0 :: (store 4)
+    STRWui $w1, $x0, 0 :: (store 4)
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
@@ -63,8 +63,8 @@ body: |
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
     CFI_INSTRUCTION 0
-    STRWui killed %w1, killed %x0, 1 :: (store 4)
-    RET %lr
+    STRWui killed $w1, killed $x0, 1 :: (store 4)
+    RET $lr
 
 ...
 # CHECK-LABEL: name: store-pair
@@ -74,110 +74,110 @@ name: store-pair-clearkill0
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w1, %x0, %lr
+    liveins: $w1, $x0, $lr
 
-    STRWui %w1, %x0, 0 :: (store 4)
-    %w2 = COPY %w1
-    %x3 = COPY %x0
-    STRWui killed %w1, killed %x0, 1 :: (store 4)
-    RET %lr
+    STRWui $w1, $x0, 0 :: (store 4)
+    $w2 = COPY $w1
+    $x3 = COPY $x0
+    STRWui killed $w1, killed $x0, 1 :: (store 4)
+    RET $lr
 ...
 # When merging a lower store with an upper one, we must clear kill flags on
 # the lower store.
 # CHECK-LABEL: store-pair-clearkill0
-# CHECK-NOT: STPWi %w1, killed %w1, %x0, 0 :: (store 4)
-# CHECK: STPWi %w1, %w1, %x0, 0 :: (store 4)
-# CHECK: %w2 = COPY %w1
-# CHECK: RET %lr
+# CHECK-NOT: STPWi $w1, killed $w1, $x0, 0 :: (store 4)
+# CHECK: STPWi $w1, $w1, $x0, 0 :: (store 4)
+# CHECK: $w2 = COPY $w1
+# CHECK: RET $lr
 ---
 name: store-pair-clearkill1
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %x0, %lr
+    liveins: $x0, $lr
 
-    %w1 = MOVi32imm 13
-    %w2 = MOVi32imm 7
-    STRWui %w1, %x0, 1 :: (store 4)
-    %w2 = COPY killed %w1
-    STRWui killed %w2, %x0, 0 :: (store 4)
-
-    %w1 = MOVi32imm 42
-    %w2 = MOVi32imm 7
-    STRWui %w1, %x0, 0 :: (store 4)
-    %w2 = COPY killed %w1
-    STRWui killed %w2, killed %x0, 1 :: (store 4)
+    $w1 = MOVi32imm 13
+    $w2 = MOVi32imm 7
+    STRWui $w1, $x0, 1 :: (store 4)
+    $w2 = COPY killed $w1
+    STRWui killed $w2, $x0, 0 :: (store 4)
+
+    $w1 = MOVi32imm 42
+    $w2 = MOVi32imm 7
+    STRWui $w1, $x0, 0 :: (store 4)
+    $w2 = COPY killed $w1
+    STRWui killed $w2, killed $x0, 1 :: (store 4)
 
-    RET %lr
+    RET $lr
 ...
 # When merging an upper store with a lower one, kill flags along the way need
-# to be removed; In this case the kill flag on %w1.
+# to be removed; in this case, the kill flag on $w1.
 # CHECK-LABEL: store-pair-clearkill1
-# CHECK: %w1 = MOVi32imm
-# CHECK: %w2 = MOVi32imm
-# CHECK-NOT: %w2 = COPY killed %w1
-# CHECK: %w2 = COPY %w1
-# CHECK: STPWi killed %w2, %w1, %x0, 0
-
-# CHECK: %w1 = MOVi32imm
-# CHECK: %w2 = MOVi32imm
-# CHECK-NOT: %w2 = COPY killed %w1
-# CHECK: %w2 = COPY %w1
-# CHECK: STPWi %w1, killed %w2, killed %x0, 0
+# CHECK: $w1 = MOVi32imm
+# CHECK: $w2 = MOVi32imm
+# CHECK-NOT: $w2 = COPY killed $w1
+# CHECK: $w2 = COPY $w1
+# CHECK: STPWi killed $w2, $w1, $x0, 0
+
+# CHECK: $w1 = MOVi32imm
+# CHECK: $w2 = MOVi32imm
+# CHECK-NOT: $w2 = COPY killed $w1
+# CHECK: $w2 = COPY $w1
+# CHECK: STPWi $w1, killed $w2, killed $x0, 0
 ---
 name: store-load-clearkill
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %w1
+    liveins: $w1
 
-    STRWui %w1, %sp, 0 :: (store 4)
-    %wzr = COPY killed %w1 ; killing use of %w1
-    %w11 = LDRWui %sp, 0 :: (load 4)
-    HINT 0, implicit %w11 ; some use of %w11
+    STRWui $w1, $sp, 0 :: (store 4)
+    $wzr = COPY killed $w1 ; killing use of $w1
+    $w11 = LDRWui $sp, 0 :: (load 4)
+    HINT 0, implicit $w11 ; some use of $w11
 ...
 # When replacing the load of a store-load pair with a copy, the kill flags
 # along the way need to be cleared.
 # CHECK-LABEL: name: store-load-clearkill
-# CHECK: STRWui %w1, %sp, 0 :: (store 4)
-# CHECK-NOT: COPY killed %w1
-# CHECK: %wzr = COPY %w1
-# CHECK: %w11 = ORRWrs %wzr, %w1, 0
-# CHECK: HINT 0, implicit %w11
+# CHECK: STRWui $w1, $sp, 0 :: (store 4)
+# CHECK-NOT: COPY killed $w1
+# CHECK: $wzr = COPY $w1
+# CHECK: $w11 = ORRWrs $wzr, $w1, 0
+# CHECK: HINT 0, implicit $w11
 ---
 name: promote-load-from-store-undef
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %x0, %x2, %lr
+    liveins: $x0, $x2, $lr
 
-    STRWui undef %w1, %x0, 0 :: (store 4)
-    %w0 = LDRBBui %x0, 1 :: (load 2)
-    STRHHui undef %w3, %x2, 0 :: (store 4)
-    %w1 = LDRBBui %x2, 0 :: (load 4)
-    RET %lr, implicit %w0
+    STRWui undef $w1, $x0, 0 :: (store 4)
+    $w0 = LDRBBui $x0, 1 :: (load 2)
+    STRHHui undef $w3, $x2, 0 :: (store 4)
+    $w1 = LDRBBui $x2, 0 :: (load 4)
+    RET $lr, implicit $w0
 ...
 # CHECK-LABEL: name: promote-load-from-store-undef
-# CHECK: STRWui undef %w1
-# CHECK: UBFMWri undef %w1
-# CHECK: STRHHui undef %w3
-# CHECK: ANDWri undef %w3
+# CHECK: STRWui undef $w1
+# CHECK: UBFMWri undef $w1
+# CHECK: STRHHui undef $w3
+# CHECK: ANDWri undef $w3
 ---
 name: promote-load-from-store-trivial-kills
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %x0, %lr
+    liveins: $x0, $lr
 
-    STRXui %x0, %sp, 0 :: (store 8)
-    STRXui killed %x0, %sp, 2 :: (store 8)
-    %x0 = LDRXui %sp, 0 :: (load 8)
-    BL &bar, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit-def %sp
-    RET %lr
+    STRXui $x0, $sp, 0 :: (store 8)
+    STRXui killed $x0, $sp, 2 :: (store 8)
+    $x0 = LDRXui $sp, 0 :: (load 8)
+    BL &bar, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit-def $sp
+    RET $lr
 ...
 # CHECK-LABEL: name: promote-load-from-store-trivial-kills
-# CHECK: STRXui %x0, %sp, 0
-# CHECK: STRXui %x0, %sp, 2
+# CHECK: STRXui $x0, $sp, 0
+# CHECK: STRXui $x0, $sp, 2
 # CHECK-NOT: LDRXui
 # CHECK-NOT: ORR
-# CHECK: BL &bar, csr_aarch64_aapcs, implicit-def %lr, implicit %sp, implicit %x0, implicit-def %sp
+# CHECK: BL &bar, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit-def $sp
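
The kill-flag rule the store-pair-clearkill tests above encode, sketched
outside the diff: when two stores are merged into a pair placed at the
position of the earlier one, a kill flag from the later store must be dropped
if the register is still read in between. Mirroring store-pair-clearkill0:

    ; before pairing: the second STRWui kills $w1
    STRWui $w1, $x0, 0 :: (store 4)
    $w2 = COPY $w1
    STRWui killed $w1, killed $x0, 1 :: (store 4)
    ; after pairing: the STPWi sits before the COPY, so it must not kill $w1
    STPWi $w1, $w1, $x0, 0 :: (store 4)
    $w2 = COPY $w1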

Modified: llvm/trunk/test/CodeGen/AArch64/live-interval-analysis.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/live-interval-analysis.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/live-interval-analysis.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/live-interval-analysis.mir Wed Jan 31 14:04:26 2018
@@ -14,9 +14,9 @@ name: reserved_reg_liveness
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %x28
-    %6 : xseqpairsclass = COPY %x28_fp
-    %x28_fp = COPY %6
-    %x28 = COPY %x28
-    %fp = COPY %fp
+    liveins: $x28
+    %6 : xseqpairsclass = COPY $x28_fp
+    $x28_fp = COPY %6
+    $x28 = COPY $x28
+    $fp = COPY $fp
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/loh.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/loh.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/loh.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/loh.mir Wed Jan 31 14:04:26 2018
@@ -22,171 +22,171 @@ tracksRegLiveness: true
 body: |
   bb.0:
     ; CHECK: Adding MCLOH_AdrpAdrp:
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g3
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g4
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g4
     ; CHECK-NEXT: Adding MCLOH_AdrpAdrp:
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g2
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g2
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g3
     ; CHECK-NEXT: Adding MCLOH_AdrpAdrp:
-    ; CHECK-NEXT: %x0 = ADRP target-flags(aarch64-page) @g0
-    ; CHECK-NEXT: %x0 = ADRP target-flags(aarch64-page) @g1
-    %x0 = ADRP target-flags(aarch64-page) @g0
-    %x0 = ADRP target-flags(aarch64-page) @g1
-    %x1 = ADRP target-flags(aarch64-page) @g2
-    %x1 = ADRP target-flags(aarch64-page) @g3
-    %x1 = ADRP target-flags(aarch64-page) @g4
+    ; CHECK-NEXT: $x0 = ADRP target-flags(aarch64-page) @g0
+    ; CHECK-NEXT: $x0 = ADRP target-flags(aarch64-page) @g1
+    $x0 = ADRP target-flags(aarch64-page) @g0
+    $x0 = ADRP target-flags(aarch64-page) @g1
+    $x1 = ADRP target-flags(aarch64-page) @g2
+    $x1 = ADRP target-flags(aarch64-page) @g3
+    $x1 = ADRP target-flags(aarch64-page) @g4
 
   bb.1:
     ; CHECK-NEXT: Adding MCLOH_AdrpAdd:
-    ; CHECK-NEXT: %x20 = ADRP target-flags(aarch64-page) @g0
-    ; CHECK-NEXT: %x3 = ADDXri %x20, target-flags(aarch64-pageoff) @g0
+    ; CHECK-NEXT: $x20 = ADRP target-flags(aarch64-page) @g0
+    ; CHECK-NEXT: $x3 = ADDXri $x20, target-flags(aarch64-pageoff) @g0
     ; CHECK-NEXT: Adding MCLOH_AdrpAdd:
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g0
-    ; CHECK-NEXT: %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g0
-    %x1 = ADRP target-flags(aarch64-page) @g0
-    %x9 = SUBXri undef %x11, 5, 0 ; should not affect MCLOH formation
-    %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g0, 0
-    %x20 = ADRP target-flags(aarch64-page) @g0
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g0
+    ; CHECK-NEXT: $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g0
+    $x1 = ADRP target-flags(aarch64-page) @g0
+    $x9 = SUBXri undef $x11, 5, 0 ; should not affect MCLOH formation
+    $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g0, 0
+    $x20 = ADRP target-flags(aarch64-page) @g0
     BL @extfunc, csr_aarch64_aapcs ; should not clobber X20
-    %x3 = ADDXri %x20, target-flags(aarch64-pageoff) @g0, 0
+    $x3 = ADDXri $x20, target-flags(aarch64-pageoff) @g0, 0
 
   bb.2:
     ; CHECK-NOT: MCLOH_AdrpAdd
-    %x9 = ADRP target-flags(aarch64-page) @g0
+    $x9 = ADRP target-flags(aarch64-page) @g0
     BL @extfunc, csr_aarch64_aapcs ; clobbers x9
-    ; Verification requires the use of 'undef' in front of the clobbered %x9
-    %x9 = ADDXri undef %x9, target-flags(aarch64-pageoff) @g0, 0
+    ; Verification requires the use of 'undef' in front of the clobbered $x9
+    $x9 = ADDXri undef $x9, target-flags(aarch64-pageoff) @g0, 0
 
   bb.3:
     ; CHECK-NOT: MCLOH_AdrpAdd
-    %x10 = ADRP target-flags(aarch64-page) @g0
-    HINT 0, implicit def %x10 ; clobbers x10
-    %x10 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0
+    $x10 = ADRP target-flags(aarch64-page) @g0
+    HINT 0, implicit def $x10 ; clobbers x10
+    $x10 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0
 
   bb.4:
     ; Cannot produce an LOH for multiple users
     ; CHECK-NOT: MCLOH_AdrpAdd
-    %x10 = ADRP target-flags(aarch64-page) @g0
-    HINT 0, implicit def %x10 ; clobbers x10
-    %x11 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0
-    %x12 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0
+    $x10 = ADRP target-flags(aarch64-page) @g0
+    HINT 0, implicit def $x10 ; clobbers x10
+    $x11 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0
+    $x12 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0
 
   bb.5:
     ; CHECK-NEXT: Adding MCLOH_AdrpLdr:
-    ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page) @g2
-    ; CHECK-NEXT: %s6 = LDRSui %x5, target-flags(aarch64-pageoff) @g2
+    ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page) @g2
+    ; CHECK-NEXT: $s6 = LDRSui $x5, target-flags(aarch64-pageoff) @g2
     ; CHECK-NEXT: Adding MCLOH_AdrpLdr:
-    ; CHECK-NEXT: %x4 = ADRP target-flags(aarch64-page) @g2
-    ; CHECK-NEXT: %x4 = LDRXui %x4, target-flags(aarch64-pageoff) @g2
-    %x4 = ADRP target-flags(aarch64-page) @g2
-    %x4 = LDRXui %x4, target-flags(aarch64-pageoff) @g2
-    %x5 = ADRP target-flags(aarch64-page) @g2
-    %s6 = LDRSui %x5, target-flags(aarch64-pageoff) @g2
+    ; CHECK-NEXT: $x4 = ADRP target-flags(aarch64-page) @g2
+    ; CHECK-NEXT: $x4 = LDRXui $x4, target-flags(aarch64-pageoff) @g2
+    $x4 = ADRP target-flags(aarch64-page) @g2
+    $x4 = LDRXui $x4, target-flags(aarch64-pageoff) @g2
+    $x5 = ADRP target-flags(aarch64-page) @g2
+    $s6 = LDRSui $x5, target-flags(aarch64-pageoff) @g2
 
   bb.6:
     ; CHECK-NEXT: Adding MCLOH_AdrpLdrGot:
-    ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2
-    ; CHECK-NEXT: %x6 = LDRXui %x5, target-flags(aarch64-pageoff, aarch64-got) @g2
+    ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2
+    ; CHECK-NEXT: $x6 = LDRXui $x5, target-flags(aarch64-pageoff, aarch64-got) @g2
     ; CHECK-NEXT: Adding MCLOH_AdrpLdrGot:
-    ; CHECK-NEXT: %x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2
-    ; CHECK-NEXT: %x4 = LDRXui %x4, target-flags(aarch64-pageoff, aarch64-got) @g2
-    %x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2
-    %x4 = LDRXui %x4, target-flags(aarch64-pageoff, aarch64-got) @g2
-    %x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2
-    %x6 = LDRXui %x5, target-flags(aarch64-pageoff, aarch64-got) @g2
+    ; CHECK-NEXT: $x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2
+    ; CHECK-NEXT: $x4 = LDRXui $x4, target-flags(aarch64-pageoff, aarch64-got) @g2
+    $x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2
+    $x4 = LDRXui $x4, target-flags(aarch64-pageoff, aarch64-got) @g2
+    $x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2
+    $x6 = LDRXui $x5, target-flags(aarch64-pageoff, aarch64-got) @g2
 
   bb.7:
     ; CHECK-NOT: Adding MCLOH_AdrpLdrGot:
     ; Loading a float value from a GOT table makes no sense, so this should not
     ; produce an LOH.
-    %x11 = ADRP target-flags(aarch64-page, aarch64-got) @g5
-    %s11 = LDRSui %x11, target-flags(aarch64-pageoff, aarch64-got) @g5
+    $x11 = ADRP target-flags(aarch64-page, aarch64-got) @g5
+    $s11 = LDRSui $x11, target-flags(aarch64-pageoff, aarch64-got) @g5
 
   bb.8:
     ; CHECK-NEXT: Adding MCLOH_AdrpAddLdr:
-    ; CHECK-NEXT: %x7 = ADRP target-flags(aarch64-page) @g3
-    ; CHECK-NEXT: %x8 = ADDXri %x7, target-flags(aarch64-pageoff) @g3
-    ; CHECK-NEXT: %d1 = LDRDui %x8, 8
-    %x7 = ADRP target-flags(aarch64-page) @g3
-    %x8 = ADDXri %x7, target-flags(aarch64-pageoff) @g3, 0
-    %d1 = LDRDui %x8, 8
+    ; CHECK-NEXT: $x7 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x8 = ADDXri $x7, target-flags(aarch64-pageoff) @g3
+    ; CHECK-NEXT: $d1 = LDRDui $x8, 8
+    $x7 = ADRP target-flags(aarch64-page) @g3
+    $x8 = ADDXri $x7, target-flags(aarch64-pageoff) @g3, 0
+    $d1 = LDRDui $x8, 8
 
   bb.9:
     ; CHECK-NEXT: Adding MCLOH_AdrpAdd:
-    ; CHECK-NEXT: %x3 = ADRP target-flags(aarch64-page) @g3
-    ; CHECK-NEXT: %x3 = ADDXri %x3, target-flags(aarch64-pageoff) @g3
+    ; CHECK-NEXT: $x3 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x3 = ADDXri $x3, target-flags(aarch64-pageoff) @g3
     ; CHECK-NEXT: Adding MCLOH_AdrpAdd:
-    ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page) @g3
-    ; CHECK-NEXT: %x2 = ADDXri %x5, target-flags(aarch64-pageoff) @g3
+    ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x2 = ADDXri $x5, target-flags(aarch64-pageoff) @g3
     ; CHECK-NEXT: Adding MCLOH_AdrpAddStr:
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page) @g3
-    ; CHECK-NEXT: %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g3
-    ; CHECK-NEXT: STRXui %xzr, %x1, 16
-    %x1 = ADRP target-flags(aarch64-page) @g3
-    %x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g3, 0
-    STRXui %xzr, %x1, 16
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g3
+    ; CHECK-NEXT: STRXui $xzr, $x1, 16
+    $x1 = ADRP target-flags(aarch64-page) @g3
+    $x1 = ADDXri $x1, target-flags(aarch64-pageoff) @g3, 0
+    STRXui $xzr, $x1, 16
 
     ; This sequence should just produce an AdrpAdd (not AdrpAddStr)
-    %x5 = ADRP target-flags(aarch64-page) @g3
-    %x2 = ADDXri %x5, target-flags(aarch64-pageoff) @g3, 0
-    STRXui %x2, undef %x11, 16
+    $x5 = ADRP target-flags(aarch64-page) @g3
+    $x2 = ADDXri $x5, target-flags(aarch64-pageoff) @g3, 0
+    STRXui $x2, undef $x11, 16
 
     ; This sequence should just produce an AdrpAdd (not AdrpAddStr)
-    %x3 = ADRP target-flags(aarch64-page) @g3
-    %x3 = ADDXri %x3, target-flags(aarch64-pageoff) @g3, 0
-    STRXui %x3, %x3, 16
+    $x3 = ADRP target-flags(aarch64-page) @g3
+    $x3 = ADDXri $x3, target-flags(aarch64-pageoff) @g3, 0
+    STRXui $x3, $x3, 16
 
   bb.10:
     ; CHECK-NEXT: Adding MCLOH_AdrpLdr:
-    ; CHECK-NEXT: %x2 = ADRP target-flags(aarch64-page) @g3
-    ; CHECK-NEXT: %x2 = LDRXui %x2, target-flags(aarch64-pageoff) @g3
+    ; CHECK-NEXT: $x2 = ADRP target-flags(aarch64-page) @g3
+    ; CHECK-NEXT: $x2 = LDRXui $x2, target-flags(aarch64-pageoff) @g3
     ; CHECK-NEXT: Adding MCLOH_AdrpLdrGotLdr:
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
-    ; CHECK-NEXT: %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
-    ; CHECK-NEXT: %x1 = LDRXui %x1, 24
-    %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
-    %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
-    %x1 = LDRXui %x1, 24
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
+    ; CHECK-NEXT: $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4
+    ; CHECK-NEXT: $x1 = LDRXui $x1, 24
+    $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
+    $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4
+    $x1 = LDRXui $x1, 24
     ; Should just produce an MCLOH_AdrpLdr (not MCLOH_AdrpLdrGotLdr)
-    %x2 = ADRP target-flags(aarch64-page) @g3
-    %x2 = LDRXui %x2, target-flags(aarch64-pageoff) @g3
-    %x2 = LDRXui %x2, 24
+    $x2 = ADRP target-flags(aarch64-page) @g3
+    $x2 = LDRXui $x2, target-flags(aarch64-pageoff) @g3
+    $x2 = LDRXui $x2, 24
 
   bb.11:
     ; CHECK-NEXT: Adding MCLOH_AdrpLdr
-    ; CHECK-NEXT: %x5 = ADRP target-flags(aarch64-page) @g1
-    ; CHECK-NEXT: %x5 = LDRXui %x5, target-flags(aarch64-pageoff) @g1
+    ; CHECK-NEXT: $x5 = ADRP target-flags(aarch64-page) @g1
+    ; CHECK-NEXT: $x5 = LDRXui $x5, target-flags(aarch64-pageoff) @g1
     ; CHECK-NEXT: Adding MCLOH_AdrpLdrGotStr:
-    ; CHECK-NEXT: %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
-    ; CHECK-NEXT: %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
-    ; CHECK-NEXT: STRXui %xzr, %x1, 32
-    %x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
-    %x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
-    STRXui %xzr, %x1, 32
+    ; CHECK-NEXT: $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
+    ; CHECK-NEXT: $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4
+    ; CHECK-NEXT: STRXui $xzr, $x1, 32
+    $x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
+    $x1 = LDRXui $x1, target-flags(aarch64-pageoff, aarch64-got) @g4
+    STRXui $xzr, $x1, 32
     ; Should just produce an MCLOH_AdrpLdr (not MCLOH_AdrpLdrGotStr)
-    %x5 = ADRP target-flags(aarch64-page) @g1
-    %x5 = LDRXui %x5, target-flags(aarch64-pageoff) @g1
-    STRXui undef %x11, %x5, 32
+    $x5 = ADRP target-flags(aarch64-page) @g1
+    $x5 = LDRXui $x5, target-flags(aarch64-pageoff) @g1
+    STRXui undef $x11, $x5, 32
 
   bb.12:
     ; CHECK-NOT: MCLOH_AdrpAdrp
     ; CHECK: Adding MCLOH_AdrpAddLdr
-    ; %x9 = ADRP @g4
-    ; %x9 = ADDXri %x9, @g4
-    ; %x5 = LDRXui %x9, 0
-    %x9 = ADRP target-flags(aarch64-page, aarch64-got) @g4
-    %x9 = ADDXri %x9, target-flags(aarch64-pageoff, aarch64-got) @g4, 0
-    %x5 = LDRXui %x9, 0
-    %x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5
+    ; $x9 = ADRP @g4
+    ; $x9 = ADDXri $x9, @g4
+    ; $x5 = LDRXui $x9, 0
+    $x9 = ADRP target-flags(aarch64-page, aarch64-got) @g4
+    $x9 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @g4, 0
+    $x5 = LDRXui $x9, 0
+    $x9 = ADRP target-flags(aarch64-page, aarch64-got) @g5
 
   bb.13:
     ; Cannot produce an LOH for multiple users
     ; CHECK-NOT: MCLOH_AdrpAdd
-    %x10 = ADRP target-flags(aarch64-page) @g0
-    %x11 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0
+    $x10 = ADRP target-flags(aarch64-page) @g0
+    $x11 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0
     B %bb.14
 
   bb.14:
-    liveins: %x10
-    %x12 = ADDXri %x10, target-flags(aarch64-pageoff) @g0, 0
+    liveins: $x10
+    $x12 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0
 ...
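
One detail worth pulling out of loh.mir: the multiple-user restriction
exercised by the CHECK-NOTs in bb.4 and bb.13. An ADRP whose page address
feeds more than one user gets no MCLOH_AdrpAdd, as in this sketch of the
bb.4 shape:

    $x10 = ADRP target-flags(aarch64-page) @g0
    $x11 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 ; first user
    $x12 = ADDXri $x10, target-flags(aarch64-pageoff) @g0, 0 ; second user: no LOH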

Modified: llvm/trunk/test/CodeGen/AArch64/machine-combiner.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-combiner.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-combiner.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-combiner.mir Wed Jan 31 14:04:26 2018
@@ -22,27 +22,27 @@ body:             |
   bb.0:
     successors: %bb.1, %bb.2
 
-    %3 = COPY %w2
-    %2 = COPY %w1
-    %1 = COPY %w0
-    %0 = COPY %d0
-    %4 = SUBSWrr %1, %2, implicit-def %nzcv
-    Bcc 13, %bb.2, implicit %nzcv
+    %3 = COPY $w2
+    %2 = COPY $w1
+    %1 = COPY $w0
+    %0 = COPY $d0
+    %4 = SUBSWrr %1, %2, implicit-def $nzcv
+    Bcc 13, %bb.2, implicit $nzcv
     B %bb.1
 
   bb.1:
     ; CHECK: MADDWrrr %1, %2, %3
-    %5 = MADDWrrr %1, %2, %wzr
+    %5 = MADDWrrr %1, %2, $wzr
     %6 = ADDWrr %3, killed %5
     %7 = SCVTFUWDri killed %6
     ; CHECK: FMADDDrrr %7, %7, %0
     %8 = FMULDrr %7, %7
     %9 = FADDDrr %0, killed %8
-    %d0 = COPY %9
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %9
+    RET_ReallyLR implicit $d0
 
   bb.2:
-    %d0 = COPY %0
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %0
+    RET_ReallyLR implicit $d0
 
 ...
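
The rewrite machine-combiner.mir checks: a multiply feeding an add is
refolded into a single fused multiply-add, for both the integer (MADDWrrr)
and floating-point (FMADDDrrr) cases. A sketch of the integer half that the
'CHECK: MADDWrrr %1, %2, %3' line verifies:

    ; before: a plain multiply (MADD with a $wzr addend), then a separate add
    %5 = MADDWrrr %1, %2, $wzr
    %6 = ADDWrr %3, killed %5
    ; after the combine: the add is folded into the multiply-add
    %6 = MADDWrrr %1, %2, %3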

Modified: llvm/trunk/test/CodeGen/AArch64/machine-copy-remove.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-copy-remove.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-copy-remove.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-copy-remove.mir Wed Jan 31 14:04:26 2018
@@ -2,285 +2,285 @@
 ---
 # Check that bb.0 COPY is seen through to allow the bb.1 COPY of XZR to be removed.
 # CHECK-LABEL: name: test1
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test1
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %x0 = COPY %x1
-    CBNZX %x1, %bb.2
+    $x0 = COPY $x1
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Similar to test1, but with reversed COPY.
 # CHECK-LABEL: name: test2
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test2
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %x1 = COPY %x0
-    CBNZX %x1, %bb.2
+    $x1 = COPY $x0
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Similar to test1, but with a clobber that prevents removal of the XZR COPY.
 # CHECK-LABEL: name: test3
-# CHECK: COPY %xzr
+# CHECK: COPY $xzr
 name:            test3
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x0 = COPY %x1
-    %x1 = LDRXui %x1, 0
-    CBNZX %x1, %bb.2
+    $x0 = COPY $x1
+    $x1 = LDRXui $x1, 0
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Similar to test2, but with a clobber that prevents removal of the XZR COPY.
 # CHECK-LABEL: name: test4
-# CHECK: COPY %xzr
+# CHECK: COPY $xzr
 name:            test4
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x1 = COPY %x0
-    %x1 = LDRXui %x1, 0
-    CBNZX %x1, %bb.2
+    $x1 = COPY $x0
+    $x1 = LDRXui $x1, 0
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Similar to test2, but with a clobber that prevents removal of the XZR COPY.
 # CHECK-LABEL: name: test5
-# CHECK: COPY %xzr
+# CHECK: COPY $xzr
 name:            test5
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x1 = COPY %x0
-    %x0 = LDRXui %x1, 0
-    CBNZX %x1, %bb.2
+    $x1 = COPY $x0
+    $x0 = LDRXui $x1, 0
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Similar to test1, but with two levels of COPYs.
 # CHECK-LABEL: name: test6
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test6
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x2 = COPY %x0
-    %x1 = COPY %x2
-    CBNZX %x1, %bb.2
+    $x2 = COPY $x0
+    $x1 = COPY $x2
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Similar to test1, but with two levels of COPYs and a clobber preventing COPY of XZR removal.
 # CHECK-LABEL: name: test7
-# CHECK: COPY %xzr
+# CHECK: COPY $xzr
 name:            test7
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x2 = COPY %x0
-    %x0 = LDRXui %x1, 0
-    %x1 = COPY %x2
-    CBNZX %x1, %bb.2
+    $x2 = COPY $x0
+    $x0 = LDRXui $x1, 0
+    $x1 = COPY $x2
+    CBNZX $x1, %bb.2
 
   bb.1:
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Check that the TargetRegs vector clobber update loop in
 #  AArch64RedundantCopyElimination::optimizeCopy works correctly.
 # CHECK-LABEL: name: test8
-# CHECK: x0 = COPY %xzr
-# CHECK: x1 = COPY %xzr
+# CHECK: x0 = COPY $xzr
+# CHECK: x1 = COPY $xzr
 name:            test8
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    %x1 = COPY %x0
-    CBNZX %x1, %bb.2
+    $x1 = COPY $x0
+    CBNZX $x1, %bb.2
 
   bb.1:
-    liveins: %x0, %x2
+    liveins: $x0, $x2
 
-    %x0, %x1 = LDPXi %x2, 0
-    %x0 = COPY %xzr
-    %x1 = COPY %xzr
+    $x0, $x1 = LDPXi $x2, 0
+    $x0 = COPY $xzr
+    $x1 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Check that copy isn't removed from a block with multiple predecessors.
 # CHECK-LABEL: name: test9
-# CHECK: x0 = COPY %xzr
+# CHECK: x0 = COPY $xzr
 # CHECK-NEXT: B %bb.3
 name:            test9
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    CBNZX %x0, %bb.2
+    CBNZX $x0, %bb.2
 
   bb.1:
-    liveins: %x0, %x2
+    liveins: $x0, $x2
 
-    %x0 = COPY %xzr
+    $x0 = COPY $xzr
     B %bb.3
 
   bb.2:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = LDRXui %x1, 0
+    $x0 = LDRXui $x1, 0
 
-    CBNZX %x1, %bb.1
+    CBNZX $x1, %bb.1
 
   bb.3:
-    liveins: %x0
+    liveins: $x0
 
-    RET_ReallyLR implicit %x0
+    RET_ReallyLR implicit $x0
 
 ...
 # Eliminate redundant MOVi32imm 7 in bb.1
 # Note: 32-bit compare/32-bit move imm
 # Kill marker should be removed from compare.
 # CHECK-LABEL: name: test10
-# CHECK: SUBSWri %w0, 7, 0, implicit-def %nzcv
+# CHECK: SUBSWri $w0, 7, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK-NOT: MOVi32imm
 name:            test10
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 7
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm 7
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -289,24 +289,24 @@ body:             |
 # Note: 64-bit compare/32-bit move imm w/implicit def
 # Kill marker should be removed from compare.
 # CHECK-LABEL: name: test11
-# CHECK: SUBSXri %x0, 7, 0, implicit-def %nzcv
+# CHECK: SUBSXri $x0, 7, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK-NOT: MOVi32imm
 name:            test11
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $xzr = SUBSXri killed $x0, 7, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 7, implicit-def %x0
-    STRXui killed %x0, killed %x1, 0
+    $w0 = MOVi32imm 7, implicit-def $x0
+    STRXui killed $x0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -315,24 +315,24 @@ body:             |
 # Note: 64-bit compare/32-bit move imm
 # Kill marker should be removed from compare.
 # CHECK-LABEL: name: test12
-# CHECK: SUBSXri %x0, 7, 0, implicit-def %nzcv
+# CHECK: SUBSXri $x0, 7, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK-NOT: MOVi32imm
 name:            test12
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    dead %xzr = SUBSXri killed %x0, 7, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $xzr = SUBSXri killed $x0, 7, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 7
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm 7
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -341,24 +341,24 @@ body:             |
 # Note: 32-bit compare/32-bit move imm w/implicit def
 # Kill marker should remain on compare.
 # CHECK-LABEL: name: test13
-# CHECK: SUBSWri killed %w0, 7, 0, implicit-def %nzcv
+# CHECK: SUBSWri killed $w0, 7, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK: MOVi32imm
 name:            test13
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 7, implicit-def %x0
-    STRXui killed %x0, killed %x1, 0
+    $w0 = MOVi32imm 7, implicit-def $x0
+    STRXui killed $x0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -371,19 +371,19 @@ name:            test14
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
-    %w0 = LDRWui %x1, 0
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv
+    $w0 = LDRWui $x1, 0
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 7
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm 7
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -396,19 +396,19 @@ name:            test15
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    dead %wzr = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = SUBSWri killed $w0, 7, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1, %x2
+    liveins: $x1, $x2
 
-    %w0 = LDRWui %x1, 0
-    STRWui killed %w0, killed %x2, 0
-    %w0 = MOVi32imm 7
-    STRWui killed %w0, killed %x1, 0
+    $w0 = LDRWui $x1, 0
+    STRWui killed $w0, killed $x2, 0
+    $w0 = MOVi32imm 7
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -421,18 +421,18 @@ name:            test16
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %wzr = SUBSWri %w0, 7, 0, implicit-def %nzcv
-    %w2 = COPY %w0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = SUBSWri $w0, 7, 0, implicit-def $nzcv
+    $w2 = COPY $w0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w2 = MOVi32imm 7
-    STRWui killed %w2, killed %x1, 0
+    $w2 = MOVi32imm 7
+    STRWui killed $w2, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -445,17 +445,17 @@ name:            test17
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %w0 = SUBSWri killed %w0, 7, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $w0 = SUBSWri killed $w0, 7, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 7
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm 7
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -470,16 +470,16 @@ name:            test18
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    CBNZX killed %x0, %bb.2
+    CBNZX killed $x0, %bb.2
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = MOVi64imm 4252017623040
-    STRXui killed %x0, killed %x1, 0
+    $x0 = MOVi64imm 4252017623040
+    STRXui killed $x0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -488,24 +488,24 @@ body:             |
 # Note: 32-bit compare/32-bit move imm
 # Kill marker should be removed from compare.
 # CHECK-LABEL: name: test19
-# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv
+# CHECK: ADDSWri $w0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK-NOT: MOVi32imm
 name:            test19
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = ADDSWri killed $w0, 1, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm -1
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm -1
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -514,24 +514,24 @@ body:             |
 # Note: 64-bit compare/64-bit move imm
 # Kill marker should be removed from compare.
 # CHECK-LABEL: name: test20
-# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: ADDSXri $x0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK-NOT: MOVi64imm
 name:            test20
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $xzr = ADDSXri killed $x0, 1, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = MOVi64imm -1
-    STRXui killed %x0, killed %x1, 0
+    $x0 = MOVi64imm -1
+    STRXui killed $x0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -540,24 +540,24 @@ body:             |
 # Note: 64-bit compare/32-bit move imm
 # Kill marker should be removed from compare.
 # CHECK-LABEL: name: test21
-# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: ADDSXri $x0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
 # CHECK-NOT: MOVi32imm
 name:            test21
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1
+    liveins: $x0, $x1
 
-    dead %xzr = ADDSXri killed %x0, 1, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $xzr = ADDSXri killed $x0, 1, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm -1
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm -1
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -571,17 +571,17 @@ name:            test22
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %wzr = ADDSWri killed %w0, 1, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = ADDSWri killed $w0, 1, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %x0 = MOVi64imm -1
-    STRXui killed %x0, killed %x1, 0
+    $x0 = MOVi64imm -1
+    STRXui killed $x0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR
@@ -594,17 +594,17 @@ name:            test23
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1
+    liveins: $w0, $x1
 
-    dead %wzr = SUBSWri killed %w0, 1, 12, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    dead $wzr = SUBSWri killed $w0, 1, 12, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x1
+    liveins: $x1
 
-    %w0 = MOVi32imm 4096
-    STRWui killed %w0, killed %x1, 0
+    $w0 = MOVi32imm 4096
+    STRWui killed $w0, killed $x1, 0
 
   bb.2:
     RET_ReallyLR

Modified: llvm/trunk/test/CodeGen/AArch64/machine-dead-copy.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-dead-copy.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-dead-copy.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-dead-copy.mir Wed Jan 31 14:04:26 2018
@@ -12,29 +12,29 @@
 # The first copy is a dead copy, which is never used.
 # CHECK-LABEL: name: copyprop1
 # CHECK: bb.0:
-# CHECK-NOT: %w20 = COPY
+# CHECK-NOT: $w20 = COPY
 name: copyprop1
 body: |
   bb.0:
-    liveins: %w0,  %w1
-    %w20 = COPY %w1
-    BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0
-    RET_ReallyLR implicit %w0
+    liveins: $w0,  $w1
+    $w20 = COPY $w1
+    BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0
+    RET_ReallyLR implicit $w0
 ...
 ---
 # The first copy is not dead: it is used by the second copy after the
 # call.
 # CHECK-LABEL: name: copyprop2
 # CHECK: bb.0:
-# CHECK: %w20 = COPY
+# CHECK: $w20 = COPY
 name: copyprop2
 body: |
   bb.0:
-    liveins: %w0,  %w1
-    %w20 = COPY %w1
-    BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0
-    %w0 = COPY %w20
-    RET_ReallyLR implicit %w0
+    liveins: $w0,  $w1
+    $w20 = COPY $w1
+    BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0
+    $w0 = COPY $w20
+    RET_ReallyLR implicit $w0
 ...
 ---
 # Both the first and the second copy are dead; neither is used.
@@ -44,11 +44,11 @@ body: |
 name: copyprop3
 body: |
   bb.0:
-    liveins: %w0,  %w1
-    %w20 = COPY %w1
-    BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0
-    %w20 = COPY %w0
-    RET_ReallyLR implicit %w0
+    liveins: $w0,  $w1
+    $w20 = COPY $w1
+    BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0
+    $w20 = COPY $w0
+    RET_ReallyLR implicit $w0
 ...
 # The second copy is removed as a NOP copy; the first copy then becomes
 # dead and should be removed as well.
@@ -58,10 +58,10 @@ body: |
 name: copyprop4
 body: |
   bb.0:
-    liveins: %w0,  %w1
-    %w20 = COPY %w0
-    %w0 = COPY %w20
-    BL @foo, csr_aarch64_aapcs, implicit %w0, implicit-def %w0
-    RET_ReallyLR implicit %w0
+    liveins: $w0,  $w1
+    $w20 = COPY $w0
+    $w0 = COPY $w20
+    BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0
+    RET_ReallyLR implicit $w0
 ...
 

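For reference, the dead-copy pattern the copyprop tests above exercise reduces
to the following shape under the new '$' convention (a minimal sketch stitched
together from the hunks above, not an additional test added by this commit):

    bb.0:
      liveins: $w0, $w1
      $w20 = COPY $w1                    ; dead unless $w20 is read later
      BL @foo, csr_aarch64_aapcs, implicit $w0, implicit-def $w0
      $w0 = COPY $w20                    ; this read keeps the first COPY alive
      RET_ReallyLR implicit $w0

Without the trailing COPY (copyprop1) the first COPY is dead and deleted; with
it (copyprop2) it must be kept.
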
Modified: llvm/trunk/test/CodeGen/AArch64/machine-outliner.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-outliner.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-outliner.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-outliner.mir Wed Jan 31 14:04:26 2018
@@ -28,126 +28,126 @@
 # CHECK-LABEL: name: main
 
 # CHECK: BL @OUTLINED_FUNCTION_[[F0:[0-9]+]]
-# CHECK-NEXT: early-clobber %sp, %lr = LDRXpost %sp, 16
-# CHECK-NEXT: %x16 = ADDXri %sp, 48, 0
-# CHECK-NEXT: STRHHroW %w16, %x9, %w30, 1, 1
-# CHECK-NEXT: %lr = ORRXri %xzr, 1
+# CHECK-NEXT: early-clobber $sp, $lr = LDRXpost $sp, 16
+# CHECK-NEXT: $x16 = ADDXri $sp, 48, 0
+# CHECK-NEXT: STRHHroW $w16, $x9, $w30, 1, 1
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
 
 # CHECK: BL @OUTLINED_FUNCTION_[[F0]]
-# CHECK-NEXT: early-clobber %sp, %lr = LDRXpost %sp, 16
-# CHECK-NEXT: %x16 = ADDXri %sp, 48, 0
-# CHECK-NEXT: STRHHroW %w16, %x9, %w30, 1, 1
-# CHECK-NEXT: %lr = ORRXri %xzr, 1
+# CHECK-NEXT: early-clobber $sp, $lr = LDRXpost $sp, 16
+# CHECK-NEXT: $x16 = ADDXri $sp, 48, 0
+# CHECK-NEXT: STRHHroW $w16, $x9, $w30, 1, 1
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
 
 # CHECK: BL @OUTLINED_FUNCTION_[[F0]]
-# CHECK-NEXT: early-clobber %sp, %lr = LDRXpost %sp, 16
-# CHECK-NEXT: %x16 = ADDXri %sp, 48, 0
-# CHECK-NEXT: STRHHroW %w16, %x9, %w30, 1, 1
-# CHECK-NEXT: %lr = ORRXri %xzr, 1
+# CHECK-NEXT: early-clobber $sp, $lr = LDRXpost $sp, 16
+# CHECK-NEXT: $x16 = ADDXri $sp, 48, 0
+# CHECK-NEXT: STRHHroW $w16, $x9, $w30, 1, 1
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
 name:            main
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %sp = frame-setup SUBXri %sp, 16, 0
-    renamable %x9 = ADRP target-flags(aarch64-page) @bar
-    %x9 = ORRXri %xzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w30 = ORRWri %wzr, 1
-    %lr = ORRXri %xzr, 1
-
-    %x20, %x19 = LDPXi %sp, 10
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    renamable %x9 = ADRP target-flags(aarch64-page) @x
-    %x16 = ADDXri %sp, 48, 0;
-    STRHHroW %w16, %x9, %w30, 1, 1
-    %lr = ORRXri %xzr, 1
-    %w3 = ORRWri %wzr, 1993
-
-    %x20, %x19 = LDPXi %sp, 10
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    renamable %x9 = ADRP target-flags(aarch64-page) @x
-    %x16 = ADDXri %sp, 48, 0;
-    STRHHroW %w16, %x9, %w30, 1, 1
-    %lr = ORRXri %xzr, 1 
-
-    %w4 = ORRWri %wzr, 1994
-
-    %x20, %x19 = LDPXi %sp, 10
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    %w16 = ORRWri %wzr, 1
-    renamable %x9 = ADRP target-flags(aarch64-page) @x
-    %x16 = ADDXri %sp, 48, 0;
-    STRHHroW %w16, %x9, %w30, 1, 1
-    %lr = ORRXri %xzr, 1
+    $sp = frame-setup SUBXri $sp, 16, 0
+    renamable $x9 = ADRP target-flags(aarch64-page) @bar
+    $x9 = ORRXri $xzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w30 = ORRWri $wzr, 1
+    $lr = ORRXri $xzr, 1
+
+    $x20, $x19 = LDPXi $sp, 10
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    renamable $x9 = ADRP target-flags(aarch64-page) @x
+    $x16 = ADDXri $sp, 48, 0;
+    STRHHroW $w16, $x9, $w30, 1, 1
+    $lr = ORRXri $xzr, 1
+    $w3 = ORRWri $wzr, 1993
+
+    $x20, $x19 = LDPXi $sp, 10
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    renamable $x9 = ADRP target-flags(aarch64-page) @x
+    $x16 = ADDXri $sp, 48, 0;
+    STRHHroW $w16, $x9, $w30, 1, 1
+    $lr = ORRXri $xzr, 1 
+
+    $w4 = ORRWri $wzr, 1994
+
+    $x20, $x19 = LDPXi $sp, 10
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    $w16 = ORRWri $wzr, 1
+    renamable $x9 = ADRP target-flags(aarch64-page) @x
+    $x16 = ADDXri $sp, 48, 0;
+    STRHHroW $w16, $x9, $w30, 1, 1
+    $lr = ORRXri $xzr, 1
 
-    %sp = ADDXri %sp, 16, 0
-    RET undef %lr
+    $sp = ADDXri $sp, 16, 0
+    RET undef $lr
 
 ...
 ---
 # This test ensures that we can avoid saving LR when it's available.
 # CHECK-LABEL: bb.1:
-# CHECK-NOT: BL @baz, implicit-def dead %lr, implicit %sp
-# CHECK: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]], implicit-def %lr, implicit %sp
-# CHECK-NEXT: %w17 = ORRWri %wzr, 2
-# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1]], implicit-def %lr, implicit %sp
-# CHECK-NEXT: %w8 = ORRWri %wzr, 0
+# CHECK-NOT: BL @baz, implicit-def dead $lr, implicit $sp
+# CHECK: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]], implicit-def $lr, implicit $sp
+# CHECK-NEXT: $w17 = ORRWri $wzr, 2
+# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1]], implicit-def $lr, implicit $sp
+# CHECK-NEXT: $w8 = ORRWri $wzr, 0
 name:            bar
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %w0, %lr, %w8
-    %sp = frame-setup SUBXri %sp, 32, 0
-    %fp = frame-setup ADDXri %sp, 16, 0
+    liveins: $w0, $lr, $w8
+    $sp = frame-setup SUBXri $sp, 32, 0
+    $fp = frame-setup ADDXri $sp, 16, 0
 
   bb.1:
-    BL @baz, implicit-def dead %lr, implicit %sp
-    %w17 = ORRWri %wzr, 1
-    %w17 = ORRWri %wzr, 1
-    %w17 = ORRWri %wzr, 1
-    %w17 = ORRWri %wzr, 1
-    BL @baz, implicit-def dead %lr, implicit %sp
-    %w17 = ORRWri %wzr, 2
-    BL @baz, implicit-def dead %lr, implicit %sp
-    %w17 = ORRWri %wzr, 1
-    %w17 = ORRWri %wzr, 1
-    %w17 = ORRWri %wzr, 1
-    %w17 = ORRWri %wzr, 1
-    BL @baz, implicit-def dead %lr, implicit %sp
-    %w8 = ORRWri %wzr, 0
+    BL @baz, implicit-def dead $lr, implicit $sp
+    $w17 = ORRWri $wzr, 1
+    $w17 = ORRWri $wzr, 1
+    $w17 = ORRWri $wzr, 1
+    $w17 = ORRWri $wzr, 1
+    BL @baz, implicit-def dead $lr, implicit $sp
+    $w17 = ORRWri $wzr, 2
+    BL @baz, implicit-def dead $lr, implicit $sp
+    $w17 = ORRWri $wzr, 1
+    $w17 = ORRWri $wzr, 1
+    $w17 = ORRWri $wzr, 1
+    $w17 = ORRWri $wzr, 1
+    BL @baz, implicit-def dead $lr, implicit $sp
+    $w8 = ORRWri $wzr, 0
     
   bb.2:
-    %w15 = ORRWri %wzr, 1
-    %w15 = ORRWri %wzr, 1
-    %w15 = ORRWri %wzr, 1
-    %w15 = ORRWri %wzr, 1
-    %x15 = ADDXri %sp, 48, 0;
-    %w9 = ORRWri %wzr, 0
-    %w15 = ORRWri %wzr, 1
-    %w15 = ORRWri %wzr, 1
-    %w15 = ORRWri %wzr, 1
-    %w15 = ORRWri %wzr, 1
-    %x15 = ADDXri %sp, 48, 0;
-    %w8 = ORRWri %wzr, 0
+    $w15 = ORRWri $wzr, 1
+    $w15 = ORRWri $wzr, 1
+    $w15 = ORRWri $wzr, 1
+    $w15 = ORRWri $wzr, 1
+    $x15 = ADDXri $sp, 48, 0;
+    $w9 = ORRWri $wzr, 0
+    $w15 = ORRWri $wzr, 1
+    $w15 = ORRWri $wzr, 1
+    $w15 = ORRWri $wzr, 1
+    $w15 = ORRWri $wzr, 1
+    $x15 = ADDXri $sp, 48, 0;
+    $w8 = ORRWri $wzr, 0
     
   bb.3:
-    %fp, %lr = LDPXi %sp, 2
-    %sp = ADDXri %sp, 32, 0
-    RET undef %lr
+    $fp, $lr = LDPXi $sp, 2
+    $sp = ADDXri $sp, 32, 0
+    RET undef $lr
 
 ...
 ---
@@ -155,8 +155,8 @@ name:            baz
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: %w0, %lr, %w8
-    RET undef %lr
+    liveins: $w0, $lr, $w8
+    RET undef $lr
 
 # CHECK-LABEL: name:            OUTLINED_FUNCTION_{{[0-9]}}
 # CHECK-LABEL: name:            OUTLINED_FUNCTION_{{[1-9]}}

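The LR-saving check in the outliner test is a liveness argument: every
BL @baz, implicit-def dead $lr, implicit $sp already clobbers LR, so LR holds
no live value at the candidate call sites and the outlined call needs no
save/restore around it. Schematically (the outlined function's number is a
FileCheck capture in the test; <N> below is just a placeholder):

    BL @baz, implicit-def dead $lr, implicit $sp               ; $lr dead from here
    BL @OUTLINED_FUNCTION_<N>, implicit-def $lr, implicit $sp  ; no LR spill needed
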
Modified: llvm/trunk/test/CodeGen/AArch64/machine-scheduler.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-scheduler.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-scheduler.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-scheduler.mir Wed Jan 31 14:04:26 2018
@@ -18,18 +18,18 @@
 ---
 # CHECK-LABEL: name: load_imp-def
 # CHECK: bb.0.entry:
-# CHECK: LDRWui %x0, 0
-# CHECK: LDRWui %x0, 1
-# CHECK: STRWui %w1, %x0, 2
+# CHECK: LDRWui $x0, 0
+# CHECK: LDRWui $x0, 1
+# CHECK: STRWui $w1, $x0, 2
 name: load_imp-def
 tracksRegLiveness: true
 body: |
   bb.0.entry:
-    liveins: %w1, %x0
-    %w8 = LDRWui %x0, 1, implicit-def %x8  :: (load 4 from %ir.0)
-    STRWui killed %w1, %x0, 2 :: (store 4 into %ir.arrayidx1)
-    %w9 = LDRWui killed %x0, 0, implicit-def %x9  :: (load 4 from %ir.arrayidx19, align 8)
-    %x0 = ADDXrr killed %x9, killed %x8
-    RET_ReallyLR implicit %x0
+    liveins: $w1, $x0
+    $w8 = LDRWui $x0, 1, implicit-def $x8  :: (load 4 from %ir.0)
+    STRWui killed $w1, $x0, 2 :: (store 4 into %ir.arrayidx1)
+    $w9 = LDRWui killed $x0, 0, implicit-def $x9  :: (load 4 from %ir.arrayidx19, align 8)
+    $x0 = ADDXrr killed $x9, killed $x8
+    RET_ReallyLR implicit $x0
 ...
 

Modified: llvm/trunk/test/CodeGen/AArch64/machine-sink-zr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-sink-zr.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-sink-zr.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-sink-zr.mir Wed Jan 31 14:04:26 2018
@@ -15,24 +15,24 @@ body:             |
   ; Check that the WZR copy is sunk into the loop preheader.
   ; CHECK-LABEL: name: sinkwzr
   ; CHECK-LABEL: bb.0:
-  ; CHECK-NOT: COPY %wzr
+  ; CHECK-NOT: COPY $wzr
   bb.0:
-    liveins: %w0
+    liveins: $w0
 
-    %0 = COPY %w0
-    %1 = COPY %wzr
+    %0 = COPY $w0
+    %1 = COPY $wzr
     CBZW %0, %bb.3
 
   ; CHECK-LABEL: bb.1:
-  ; CHECK: COPY %wzr
+  ; CHECK: COPY $wzr
 
   bb.1:
     B %bb.2
 
   bb.2:
     %2 = PHI %0, %bb.1, %4, %bb.2
-    %w0 = COPY %1
-    %3 = SUBSWri %2, 1, 0, implicit-def dead %nzcv
+    $w0 = COPY %1
+    %3 = SUBSWri %2, 1, 0, implicit-def dead $nzcv
     %4 = COPY %3
     CBZW %3, %bb.3
     B %bb.2

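The sinking test above checks a single movement: the COPY from the constant
register $wzr is not needed on the early-exit path out of bb.0, so the pass
under test may move it into the loop preheader. A sketch of the expected
output, assembled from the CHECK lines above:

    bb.0:
      %0 = COPY $w0
      CBZW %0, %bb.3       ; early exit: the $wzr copy is not needed here
    bb.1:
      %1 = COPY $wzr       ; sunk into the loop preheader
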
Modified: llvm/trunk/test/CodeGen/AArch64/machine-zero-copy-remove.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/machine-zero-copy-remove.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/machine-zero-copy-remove.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/machine-zero-copy-remove.mir Wed Jan 31 14:04:26 2018
@@ -1,565 +1,565 @@
 # RUN: llc -mtriple=aarch64--linux-gnu -run-pass=aarch64-copyelim %s -verify-machineinstrs -o - | FileCheck %s
 ---
 # CHECK-LABEL: name: test1
-# CHECK: ANDSWri %w0, 1, implicit-def %nzcv
+# CHECK: ANDSWri $w0, 1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test1
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    %w0 = ANDSWri %w0, 1, implicit-def %nzcv
-    STRWui killed %w0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ANDSWri $w0, 1, implicit-def $nzcv
+    STRWui killed $w0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x2, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test2
-# CHECK: ANDSXri %x0, 1, implicit-def %nzcv
+# CHECK: ANDSXri $x0, 1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test2
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x0 = ANDSXri %x0, 1, implicit-def %nzcv
-    STRXui killed %x0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = ANDSXri $x0, 1, implicit-def $nzcv
+    STRXui killed $x0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x2, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test3
-# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv
+# CHECK: ADDSWri $w0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test3
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    %w0 = ADDSWri %w0, 1, 0, implicit-def %nzcv
-    STRWui killed %w0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ADDSWri $w0, 1, 0, implicit-def $nzcv
+    STRWui killed $w0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x2, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test4
-# CHECK: ADDSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: ADDSXri $x0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test4
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x0 = ADDSXri %x0, 1, 0, implicit-def %nzcv
-    STRXui killed %x0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = ADDSXri $x0, 1, 0, implicit-def $nzcv
+    STRXui killed $x0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x2, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test5
-# CHECK: SUBSWri %w0, 1, 0, implicit-def %nzcv
+# CHECK: SUBSWri $w0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test5
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    %w0 = SUBSWri %w0, 1, 0, implicit-def %nzcv
-    STRWui killed %w0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = SUBSWri $w0, 1, 0, implicit-def $nzcv
+    STRWui killed $w0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x2, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test6
-# CHECK: SUBSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: SUBSXri $x0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test6
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x0 = SUBSXri %x0, 1, 0, implicit-def %nzcv
-    STRXui killed %x0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = SUBSXri $x0, 1, 0, implicit-def $nzcv
+    STRXui killed $x0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x2, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test7
-# CHECK: ADDSWrr %w0, %w1, implicit-def %nzcv
+# CHECK: ADDSWrr $w0, $w1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test7
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = ADDSWrr %w0, %w1, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ADDSWrr $w0, $w1, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test8
-# CHECK: ADDSXrr %x0, %x1, implicit-def %nzcv
+# CHECK: ADDSXrr $x0, $x1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test8
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
-    %x0 = ADDSXrr %x0, %x1, implicit-def %nzcv
-    STRXui killed %x0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = ADDSXrr $x0, $x1, implicit-def $nzcv
+    STRXui killed $x0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x3, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test9
-# CHECK: ANDSWrr %w0, %w1, implicit-def %nzcv
+# CHECK: ANDSWrr $w0, $w1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test9
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = ANDSWrr %w0, %w1, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ANDSWrr $w0, $w1, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test10
-# CHECK: ANDSXrr %x0, %x1, implicit-def %nzcv
+# CHECK: ANDSXrr $x0, $x1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test10
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
-    %x0 = ANDSXrr %x0, %x1, implicit-def %nzcv
-    STRXui killed %x0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = ANDSXrr $x0, $x1, implicit-def $nzcv
+    STRXui killed $x0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x3, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test11
-# CHECK: BICSWrr %w0, %w1, implicit-def %nzcv
+# CHECK: BICSWrr $w0, $w1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test11
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = BICSWrr %w0, %w1, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = BICSWrr $w0, $w1, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test12
-# CHECK: BICSXrr %x0, %x1, implicit-def %nzcv
+# CHECK: BICSXrr $x0, $x1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test12
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
-    %x0 = BICSXrr %x0, %x1, implicit-def %nzcv
-    STRXui killed %x0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = BICSXrr $x0, $x1, implicit-def $nzcv
+    STRXui killed $x0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x3, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test13
-# CHECK: SUBSWrr %w0, %w1, implicit-def %nzcv
+# CHECK: SUBSWrr $w0, $w1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test13
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = SUBSWrr %w0, %w1, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = SUBSWrr $w0, $w1, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test14
-# CHECK: SUBSXrr %x0, %x1, implicit-def %nzcv
+# CHECK: SUBSXrr $x0, $x1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test14
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
-    %x0 = SUBSXrr %x0, %x1, implicit-def %nzcv
-    STRXui killed %x0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = SUBSXrr $x0, $x1, implicit-def $nzcv
+    STRXui killed $x0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x3, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test15
-# CHECK: ADDSWrs %w0, %w1, 0, implicit-def %nzcv
+# CHECK: ADDSWrs $w0, $w1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test15
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = ADDSWrs %w0, %w1, 0, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ADDSWrs $w0, $w1, 0, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test16
-# CHECK: ADDSXrs %x0, %x1, 0, implicit-def %nzcv
+# CHECK: ADDSXrs $x0, $x1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test16
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
-    %x0 = ADDSXrs %x0, %x1, 0, implicit-def %nzcv
-    STRXui killed %x0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = ADDSXrs $x0, $x1, 0, implicit-def $nzcv
+    STRXui killed $x0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x3, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test17
-# CHECK: ANDSWrs %w0, %w1, 0, implicit-def %nzcv
+# CHECK: ANDSWrs $w0, $w1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test17
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = ANDSWrs %w0, %w1, 0, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ANDSWrs $w0, $w1, 0, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test18
-# CHECK: ANDSXrs %x0, %x1, 0, implicit-def %nzcv
+# CHECK: ANDSXrs $x0, $x1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %xzr
+# CHECK-NOT: COPY $xzr
 name:            test18
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2, %x3
+    liveins: $x0, $x1, $x2, $x3
 
-    %x0 = ANDSXrs %x0, %x1, 0, implicit-def %nzcv
-    STRXui killed %x0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = ANDSXrs $x0, $x1, 0, implicit-def $nzcv
+    STRXui killed $x0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %x0 = COPY %xzr
-    STRXui killed %x0, killed %x3, 0
+    $x0 = COPY $xzr
+    STRXui killed $x0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # CHECK-LABEL: name: test19
-# CHECK: BICSWrs %w0, %w1, 0, implicit-def %nzcv
+# CHECK: BICSWrs $w0, $w1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: COPY %wzr
+# CHECK-NOT: COPY $wzr
 name:            test19
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %w1, %x2, %x3
+    liveins: $w0, $w1, $x2, $x3
 
-    %w0 = BICSWrs %w0, %w1, 0, implicit-def %nzcv
-    STRWui killed %w0, killed %x2, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = BICSWrs $w0, $w1, 0, implicit-def $nzcv
+    STRWui killed $w0, killed $x2, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x3
+    liveins: $x3
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x3, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x3, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # Unicorn test - we can remove a redundant copy and a redundant mov
 # CHECK-LABEL: name: test20
-# CHECK: SUBSWri %w1, 1, 0, implicit-def %nzcv
+# CHECK: SUBSWri $w1, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK-NOT: %w0 = COPY %wzr
-# CHECK-NOT: %w1 = MOVi32imm 1
+# CHECK-NOT: $w0 = COPY $wzr
+# CHECK-NOT: $w1 = MOVi32imm 1
 name:            test20
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w1, %x2
+    liveins: $w1, $x2
 
-    %w0 = SUBSWri %w1, 1, 0, implicit-def %nzcv
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = SUBSWri $w1, 1, 0, implicit-def $nzcv
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %w0 = COPY %wzr
-    %w1 = MOVi32imm 1
-    STRWui killed %w0, %x2, 0
-    STRWui killed %w1, killed %x2, 1
+    $w0 = COPY $wzr
+    $w1 = MOVi32imm 1
+    STRWui killed $w0, $x2, 0
+    STRWui killed $w1, killed $x2, 1
 
   bb.2:
     RET_ReallyLR
 
 ...
-# Negative test - MOVi32imm clobbers %w0
+# Negative test - MOVi32imm clobbers $w0
 # CHECK-LABEL: name: test21
-# CHECK: ANDSWri %w0, 1, implicit-def %nzcv
+# CHECK: ANDSWri $w0, 1, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK: %w0 = COPY %wzr
+# CHECK: $w0 = COPY $wzr
 name:            test21
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    %w0 = ANDSWri %w0, 1, implicit-def %nzcv
-    STRWui killed %w0, %x1, 0
-    %w0 = MOVi32imm -1
-    STRWui killed %w0, killed %x1, 1
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ANDSWri $w0, 1, implicit-def $nzcv
+    STRWui killed $w0, $x1, 0
+    $w0 = MOVi32imm -1
+    STRWui killed $w0, killed $x1, 1
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x2, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # Negative test - SUBSXri self-clobbers x0, so MOVi64imm can't be removed
 # CHECK-LABEL: name: test22
-# CHECK: SUBSXri %x0, 1, 0, implicit-def %nzcv
+# CHECK: SUBSXri $x0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK: %x0 = MOVi64imm 1
+# CHECK: $x0 = MOVi64imm 1
 name:            test22
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %x0, %x1, %x2
+    liveins: $x0, $x1, $x2
 
-    %x0 = SUBSXri %x0, 1, 0, implicit-def %nzcv
-    STRXui killed %x0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $x0 = SUBSXri $x0, 1, 0, implicit-def $nzcv
+    STRXui killed $x0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %x0 = MOVi64imm 1
-    STRXui killed %x0, killed %x2, 0
+    $x0 = MOVi64imm 1
+    STRXui killed $x0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR
 ...
 # Negative test - bb.1 has multiple preds
 # CHECK-LABEL: name: test23
-# CHECK: ADDSWri %w0, 1, 0, implicit-def %nzcv
+# CHECK: ADDSWri $w0, 1, 0, implicit-def $nzcv
 # CHECK: bb.1:
-# CHECK: COPY %wzr
+# CHECK: COPY $wzr
 name:            test23
 tracksRegLiveness: true
 body:             |
   bb.0.entry:
-    liveins: %w0, %x1, %x2
+    liveins: $w0, $x1, $x2
 
-    %w0 = ADDSWri %w0, 1, 0, implicit-def %nzcv
-    STRWui killed %w0, killed %x1, 0
-    Bcc 1, %bb.2, implicit killed %nzcv
+    $w0 = ADDSWri $w0, 1, 0, implicit-def $nzcv
+    STRWui killed $w0, killed $x1, 0
+    Bcc 1, %bb.2, implicit killed $nzcv
     B %bb.1
 
   bb.3:
     B %bb.1
 
   bb.1:
-    liveins: %x2
+    liveins: $x2
 
-    %w0 = COPY %wzr
-    STRWui killed %w0, killed %x2, 0
+    $w0 = COPY $wzr
+    STRWui killed $w0, killed $x2, 0
 
   bb.2:
     RET_ReallyLR

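Nearly all of the positive tests in this file share one shape, worth spelling
out once: the flag-setting instruction leaves its result in the register that
bb.1 re-zeroes, and (reading Bcc's condition operand 1 as NE) bb.1 is only
reached when that result was zero, so the COPY from the zero register is
redundant and aarch64-copyelim deletes it. Taking test1 as the representative:

    bb.0.entry:
      $w0 = ANDSWri $w0, 1, implicit-def $nzcv  ; result in $w0, flags in $nzcv
      Bcc 1, %bb.2, implicit killed $nzcv       ; taken when the result is non-zero
      B %bb.1                                   ; so on entry to bb.1, $w0 == 0
    bb.1:
      $w0 = COPY $wzr                           ; redundant; deleted by the pass
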
Modified: llvm/trunk/test/CodeGen/AArch64/movimm-wzr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/movimm-wzr.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/movimm-wzr.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/movimm-wzr.mir Wed Jan 31 14:04:26 2018
@@ -32,11 +32,11 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0 (%ir-block.0):
-    %wzr = MOVi32imm 42
-    %xzr = MOVi64imm 42
-    RET_ReallyLR implicit killed %w0
+    $wzr = MOVi32imm 42
+    $xzr = MOVi64imm 42
+    RET_ReallyLR implicit killed $w0
 
 ...
 
 # CHECK: bb.0
-# CHECK-NEXT: RET undef %lr
+# CHECK-NEXT: RET undef $lr

Modified: llvm/trunk/test/CodeGen/AArch64/phi-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/phi-dbg.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/phi-dbg.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/phi-dbg.ll Wed Jan 31 14:04:26 2018
@@ -30,7 +30,7 @@ define i32 @func(i32) #0 !dbg !8 {
 ; CHECK: ldr     w[[REG:[0-9]+]], [sp, #8]
 ; CHECK-NEXT: .Ltmp
   call void @llvm.dbg.value(metadata i32 %.0, i64 0, metadata !15, metadata !13), !dbg !16
-; CHECK-NEXT:  //DEBUG_VALUE: func:c <- %w[[REG]]
+; CHECK-NEXT:  //DEBUG_VALUE: func:c <- $w[[REG]]
   %5 = add nsw i32 %.0, %0, !dbg !22
   call void @llvm.dbg.value(metadata i32 %5, i64 0, metadata !15, metadata !13), !dbg !16
   ret i32 %5, !dbg !23

Modified: llvm/trunk/test/CodeGen/AArch64/reg-scavenge-frame.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/reg-scavenge-frame.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/reg-scavenge-frame.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/reg-scavenge-frame.mir Wed Jan 31 14:04:26 2018
@@ -12,75 +12,75 @@ stack:
   - { id: 0, type: spill-slot, offset: 0, size: 32, alignment: 8 }
 body:             |
   bb.0:
-    liveins: %d16_d17_d18_d19
-    %x0 = COPY %xzr
-    %x1 = COPY %xzr
-    %x2 = COPY %xzr
-    %x3 = COPY %xzr
-    %x4 = COPY %xzr
-    %x5 = COPY %xzr
-    %x6 = COPY %xzr
-    %x7 = COPY %xzr
-    %x8 = COPY %xzr
-    %x9 = COPY %xzr
-    %x10 = COPY %xzr
-    %x11 = COPY %xzr
-    %x12 = COPY %xzr
-    %x13 = COPY %xzr
-    %x14 = COPY %xzr
-    %x15 = COPY %xzr
-    %x16 = COPY %xzr
-    %x17 = COPY %xzr
-    %x18 = COPY %xzr
-    %x19 = COPY %xzr
-    %x20 = COPY %xzr
-    %x21 = COPY %xzr
-    %x22 = COPY %xzr
-    %x23 = COPY %xzr
-    %x24 = COPY %xzr
-    %x25 = COPY %xzr
-    %x26 = COPY %xzr
-    %x27 = COPY %xzr
-    %x28 = COPY %xzr
-    %fp = COPY %xzr
-    %lr = COPY %xzr
-    ST1Fourv1d killed %d16_d17_d18_d19, %stack.0 :: (store 32 into %stack.0, align 8)
-    ; CHECK:  STRXui killed %[[SCAVREG:x[0-9]+|fp|lr]], %sp, [[SPOFFSET:[0-9]+]] :: (store 8 into %stack.1)
-    ; CHECK-NEXT:  %[[SCAVREG]] = ADDXri %sp, {{[0-9]+}}, 0
-    ; CHECK-NEXT:  ST1Fourv1d killed %d16_d17_d18_d19, killed %[[SCAVREG]] :: (store 32 into %stack.0, align 8)
-    ; CHECK-NEXT:  %[[SCAVREG]] = LDRXui %sp, [[SPOFFSET]] :: (load 8 from %stack.1)
+    liveins: $d16_d17_d18_d19
+    $x0 = COPY $xzr
+    $x1 = COPY $xzr
+    $x2 = COPY $xzr
+    $x3 = COPY $xzr
+    $x4 = COPY $xzr
+    $x5 = COPY $xzr
+    $x6 = COPY $xzr
+    $x7 = COPY $xzr
+    $x8 = COPY $xzr
+    $x9 = COPY $xzr
+    $x10 = COPY $xzr
+    $x11 = COPY $xzr
+    $x12 = COPY $xzr
+    $x13 = COPY $xzr
+    $x14 = COPY $xzr
+    $x15 = COPY $xzr
+    $x16 = COPY $xzr
+    $x17 = COPY $xzr
+    $x18 = COPY $xzr
+    $x19 = COPY $xzr
+    $x20 = COPY $xzr
+    $x21 = COPY $xzr
+    $x22 = COPY $xzr
+    $x23 = COPY $xzr
+    $x24 = COPY $xzr
+    $x25 = COPY $xzr
+    $x26 = COPY $xzr
+    $x27 = COPY $xzr
+    $x28 = COPY $xzr
+    $fp = COPY $xzr
+    $lr = COPY $xzr
+    ST1Fourv1d killed $d16_d17_d18_d19, %stack.0 :: (store 32 into %stack.0, align 8)
+    ; CHECK:  STRXui killed $[[SCAVREG:x[0-9]+|fp|lr]], $sp, [[SPOFFSET:[0-9]+]] :: (store 8 into %stack.1)
+    ; CHECK-NEXT:  $[[SCAVREG]] = ADDXri $sp, {{[0-9]+}}, 0
+    ; CHECK-NEXT:  ST1Fourv1d killed $d16_d17_d18_d19, killed $[[SCAVREG]] :: (store 32 into %stack.0, align 8)
+    ; CHECK-NEXT:  $[[SCAVREG]] = LDRXui $sp, [[SPOFFSET]] :: (load 8 from %stack.1)
 
-    HINT 0, implicit %x0
-    HINT 0, implicit %x1
-    HINT 0, implicit %x2
-    HINT 0, implicit %x3
-    HINT 0, implicit %x4
-    HINT 0, implicit %x5
-    HINT 0, implicit %x6
-    HINT 0, implicit %x7
-    HINT 0, implicit %x8
-    HINT 0, implicit %x9
-    HINT 0, implicit %x10
-    HINT 0, implicit %x11
-    HINT 0, implicit %x12
-    HINT 0, implicit %x13
-    HINT 0, implicit %x14
-    HINT 0, implicit %x15
-    HINT 0, implicit %x16
-    HINT 0, implicit %x17
-    HINT 0, implicit %x18
-    HINT 0, implicit %x19
-    HINT 0, implicit %x20
-    HINT 0, implicit %x21
-    HINT 0, implicit %x22
-    HINT 0, implicit %x23
-    HINT 0, implicit %x24
-    HINT 0, implicit %x25
-    HINT 0, implicit %x26
-    HINT 0, implicit %x27
-    HINT 0, implicit %x28
-    HINT 0, implicit %fp
-    HINT 0, implicit %lr
+    HINT 0, implicit $x0
+    HINT 0, implicit $x1
+    HINT 0, implicit $x2
+    HINT 0, implicit $x3
+    HINT 0, implicit $x4
+    HINT 0, implicit $x5
+    HINT 0, implicit $x6
+    HINT 0, implicit $x7
+    HINT 0, implicit $x8
+    HINT 0, implicit $x9
+    HINT 0, implicit $x10
+    HINT 0, implicit $x11
+    HINT 0, implicit $x12
+    HINT 0, implicit $x13
+    HINT 0, implicit $x14
+    HINT 0, implicit $x15
+    HINT 0, implicit $x16
+    HINT 0, implicit $x17
+    HINT 0, implicit $x18
+    HINT 0, implicit $x19
+    HINT 0, implicit $x20
+    HINT 0, implicit $x21
+    HINT 0, implicit $x22
+    HINT 0, implicit $x23
+    HINT 0, implicit $x24
+    HINT 0, implicit $x25
+    HINT 0, implicit $x26
+    HINT 0, implicit $x27
+    HINT 0, implicit $x28
+    HINT 0, implicit $fp
+    HINT 0, implicit $lr
 
     RET_ReallyLR
 ...

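Here the sigil change also lands inside a FileCheck capture,
$[[SCAVREG:x[0-9]+|fp|lr]], which now matches after a '$' rather than a '%'.
The sequence those CHECK lines pin down: with x0-x28, fp and lr all holding
values, the scavenger frees a register by spilling it to the emergency stack
slot, uses it to build the address ST1Fourv1d needs in a register, then
reloads it. With $x0 standing in for whichever register the scavenger picks,
and <off>/<off2> as placeholder immediates:

    STRXui killed $x0, $sp, <off> :: (store 8 into %stack.1)  ; spill scavenged reg
    $x0 = ADDXri $sp, <off2>, 0                               ; build slot address
    ST1Fourv1d killed $d16_d17_d18_d19, killed $x0 :: (store 32 into %stack.0, align 8)
    $x0 = LDRXui $sp, <off> :: (load 8 from %stack.1)         ; restore it
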
Modified: llvm/trunk/test/CodeGen/AArch64/regcoal-physreg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/regcoal-physreg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/regcoal-physreg.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/regcoal-physreg.mir Wed Jan 31 14:04:26 2018
@@ -13,79 +13,79 @@ name: func0
 body: |
   bb.0:
     ; We usually should not coalesce copies from allocatable physregs.
-    ; CHECK: %0:gpr32 = COPY %w7
-    ; CHECK: STRWui %0, %x1, 0
-    %0 : gpr32 = COPY %w7
-    STRWui %0, %x1, 0
+    ; CHECK: %0:gpr32 = COPY $w7
+    ; CHECK: STRWui %0, $x1, 0
+    %0 : gpr32 = COPY $w7
+    STRWui %0, $x1, 0
 
     ; It is fine to coalesce copies from reserved physregs
     ; CHECK-NOT: COPY
-    ; CHECK: STRXui %fp, %x1, 0
-    %1 : gpr64 = COPY %fp
-    STRXui %1, %x1, 0
+    ; CHECK: STRXui $fp, $x1, 0
+    %1 : gpr64 = COPY $fp
+    STRXui %1, $x1, 0
 
     ; It is not fine to coalesce copies from reserved physregs when they are
     ; clobbered.
-    ; CHECK: %2:gpr64 = COPY %fp
-    ; CHECK: STRXui %2, %x1, 0
-    %2 : gpr64 = COPY %fp
-    %fp = SUBXri %fp, 4, 0
-    STRXui %2, %x1, 0
+    ; CHECK: %2:gpr64 = COPY $fp
+    ; CHECK: STRXui %2, $x1, 0
+    %2 : gpr64 = COPY $fp
+    $fp = SUBXri $fp, 4, 0
+    STRXui %2, $x1, 0
 
     ; It is fine to coalesce copies from constant physregs even when they are
     ; clobbered.
     ; CHECK-NOT: COPY
-    ; CHECK: STRWui %wzr, %x1
-    %3 : gpr32 = COPY %wzr
-    dead %wzr = SUBSWri %w1, 0, 0, implicit-def %nzcv
-    STRWui %3, %x1, 0
+    ; CHECK: STRWui $wzr, $x1
+    %3 : gpr32 = COPY $wzr
+    dead $wzr = SUBSWri $w1, 0, 0, implicit-def $nzcv
+    STRWui %3, $x1, 0
 
     ; It is fine to coalesce copies from constant physregs even when they are
     ; clobbered.
     ; CHECK-NOT: COPY
-    ; CHECK: STRXui %xzr, %x1
-    %4 : gpr64 = COPY %xzr
-    dead %wzr = SUBSWri %w1, 0, 0, implicit-def %nzcv
-    STRXui %4, %x1, 0
+    ; CHECK: STRXui $xzr, $x1
+    %4 : gpr64 = COPY $xzr
+    dead $wzr = SUBSWri $w1, 0, 0, implicit-def $nzcv
+    STRXui %4, $x1, 0
 
     ; Coalescing COPYs into constant physregs.
-    ; CHECK: %wzr = SUBSWri %w1, 0, 0
-    %5 : gpr32 = SUBSWri %w1, 0, 0, implicit-def %nzcv
-    %wzr = COPY %5
+    ; CHECK: $wzr = SUBSWri $w1, 0, 0
+    %5 : gpr32 = SUBSWri $w1, 0, 0, implicit-def $nzcv
+    $wzr = COPY %5
 
     ; Only coalesce when the source register is reserved as a whole (this is
     ; a limitation of the current code, which cannot update the liveness
     ; information of the non-reserved part).
-    ; CHECK: %6:xseqpairsclass = COPY %x28_fp
+    ; CHECK: %6:xseqpairsclass = COPY $x28_fp
     ; CHECK: HINT 0, implicit %6
-    %6 : xseqpairsclass = COPY %x28_fp
+    %6 : xseqpairsclass = COPY $x28_fp
     HINT 0, implicit %6
 
     ; It is not fine to coalesce copies from reserved physregs when they are
     ; clobbered by the regmask on a call.
-    ; CHECK: %7:gpr64 = COPY %x18
-    ; CHECK: BL @f2, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
-    ; CHECK: STRXui %7, %x1, 0
+    ; CHECK: %7:gpr64 = COPY $x18
+    ; CHECK: BL @f2, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+    ; CHECK: STRXui %7, $x1, 0
 
     ; Need a def of x18 so that it's not deduced as "constant".
-    %x18 = COPY %xzr
-    %7 : gpr64 = COPY %x18
-    BL @f2, csr_aarch64_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
-    STRXui %7, %x1, 0
+    $x18 = COPY $xzr
+    %7 : gpr64 = COPY $x18
+    BL @f2, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+    STRXui %7, $x1, 0
 
     ; This can be coalesced.
-    ; CHECK: %fp = SUBXri %fp, 4, 0
-    %8 : gpr64sp = SUBXri %fp, 4, 0
-    %fp = COPY %8
+    ; CHECK: $fp = SUBXri $fp, 4, 0
+    %8 : gpr64sp = SUBXri $fp, 4, 0
+    $fp = COPY %8
 
     ; Cannot coalesce when there are reads of the physreg.
-    ; CHECK-NOT: %fp = SUBXri %fp, 8, 0
-    ; CHECK: %9:gpr64sp = SUBXri %fp, 8, 0
-    ; CHECK: STRXui %fp, %fp, 0
-    ; CHECK: %fp = COPY %9
-    %9 : gpr64sp = SUBXri %fp, 8, 0
-    STRXui %fp, %fp, 0
-    %fp = COPY %9
+    ; CHECK-NOT: $fp = SUBXri $fp, 8, 0
+    ; CHECK: %9:gpr64sp = SUBXri $fp, 8, 0
+    ; CHECK: STRXui $fp, $fp, 0
+    ; CHECK: $fp = COPY %9
+    %9 : gpr64sp = SUBXri $fp, 8, 0
+    STRXui $fp, $fp, 0
+    $fp = COPY %9
 ...
 ---
 # Check coalescing of COPYs from reserved physregs.
@@ -95,20 +95,20 @@ body: |
   bb.0:
     ; Cannot coalesce physreg because we have reads on other CFG paths (we
     ; currently abort for any control flow)
-    ; CHECK-NOT: %fp = SUBXri
-    ; CHECK: %0:gpr64sp = SUBXri %fp, 12, 0
-    ; CHECK: CBZX undef %x0, %bb.1
+    ; CHECK-NOT: $fp = SUBXri
+    ; CHECK: %0:gpr64sp = SUBXri $fp, 12, 0
+    ; CHECK: CBZX undef $x0, %bb.1
     ; CHECK: B %bb.2
-    %0 : gpr64sp = SUBXri %fp, 12, 0
-    CBZX undef %x0, %bb.1
+    %0 : gpr64sp = SUBXri $fp, 12, 0
+    CBZX undef $x0, %bb.1
     B %bb.2
 
   bb.1:
-    %fp = COPY %0
+    $fp = COPY %0
     RET_ReallyLR
 
   bb.2:
-    STRXui %fp, %fp, 0
+    STRXui $fp, $fp, 0
     RET_ReallyLR
 ...
 ---
@@ -118,16 +118,16 @@ body: |
   bb.0:
     ; We can coalesce copies from physreg to vreg across multiple blocks.
     ; CHECK-NOT: COPY
-    ; CHECK: CBZX undef %x0, %bb.1
+    ; CHECK: CBZX undef $x0, %bb.1
     ; CHECK-NEXT: B %bb.2
-    %0 : gpr64sp = COPY %fp
-    CBZX undef %x0, %bb.1
+    %0 : gpr64sp = COPY $fp
+    CBZX undef $x0, %bb.1
     B %bb.2
 
   bb.1:
-    ; CHECK: STRXui undef %x0, %fp, 0
+    ; CHECK: STRXui undef $x0, $fp, 0
     ; CHECK-NEXT: RET_ReallyLR
-    STRXui undef %x0, %0, 0
+    STRXui undef $x0, %0, 0
     RET_ReallyLR
 
   bb.2:

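One rule from func0 deserves a callout, as it is the only case above where
coalescing survives a clobber: copies from the constant physregs ($wzr, $xzr)
may be coalesced even across a redefinition, because those registers read the
same value everywhere. From the hunks above:

    %3 : gpr32 = COPY $wzr
    dead $wzr = SUBSWri $w1, 0, 0, implicit-def $nzcv  ; clobbers $wzr, harmlessly
    STRWui %3, $x1, 0

    ; after coalescing, %3 folds away:
    STRWui $wzr, $x1, 0
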
Modified: llvm/trunk/test/CodeGen/AArch64/scheduledag-constreg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/scheduledag-constreg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/scheduledag-constreg.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/scheduledag-constreg.mir Wed Jan 31 14:04:26 2018
@@ -7,23 +7,23 @@
 # Check that the instructions are not dependent on each other, even though
 # they all read/write to the zero register.
 # CHECK-LABEL: MI Scheduling
-# CHECK: SU(0): dead %wzr = SUBSWri %w1, 0, 0, implicit-def dead %nzcv
+# CHECK: SU(0): dead $wzr = SUBSWri $w1, 0, 0, implicit-def dead $nzcv
 # CHECK: # succs left : 0
 # CHECK-NOT: Successors:
-# CHECK: SU(1): %w2 = COPY %wzr
+# CHECK: SU(1): $w2 = COPY $wzr
 # CHECK: # succs left : 0
 # CHECK-NOT: Successors:
-# CHECK: SU(2): dead %wzr = SUBSWri %w3, 0, 0, implicit-def dead %nzcv
+# CHECK: SU(2): dead $wzr = SUBSWri $w3, 0, 0, implicit-def dead $nzcv
 # CHECK: # succs left : 0
 # CHECK-NOT: Successors:
-# CHECK: SU(3): %w4 = COPY %wzr
+# CHECK: SU(3): $w4 = COPY $wzr
 # CHECK: # succs left : 0
 # CHECK-NOT: Successors:
 name: func
 body: |
   bb.0:
-    dead %wzr = SUBSWri %w1, 0, 0, implicit-def dead %nzcv
-    %w2 = COPY %wzr
-    dead %wzr = SUBSWri %w3, 0, 0, implicit-def dead %nzcv
-    %w4 = COPY %wzr
+    dead $wzr = SUBSWri $w1, 0, 0, implicit-def dead $nzcv
+    $w2 = COPY $wzr
+    dead $wzr = SUBSWri $w3, 0, 0, implicit-def dead $nzcv
+    $w4 = COPY $wzr
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/spill-fold.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/spill-fold.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/spill-fold.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/spill-fold.mir Wed Jan 31 14:04:26 2018
@@ -14,11 +14,11 @@ registers:
   - { id: 0, class: gpr64 }
 body:             |
   bb.0:
-    ; CHECK: STRXui %xzr, %stack.0, 0 :: (store 8 into %stack.0)
-    undef %0.sub_32 = COPY %wzr
-    INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp
-    %x0 = COPY %0
-    RET_ReallyLR implicit %x0
+    ; CHECK: STRXui $xzr, %stack.0, 0 :: (store 8 into %stack.0)
+    undef %0.sub_32 = COPY $wzr
+    INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp
+    $x0 = COPY %0
+    RET_ReallyLR implicit $x0
 ...
 ---
 # CHECK-LABEL: name: test_subreg_spill_fold2
@@ -28,11 +28,11 @@ registers:
   - { id: 0, class: gpr64sp }
 body:             |
   bb.0:
-    ; CHECK: STRXui %xzr, %stack.0, 0 :: (store 8 into %stack.0)
-    undef %0.sub_32 = COPY %wzr
-    INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp
-    %x0 = ADDXri %0, 1, 0
-    RET_ReallyLR implicit %x0
+    ; CHECK: STRXui $xzr, %stack.0, 0 :: (store 8 into %stack.0)
+    undef %0.sub_32 = COPY $wzr
+    INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp
+    $x0 = ADDXri %0, 1, 0
+    RET_ReallyLR implicit $x0
 ...
 ---
 # CHECK-LABEL: name: test_subreg_spill_fold3
@@ -42,11 +42,11 @@ registers:
   - { id: 0, class: fpr64 }
 body:             |
   bb.0:
-    ; CHECK: STRXui %xzr, %stack.0, 0 :: (store 8 into %stack.0)
-    undef %0.ssub = COPY %wzr
-    INLINEASM &nop, 1, 12, implicit-def dead %d0, 12, implicit-def dead %d1, 12, implicit-def dead %d2, 12, implicit-def dead %d3, 12, implicit-def dead %d4, 12, implicit-def dead %d5, 12, implicit-def dead %d6, 12, implicit-def dead %d7, 12, implicit-def dead %d8, 12, implicit-def dead %d9, 12, implicit-def dead %d10, 12, implicit-def dead %d11, 12, implicit-def dead %d12, 12, implicit-def dead %d13, 12, implicit-def dead %d14, 12, implicit-def dead %d15, 12, implicit-def dead %d16, 12, implicit-def dead %d17, 12, implicit-def dead %d18, 12, implicit-def dead %d19, 12, implicit-def dead %d20, 12, implicit-def dead %d21, 12, implicit-def dead %d22, 12, implicit-def dead %d23, 12, implicit-def dead %d24, 12, implicit-def dead %d25, 12, implicit-def dead %d26, 12, implicit-def dead %d27, 12, implicit-def dead %d28, 12, implicit-def dead %d29, 12, implicit-def dead %d30, 12, implicit-def %d31
-    %x0 = COPY %0
-    RET_ReallyLR implicit %x0
+    ; CHECK: STRXui $xzr, %stack.0, 0 :: (store 8 into %stack.0)
+    undef %0.ssub = COPY $wzr
+    INLINEASM &nop, 1, 12, implicit-def dead $d0, 12, implicit-def dead $d1, 12, implicit-def dead $d2, 12, implicit-def dead $d3, 12, implicit-def dead $d4, 12, implicit-def dead $d5, 12, implicit-def dead $d6, 12, implicit-def dead $d7, 12, implicit-def dead $d8, 12, implicit-def dead $d9, 12, implicit-def dead $d10, 12, implicit-def dead $d11, 12, implicit-def dead $d12, 12, implicit-def dead $d13, 12, implicit-def dead $d14, 12, implicit-def dead $d15, 12, implicit-def dead $d16, 12, implicit-def dead $d17, 12, implicit-def dead $d18, 12, implicit-def dead $d19, 12, implicit-def dead $d20, 12, implicit-def dead $d21, 12, implicit-def dead $d22, 12, implicit-def dead $d23, 12, implicit-def dead $d24, 12, implicit-def dead $d25, 12, implicit-def dead $d26, 12, implicit-def dead $d27, 12, implicit-def dead $d28, 12, implicit-def dead $d29, 12, implicit-def dead $d30, 12, implicit-def $d31
+    $x0 = COPY %0
+    RET_ReallyLR implicit $x0
 ...
 ---
 # CHECK-LABEL: name: test_subreg_fill_fold
@@ -57,12 +57,12 @@ registers:
   - { id: 1, class: gpr64 }
 body:             |
   bb.0:
-    %0 = COPY %wzr
-    INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp
+    %0 = COPY $wzr
+    INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp
     ; CHECK: undef %1.sub_32:gpr64 = LDRWui %stack.0, 0 :: (load 4 from %stack.0)
     undef %1.sub_32 = COPY %0
-    %x0 = COPY %1
-    RET_ReallyLR implicit %x0
+    $x0 = COPY %1
+    RET_ReallyLR implicit $x0
 ...
 ---
 # CHECK-LABEL: name: test_subreg_fill_fold2
@@ -73,10 +73,10 @@ registers:
   - { id: 1, class: fpr64 }
 body:             |
   bb.0:
-    %0 = COPY %wzr
-    INLINEASM &nop, 1, 12, implicit-def dead %x0, 12, implicit-def dead %x1, 12, implicit-def dead %x2, 12, implicit-def dead %x3, 12, implicit-def dead %x4, 12, implicit-def dead %x5, 12, implicit-def dead %x6, 12, implicit-def dead %x7, 12, implicit-def dead %x8, 12, implicit-def dead %x9, 12, implicit-def dead %x10, 12, implicit-def dead %x11, 12, implicit-def dead %x12, 12, implicit-def dead %x13, 12, implicit-def dead %x14, 12, implicit-def dead %x15, 12, implicit-def dead %x16, 12, implicit-def dead %x17, 12, implicit-def dead %x18, 12, implicit-def dead %x19, 12, implicit-def dead %x20, 12, implicit-def dead %x21, 12, implicit-def dead %x22, 12, implicit-def dead %x23, 12, implicit-def dead %x24, 12, implicit-def dead %x25, 12, implicit-def dead %x26, 12, implicit-def dead %x27, 12, implicit-def dead %x28, 12, implicit-def dead %fp, 12, implicit-def dead %lr, 12, implicit-def %sp
+    %0 = COPY $wzr
+    INLINEASM &nop, 1, 12, implicit-def dead $x0, 12, implicit-def dead $x1, 12, implicit-def dead $x2, 12, implicit-def dead $x3, 12, implicit-def dead $x4, 12, implicit-def dead $x5, 12, implicit-def dead $x6, 12, implicit-def dead $x7, 12, implicit-def dead $x8, 12, implicit-def dead $x9, 12, implicit-def dead $x10, 12, implicit-def dead $x11, 12, implicit-def dead $x12, 12, implicit-def dead $x13, 12, implicit-def dead $x14, 12, implicit-def dead $x15, 12, implicit-def dead $x16, 12, implicit-def dead $x17, 12, implicit-def dead $x18, 12, implicit-def dead $x19, 12, implicit-def dead $x20, 12, implicit-def dead $x21, 12, implicit-def dead $x22, 12, implicit-def dead $x23, 12, implicit-def dead $x24, 12, implicit-def dead $x25, 12, implicit-def dead $x26, 12, implicit-def dead $x27, 12, implicit-def dead $x28, 12, implicit-def dead $fp, 12, implicit-def dead $lr, 12, implicit-def $sp
     ; CHECK: undef %1.ssub:fpr64 = LDRSui %stack.0, 0 :: (load 4 from %stack.0)
     undef %1.ssub = COPY %0
-    %d0 = COPY %1
-    RET_ReallyLR implicit %d0
+    $d0 = COPY %1
+    RET_ReallyLR implicit $d0
 ...

Modified: llvm/trunk/test/CodeGen/AArch64/spill-undef.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/spill-undef.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/spill-undef.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/spill-undef.mir Wed Jan 31 14:04:26 2018
@@ -30,7 +30,7 @@ registers:
   - { id: 9, class: gpr64 }
 body:             |
   bb.0:
-    liveins: %x0
+    liveins: $x0
     successors: %bb.1, %bb.2
 
     ; %8 is going to be spilled.
@@ -43,25 +43,25 @@ body:             |
     ; %9 is going to be spilled.
     ; But it is only partially undef.
     ; Make sure we spill it properly
-    ; CHECK: [[NINE:%[0-9]+]]:gpr64 = COPY %x0
+    ; CHECK: [[NINE:%[0-9]+]]:gpr64 = COPY $x0
     ; CHECK: [[NINE]].sub_32:gpr64 = IMPLICIT_DEF
     ; CHECK-NEXT: STRXui [[NINE]]
-    %9 = COPY %x0
+    %9 = COPY $x0
     %9.sub_32 = IMPLICIT_DEF
-    CBNZW %wzr, %bb.2
+    CBNZW $wzr, %bb.2
     B %bb.1
 
   bb.1:
     %4 = ADRP target-flags(aarch64-page) @g
     %8 = LDRWui %4, target-flags(aarch64-pageoff, aarch64-nc) @g :: (volatile dereferenceable load 4 from @g)
-    INLINEASM &nop, 1, 12, implicit-def dead early-clobber %x0, 12, implicit-def dead early-clobber %x1, 12, implicit-def dead early-clobber %x2, 12, implicit-def dead early-clobber %x3, 12, implicit-def dead early-clobber %x4, 12, implicit-def dead early-clobber %x5, 12, implicit-def dead early-clobber %x6, 12, implicit-def dead early-clobber %x7, 12, implicit-def dead early-clobber %x8, 12, implicit-def dead early-clobber %x9, 12, implicit-def dead early-clobber %x10, 12, implicit-def dead early-clobber %x11, 12, implicit-def dead early-clobber %x12, 12, implicit-def dead early-clobber %x13, 12, implicit-def dead early-clobber %x14, 12, implicit-def dead early-clobber %x15, 12, implicit-def dead early-clobber %x16, 12, implicit-def dead early-clobber %x17, 12, implicit-def dead early-clobber %x18, 12, implicit-def dead early-clobber %x19, 12, implicit-def dead early-clobber %x20, 12, implicit-def dead early-clobber %x21, 12, implicit-def dead early-clobber %x22, 12, implicit-def dead early-clobber %x23, 12, implicit-def dead early-clobber %x24, 12, implicit-def dead early-clobber %x25, 12, implicit-def dead early-clobber %x26, 12, implicit-def dead early-clobber %x27, 12, implicit-def dead early-clobber %x28, 12, implicit-def dead early-clobber %fp, 12, implicit-def dead early-clobber %lr
+    INLINEASM &nop, 1, 12, implicit-def dead early-clobber $x0, 12, implicit-def dead early-clobber $x1, 12, implicit-def dead early-clobber $x2, 12, implicit-def dead early-clobber $x3, 12, implicit-def dead early-clobber $x4, 12, implicit-def dead early-clobber $x5, 12, implicit-def dead early-clobber $x6, 12, implicit-def dead early-clobber $x7, 12, implicit-def dead early-clobber $x8, 12, implicit-def dead early-clobber $x9, 12, implicit-def dead early-clobber $x10, 12, implicit-def dead early-clobber $x11, 12, implicit-def dead early-clobber $x12, 12, implicit-def dead early-clobber $x13, 12, implicit-def dead early-clobber $x14, 12, implicit-def dead early-clobber $x15, 12, implicit-def dead early-clobber $x16, 12, implicit-def dead early-clobber $x17, 12, implicit-def dead early-clobber $x18, 12, implicit-def dead early-clobber $x19, 12, implicit-def dead early-clobber $x20, 12, implicit-def dead early-clobber $x21, 12, implicit-def dead early-clobber $x22, 12, implicit-def dead early-clobber $x23, 12, implicit-def dead early-clobber $x24, 12, implicit-def dead early-clobber $x25, 12, implicit-def dead early-clobber $x26, 12, implicit-def dead early-clobber $x27, 12, implicit-def dead early-clobber $x28, 12, implicit-def dead early-clobber $fp, 12, implicit-def dead early-clobber $lr
 
   bb.2:
-    INLINEASM &nop, 1, 12, implicit-def dead early-clobber %x0, 12, implicit-def dead early-clobber %x1, 12, implicit-def dead early-clobber %x2, 12, implicit-def dead early-clobber %x3, 12, implicit-def dead early-clobber %x4, 12, implicit-def dead early-clobber %x5, 12, implicit-def dead early-clobber %x6, 12, implicit-def dead early-clobber %x7, 12, implicit-def dead early-clobber %x8, 12, implicit-def dead early-clobber %x9, 12, implicit-def dead early-clobber %x10, 12, implicit-def dead early-clobber %x11, 12, implicit-def dead early-clobber %x12, 12, implicit-def dead early-clobber %x13, 12, implicit-def dead early-clobber %x14, 12, implicit-def dead early-clobber %x15, 12, implicit-def dead early-clobber %x16, 12, implicit-def dead early-clobber %x17, 12, implicit-def dead early-clobber %x18, 12, implicit-def dead early-clobber %x19, 12, implicit-def dead early-clobber %x20, 12, implicit-def dead early-clobber %x21, 12, implicit-def dead early-clobber %x22, 12, implicit-def dead early-clobber %x23, 12, implicit-def dead early-clobber %x24, 12, implicit-def dead early-clobber %x25, 12, implicit-def dead early-clobber %x26, 12, implicit-def dead early-clobber %x27, 12, implicit-def dead early-clobber %x28, 12, implicit-def dead early-clobber %fp, 12, implicit-def dead early-clobber %lr
+    INLINEASM &nop, 1, 12, implicit-def dead early-clobber $x0, 12, implicit-def dead early-clobber $x1, 12, implicit-def dead early-clobber $x2, 12, implicit-def dead early-clobber $x3, 12, implicit-def dead early-clobber $x4, 12, implicit-def dead early-clobber $x5, 12, implicit-def dead early-clobber $x6, 12, implicit-def dead early-clobber $x7, 12, implicit-def dead early-clobber $x8, 12, implicit-def dead early-clobber $x9, 12, implicit-def dead early-clobber $x10, 12, implicit-def dead early-clobber $x11, 12, implicit-def dead early-clobber $x12, 12, implicit-def dead early-clobber $x13, 12, implicit-def dead early-clobber $x14, 12, implicit-def dead early-clobber $x15, 12, implicit-def dead early-clobber $x16, 12, implicit-def dead early-clobber $x17, 12, implicit-def dead early-clobber $x18, 12, implicit-def dead early-clobber $x19, 12, implicit-def dead early-clobber $x20, 12, implicit-def dead early-clobber $x21, 12, implicit-def dead early-clobber $x22, 12, implicit-def dead early-clobber $x23, 12, implicit-def dead early-clobber $x24, 12, implicit-def dead early-clobber $x25, 12, implicit-def dead early-clobber $x26, 12, implicit-def dead early-clobber $x27, 12, implicit-def dead early-clobber $x28, 12, implicit-def dead early-clobber $fp, 12, implicit-def dead early-clobber $lr
     %6 = ADRP target-flags(aarch64-page) @g
-    %w0 = MOVi32imm 42
+    $w0 = MOVi32imm 42
     STRWui %8, %6, target-flags(aarch64-pageoff, aarch64-nc) @g :: (volatile store 4 into @g)
     STRXui %9, %6, target-flags(aarch64-pageoff, aarch64-nc) @g :: (volatile store 8 into @g)
-    RET_ReallyLR implicit killed %w0
+    RET_ReallyLR implicit killed $w0
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir Wed Jan 31 14:04:26 2018
@@ -13,16 +13,16 @@ legalized:       true
 regBankSelected: true
 
 # GCN: global_addrspace
-# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY %vgpr0_vgpr1
+# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
 # GCN: FLAT_LOAD_DWORD  [[PTR]], 0, 0, 0
 
 body: |
   bb.0:
-    liveins:  %vgpr0_vgpr1
+    liveins:  $vgpr0_vgpr1
 
-    %0:vgpr(p1) = COPY %vgpr0_vgpr1
+    %0:vgpr(p1) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = G_LOAD %0 :: (load 4 from %ir.global0)
-    %vgpr0 = COPY %1
+    $vgpr0 = COPY %1
 
 ...
 ---

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir Wed Jan 31 14:04:26 2018
@@ -14,7 +14,7 @@ legalized:       true
 regBankSelected: true
 
 # GCN: body:
-# GCN: [[PTR:%[0-9]+]]:sreg_64 = COPY %sgpr0_sgpr1
+# GCN: [[PTR:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
 
 # Immediate offset:
 # SICI: S_LOAD_DWORD_IMM [[PTR]], 1, 0
@@ -89,54 +89,54 @@ regBankSelected: true
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0:sgpr(p2) = COPY %sgpr0_sgpr1
+    %0:sgpr(p2) = COPY $sgpr0_sgpr1
 
     %1:sgpr(s64) = G_CONSTANT i64 4
     %2:sgpr(p2) = G_GEP %0, %1
     %3:sgpr(s32) = G_LOAD %2 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %3
+    $sgpr0 = COPY %3
 
     %4:sgpr(s64) = G_CONSTANT i64 1020
     %5:sgpr(p2) = G_GEP %0, %4
     %6:sgpr(s32) = G_LOAD %5 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %6
+    $sgpr0 = COPY %6
 
     %7:sgpr(s64) = G_CONSTANT i64 1024
     %8:sgpr(p2) = G_GEP %0, %7
     %9:sgpr(s32) = G_LOAD %8 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %9
+    $sgpr0 = COPY %9
 
     %10:sgpr(s64) = G_CONSTANT i64 1048572
     %11:sgpr(p2) = G_GEP %0, %10
     %12:sgpr(s32) = G_LOAD %11 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %12
+    $sgpr0 = COPY %12
 
     %13:sgpr(s64) = G_CONSTANT i64 1048576
     %14:sgpr(p2) = G_GEP %0, %13
     %15:sgpr(s32) = G_LOAD %14 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %15
+    $sgpr0 = COPY %15
 
     %16:sgpr(s64) = G_CONSTANT i64 17179869180
     %17:sgpr(p2) = G_GEP %0, %16
     %18:sgpr(s32) = G_LOAD %17 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %18
+    $sgpr0 = COPY %18
 
     %19:sgpr(s64) = G_CONSTANT i64 17179869184
     %20:sgpr(p2) = G_GEP %0, %19
     %21:sgpr(s32) = G_LOAD %20 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %21
+    $sgpr0 = COPY %21
 
     %22:sgpr(s64) = G_CONSTANT i64 4294967292
     %23:sgpr(p2) = G_GEP %0, %22
     %24:sgpr(s32) = G_LOAD %23 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %24
+    $sgpr0 = COPY %24
 
     %25:sgpr(s64) = G_CONSTANT i64 4294967296
     %26:sgpr(p2) = G_GEP %0, %25
     %27:sgpr(s32) = G_LOAD %26 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %27
+    $sgpr0 = COPY %27
 
 ...
 ---

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir Wed Jan 31 14:04:26 2018
@@ -13,16 +13,16 @@ legalized:       true
 regBankSelected: true
 
 # GCN: global_addrspace
-# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY %vgpr0_vgpr1
-# GCN: [[VAL:%[0-9]+]]:vgpr_32 = COPY %vgpr2
+# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+# GCN: [[VAL:%[0-9]+]]:vgpr_32 = COPY $vgpr2
 # GCN: FLAT_STORE_DWORD [[PTR]], [[VAL]], 0, 0, 0
 
 body: |
   bb.0:
-    liveins:  %vgpr0_vgpr1, %vgpr2
+    liveins:  $vgpr0_vgpr1, $vgpr2
 
-    %0:vgpr(p1) = COPY %vgpr0_vgpr1
-    %1:vgpr(s32) = COPY %vgpr2
+    %0:vgpr(p1) = COPY $vgpr0_vgpr1
+    %1:vgpr(s32) = COPY $vgpr2
     G_STORE %1, %0 :: (store 4 into %ir.global0)
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll Wed Jan 31 14:04:26 2018
@@ -2,7 +2,7 @@
 
 
 ; CHECK-LABEL: name: test_f32_inreg
-; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0
+; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[S0]]
 define amdgpu_vs void @test_f32_inreg(float inreg %arg0) {
   call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
@@ -10,7 +10,7 @@ define amdgpu_vs void @test_f32_inreg(fl
 }
 
 ; CHECK-LABEL: name: test_f32
-; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY %vgpr0
+; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY $vgpr0
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[V0]]
 define amdgpu_vs void @test_f32(float %arg0) {
   call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
@@ -18,7 +18,7 @@ define amdgpu_vs void @test_f32(float %a
 }
 
 ; CHECK-LABEL: name: test_ptr2_byval
-; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY %sgpr0_sgpr1
+; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY $sgpr0_sgpr1
 ; CHECK: G_LOAD [[S01]]
 define amdgpu_vs void @test_ptr2_byval(i32 addrspace(2)* byval %arg0) {
    %tmp0 = load volatile i32, i32 addrspace(2)* %arg0
@@ -26,7 +26,7 @@ define amdgpu_vs void @test_ptr2_byval(i
 }
 
 ; CHECK-LABEL: name: test_ptr2_inreg
-; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY %sgpr0_sgpr1
+; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY $sgpr0_sgpr1
 ; CHECK: G_LOAD [[S01]]
 define amdgpu_vs void @test_ptr2_inreg(i32 addrspace(2)* inreg %arg0) {
   %tmp0 = load volatile i32, i32 addrspace(2)* %arg0
@@ -34,8 +34,8 @@ define amdgpu_vs void @test_ptr2_inreg(i
 }
 
 ; CHECK-LABEL: name: test_sgpr_alignment0
-; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0
-; CHECK: [[S23:%[0-9]+]]:_(p2) = COPY %sgpr2_sgpr3
+; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0
+; CHECK: [[S23:%[0-9]+]]:_(p2) = COPY $sgpr2_sgpr3
 ; CHECK: G_LOAD [[S23]]
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[S0]]
 define amdgpu_vs void @test_sgpr_alignment0(float inreg %arg0, i32 addrspace(2)* inreg %arg1) {
@@ -45,10 +45,10 @@ define amdgpu_vs void @test_sgpr_alignme
 }
 
 ; CHECK-LABEL: name: test_order
-; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0
-; CHECK: [[S1:%[0-9]+]]:_(s32) = COPY %sgpr1
-; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY %vgpr0
-; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY %vgpr1
+; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0
+; CHECK: [[S1:%[0-9]+]]:_(s32) = COPY $sgpr1
+; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY $vgpr0
+; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY $vgpr1
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[V0]](s32), [[S0]](s32), [[V1]](s32), [[S1]](s32)
 define amdgpu_vs void @test_order(float inreg %arg0, float inreg %arg1, float %arg2, float %arg3) {
   call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg2, float %arg0, float %arg3, float %arg1, i1 false, i1 false) #0

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir Wed Jan 31 14:04:26 2018
@@ -13,14 +13,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_add
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_ADD %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir Wed Jan 31 14:04:26 2018
@@ -13,14 +13,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_and
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_AND %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir Wed Jan 31 14:04:26 2018
@@ -13,14 +13,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_bitcast
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY]](s32)
     ; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST]](<2 x s16>)
-    %0(s32) = COPY %vgpr0
+    %0(s32) = COPY $vgpr0
     %1(<2 x s16>) = G_BITCAST %0
     %2(s32) = G_BITCAST %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir Wed Jan 31 14:04:26 2018
@@ -47,7 +47,7 @@ body: |
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
     ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 7.500000e+00
     %0(s32) = G_FCONSTANT float 1.0
-    %vgpr0 = COPY %0
+    $vgpr0 = COPY %0
     %1(s32) = G_FCONSTANT float 7.5
-    %vgpr0 = COPY %1
+    $vgpr0 = COPY %1
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir Wed Jan 31 14:04:26 2018
@@ -16,12 +16,12 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
     ; CHECK-LABEL: name: test_fadd
     ; CHECK: %2:_(s32) = G_FADD %0, %1
 
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_FADD %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir Wed Jan 31 14:04:26 2018
@@ -13,14 +13,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_fmul
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_FMUL %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir Wed Jan 31 14:04:26 2018
@@ -16,16 +16,16 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %vgpr0
+    liveins: $vgpr0
     ; CHECK-LABEL: name: test_icmp
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]]
     ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[COPY]]
-    ; CHECK: %vgpr0 = COPY [[SELECT]](s32)
+    ; CHECK: $vgpr0 = COPY [[SELECT]](s32)
     %0(s32) = G_CONSTANT i32 0
-    %1(s32) = COPY %vgpr0
+    %1(s32) = COPY $vgpr0
     %2(s1) = G_ICMP intpred(ne), %0, %1
     %3:_(s32) = G_SELECT %2(s1), %0(s32), %1(s32)
-    %vgpr0 = COPY %3
+    $vgpr0 = COPY %3
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir Wed Jan 31 14:04:26 2018
@@ -12,14 +12,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_or
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_OR %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir Wed Jan 31 14:04:26 2018
@@ -16,21 +16,21 @@ registers:
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
     ; CHECK-LABEL: name: test_select
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]]
     ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
     ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]]
     %0(s32) = G_CONSTANT i32 0
-    %1(s32) = COPY %vgpr0
+    %1(s32) = COPY $vgpr0
 
     %2(s1) = G_ICMP intpred(ne), %0, %1
     %3(s32) = G_CONSTANT i32 1
     %4(s32) = G_CONSTANT i32 2
     %5(s32) = G_SELECT %2, %3, %4
-    %vgpr0 = COPY %5
+    $vgpr0 = COPY %5
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir Wed Jan 31 14:04:26 2018
@@ -9,14 +9,14 @@ registers:
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_shl
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_SHL %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir Wed Jan 31 14:04:26 2018
@@ -29,8 +29,8 @@ legalized: true
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
-    %0:_(p2) = COPY %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
+    %0:_(p2) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr0)
 ...
 
@@ -45,8 +45,8 @@ legalized: true
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
-    %0:_(p1) = COPY %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
+    %0:_(p1) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr1)
 ...
 
@@ -63,7 +63,7 @@ legalized: true
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
-    %0:_(p1) = COPY %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
+    %0:_(p1) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.tmp1)
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir Wed Jan 31 14:04:26 2018
@@ -8,9 +8,9 @@ name: trivial_smem_clause_load_smrd4_x1
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x1
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -20,11 +20,11 @@ name: trivial_smem_clause_load_smrd4_x2
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x2
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr1 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr1 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -34,13 +34,13 @@ name: trivial_smem_clause_load_smrd4_x3
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x3
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr6_sgpr7, 0, 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    %sgpr1 = S_LOAD_DWORD_IMM %sgpr6_sgpr7, 0, 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    $sgpr1 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     S_ENDPGM
 ...
 ---
@@ -50,15 +50,15 @@ name: trivial_smem_clause_load_smrd4_x4
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x4
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr8_sgpr9, 0, 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
-    ; GCN-NEXT: %sgpr3 = S_LOAD_DWORD_IMM %sgpr16_sgpr17, 0, 0
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    %sgpr1 = S_LOAD_DWORD_IMM %sgpr8_sgpr9, 0, 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
-    %sgpr3 = S_LOAD_DWORD_IMM %sgpr16_sgpr17, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr8_sgpr9, 0, 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
+    ; GCN-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr16_sgpr17, 0, 0
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    $sgpr1 = S_LOAD_DWORD_IMM $sgpr8_sgpr9, 0, 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
+    $sgpr3 = S_LOAD_DWORD_IMM $sgpr16_sgpr17, 0, 0
     S_ENDPGM
 ...
 ---
@@ -67,11 +67,11 @@ name: trivial_smem_clause_load_smrd4_x2_
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x2_sameptr
-    ; GCN: %sgpr12 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr12 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -81,9 +81,9 @@ name: smrd_load4_overwrite_ptr_lo
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_load4_overwrite_ptr_lo
-    ; GCN: %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -93,9 +93,9 @@ name: smrd_load4_overwrite_ptr_hi
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_load4_overwrite_ptr_hi
-    ; GCN: %sgpr11 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr11 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr11 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr11 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -105,9 +105,9 @@ name: smrd_load8_overwrite_ptr
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_load8_overwrite_ptr
-    ; GCN: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -119,47 +119,47 @@ name: break_smem_clause_at_max_smem_clau
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_at_max_smem_clause_size_smrd_load4
-    ; GCN: %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr14 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr15 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr16 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr17 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr18 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr19 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr20 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr21 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr22 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr23 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr24 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr25 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr26 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr27 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr28 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr0 = S_LOAD_DWORD_IMM %sgpr30_sgpr31, 0, 0
-    ; GCN-NEXT: %sgpr0 = S_MOV_B32 %sgpr0, implicit %sgpr13, implicit %sgpr14, implicit %sgpr15, implicit %sgpr16, implicit %sgpr17, implicit %sgpr18, implicit %sgpr19, implicit %sgpr20, implicit %sgpr21, implicit %sgpr22, implicit %sgpr23, implicit %sgpr24, implicit %sgpr25, implicit %sgpr26, implicit %sgpr27, implicit %sgpr28
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr14 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr15 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr16 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr17 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr18 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr19 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr20 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr21 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr22 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr23 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr24 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-
-    %sgpr25 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr26 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr27 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr28 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr14 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr15 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr16 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr17 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr18 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr19 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr20 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr21 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr22 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr23 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr24 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr25 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr26 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr27 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr28 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr0 = S_LOAD_DWORD_IMM $sgpr30_sgpr31, 0, 0
+    ; GCN-NEXT: $sgpr0 = S_MOV_B32 $sgpr0, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $sgpr16, implicit $sgpr17, implicit $sgpr18, implicit $sgpr19, implicit $sgpr20, implicit $sgpr21, implicit $sgpr22, implicit $sgpr23, implicit $sgpr24, implicit $sgpr25, implicit $sgpr26, implicit $sgpr27, implicit $sgpr28
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr14 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr15 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr16 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr17 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr18 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr19 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr20 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr21 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr22 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr23 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr24 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+
+    $sgpr25 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr26 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr27 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr28 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
 
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr30_sgpr31, 0, 0
-    %sgpr0 = S_MOV_B32 %sgpr0, implicit %sgpr13, implicit %sgpr14, implicit %sgpr15, implicit %sgpr16, implicit %sgpr17, implicit %sgpr18, implicit %sgpr19, implicit %sgpr20, implicit %sgpr21, implicit %sgpr22, implicit %sgpr23, implicit %sgpr24, implicit %sgpr25, implicit %sgpr26, implicit %sgpr27, implicit %sgpr28
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr30_sgpr31, 0, 0
+    $sgpr0 = S_MOV_B32 $sgpr0, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $sgpr16, implicit $sgpr17, implicit $sgpr18, implicit $sgpr19, implicit $sgpr20, implicit $sgpr21, implicit $sgpr22, implicit $sgpr23, implicit $sgpr24, implicit $sgpr25, implicit $sgpr26, implicit $sgpr27, implicit $sgpr28
     S_ENDPGM
 ...
 ---
@@ -169,12 +169,12 @@ name: break_smem_clause_simple_load_smrd
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd4_lo_ptr
-    ; GCN: %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr12 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr12 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -184,11 +184,11 @@ name: break_smem_clause_simple_load_smrd
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd4_hi_ptr
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr3 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr3 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr3 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -198,12 +198,12 @@ name: break_smem_clause_simple_load_smrd
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd8_ptr
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -213,11 +213,11 @@ name: break_smem_clause_simple_load_smrd
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd16_ptr
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM %sgpr6_sgpr7, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM $sgpr6_sgpr7, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM %sgpr6_sgpr7, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM $sgpr6_sgpr7, 0, 0
     S_ENDPGM
 ...
 ---
@@ -228,16 +228,16 @@ body: |
   ; GCN-LABEL: name: break_smem_clause_block_boundary_load_smrd8_ptr
   ; GCN: bb.0:
   ; GCN:   successors: %bb.1(0x80000000)
-  ; GCN:   %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+  ; GCN:   $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
   ; GCN: bb.1:
   ; XNACK-NEXT:   S_NOP 0
-  ; GCN-NEXT:   %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+  ; GCN-NEXT:   $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
   ; GCN-NEXT:   S_ENDPGM
   bb.0:
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
 
   bb.1:
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -248,11 +248,11 @@ name: break_smem_clause_store_load_into_
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_store_load_into_ptr_smrd4
-    ; GCN: S_STORE_DWORD_IMM %sgpr16, %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr12 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    ; GCN: S_STORE_DWORD_IMM $sgpr16, $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr12 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    S_STORE_DWORD_IMM %sgpr16, %sgpr10_sgpr11, 0, 0
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    S_STORE_DWORD_IMM $sgpr16, $sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     S_ENDPGM
 ...
 ---
@@ -264,11 +264,11 @@ name: break_smem_clause_store_load_into_
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_store_load_into_data_smrd4
-    ; GCN: S_STORE_DWORD_IMM %sgpr8, %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr8 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: S_STORE_DWORD_IMM $sgpr8, $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr8 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    S_STORE_DWORD_IMM %sgpr8, %sgpr10_sgpr11, 0, 0
-    %sgpr8 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    S_STORE_DWORD_IMM $sgpr8, $sgpr10_sgpr11, 0, 0
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -278,13 +278,13 @@ name: valu_inst_breaks_smem_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: valu_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -294,13 +294,13 @@ name: salu_inst_breaks_smem_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: salu_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr8 = S_MOV_B32 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr8 = S_MOV_B32 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr8 = S_MOV_B32 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr8 = S_MOV_B32 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -309,13 +309,13 @@ name: ds_inst_breaks_smem_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: ds_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -325,13 +325,13 @@ name: flat_inst_breaks_smem_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: S_ENDPGM
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -341,11 +341,11 @@ name: implicit_use_breaks_smem_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_use_breaks_smem_clause
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0, implicit %sgpr12_sgpr13
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0, implicit $sgpr12_sgpr13
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM %sgpr6_sgpr7, 0, 0
+    ; GCN-NEXT: $sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM $sgpr6_sgpr7, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0, implicit %sgpr12_sgpr13
-    %sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM %sgpr6_sgpr7, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0, implicit $sgpr12_sgpr13
+    $sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM $sgpr6_sgpr7, 0, 0
     S_ENDPGM
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir Wed Jan 31 14:04:26 2018
@@ -7,10 +7,10 @@ name: trivial_clause_load_flat4_x1
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x1
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -20,12 +20,12 @@ name: trivial_clause_load_flat4_x2
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x2
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -35,14 +35,14 @@ name: trivial_clause_load_flat4_x3
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x3
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr5_vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr5_vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -52,16 +52,16 @@ name: trivial_clause_load_flat4_x4
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x4
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr8_vgpr9, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr10_vgpr11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr8_vgpr9, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr10_vgpr11, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr8_vgpr9, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr10_vgpr11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr8_vgpr9, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr10_vgpr11, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -71,12 +71,12 @@ name: trivial_clause_load_flat4_x2_samep
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x2_sameptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -86,10 +86,10 @@ name: flat_load4_overwrite_ptr_lo
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load4_overwrite_ptr_lo
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -99,10 +99,10 @@ name: flat_load4_overwrite_ptr_hi
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load4_overwrite_ptr_hi
-    ; GCN: %vgpr1 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr1 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -112,10 +112,10 @@ name: flat_load8_overwrite_ptr
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load8_overwrite_ptr
-    ; GCN: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -128,49 +128,49 @@ name: break_clause_at_max_clause_size_fl
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_at_max_clause_size_flat_load4
-    ; GCN: %vgpr2 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr4 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr5 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr6 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr7 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr9 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr12 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr13 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr14 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr15 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr16 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr17 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr0 = S_MOV_B32 %sgpr0, implicit %vgpr2, implicit %vgpr3, implicit %vgpr4, implicit %vgpr5, implicit %vgpr6, implicit %vgpr7, implicit %vgpr8, implicit %vgpr9, implicit %vgpr10, implicit %vgpr11, implicit %vgpr12, implicit %vgpr13, implicit %vgpr14, implicit %vgpr15, implicit %vgpr16, implicit %vgpr17, implicit %vgpr18
-    ; GCN-NEXT: S_ENDPGM
-
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr4 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr5 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-
-    %vgpr6 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr7 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr9 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr12 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr13 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-
-    %vgpr14 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr15 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr16 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr17 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr4 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr5 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr6 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr7 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr9 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr12 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr13 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr14 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr15 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr16 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr17 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; XNACK-NEXT: S_NOP 0
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr0 = S_MOV_B32 $sgpr0, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18
+    ; GCN-NEXT: S_ENDPGM
+
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr4 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr5 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+
+    $vgpr6 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr7 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr9 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr12 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr13 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+
+    $vgpr14 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr15 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr16 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr17 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr0 = S_MOV_B32 %sgpr0, implicit %vgpr2, implicit %vgpr3, implicit %vgpr4, implicit %vgpr5, implicit %vgpr6, implicit %vgpr7, implicit %vgpr8, implicit %vgpr9, implicit %vgpr10, implicit %vgpr11, implicit %vgpr12, implicit %vgpr13, implicit %vgpr14, implicit %vgpr15, implicit %vgpr16, implicit %vgpr17, implicit %vgpr18
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr0 = S_MOV_B32 $sgpr0, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18
     S_ENDPGM
 ...
 ---
@@ -180,13 +180,13 @@ name: break_clause_simple_load_flat4_lo_
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat4_lo_ptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -196,13 +196,13 @@ name: break_clause_simple_load_flat4_hi_
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat4_hi_ptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -212,13 +212,13 @@ name: break_clause_simple_load_flat8_ptr
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat8_ptr
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -229,12 +229,12 @@ name: break_clause_simple_load_flat16_pt
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat16_ptr
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -249,17 +249,17 @@ body: |
   ; GCN-LABEL: name: break_clause_block_boundary_load_flat8_ptr
   ; GCN: bb.0:
   ; GCN-NEXT:   successors: %bb.1(0x80000000)
-  ; GCN:   %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+  ; GCN:   $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
   ; GCN: bb.1:
   ; XNACK-NEXT:  S_NOP 0
-  ; GCN-NEXT:   %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+  ; GCN-NEXT:   $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
   ; GCN-NEXT:   S_ENDPGM
 
   bb.0:
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
 
   bb.1:
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -270,12 +270,12 @@ name: break_clause_store_load_into_ptr_f
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_store_load_into_ptr_flat4
-    ; GCN: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -287,12 +287,12 @@ name: break_clause_store_load_into_data_
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_store_load_into_data_flat4
-    ; GCN: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -303,15 +303,15 @@ name: valu_inst_breaks_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: valu_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = V_MOV_B32_e32 0, implicit %exec
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -322,15 +322,15 @@ name: salu_inst_breaks_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: salu_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr8 = S_MOV_B32 0
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr8 = S_MOV_B32 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr8 = S_MOV_B32 0
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr8 = S_MOV_B32 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -340,15 +340,15 @@ name: ds_inst_breaks_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: ds_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -358,14 +358,14 @@ name: smrd_inst_breaks_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr8 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 0, 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr8 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr8 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 0, 0
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -375,13 +375,13 @@ name: implicit_use_breaks_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_use_breaks_clause
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr, implicit %vgpr4_vgpr5
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr4_vgpr5
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr, implicit %vgpr4_vgpr5
-    %vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr4_vgpr5
+    $vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -390,12 +390,12 @@ name: trivial_clause_load_mubuf4_x2
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_mubuf4_x2
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr3 = BUFFER_LOAD_DWORD_OFFEN %vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFEN %vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -404,13 +404,13 @@ name: break_clause_simple_load_mubuf_off
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_mubuf_offen_ptr
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -421,13 +421,13 @@ name: mubuf_load4_overwrite_ptr
 body: |
   bb.0:
     ; GCN-LABEL: name: mubuf_load4_overwrite_ptr
-    ; GCN: %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr1 = V_MOV_B32_e32 0, implicit %exec
-    ; GCN-NEXT: %vgpr2 = V_MOV_B32_e32 %vgpr0, implicit %exec
-    ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr2 = V_MOV_B32_e32 %vgpr0, implicit %exec
+    ; GCN: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+    ; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
+    ; GCN-NEXT: S_ENDPGM
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -438,29 +438,29 @@ name: break_clause_flat_load_mubuf_load
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_flat_load_mubuf_load
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 # Break a clause from interference between mubuf and flat instructions
 
 # GCN-LABEL: name: break_clause_mubuf_load_flat_load
 # GCN: bb.0:
-# GCN-NEXT: %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4
+# GCN-NEXT: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
 # XNACK-NEXT: S_NOP 0
-# GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3
+# GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3
 # GCN-NEXT: S_ENDPGM
 name: break_clause_mubuf_load_flat_load
 
 body: |
   bb.0:
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 ...
@@ -471,13 +471,13 @@ name: break_clause_atomic_rtn_into_ptr_f
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_rtn_into_ptr_flat4
-    ; GCN: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr4 = FLAT_ATOMIC_ADD_RTN %vgpr5_vgpr6, %vgpr7, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr5_vgpr6, $vgpr7, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr4 = FLAT_ATOMIC_ADD_RTN %vgpr5_vgpr6, %vgpr7, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr5_vgpr6, $vgpr7, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -486,12 +486,12 @@ name: break_clause_atomic_nortn_ptr_load
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_nortn_ptr_load_flat4
-    ; GCN: FLAT_ATOMIC_ADD %vgpr0_vgpr1, %vgpr2, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_ATOMIC_ADD $vgpr0_vgpr1, $vgpr2, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    FLAT_ATOMIC_ADD %vgpr0_vgpr1, %vgpr2, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_ATOMIC_ADD $vgpr0_vgpr1, $vgpr2, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -501,13 +501,13 @@ name: break_clause_atomic_rtn_into_ptr_m
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_rtn_into_ptr_mubuf4
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN %vgpr2, %vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN $vgpr2, $vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN %vgpr2, %vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN $vgpr2, $vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -517,12 +517,12 @@ name: break_clause_atomic_nortn_ptr_load
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_nortn_ptr_load_mubuf4
-    ; GCN: BUFFER_ATOMIC_ADD_OFFEN %vgpr0, %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: BUFFER_ATOMIC_ADD_OFFEN $vgpr0, $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    BUFFER_ATOMIC_ADD_OFFEN %vgpr0, %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    BUFFER_ATOMIC_ADD_OFFEN $vgpr0, $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -533,11 +533,11 @@ name: no_break_clause_mubuf_load_novaddr
 body: |
   bb.0:
     ; GCN-LABEL: name: no_break_clause_mubuf_load_novaddr
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr3 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -547,16 +547,16 @@ name: mix_load_store_clause
 body: |
   bb.0:
     ; GCN-LABEL: name: mix_load_store_clause
-    ; GCN: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -566,15 +566,15 @@ name: mix_load_store_clause_same_address
 body: |
   bb.0:
     ; GCN-LABEL: name: mix_load_store_clause_same_address
-    ; GCN: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/clamp-omod-special-case.mir Wed Jan 31 14:04:26 2018
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands  %s -o - | FileCheck -check-prefix=GCN %s
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
 
 name:            v_max_self_clamp_not_set_f32
 tracksRegLiveness: true
@@ -35,37 +35,37 @@ registers:
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: v_clamp_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
 name:            v_clamp_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -97,38 +97,38 @@ registers:
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
 # Don't fold a mul that looks like an omod if itself has omod set
 
 # GCN-LABEL: name: v_omod_mul_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
 name:            v_omod_mul_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -160,30 +160,30 @@ registers:
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -191,8 +191,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
 
 name:            v_omod_mul_clamp_already_set_f32
 tracksRegLiveness: true
@@ -225,30 +225,30 @@ registers:
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -269,8 +269,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has omod set
 
 # GCN-LABEL: name: v_omod_add_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec
 name:            v_omod_add_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -302,30 +302,30 @@ registers:
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -333,8 +333,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_add_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec
 
 name:            v_omod_add_clamp_already_set_f32
 tracksRegLiveness: true
@@ -367,30 +367,30 @@ registers:
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -404,9 +404,9 @@ registers:
   - { id: 1, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %0 = COPY %vgpr0
-    %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit %exec
+    %0 = COPY $vgpr0
+    %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit $exec
 
 ...
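
For illustration only: a minimal stand-alone .mir sketch (not part of this patch; the function name and constants are hypothetical) of the convention the clamp-omod hunks above exercise. Named physical registers now carry the '$' sigil, while numbered virtual registers keep '%'.

---
name:            sigil_example
tracksRegLiveness: true
body:             |
  bb.0:
    liveins: $vgpr0

    ; '$vgpr0' and '$exec' are physical registers; '%0' and '%1' are virtual.
    %0:vgpr_32 = COPY $vgpr0
    %1:vgpr_32 = V_ADD_F32_e64 0, killed %0, 0, 1065353216, 0, 0, implicit $exec
    $vgpr0 = COPY killed %1
    S_ENDPGM
...

A function like this should round-trip through the MIR parser/printer under the new lexer rules (e.g. via llc -march=amdgcn -run-pass none -verify-machineinstrs).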

Modified: llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir Wed Jan 31 14:04:26 2018
@@ -10,22 +10,22 @@ name:            cluster_loads_post_ra
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 body:             |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %vgpr0_vgpr1 = IMPLICIT_DEF
-    %vgpr4_vgpr5 = IMPLICIT_DEF
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %vgpr4 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %vgpr2 = IMPLICIT_DEF
-    %vgpr3 = IMPLICIT_DEF
-    %vgpr6 = IMPLICIT_DEF
-    %vgpr0 = V_ADD_I32_e32 16, %vgpr2, implicit-def %vcc, implicit %exec
-    %vgpr1 = V_ADDC_U32_e32 %vgpr3, killed %vgpr6, implicit-def dead %vcc, implicit %vcc, implicit %exec
-    FLAT_STORE_DWORD %vgpr2_vgpr3, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    FLAT_STORE_DWORD %vgpr0_vgpr1, killed %vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+    $vgpr0_vgpr1 = IMPLICIT_DEF
+    $vgpr4_vgpr5 = IMPLICIT_DEF
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    $vgpr4 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    $vgpr2 = IMPLICIT_DEF
+    $vgpr3 = IMPLICIT_DEF
+    $vgpr6 = IMPLICIT_DEF
+    $vgpr0 = V_ADD_I32_e32 16, $vgpr2, implicit-def $vcc, implicit $exec
+    $vgpr1 = V_ADDC_U32_e32 $vgpr3, killed $vgpr6, implicit-def dead $vcc, implicit $vcc, implicit $exec
+    FLAT_STORE_DWORD $vgpr2_vgpr3, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    FLAT_STORE_DWORD $vgpr0_vgpr1, killed $vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/cluster-flat-loads.mir Wed Jan 31 14:04:26 2018
@@ -14,7 +14,7 @@ registers:
 body:             |
   bb.0:
     %0 = IMPLICIT_DEF
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/coalescer-subreg-join.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/coalescer-subreg-join.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/coalescer-subreg-join.mir Wed Jan 31 14:04:26 2018
@@ -22,9 +22,9 @@ registers:
   - { id: 20, class: vreg_512 }
   - { id: 27, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr2_sgpr3', virtual-reg: '%0' }
-  - { reg: '%vgpr2', virtual-reg: '%1' }
-  - { reg: '%vgpr3', virtual-reg: '%2' }
+  - { reg: '$sgpr2_sgpr3', virtual-reg: '%0' }
+  - { reg: '$vgpr2', virtual-reg: '%1' }
+  - { reg: '$vgpr3', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -41,11 +41,11 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr2_sgpr3, %vgpr2, %vgpr3
+    liveins: $sgpr2_sgpr3, $vgpr2, $vgpr3
 
-    %0 = COPY %sgpr2_sgpr3
-    %1 = COPY %vgpr2
-    %2 = COPY %vgpr3
+    %0 = COPY $sgpr2_sgpr3
+    %1 = COPY $vgpr2
+    %2 = COPY $vgpr3
     %3 = S_LOAD_DWORDX8_IMM %0, 0, 0
     %4 = S_LOAD_DWORDX4_IMM %0, 12, 0
     %5 = S_LOAD_DWORDX8_IMM %0, 16, 0
@@ -61,7 +61,7 @@ body:             |
     %11.sub6 = COPY %1
     %11.sub7 = COPY %1
     %11.sub8 = COPY %1
-    dead %18 = IMAGE_SAMPLE_C_D_O_V1_V16 %11, %3, %4, 1, 0, 0, 0, 0, 0, 0, -1, implicit %exec
+    dead %18 = IMAGE_SAMPLE_C_D_O_V1_V16 %11, %3, %4, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec
     %20.sub1 = COPY %2
     %20.sub2 = COPY %2
     %20.sub3 = COPY %2
@@ -70,6 +70,6 @@ body:             |
     %20.sub6 = COPY %2
     %20.sub7 = COPY %2
     %20.sub8 = COPY %2
-    dead %27 = IMAGE_SAMPLE_C_D_O_V1_V16 %20, %5, %6, 1, 0, 0, 0, 0, 0, 0, -1, implicit %exec
+    dead %27 = IMAGE_SAMPLE_C_D_O_V1_V16 %20, %5, %6, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir Wed Jan 31 14:04:26 2018
@@ -2,7 +2,7 @@
 ...
 
 # GCN-LABEL: name: s_fold_and_imm_regimm_32{{$}}
-# GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit %exec
+# GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %10,
 name:            s_fold_and_imm_regimm_32
 alignment:       0
@@ -24,7 +24,7 @@ registers:
   - { id: 9, class: sreg_32_xm0 }
   - { id: 10, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -41,9 +41,9 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %1 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %2 = COPY %1.sub1
     %3 = COPY %1.sub0
@@ -52,9 +52,9 @@ body:             |
     %6 = REG_SEQUENCE killed %2, 1, killed %3, 2, killed %4, 3, killed %5, 4
     %7 = S_MOV_B32 1234567
     %8 = S_MOV_B32 9999
-    %9 = S_AND_B32 killed %7, killed %8, implicit-def dead %scc
+    %9 = S_AND_B32 killed %7, killed %8, implicit-def dead $scc
     %10 = COPY %9
-    BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -62,19 +62,19 @@ body:             |
 
 # GCN-LABEL: name: v_fold_and_imm_regimm_32{{$}}
 
-# GCN: %9:vgpr_32 = V_MOV_B32_e32 646, implicit %exec
+# GCN: %9:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %9,
 
-# GCN: %10:vgpr_32 = V_MOV_B32_e32 646, implicit %exec
+# GCN: %10:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %10
 
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 646, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 1234567, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 1234567, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 63, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 63, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %13,
 
 name:            v_fold_and_imm_regimm_32
@@ -108,8 +108,8 @@ registers:
   - { id: 44, class: vgpr_32 }
 
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -126,37 +126,37 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
-    %31 = V_ASHRREV_I32_e64 31, %3, implicit %exec
+    %31 = V_ASHRREV_I32_e64 31, %3, implicit $exec
     %32 = REG_SEQUENCE %3, 1, %31, 2
-    %33 = V_LSHLREV_B64 2, killed %32, implicit %exec
+    %33 = V_LSHLREV_B64 2, killed %32, implicit $exec
     %20 = COPY %4.sub1
-    %44 = V_ADD_I32_e32 %4.sub0, %33.sub0, implicit-def %vcc, implicit %exec
+    %44 = V_ADD_I32_e32 %4.sub0, %33.sub0, implicit-def $vcc, implicit $exec
     %36 = COPY killed %20
-    %35 = V_ADDC_U32_e32 %33.sub1, %36, implicit-def %vcc, implicit %vcc, implicit %exec
+    %35 = V_ADDC_U32_e32 %33.sub1, %36, implicit-def $vcc, implicit $vcc, implicit $exec
     %37 = REG_SEQUENCE %44, 1, killed %35, 2
-    %24 = V_MOV_B32_e32 982, implicit %exec
+    %24 = V_MOV_B32_e32 982, implicit $exec
     %26 = S_MOV_B32 1234567
-    %34 = V_MOV_B32_e32 63, implicit %exec
+    %34 = V_MOV_B32_e32 63, implicit $exec
 
-    %27 = V_AND_B32_e64 %26, %24, implicit %exec
-    FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %27 = V_AND_B32_e64 %26, %24, implicit $exec
+    FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_AND_B32_e64 %24, %26, implicit %exec
-    FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_AND_B32_e64 %24, %26, implicit $exec
+    FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %29 = V_AND_B32_e32 %26, %24, implicit %exec
-    FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %29 = V_AND_B32_e32 %26, %24, implicit $exec
+    FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %30 = V_AND_B32_e64 %26, %26, implicit %exec
-    FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %30 = V_AND_B32_e64 %26, %26, implicit $exec
+    FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %31 = V_AND_B32_e64 %34, %34, implicit %exec
-    FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %31 = V_AND_B32_e64 %34, %34, implicit $exec
+    FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -164,7 +164,7 @@ body:             |
 ---
 
 # GCN-LABEL: name: s_fold_shl_imm_regimm_32{{$}}
-# GC1: %13 = V_MOV_B32_e32 4096, implicit %exec
+# GCN: %13 = V_MOV_B32_e32 4096, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %13,
 
 name:            s_fold_shl_imm_regimm_32
@@ -190,7 +190,7 @@ registers:
   - { id: 12, class: sreg_32_xm0 }
   - { id: 13, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -207,9 +207,9 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %5 = S_MOV_B32 1
     %6 = COPY %4.sub1
@@ -217,43 +217,43 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
-    %12 = S_LSHL_B32 killed %5, 12, implicit-def dead %scc
+    %12 = S_LSHL_B32 killed %5, 12, implicit-def dead $scc
     %13 = COPY %12
-    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: v_fold_shl_imm_regimm_32{{$}}
 
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 40955904, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 40955904, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 24, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 24, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 4096, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %13,
 
-# GCN: %14:vgpr_32 = V_MOV_B32_e32 24, implicit %exec
+# GCN: %14:vgpr_32 = V_MOV_B32_e32 24, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %14,
 
-# GCN: %15:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# GCN: %15:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %15,
 
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 4096, implicit %exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %22,
 
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %23,
 
-# GCN: %25:vgpr_32 = V_MOV_B32_e32 2, implicit %exec
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %25,
 
-# GCN: %26:vgpr_32 = V_MOV_B32_e32 7927808, implicit %exec
+# GCN: %26:vgpr_32 = V_MOV_B32_e32 7927808, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %26,
 
-# GCN: %28:vgpr_32 = V_MOV_B32_e32 -8, implicit %exec
+# GCN: %28:vgpr_32 = V_MOV_B32_e32 -8, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %28,
 
 name:            v_fold_shl_imm_regimm_32
@@ -294,8 +294,8 @@ registers:
   - { id: 27, class: sreg_32_xm0 }
   - { id: 28, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -312,54 +312,54 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %2 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %2 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
+    %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16 = REG_SEQUENCE %2, 1, %15, 2
-    %17 = V_LSHLREV_B64 2, killed %16, implicit %exec
+    %17 = V_LSHLREV_B64 2, killed %16, implicit $exec
     %9 = COPY %3.sub1
-    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec
+    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec
     %19 = COPY killed %9
-    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec
     %20 = REG_SEQUENCE %21, 1, killed %18, 2
-    %10 = V_MOV_B32_e32 9999, implicit %exec
-    %24 = V_MOV_B32_e32 3871, implicit %exec
-    %6 = V_MOV_B32_e32 1, implicit %exec
+    %10 = V_MOV_B32_e32 9999, implicit $exec
+    %24 = V_MOV_B32_e32 3871, implicit $exec
+    %6 = V_MOV_B32_e32 1, implicit $exec
     %7 = S_MOV_B32 1
     %27 = S_MOV_B32 -4
 
-    %11 = V_LSHLREV_B32_e64 12, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %11 = V_LSHLREV_B32_e64 12, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %12 = V_LSHLREV_B32_e64 %7, 12, implicit %exec
-    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %12 = V_LSHLREV_B32_e64 %7, 12, implicit $exec
+    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %13 = V_LSHL_B32_e64 %7, 12, implicit %exec
-    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %13 = V_LSHL_B32_e64 %7, 12, implicit $exec
+    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %14 = V_LSHL_B32_e64 12, %7, implicit %exec
-    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %14 = V_LSHL_B32_e64 12, %7, implicit $exec
+    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %15 = V_LSHL_B32_e64 12, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %15 = V_LSHL_B32_e64 12, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %22 = V_LSHL_B32_e64 %6, 12, implicit %exec
-    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %22 = V_LSHL_B32_e64 %6, 12, implicit $exec
+    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %23 = V_LSHL_B32_e64 %6, 32, implicit %exec
-    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %23 = V_LSHL_B32_e64 %6, 32, implicit $exec
+    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %25 = V_LSHL_B32_e32 %6, %6, implicit %exec
-    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %25 = V_LSHL_B32_e32 %6, %6, implicit $exec
+    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %26 = V_LSHLREV_B32_e32 11, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %26 = V_LSHLREV_B32_e32 11, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_LSHL_B32_e32 %27, %6, implicit %exec
-    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_LSHL_B32_e32 %27, %6, implicit $exec
+    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -367,7 +367,7 @@ body:             |
 ---
 
 # GCN-LABEL: name: s_fold_ashr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 243, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 243, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %11, killed %8,
 name:            s_fold_ashr_imm_regimm_32
 alignment:       0
@@ -390,7 +390,7 @@ registers:
   - { id: 12, class: sreg_32_xm0 }
   - { id: 13, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -407,9 +407,9 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %5 = S_MOV_B32 999123
     %6 = COPY %4.sub1
@@ -417,42 +417,42 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
-    %12 = S_ASHR_I32 killed %5, 12, implicit-def dead %scc
+    %12 = S_ASHR_I32 killed %5, 12, implicit-def dead $scc
     %13 = COPY %12
-    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 
 # GCN-LABEL: name: v_fold_ashr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %13,
 
-# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit %exec
+# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %14,
 
-# GCN: %15:vgpr_32 = V_MOV_B32_e32 -1, implicit %exec
+# GCN: %15:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %15,
 
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit %exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %22,
 
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %23,
 
-# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit %exec
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %25,
 
-# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit %exec
+# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %26,
 
-# GCN: %28:vgpr_32 = V_MOV_B32_e32 -1, implicit %exec
+# GCN: %28:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %28,
 
 name:            v_fold_ashr_imm_regimm_32
@@ -497,8 +497,8 @@ registers:
   - { id: 34, class: vgpr_32 }
   - { id: 35, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -515,59 +515,59 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %2 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %2 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %3 = S_LOAD_DWORDX2_IMM %0, 36, 0
-    %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
+    %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16 = REG_SEQUENCE %2, 1, %15, 2
-    %17 = V_LSHLREV_B64 2, killed %16, implicit %exec
+    %17 = V_LSHLREV_B64 2, killed %16, implicit $exec
     %9 = COPY %3.sub1
-    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec
+    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec
     %19 = COPY killed %9
-    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec
     %20 = REG_SEQUENCE %21, 1, killed %18, 2
-    %10 = V_MOV_B32_e32 999234234, implicit %exec
-    %24 = V_MOV_B32_e32 3871, implicit %exec
-    %6 = V_MOV_B32_e32 1000000, implicit %exec
+    %10 = V_MOV_B32_e32 999234234, implicit $exec
+    %24 = V_MOV_B32_e32 3871, implicit $exec
+    %6 = V_MOV_B32_e32 1000000, implicit $exec
     %7 = S_MOV_B32 13424252
     %8 = S_MOV_B32 4
     %27 = S_MOV_B32 -4
     %32 = S_MOV_B32 1
     %33 = S_MOV_B32 3841
-    %34 = V_MOV_B32_e32 3841, implicit %exec
-    %35 = V_MOV_B32_e32 2, implicit %exec
+    %34 = V_MOV_B32_e32 3841, implicit $exec
+    %35 = V_MOV_B32_e32 2, implicit $exec
 
-    %11 = V_ASHRREV_I32_e64 8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %11 = V_ASHRREV_I32_e64 8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %12 = V_ASHRREV_I32_e64 %8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %12 = V_ASHRREV_I32_e64 %8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %13 = V_ASHR_I32_e64 %7, 3, implicit %exec
-    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %13 = V_ASHR_I32_e64 %7, 3, implicit $exec
+    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %14 = V_ASHR_I32_e64 7, %32, implicit %exec
-    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %14 = V_ASHR_I32_e64 7, %32, implicit $exec
+    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %15 = V_ASHR_I32_e64 %27, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %15 = V_ASHR_I32_e64 %27, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %22 = V_ASHR_I32_e64 %6, 4, implicit %exec
-    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %22 = V_ASHR_I32_e64 %6, 4, implicit $exec
+    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %23 = V_ASHR_I32_e64 %6, %33, implicit %exec
-    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %23 = V_ASHR_I32_e64 %6, %33, implicit $exec
+    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %25 = V_ASHR_I32_e32 %34, %34, implicit %exec
-    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %25 = V_ASHR_I32_e32 %34, %34, implicit $exec
+    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %26 = V_ASHRREV_I32_e32 11, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %26 = V_ASHRREV_I32_e32 11, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_ASHR_I32_e32 %27, %35, implicit %exec
-    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_ASHR_I32_e32 %27, %35, implicit $exec
+    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -575,7 +575,7 @@ body:             |
 ---
 
 # GCN-LABEL: name: s_fold_lshr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 1048332, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 1048332, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %11, killed %8,
 name:            s_fold_lshr_imm_regimm_32
 alignment:       0
@@ -598,7 +598,7 @@ registers:
   - { id: 12, class: sreg_32_xm0 }
   - { id: 13, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -615,9 +615,9 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %5 = S_MOV_B32 -999123
     %6 = COPY %4.sub1
@@ -625,43 +625,43 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
-    %12 = S_LSHR_B32 killed %5, 12, implicit-def dead %scc
+    %12 = S_LSHR_B32 killed %5, 12, implicit-def dead $scc
     %13 = COPY %12
-    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 
 # GCN-LABEL: name: v_fold_lshr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %13,
 
-# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit %exec
+# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %14,
 
-# GCN: %15:vgpr_32 = V_MOV_B32_e32 1, implicit %exec
+# GCN: %15:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %15,
 
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit %exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %22,
 
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %23,
 
-# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit %exec
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %25,
 
-# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit %exec
+# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %26,
 
-# GCN: %28:vgpr_32 = V_MOV_B32_e32 1073741823, implicit %exec
+# GCN: %28:vgpr_32 = V_MOV_B32_e32 1073741823, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %28,
 
 name:            v_fold_lshr_imm_regimm_32
@@ -706,8 +706,8 @@ registers:
   - { id: 34, class: vgpr_32 }
   - { id: 35, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -724,59 +724,59 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %2 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %2 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %3 = S_LOAD_DWORDX2_IMM %0, 36, 0
-    %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
+    %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16 = REG_SEQUENCE %2, 1, %15, 2
-    %17 = V_LSHLREV_B64 2, killed %16, implicit %exec
+    %17 = V_LSHLREV_B64 2, killed %16, implicit $exec
     %9 = COPY %3.sub1
-    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec
+    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec
     %19 = COPY killed %9
-    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec
     %20 = REG_SEQUENCE %21, 1, killed %18, 2
-    %10 = V_MOV_B32_e32 999234234, implicit %exec
-    %24 = V_MOV_B32_e32 3871, implicit %exec
-    %6 = V_MOV_B32_e32 1000000, implicit %exec
+    %10 = V_MOV_B32_e32 999234234, implicit $exec
+    %24 = V_MOV_B32_e32 3871, implicit $exec
+    %6 = V_MOV_B32_e32 1000000, implicit $exec
     %7 = S_MOV_B32 13424252
     %8 = S_MOV_B32 4
     %27 = S_MOV_B32 -4
     %32 = S_MOV_B32 1
     %33 = S_MOV_B32 3841
-    %34 = V_MOV_B32_e32 3841, implicit %exec
-    %35 = V_MOV_B32_e32 2, implicit %exec
+    %34 = V_MOV_B32_e32 3841, implicit $exec
+    %35 = V_MOV_B32_e32 2, implicit $exec
 
-    %11 = V_LSHRREV_B32_e64 8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %11 = V_LSHRREV_B32_e64 8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %12 = V_LSHRREV_B32_e64 %8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %12 = V_LSHRREV_B32_e64 %8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %13 = V_LSHR_B32_e64 %7, 3, implicit %exec
-    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %13 = V_LSHR_B32_e64 %7, 3, implicit $exec
+    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %14 = V_LSHR_B32_e64 7, %32, implicit %exec
-    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %14 = V_LSHR_B32_e64 7, %32, implicit $exec
+    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %15 = V_LSHR_B32_e64 %27, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %15 = V_LSHR_B32_e64 %27, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %22 = V_LSHR_B32_e64 %6, 4, implicit %exec
-    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %22 = V_LSHR_B32_e64 %6, 4, implicit $exec
+    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %23 = V_LSHR_B32_e64 %6, %33, implicit %exec
-    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %23 = V_LSHR_B32_e64 %6, %33, implicit $exec
+    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %25 = V_LSHR_B32_e32 %34, %34, implicit %exec
-    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %25 = V_LSHR_B32_e32 %34, %34, implicit $exec
+    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %26 = V_LSHRREV_B32_e32 11, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %26 = V_LSHRREV_B32_e32 11, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_LSHR_B32_e32 %27, %35, implicit %exec
-    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_LSHR_B32_e32 %27, %35, implicit $exec
+    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -798,9 +798,9 @@ registers:
   - { id: 3, class: vreg_64, preferred-register: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_XOR_B32_e64 killed %0, undef %1, implicit %exec
-    FLAT_STORE_DWORD undef %3, %2, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_XOR_B32_e64 killed %0, undef %1, implicit $exec
+    FLAT_STORE_DWORD undef %3, %2, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/dead_copy.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/dead_copy.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/dead_copy.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/dead_copy.mir Wed Jan 31 14:04:26 2018
@@ -2,8 +2,8 @@
 
 # GCN-LABEL: dead_copy
 # GCN:       bb.0
-# GCN-NOT:   dead %vgpr5 = COPY undef %vgpr11, implicit %exec
-# GCN:       %vgpr5 = COPY %vgpr11, implicit %exec
+# GCN-NOT:   dead $vgpr5 = COPY undef $vgpr11, implicit $exec
+# GCN:       $vgpr5 = COPY $vgpr11, implicit $exec
 
 ---
 name: dead_copy
@@ -11,17 +11,17 @@ name: dead_copy
 body:    |
 
   bb.0:
-    liveins: %vgpr11, %sgpr0, %sgpr1, %vgpr6, %vgpr7, %vgpr4
+    liveins: $vgpr11, $sgpr0, $sgpr1, $vgpr6, $vgpr7, $vgpr4
 
-    dead %vgpr5 = COPY undef %vgpr11, implicit %exec
+    dead $vgpr5 = COPY undef $vgpr11, implicit $exec
 
-    %vgpr5 = COPY %vgpr11, implicit %exec
+    $vgpr5 = COPY $vgpr11, implicit $exec
 
-    %sgpr14 = S_ADD_U32 %sgpr0, target-flags(amdgpu-gotprel) 1136, implicit-def %scc
-    %sgpr15 = S_ADDC_U32 %sgpr1, target-flags(amdgpu-gotprel32-lo) 0, implicit-def dead %scc, implicit %scc
+    $sgpr14 = S_ADD_U32 $sgpr0, target-flags(amdgpu-gotprel) 1136, implicit-def $scc
+    $sgpr15 = S_ADDC_U32 $sgpr1, target-flags(amdgpu-gotprel32-lo) 0, implicit-def dead $scc, implicit $scc
 
-    %vgpr10 = COPY killed %sgpr14, implicit %exec
-    %vgpr11 = COPY killed %sgpr15, implicit %exec
+    $vgpr10 = COPY killed $sgpr14, implicit $exec
+    $vgpr11 = COPY killed $sgpr15, implicit $exec
 
-    FLAT_STORE_DWORDX4 %vgpr10_vgpr11, %vgpr4_vgpr5_vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX4 $vgpr10_vgpr11, $vgpr4_vgpr5_vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/debug-value2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/debug-value2.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/debug-value2.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/debug-value2.ll Wed Jan 31 14:04:26 2018
@@ -10,9 +10,9 @@ declare %struct.ShapeData addrspace(1)*
 
 define <4 x float> @Scene_transformT(i32 %subshapeIdx, <4 x float> %v, float %time, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets) local_unnamed_addr !dbg !110 {
 entry:
-; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] %vgpr6_vgpr7
+; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr6_vgpr7
   call void @llvm.dbg.value(metadata i8 addrspace(1)* %gScene, metadata !120, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !154
-; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] %vgpr8_vgpr9
+; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr8_vgpr9
   call void @llvm.dbg.value(metadata i32 addrspace(1)* %gSceneOffsets, metadata !121, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !155
   %call = tail call %struct.ShapeData addrspace(1)* @Scene_getSubShapeData(i32 %subshapeIdx, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets)
   %m_linearMotion = getelementptr inbounds %struct.ShapeData, %struct.ShapeData addrspace(1)* %call, i64 0, i32 2

Modified: llvm/trunk/test/CodeGen/AMDGPU/detect-dead-lanes.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/detect-dead-lanes.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/detect-dead-lanes.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/detect-dead-lanes.mir Wed Jan 31 14:04:26 2018
@@ -42,9 +42,9 @@ body: |
 # Check defined lanes transfer; Includes checking for some special cases like
 # undef operands or IMPLICIT_DEF definitions.
 # CHECK-LABEL: name: test1
-# CHECK: %0:sreg_128 = REG_SEQUENCE %sgpr0, %subreg.sub0, %sgpr0, %subreg.sub2
-# CHECK: %1:sreg_128 = INSERT_SUBREG %0, %sgpr1,  %subreg.sub3
-# CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, %sgpr42,  %subreg.sub0
+# CHECK: %0:sreg_128 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2
+# CHECK: %1:sreg_128 = INSERT_SUBREG %0, $sgpr1,  %subreg.sub3
+# CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, $sgpr42,  %subreg.sub0
 # CHECK: S_NOP 0, implicit %1.sub0
 # CHECK: S_NOP 0, implicit undef %1.sub1
 # CHECK: S_NOP 0, implicit %1.sub2
@@ -87,9 +87,9 @@ registers:
   - { id: 10, class: sreg_128 }
 body: |
   bb.0:
-    %0 = REG_SEQUENCE %sgpr0, %subreg.sub0, %sgpr0, %subreg.sub2
-    %1 = INSERT_SUBREG %0, %sgpr1, %subreg.sub3
-    %2 = INSERT_SUBREG %0.sub2_sub3, %sgpr42, %subreg.sub0
+    %0 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2
+    %1 = INSERT_SUBREG %0, $sgpr1, %subreg.sub3
+    %2 = INSERT_SUBREG %0.sub2_sub3, $sgpr42, %subreg.sub0
     S_NOP 0, implicit %1.sub0
     S_NOP 0, implicit %1.sub1
     S_NOP 0, implicit %1.sub2
@@ -204,8 +204,8 @@ body: |
 # lanes. So we should not get a dead/undef flag here.
 # CHECK-LABEL: name: test3
 # CHECK: S_NOP 0, implicit-def %0
-# CHECK: %vcc = COPY %0
-# CHECK: %1:sreg_64 = COPY %vcc
+# CHECK: $vcc = COPY %0
+# CHECK: %1:sreg_64 = COPY $vcc
 # CHECK: S_NOP 0, implicit %1
 name: test3
 tracksRegLiveness: true
@@ -215,9 +215,9 @@ registers:
 body: |
   bb.0:
     S_NOP 0, implicit-def %0
-    %vcc = COPY %0
+    $vcc = COPY %0
 
-    %1 = COPY %vcc
+    %1 = COPY $vcc
     S_NOP 0, implicit %1
 ...
 ---
@@ -296,7 +296,7 @@ body: |
     ; let's swiffle some lanes around for fun...
     %5 = REG_SEQUENCE %4.sub0, %subreg.sub0, %4.sub2, %subreg.sub1, %4.sub1, %subreg.sub2, %4.sub3, %subreg.sub3
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.2:
@@ -349,7 +349,7 @@ body: |
     ; rotate lanes, but skip sub2 lane...
     %6 = REG_SEQUENCE %5.sub1, %subreg.sub0, %5.sub3, %subreg.sub1, %5.sub2, %subreg.sub2, %5.sub0, %subreg.sub3
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.2:
@@ -392,7 +392,7 @@ body: |
     ; rotate subreg lanes, skipping sub1
     %3 = REG_SEQUENCE %2.sub3, %subreg.sub0, %2.sub1, %subreg.sub1, %2.sub0, %subreg.sub2, %2.sub2, %subreg.sub3
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.2:

Modified: llvm/trunk/test/CodeGen/AMDGPU/endpgm-dce.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/endpgm-dce.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/endpgm-dce.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/endpgm-dce.mir Wed Jan 31 14:04:26 2018
@@ -13,19 +13,19 @@ registers:
   - { id: 4, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %3 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %4 = S_ADD_U32 %3, 1, implicit-def %scc
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: load_without_memoperand
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr
 # GCN-NEXT: S_ENDPGM
 name: load_without_memoperand
 tracksRegLiveness: true
@@ -37,19 +37,19 @@ registers:
   - { id: 4, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %3 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %4 = S_ADD_U32 %3, 1, implicit-def %scc
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: load_volatile
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile load 4)
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4)
 # GCN-NEXT: S_ENDPGM
 name: load_volatile
 tracksRegLiveness: true
@@ -61,19 +61,19 @@ registers:
   - { id: 4, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %3 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %4 = S_ADD_U32 %3, 1, implicit-def %scc
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4)
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: store
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
 # GCN-NEXT: S_ENDPGM
 name: store
 tracksRegLiveness: true
@@ -82,45 +82,45 @@ registers:
   - { id: 1, class: vgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: barrier
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 # GCN-NEXT: S_BARRIER
 # GCN-NEXT: S_ENDPGM
 name: barrier
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BARRIER
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: call
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: %sgpr4_sgpr5 = S_SWAPPC_B64 %sgpr2_sgpr3
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: $sgpr4_sgpr5 = S_SWAPPC_B64 $sgpr2_sgpr3
 # GCN-NEXT: S_ENDPGM
 name: call
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %sgpr4_sgpr5 = S_SWAPPC_B64 %sgpr2_sgpr3
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    $sgpr4_sgpr5 = S_SWAPPC_B64 $sgpr2_sgpr3
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: exp
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: EXP 32, undef %0:vgpr_32, undef %1:vgpr_32, %2, undef %3:vgpr_32, 0, 0, 15, implicit %exec
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: EXP 32, undef %0:vgpr_32, undef %1:vgpr_32, %2, undef %3:vgpr_32, 0, 0, 15, implicit $exec
 # GCN-NEXT: S_ENDPGM
 name: exp
 tracksRegLiveness: true
@@ -131,24 +131,24 @@ registers:
   - { id: 3, class: vgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    EXP 32, undef %0, undef %1, killed %2, undef %3, 0, 0, 15, implicit %exec
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    EXP 32, undef %0, undef %1, killed %2, undef %3, 0, 0, 15, implicit $exec
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: return_to_epilog
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: SI_RETURN_TO_EPILOG killed %vgpr0
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0
 name: return_to_epilog
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %vgpr0 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    SI_RETURN_TO_EPILOG killed %vgpr0
+    $vcc = IMPLICIT_DEF
+    $vgpr0 = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    SI_RETURN_TO_EPILOG killed $vgpr0
 ...
 ---
 # GCN-LABEL: name: split_block
@@ -166,14 +166,14 @@ registers:
   - { id: 3, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 
   bb.1:
     %0 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit %exec
-    %3 = S_ADD_U32 %2, 1, implicit-def %scc
+    %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit $exec
+    %3 = S_ADD_U32 %2, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
@@ -188,8 +188,8 @@ name: split_block_empty_block
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 
   bb.1:
 
@@ -208,8 +208,8 @@ name: split_block_uncond_branch
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.1
 
   bb.1:
@@ -219,8 +219,8 @@ body:             |
 # GCN-LABEL: name: split_block_cond_branch
 # GCN:      bb.0:
 # GCN-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
-# GCN:        %sgpr0_sgpr1 = S_OR_B64 %exec, %vcc, implicit-def %scc
-# GCN:        S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+# GCN:        $sgpr0_sgpr1 = S_OR_B64 $exec, $vcc, implicit-def $scc
+# GCN:        S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
 # GCN:      bb.1:
 # GCN:      bb.2:
 # GCN-NEXT:   S_ENDPGM
@@ -228,9 +228,9 @@ name: split_block_cond_branch
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, %vcc, implicit-def %scc
-    S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, $vcc, implicit-def $scc
+    S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
 
   bb.1:
 
@@ -253,13 +253,13 @@ name: two_preds_both_dead
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.2
 
   bb.1:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.2
 
   bb.2:
@@ -269,7 +269,7 @@ body:             |
 # GCN-LABEL: name: two_preds_one_dead
 # GCN:      bb.0:
 # GCN-NEXT:   successors: %bb.2
-# GCN:        %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+# GCN:        $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 # GCN-NEXT:   S_BARRIER
 # GCN-NEXT:   S_BRANCH %bb.2
 # GCN:      bb.1:
@@ -282,14 +282,14 @@ name: two_preds_one_dead
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BARRIER
     S_BRANCH %bb.2
 
   bb.1:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.2
 
   bb.2:

Modified: llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fix-vgpr-copies.mir Wed Jan 31 14:04:26 2018
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -start-after=greedy -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s
 # Check that we first do all vector instructions and only then change exec
-# CHECK-DAG:  COPY %vgpr10_vgpr11
-# CHECK-DAG:  COPY %vgpr12_vgpr13
-# CHECK:      %exec = COPY
+# CHECK-DAG:  COPY $vgpr10_vgpr11
+# CHECK-DAG:  COPY $vgpr12_vgpr13
+# CHECK:      $exec = COPY
 
 ---
 name:            main
@@ -13,9 +13,9 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr4_sgpr5' }
-  - { reg: '%sgpr6' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr4_sgpr5' }
+  - { reg: '$sgpr6' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -32,13 +32,13 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %vgpr3, %vgpr10_vgpr11, %vgpr12_vgpr13
+    liveins: $vgpr3, $vgpr10_vgpr11, $vgpr12_vgpr13
 
-    %vcc = V_CMP_NE_U32_e64 0, killed %vgpr3, implicit %exec
-    %sgpr4_sgpr5 = COPY %exec, implicit-def %exec
-    %sgpr6_sgpr7 = S_AND_B64 %sgpr4_sgpr5, killed %vcc, implicit-def dead %scc
-    %sgpr4_sgpr5 = S_XOR_B64 %sgpr6_sgpr7, killed %sgpr4_sgpr5, implicit-def dead %scc
-    %vgpr61_vgpr62 = COPY %vgpr10_vgpr11
-    %vgpr155_vgpr156 = COPY %vgpr12_vgpr13
-    %exec = S_MOV_B64_term killed %sgpr6_sgpr7
+    $vcc = V_CMP_NE_U32_e64 0, killed $vgpr3, implicit $exec
+    $sgpr4_sgpr5 = COPY $exec, implicit-def $exec
+    $sgpr6_sgpr7 = S_AND_B64 $sgpr4_sgpr5, killed $vcc, implicit-def dead $scc
+    $sgpr4_sgpr5 = S_XOR_B64 $sgpr6_sgpr7, killed $sgpr4_sgpr5, implicit-def dead $scc
+    $vgpr61_vgpr62 = COPY $vgpr10_vgpr11
+    $vgpr155_vgpr156 = COPY $vgpr12_vgpr13
+    $exec = S_MOV_B64_term killed $sgpr6_sgpr7
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/fix-wwm-liveness.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fix-wwm-liveness.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fix-wwm-liveness.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fix-wwm-liveness.mir Wed Jan 31 14:04:26 2018
@@ -1,5 +1,5 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fix-wwm-liveness -o -  %s | FileCheck %s
-#CHECK: %exec = EXIT_WWM killed %19, implicit %21
+#CHECK: $exec = EXIT_WWM killed %19, implicit %21
 
 ---
 name:            test_wwm_liveness
@@ -18,7 +18,7 @@ registers:
   - { id: 5, class: vgpr_32, preferred-register: '' }
   - { id: 6, class: vgpr_32, preferred-register: '' }
   - { id: 7, class: vgpr_32, preferred-register: '' }
-  - { id: 8, class: sreg_64, preferred-register: '%vcc' }
+  - { id: 8, class: sreg_64, preferred-register: '$vcc' }
   - { id: 9, class: sreg_64, preferred-register: '' }
   - { id: 10, class: sreg_32_xm0, preferred-register: '' }
   - { id: 11, class: sreg_64, preferred-register: '' }
@@ -39,15 +39,15 @@ body:             |
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
   
-    %21 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_MBCNT_LO_U32_B32_e64 -1, 0, implicit %exec
-    %6 = V_MBCNT_HI_U32_B32_e32 -1, killed %5, implicit %exec
-    %8 = V_CMP_GT_U32_e64 32, killed %6, implicit %exec
-    %22 = COPY %exec, implicit-def %exec
-    %23 = S_AND_B64 %22, %8, implicit-def dead %scc
-    %0 = S_XOR_B64 %23, %22, implicit-def dead %scc
-    %exec = S_MOV_B64_term killed %23
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    %21 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_MBCNT_LO_U32_B32_e64 -1, 0, implicit $exec
+    %6 = V_MBCNT_HI_U32_B32_e32 -1, killed %5, implicit $exec
+    %8 = V_CMP_GT_U32_e64 32, killed %6, implicit $exec
+    %22 = COPY $exec, implicit-def $exec
+    %23 = S_AND_B64 %22, %8, implicit-def dead $scc
+    %0 = S_XOR_B64 %23, %22, implicit-def dead $scc
+    $exec = S_MOV_B64_term killed %23
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
   
   bb.1:
@@ -56,18 +56,18 @@ body:             |
     %13 = S_MOV_B32 61440
     %14 = S_MOV_B32 -1
     %15 = REG_SEQUENCE undef %12, 1, undef %10, 2, killed %14, 3, killed %13, 4
-    %19 = COPY %exec
-    %exec = S_MOV_B64 -1
-    %16 = BUFFER_LOAD_DWORD_OFFSET %15, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4)
-    %17 = V_ADD_F32_e32 1065353216, killed %16, implicit %exec
-    %exec = EXIT_WWM killed %19
-    %21 = V_MOV_B32_e32 1, implicit %exec
-    early-clobber %18 = WWM killed %17, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %18, killed %15, 0, 0, 0, 0, 0, implicit %exec :: (store 4)
+    %19 = COPY $exec
+    $exec = S_MOV_B64 -1
+    %16 = BUFFER_LOAD_DWORD_OFFSET %15, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4)
+    %17 = V_ADD_F32_e32 1065353216, killed %16, implicit $exec
+    $exec = EXIT_WWM killed %19
+    %21 = V_MOV_B32_e32 1, implicit $exec
+    early-clobber %18 = WWM killed %17, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed %18, killed %15, 0, 0, 0, 0, 0, implicit $exec :: (store 4)
   
   bb.2:
-    %exec = S_OR_B64 %exec, killed %0, implicit-def %scc
-    %vgpr0 = COPY killed %21
-    SI_RETURN_TO_EPILOG killed %vgpr0
+    $exec = S_OR_B64 $exec, killed %0, implicit-def $scc
+    $vgpr0 = COPY killed %21
+    SI_RETURN_TO_EPILOG killed $vgpr0
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/flat-load-clustering.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/flat-load-clustering.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/flat-load-clustering.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/flat-load-clustering.mir Wed Jan 31 14:04:26 2018
@@ -46,32 +46,32 @@ registers:
   - { id: 12, class: vreg_64 }
   - { id: 13, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%0' }
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' }
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' }
 body:             |
   bb.0.bb:
-    liveins: %vgpr0, %sgpr4_sgpr5
+    liveins: $vgpr0, $sgpr4_sgpr5
 
-    %1 = COPY %sgpr4_sgpr5
-    %0 = COPY %vgpr0
+    %1 = COPY $sgpr4_sgpr5
+    %0 = COPY $vgpr0
     %3 = S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %7 = V_LSHLREV_B32_e32 2, %0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def %vcc, implicit %exec
+    %7 = V_LSHLREV_B32_e32 2, %0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def $vcc, implicit $exec
     %11 = COPY %4.sub1
-    %12.sub1 = V_ADDC_U32_e32 %11, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    %5 = FLAT_LOAD_DWORD %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.gep1)
-    undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def %vcc, implicit %exec
+    %12.sub1 = V_ADDC_U32_e32 %11, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    %5 = FLAT_LOAD_DWORD %12, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.gep1)
+    undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def $vcc, implicit $exec
     %8 = COPY %3.sub1
-    %9.sub1 = V_ADDC_U32_e32 %8, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def %vcc, implicit %exec
-    %13.sub1 = V_ADDC_U32_e32 %12.sub1, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    %6 = FLAT_LOAD_DWORD %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.gep34)
-    undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def %vcc, implicit %exec
-    %10.sub1 = V_ADDC_U32_e32 %9.sub1, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    FLAT_STORE_DWORD %9, %5, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.gep2)
-    FLAT_STORE_DWORD %10, %6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.gep4)
+    %9.sub1 = V_ADDC_U32_e32 %8, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def $vcc, implicit $exec
+    %13.sub1 = V_ADDC_U32_e32 %12.sub1, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    %6 = FLAT_LOAD_DWORD %13, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.gep34)
+    undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def $vcc, implicit $exec
+    %10.sub1 = V_ADDC_U32_e32 %9.sub1, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    FLAT_STORE_DWORD %9, %5, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.gep2)
+    FLAT_STORE_DWORD %10, %6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.gep4)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/fold-cndmask.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fold-cndmask.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fold-cndmask.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fold-cndmask.mir Wed Jan 31 14:04:26 2018
@@ -1,10 +1,10 @@
 # RUN: llc -march=amdgcn -run-pass si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
 
-# CHECK: %1:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# CHECK: %2:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# CHECK: %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # CHECK: %4:vgpr_32 = COPY %3
-# CHECK: %5:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# CHECK: %6:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# CHECK: %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # CHECK: %7:vgpr_32 = COPY %3
 
 ---
@@ -22,13 +22,13 @@ registers:
 body:             |
   bb.0.entry:
     %0 = IMPLICIT_DEF
-    %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit %exec
-    %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit %exec
+    %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit $exec
+    %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit $exec
     %3 = IMPLICIT_DEF
-    %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit %exec
+    %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit $exec
     %5 = COPY %1
-    %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit %exec
-    %vcc = IMPLICIT_DEF
-    %7 = V_CNDMASK_B32_e32 %3, %3, implicit %exec, implicit %vcc
+    %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit $exec
+    $vcc = IMPLICIT_DEF
+    %7 = V_CNDMASK_B32_e32 %3, %3, implicit $exec, implicit $vcc
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir Wed Jan 31 14:04:26 2018
@@ -111,7 +111,7 @@
 #  literal constant.
 
 # CHECK-LABEL: name: add_f32_1.0_one_f16_use
-# CHECK: %13:vgpr_32 = V_ADD_F16_e32  1065353216, killed %11, implicit %exec
+# CHECK: %13:vgpr_32 = V_ADD_F16_e32  1065353216, killed %11, implicit $exec
 
 name:            add_f32_1.0_one_f16_use
 alignment:       0
@@ -158,10 +158,10 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = V_MOV_B32_e32 1065353216, implicit %exec
-    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = V_MOV_B32_e32 1065353216, implicit $exec
+    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -170,9 +170,9 @@ body:             |
 # operands
 
 # CHECK-LABEL: name: add_f32_1.0_multi_f16_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit $exec
 
 
 name:            add_f32_1.0_multi_f16_use
@@ -222,13 +222,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 1065353216, implicit %exec
-    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 1065353216, implicit $exec
+    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -238,8 +238,8 @@ body:             |
 #  immediate, and folded into the single f16 use as a literal constant
 
 # CHECK-LABEL: name: add_f32_1.0_one_f32_use_one_f16_use
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit %exec
-# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
 
 name:            add_f32_1.0_one_f32_use_one_f16_use
 alignment:       0
@@ -289,14 +289,14 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %14 = V_MOV_B32_e32 1065353216, implicit %exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec
-    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %14 = V_MOV_B32_e32 1065353216, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
+    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -306,10 +306,10 @@ body:             |
 #  constant, and not folded as a multi-use literal for the f16 cases
 
 # CHECK-LABEL: name: add_f32_1.0_one_f32_use_multi_f16_use
-# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32  %11, %14, implicit %exec
-# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12,  %14, implicit %exec
-# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec
+# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32  %11, %14, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12,  %14, implicit $exec
+# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
 
 name:            add_f32_1.0_one_f32_use_multi_f16_use
 alignment:       0
@@ -360,24 +360,24 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %14 = V_MOV_B32_e32 1065353216, implicit %exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec
-    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit %exec
-    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %14 = V_MOV_B32_e32 1065353216, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
+    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
+    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: add_i32_1_multi_f16_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit $exec
 
 
 name:            add_i32_1_multi_f16_use
@@ -427,23 +427,23 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 1, implicit %exec
-    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 1, implicit $exec
+    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 
 # CHECK-LABEL: name: add_i32_m2_one_f32_use_multi_f16_use
-# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit %exec
-# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit %exec
-# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit %exec
+# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit $exec
+# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit $exec
 
 name:            add_i32_m2_one_f32_use_multi_f16_use
 alignment:       0
@@ -494,16 +494,16 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %14 = V_MOV_B32_e32 -2, implicit %exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec
-    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit %exec
-    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %14 = V_MOV_B32_e32 -2, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
+    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
+    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -513,9 +513,9 @@ body:             |
 #  constant, and not folded as a multi-use literal for the f16 cases
 
 # CHECK-LABEL: name: add_f16_1.0_multi_f32_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit $exec
 
 name:            add_f16_1.0_multi_f32_use
 alignment:       0
@@ -564,13 +564,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 15360, implicit %exec
-    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 15360, implicit $exec
+    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -580,9 +580,9 @@ body:             |
 # FIXME: Should be able to fold this
 
 # CHECK-LABEL: name: add_f16_1.0_other_high_bits_multi_f16_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
 
 name:            add_f16_1.0_other_high_bits_multi_f16_use
 alignment:       0
@@ -631,13 +631,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 80886784, implicit %exec
-    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 80886784, implicit $exec
+    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -647,9 +647,9 @@ body:             |
 # f32 instruction.
 
 # CHECK-LABEL: name: add_f16_1.0_other_high_bits_use_f16_f32
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
 name:            add_f16_1.0_other_high_bits_use_f16_f32
 alignment:       0
 exposesReturnsTwice: false
@@ -697,13 +697,13 @@ body:             |
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 305413120, implicit %exec
-    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 305413120, implicit $exec
+    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
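
A quick aside on the immediate operands in fold-imm-f16-f32.mir above: they
are IEEE-754 bit patterns, which is what makes the f16-vs-f32 folding cases
interesting. The following sketch (Python, illustrative only, not part of
this patch) decodes the values the tests use:

    import struct

    def f32_bits(x):
        # Bit pattern of an IEEE-754 binary32 value, as an unsigned int.
        return struct.unpack('<I', struct.pack('<f', x))[0]

    assert f32_bits(1.0) == 1065353216       # 0x3F800000: f32 1.0
    assert 0x3C00 == 15360                   # f16 1.0, fed to the f32 adds
    # The "other high bits" cases keep f16 1.0 (0x3C00) in the low half:
    assert 80886784  & 0xFFFF == 0x3C00      # 0x04D23C00
    assert 305413120 & 0xFFFF == 0x3C00      # 0x12343C00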

Modified: llvm/trunk/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir Wed Jan 31 14:04:26 2018
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -run-pass peephole-opt -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
 ...
 # GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
 
 name:            no_fold_imm_madak_mac_clamp_f32
 tracksRegLiveness: true
@@ -38,42 +38,42 @@ registers:
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: no_fold_imm_madak_mac_omod_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec
 
 name:            no_fold_imm_madak_mac_omod_f32
 tracksRegLiveness: true
@@ -109,42 +109,42 @@ registers:
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN: name: no_fold_imm_madak_mad_clamp_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
 
 name:            no_fold_imm_madak_mad_clamp_f32
 tracksRegLiveness: true
@@ -180,42 +180,42 @@ registers:
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN: name: no_fold_imm_madak_mad_omod_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec
 
 name:            no_fold_imm_madak_mad_omod_f32
 tracksRegLiveness: true
@@ -251,35 +251,35 @@ registers:
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/fold-multiple.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fold-multiple.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fold-multiple.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fold-multiple.mir Wed Jan 31 14:04:26 2018
@@ -14,8 +14,8 @@
 # being processed twice.
 
 # CHECK-LABEL: name: test
-# CHECK: %2:vgpr_32 = V_LSHLREV_B32_e32 2, killed %0, implicit %exec
-# CHECK: %4:vgpr_32 = V_AND_B32_e32 8, killed %2, implicit %exec
+# CHECK: %2:vgpr_32 = V_LSHLREV_B32_e32 2, killed %0, implicit $exec
+# CHECK: %4:vgpr_32 = V_AND_B32_e32 8, killed %2, implicit $exec
 
 name:            test
 tracksRegLiveness: true
@@ -30,11 +30,11 @@ body:             |
   bb.0 (%ir-block.0):
     %0 = IMPLICIT_DEF
     %1 = S_MOV_B32 2
-    %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit %exec
-    %3 = S_LSHL_B32 %1, killed %1, implicit-def dead %scc
-    %4 = V_AND_B32_e64 killed %2, killed %3, implicit %exec
+    %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit $exec
+    %3 = S_LSHL_B32 %1, killed %1, implicit-def dead $scc
+    %4 = V_AND_B32_e64 killed %2, killed %3, implicit $exec
     %5 = IMPLICIT_DEF
-    BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
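
For the fold-multiple.mir test above, the constant the CHECK lines expect
falls out of simple arithmetic: %1 is 2, so the scalar shift computes
2 << 2 = 8, and si-fold-operands then uses 8 as the inline operand of
V_AND_B32_e32. A one-line sanity check (Python, illustrative only):

    assert (2 << 2) == 8   # the S_LSHL_B32 result folded into V_AND_B32_e32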

Modified: llvm/trunk/test/CodeGen/AMDGPU/fold-operands-order.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/fold-operands-order.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/fold-operands-order.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/fold-operands-order.mir Wed Jan 31 14:04:26 2018
@@ -6,10 +6,10 @@
 # aren't made in users before the def is seen.
 
 # GCN-LABEL: name: mov_in_use_list_2x{{$}}
-# GCN: %2:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# GCN: %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # GCN-NEXT: %3:vgpr_32 = COPY undef %0
 
-# GCN: %1:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# GCN: %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 
 
 name: mov_in_use_list_2x
@@ -30,12 +30,12 @@ body:             |
     successors: %bb.2
 
     %2 = COPY %1
-    %3 = V_XOR_B32_e64 killed %2, undef %0, implicit %exec
+    %3 = V_XOR_B32_e64 killed %2, undef %0, implicit $exec
 
   bb.2:
     successors: %bb.1
 
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     S_BRANCH %bb.1
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/hazard-inlineasm.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/hazard-inlineasm.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/hazard-inlineasm.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/hazard-inlineasm.mir Wed Jan 31 14:04:26 2018
@@ -16,8 +16,8 @@ name: hazard-inlineasm
 
 body: |
   bb.0:
-   FLAT_STORE_DWORDX4 %vgpr49_vgpr50, %vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, 0, implicit %exec, implicit %flat_scr
-   INLINEASM &"v_mad_u64_u32 $0, $1, $2, $3, $4", 0, 2621450, def %vgpr26_vgpr27, 2818058, def dead %sgpr14_sgpr15, 589833, %sgpr12, 327689, killed %vgpr51, 2621449, %vgpr46_vgpr47
+   FLAT_STORE_DWORDX4 $vgpr49_vgpr50, $vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, 0, implicit $exec, implicit $flat_scr
+   INLINEASM &"v_mad_u64_u32 $0, $1, $2, $3, $4", 0, 2621450, def $vgpr26_vgpr27, 2818058, def dead $sgpr14_sgpr15, 589833, $sgpr12, 327689, killed $vgpr51, 2621449, $vgpr46_vgpr47
    S_ENDPGM
 ...
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/hazard.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/hazard.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/hazard.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/hazard.mir Wed Jan 31 14:04:26 2018
@@ -3,7 +3,7 @@
 
 # GCN-LABEL: name: hazard_implicit_def
 # GCN:    bb.0.entry:
-# GCN:      %m0 = S_MOV_B32
+# GCN:      $m0 = S_MOV_B32
 # GFX9:     S_NOP 0
 # VI-NOT:   S_NOP_0
 # GCN:      V_INTERP_P1_F32
@@ -18,22 +18,22 @@ selected:        false
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr7', virtual-reg: '' }
-  - { reg: '%vgpr4', virtual-reg: '' }
+  - { reg: '$sgpr7', virtual-reg: '' }
+  - { reg: '$vgpr4', virtual-reg: '' }
 body:             |
   bb.0.entry:
-    liveins: %sgpr7, %vgpr4
+    liveins: $sgpr7, $vgpr4
 
-    %m0 = S_MOV_B32 killed %sgpr7
-    %vgpr5 = IMPLICIT_DEF
-    %vgpr0 = V_INTERP_P1_F32 killed %vgpr4, 0, 0, implicit %m0, implicit %exec
-    SI_RETURN_TO_EPILOG killed %vgpr5, killed %vgpr0
+    $m0 = S_MOV_B32 killed $sgpr7
+    $vgpr5 = IMPLICIT_DEF
+    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+    SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0
 
 ...
 
 # GCN-LABEL: name: hazard_inlineasm
 # GCN:    bb.0.entry:
-# GCN:      %m0 = S_MOV_B32
+# GCN:      $m0 = S_MOV_B32
 # GFX9:     S_NOP 0
 # VI-NOT:   S_NOP_0
 # GCN:      V_INTERP_P1_F32
@@ -47,14 +47,14 @@ selected:        false
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr7', virtual-reg: '' }
-  - { reg: '%vgpr4', virtual-reg: '' }
+  - { reg: '$sgpr7', virtual-reg: '' }
+  - { reg: '$vgpr4', virtual-reg: '' }
 body:             |
   bb.0.entry:
-    liveins: %sgpr7, %vgpr4
+    liveins: $sgpr7, $vgpr4
 
-    %m0 = S_MOV_B32 killed %sgpr7
-    INLINEASM &"; no-op", 1, 327690, def %vgpr5
-    %vgpr0 = V_INTERP_P1_F32 killed %vgpr4, 0, 0, implicit %m0, implicit %exec
-    SI_RETURN_TO_EPILOG killed %vgpr5, killed %vgpr0
+    $m0 = S_MOV_B32 killed $sgpr7
+    INLINEASM &"; no-op", 1, 327690, def $vgpr5
+    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+    SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir Wed Jan 31 14:04:26 2018
@@ -10,11 +10,11 @@
 # CHECK-LABEL: name: kill_uncond_branch
 
 # CHECK: bb.0:
-# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
 
 # CHECK: bb.1:
 # CHECK: V_CMPX_LE_F32_e32
-# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit %exec
+# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit $exec
 
 # CHECK: bb.3:
 # CHECK-NEXT: EXP_DONE
@@ -28,12 +28,12 @@ name: kill_uncond_branch
 body: |
   bb.0:
     successors: %bb.1
-    S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit $vcc
 
   bb.1:
     successors: %bb.2
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
-    SI_KILL_F32_COND_IMM_TERMINATOR %vgpr0, 0, 3, implicit-def %exec, implicit-def %vcc, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+    SI_KILL_F32_COND_IMM_TERMINATOR $vgpr0, 0, 3, implicit-def $exec, implicit-def $vcc, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:

Modified: llvm/trunk/test/CodeGen/AMDGPU/insert-waits-callee.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert-waits-callee.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert-waits-callee.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert-waits-callee.mir Wed Jan 31 14:04:26 2018
@@ -13,13 +13,13 @@
 # CHECK-NEXT: V_ADD_F32
 # CHECK-NEXT: S_SETPC_B64
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr0_sgpr1' }
+  - { reg: '$vgpr0' }
 
 name: entry_callee_wait
 body:             |
   bb.0:
-    %vgpr0 = V_ADD_F32_e32 %vgpr0, %vgpr0, implicit %exec
-    S_SETPC_B64 killed %sgpr0_sgpr1
+    $vgpr0 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $exec
+    S_SETPC_B64 killed $sgpr0_sgpr1
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/insert-waits-exp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert-waits-exp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert-waits-exp.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert-waits-exp.mir Wed Jan 31 14:04:26 2018
@@ -20,10 +20,10 @@
 # CHECK-LABEL: name: exp_done_waitcnt{{$}}
 # CHECK: EXP_DONE
 # CHECK-NEXT: S_WAITCNT 3855
-# CHECK: %vgpr0 = V_MOV_B32
-# CHECK: %vgpr1 = V_MOV_B32
-# CHECK: %vgpr2 = V_MOV_B32
-# CHECK: %vgpr3 = V_MOV_B32
+# CHECK: $vgpr0 = V_MOV_B32
+# CHECK: $vgpr1 = V_MOV_B32
+# CHECK: $vgpr2 = V_MOV_B32
+# CHECK: $vgpr3 = V_MOV_B32
 name:            exp_done_waitcnt
 alignment:       0
 exposesReturnsTwice: false
@@ -47,17 +47,17 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0 (%ir-block.2):
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    EXP_DONE 0, killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3, -1, -1, 15, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 1056964608, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 1065353216, implicit %exec
-    %vgpr2 = V_MOV_B32_e32 1073741824, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 1082130432, implicit %exec
-    SI_RETURN_TO_EPILOG killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    EXP_DONE 0, killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3, -1, -1, 15, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 1056964608, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 1065353216, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 1073741824, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 1082130432, implicit $exec
+    SI_RETURN_TO_EPILOG killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3
 
 ...
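
The S_WAITCNT 3855 that the CHECK lines above expect after EXP_DONE packs
three counters into one immediate. Assuming the pre-GFX9 field layout
(vmcnt in bits [3:0], expcnt in [6:4], lgkmcnt in [11:8]), a sketch of the
decoding (Python, illustrative only, not part of this patch):

    imm = 3855                     # 0xF0F
    vmcnt   = imm & 0xF            # 15 -> no wait on vector memory ops
    expcnt  = (imm >> 4) & 0x7     # 0  -> wait for all exports to finish
    lgkmcnt = (imm >> 8) & 0xF     # 15 -> no wait on LDS/GDS/scalar ops
    print(vmcnt, expcnt, lgkmcnt)  # 15 0 15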

Modified: llvm/trunk/test/CodeGen/AMDGPU/inserted-wait-states.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/inserted-wait-states.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/inserted-wait-states.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/inserted-wait-states.mir Wed Jan 31 14:04:26 2018
@@ -78,23 +78,23 @@ name: div_fmas
 
 body: |
   bb.0:
-    %vcc = S_MOV_B64 0
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    $vcc = S_MOV_B64 0
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    implicit %vcc = V_CMP_EQ_I32_e32 %vgpr1, %vgpr2, implicit %exec
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    implicit $vcc = V_CMP_EQ_I32_e32 $vgpr1, $vgpr2, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
-    %vcc = V_CMP_EQ_I32_e64 %vgpr1, %vgpr2, implicit %exec
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    $vcc = V_CMP_EQ_I32_e64 $vgpr1, $vgpr2, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
-    %vgpr4, %vcc = V_DIV_SCALE_F32 %vgpr1, %vgpr1, %vgpr3, implicit %exec
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    $vgpr4, $vcc = V_DIV_SCALE_F32 $vgpr1, $vgpr1, $vgpr3, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_ENDPGM
 
 ...
@@ -128,24 +128,24 @@ name: s_getreg
 
 body: |
   bb.0:
-    S_SETREG_B32 %sgpr0, 1
-    %sgpr1 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 1
+    $sgpr1 = S_GETREG_B32 1
     S_BRANCH %bb.1
 
   bb.1:
     S_SETREG_IMM32_B32 0, 1
-    %sgpr1 = S_GETREG_B32 1
+    $sgpr1 = S_GETREG_B32 1
     S_BRANCH %bb.2
 
   bb.2:
-    S_SETREG_B32 %sgpr0, 1
-    %sgpr1 = S_MOV_B32 0
-    %sgpr2 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 1
+    $sgpr1 = S_MOV_B32 0
+    $sgpr2 = S_GETREG_B32 1
     S_BRANCH %bb.3
 
   bb.3:
-    S_SETREG_B32 %sgpr0, 0
-    %sgpr1 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 0
+    $sgpr1 = S_GETREG_B32 1
     S_ENDPGM
 ...
 
@@ -173,18 +173,18 @@ name: s_setreg
 
 body: |
   bb.0:
-    S_SETREG_B32 %sgpr0, 1
-    S_SETREG_B32 %sgpr1, 1
+    S_SETREG_B32 $sgpr0, 1
+    S_SETREG_B32 $sgpr1, 1
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_B32 %sgpr0, 64
-    S_SETREG_B32 %sgpr1, 128
+    S_SETREG_B32 $sgpr0, 64
+    S_SETREG_B32 $sgpr1, 128
     S_BRANCH %bb.2
 
   bb.2:
-    S_SETREG_B32 %sgpr0, 1
-    S_SETREG_B32 %sgpr1, 0
+    S_SETREG_B32 $sgpr0, 1
+    S_SETREG_B32 $sgpr1, 0
     S_ENDPGM
 ...
 
@@ -230,33 +230,33 @@ name: vmem_gt_8dw_store
 
 body: |
   bb.0:
-    BUFFER_STORE_DWORD_OFFSET %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_DWORDX3_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_DWORDX4_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_DWORDX4_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_FORMAT_XYZ_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_FORMAT_XYZW_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_ATOMIC_CMPSWAP_X2_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_DWORDX3_OFFSET $vgpr2_vgpr3_vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_DWORDX4_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_DWORDX4_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_FORMAT_XYZ_OFFSET $vgpr2_vgpr3_vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_FORMAT_XYZW_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_ATOMIC_CMPSWAP_X2_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_STORE_DWORDX4 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_ATOMIC_CMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_ATOMIC_FCMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
+    FLAT_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_STORE_DWORDX3 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_STORE_DWORDX4 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_ATOMIC_CMPSWAP_X2 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_ATOMIC_FCMPSWAP_X2 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -302,23 +302,23 @@ name: readwrite_lane
 
 body: |
   bb.0:
-    %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0
+    $vgpr0,$sgpr0_sgpr1 = V_ADD_I32_e64 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $sgpr4 = V_READLANE_B32 $vgpr4, $sgpr0
     S_BRANCH %bb.1
 
   bb.1:
-    %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0
+    $vgpr0,$sgpr0_sgpr1 = V_ADD_I32_e64 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_WRITELANE_B32 $sgpr0, $sgpr0
     S_BRANCH %bb.2
 
   bb.2:
-    %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo
+    $vgpr0,implicit $vcc = V_ADD_I32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $sgpr4 = V_READLANE_B32 $vgpr4, $vcc_lo
     S_BRANCH %bb.3
 
   bb.3:
-    %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %vgpr4 = V_WRITELANE_B32 %sgpr4, %vcc_lo
+    $vgpr0,implicit $vcc = V_ADD_I32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_WRITELANE_B32 $sgpr4, $vcc_lo
     S_ENDPGM
 
 ...
@@ -341,13 +341,13 @@ name: rfe
 
 body: |
   bb.0:
-    S_SETREG_B32 %sgpr0, 3
-    S_RFE_B64 %sgpr2_sgpr3
+    S_SETREG_B32 $sgpr0, 3
+    S_RFE_B64 $sgpr2_sgpr3
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_B32 %sgpr0, 0
-    S_RFE_B64 %sgpr2_sgpr3
+    S_SETREG_B32 $sgpr0, 0
+    S_RFE_B64 $sgpr2_sgpr3
     S_ENDPGM
 
 ...
@@ -370,13 +370,13 @@ name: s_mov_fed_b32
 
 body: |
   bb.0:
-    %sgpr0 = S_MOV_FED_B32 %sgpr0
-    %sgpr0 = S_MOV_B32 %sgpr0
+    $sgpr0 = S_MOV_FED_B32 $sgpr0
+    $sgpr0 = S_MOV_B32 $sgpr0
     S_BRANCH %bb.1
 
   bb.1:
-    %sgpr0 = S_MOV_FED_B32 %sgpr0
-    %vgpr0 = V_MOV_B32_e32 %sgpr0, implicit %exec
+    $sgpr0 = S_MOV_FED_B32 $sgpr0
+    $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec
     S_ENDPGM
 
 ...
@@ -410,23 +410,23 @@ name: s_movrel
 
 body: |
   bb.0:
-    %m0 = S_MOV_B32 0
-    %sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0 = S_MOVRELS_B32 $sgpr0, implicit $m0
     S_BRANCH %bb.1
 
   bb.1:
-    %m0 = S_MOV_B32 0
-    %sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0_sgpr1 = S_MOVRELS_B64 $sgpr0_sgpr1, implicit $m0
     S_BRANCH %bb.2
 
   bb.2:
-    %m0 = S_MOV_B32 0
-    %sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0 = S_MOVRELD_B32 $sgpr0, implicit $m0
     S_BRANCH %bb.3
 
   bb.3:
-    %m0 = S_MOV_B32 0
-    %sgpr0_sgpr1 = S_MOVRELD_B64 %sgpr0_sgpr1, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0_sgpr1 = S_MOVRELD_B64 $sgpr0_sgpr1, implicit $m0
     S_ENDPGM
 ...
 
@@ -459,23 +459,23 @@ name: v_interp
 
 body: |
   bb.0:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_P2_F32 $vgpr0, $vgpr1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_P1_F32_16bank $vgpr0, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $m0, implicit $exec
     S_ENDPGM
 ...
 
@@ -503,13 +503,13 @@ name: dpp
 
 body: |
   bb.0:
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr1 = V_MOV_B32_dpp %vgpr1, %vgpr0, 0, 15, 15, 0, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr1 = V_MOV_B32_dpp $vgpr1, $vgpr0, 0, 15, 15, 0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    implicit %exec, implicit %vcc = V_CMPX_EQ_I32_e32 %vgpr0, %vgpr1, implicit %exec
-    %vgpr3 = V_MOV_B32_dpp %vgpr3, %vgpr0, 0, 15, 15, 0, implicit %exec
+    implicit $exec, implicit $vcc = V_CMPX_EQ_I32_e32 $vgpr0, $vgpr1, implicit $exec
+    $vgpr3 = V_MOV_B32_dpp $vgpr3, $vgpr0, 0, 15, 15, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -521,10 +521,10 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr4_sgpr5' }
-  - { reg: '%sgpr6_sgpr7' }
-  - { reg: '%sgpr9' }
-  - { reg: '%sgpr0_sgpr1_sgpr2_sgpr3' }
+  - { reg: '$sgpr4_sgpr5' }
+  - { reg: '$sgpr6_sgpr7' }
+  - { reg: '$sgpr9' }
+  - { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -544,19 +544,19 @@ stack:
   - { id: 1, offset: 8, size: 4, alignment: 4 }
 body:             |
   bb.0.entry:
-    liveins: %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr9, %sgpr0_sgpr1_sgpr2_sgpr3
+    liveins: $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr9, $sgpr0_sgpr1_sgpr2_sgpr3
 
-    %flat_scr_lo = S_ADD_U32 %sgpr6, %sgpr9, implicit-def %scc
-    %flat_scr_hi = S_ADDC_U32 %sgpr7, 0, implicit-def %scc, implicit %scc
-    DBG_VALUE %noreg, 2, !5, !11, debug-location !12
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    dead %sgpr6_sgpr7 = KILL %sgpr4_sgpr5
-    %sgpr8 = S_MOV_B32 %sgpr5
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr + 4)
-    %sgpr8 = S_MOV_B32 %sgpr4, implicit killed %sgpr4_sgpr5
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr)
+    $flat_scr_lo = S_ADD_U32 $sgpr6, $sgpr9, implicit-def $scc
+    $flat_scr_hi = S_ADDC_U32 $sgpr7, 0, implicit-def $scc, implicit $scc
+    DBG_VALUE $noreg, 2, !5, !11, debug-location !12
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    dead $sgpr6_sgpr7 = KILL $sgpr4_sgpr5
+    $sgpr8 = S_MOV_B32 $sgpr5
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr8, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr9, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.A.addr + 4)
+    $sgpr8 = S_MOV_B32 $sgpr4, implicit killed $sgpr4_sgpr5
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr8, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr9, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.A.addr)
     S_ENDPGM
 
 ...
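Note that the rename also reaches the quoted register names in the MIR YAML
preamble, as the liveins hunk above shows: physical registers in 'reg:' fields
switch to '$', while 'virtual-reg:' fields keep '%'. A minimal sketch of the
resulting convention (hypothetical registers, not taken from this test):

    liveins:
      - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }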

Modified: llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir Wed Jan 31 14:04:26 2018
@@ -26,7 +26,7 @@
 ...
 ---
 # CHECK-LABEL: name: invert_br_undef_vcc
-# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
+# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef $vcc
 
 name:            invert_br_undef_vcc
 alignment:       0
@@ -36,7 +36,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
+  - { reg: '$sgpr0_sgpr1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -53,34 +53,34 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
 
   bb.1.else:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 100, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 100, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.3
 
   bb.2.if:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 9, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 9, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
 
   bb.3.done:
-    liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/limit-coalesce.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/limit-coalesce.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/limit-coalesce.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/limit-coalesce.mir Wed Jan 31 14:04:26 2018
@@ -11,8 +11,8 @@
 # CHECK:  - { id: 8, class: vreg_128, preferred-register: '' }
 # No more registers shall be defined
 # CHECK-NEXT: liveins:
-# CHECK:    FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %4,
-# CHECK:    FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %6,
+# CHECK:    FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4,
+# CHECK:    FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6,
 
 ---
 name:            main
@@ -33,7 +33,7 @@ registers:
   - { id: 8, class: vreg_128 }
   - { id: 9, class: vreg_128 }
 liveins:
-  - { reg: '%sgpr6', virtual-reg: '%1' }
+  - { reg: '$sgpr6', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -50,22 +50,22 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %sgpr0, %vgpr0_vgpr1
+    liveins: $sgpr0, $vgpr0_vgpr1
 
     %3 = IMPLICIT_DEF
-    undef %4.sub0 = COPY %sgpr0
+    undef %4.sub0 = COPY $sgpr0
     %4.sub1 = COPY %3.sub0
     undef %5.sub0 = COPY %4.sub1
     %5.sub1 = COPY %4.sub0
-    FLAT_STORE_DWORDX2 %vgpr0_vgpr1, killed %5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX2 $vgpr0_vgpr1, killed %5, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     %6 = IMPLICIT_DEF
     undef %7.sub0_sub1 = COPY %6
     %7.sub2 = COPY %3.sub0
-    FLAT_STORE_DWORDX3 %vgpr0_vgpr1, killed %7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX3 $vgpr0_vgpr1, killed %7, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     %8 = IMPLICIT_DEF
     undef %9.sub0_sub1_sub2 = COPY %8
     %9.sub3 = COPY %3.sub0
-    FLAT_STORE_DWORDX4 %vgpr0_vgpr1, killed %9, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX4 $vgpr0_vgpr1, killed %9, 0, 0, 0, implicit $exec, implicit $flat_scr
 ...
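This test also illustrates the point of the new convention: physical registers
now carry the '$' sigil while virtual registers keep '%', so both can appear in
one operand list without ambiguity (see the FLAT_STORE_DWORDX2 line above,
which mixes $vgpr0_vgpr1 with the virtual %5). A minimal sketch of the mixed
form (hypothetical registers, not taken from this test):

    %0:vgpr_32 = COPY $vgpr0                  ; virtual def, physical use
    FLAT_STORE_DWORD $vgpr0_vgpr1, %0, 0, 0, 0, implicit $exec, implicit $flat_scr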

Modified: llvm/trunk/test/CodeGen/AMDGPU/liveness.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/liveness.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/liveness.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/liveness.mir Wed Jan 31 14:04:26 2018
@@ -17,7 +17,7 @@ registers:
 body: |
   bb.0:
     S_NOP 0, implicit-def undef %0.sub0
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:

Modified: llvm/trunk/test/CodeGen/AMDGPU/llvm.dbg.value.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/llvm.dbg.value.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.dbg.value.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.dbg.value.ll Wed Jan 31 14:04:26 2018
@@ -5,7 +5,7 @@
 ; NOOPT: s_load_dwordx2 s[4:5]
 
 ; FIXME: Why is the SGPR4_SGPR5 reference being removed from DBG_VALUE?
-; NOOPT: ; kill: def %sgpr8_sgpr9 killed %sgpr4_sgpr5
+; NOOPT: ; kill: def $sgpr8_sgpr9 killed $sgpr4_sgpr5
 ; NOOPT-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- undef
 
 ; GCN: flat_store_dword

Modified: llvm/trunk/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir Wed Jan 31 14:04:26 2018
@@ -1,9 +1,9 @@
 # RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -run-pass machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: cluster_add_addc
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, implicit $exec
 name: cluster_add_addc
 registers:
   - { id: 0, class: vgpr_32 }
@@ -17,20 +17,20 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2, %3 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: interleave_add64s
-# GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
-# GCN-NEXT: dead %10:vgpr_32, %11:sreg_64_xexec = V_ADD_I32_e64 %2, %3, implicit %exec
-# GCN-NEXT: dead %14:vgpr_32, dead %15:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+# GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, implicit $exec
+# GCN-NEXT: dead %10:vgpr_32, %11:sreg_64_xexec = V_ADD_I32_e64 %2, %3, implicit $exec
+# GCN-NEXT: dead %14:vgpr_32, dead %15:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %11, implicit $exec
 name: interleave_add64s
 registers:
   - { id: 0, class: vgpr_32 }
@@ -52,27 +52,27 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-    %4 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_MOV_B32_e32 0, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+    %4 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_MOV_B32_e32 0, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
 
-    %8, %9 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %10, %11 = V_ADD_I32_e64 %2, %3, implicit %exec
+    %8, %9 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %10, %11 = V_ADD_I32_e64 %2, %3, implicit $exec
 
 
-    %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
-    %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+    %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit $exec
+    %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_mov_addc
-# GCN: S_NOP 0, implicit-def %vcc
+# GCN: S_NOP 0, implicit-def $vcc
 # GCN-NEXT: %2:sreg_64_xexec = S_MOV_B64 0
-# GCN-NEXT: dead %3:vgpr_32, dead %4:sreg_64_xexec = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+# GCN-NEXT: dead %3:vgpr_32, dead %4:sreg_64_xexec = V_ADDC_U32_e64 %0, %1, %2, implicit $exec
 name: cluster_mov_addc
 registers:
   - { id: 0, class: vgpr_32 }
@@ -85,20 +85,20 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     %2 = S_MOV_B64 0
-    S_NOP 0, implicit def %vcc
-    %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+    S_NOP 0, implicit def $vcc
+    %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit $exec
 ...
 
 # GCN-LABEL: name: no_cluster_add_addc_diff_sgpr
-# GCN: dead %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: %6:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: %7:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: S_NOP 0, implicit-def %vcc
+# GCN: dead %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: %7:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: S_NOP 0, implicit-def $vcc
 # GCN-NEXT: %8:sreg_64_xexec = S_MOV_B64 0
-# GCN-NEXT: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+# GCN-NEXT: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %8, implicit $exec
 name: no_cluster_add_addc_diff_sgpr
 registers:
   - { id: 0, class: vgpr_32 }
@@ -112,19 +112,19 @@ registers:
   - { id: 8, class: sreg_64_xexec }
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     %8 = S_MOV_B64 0
-    %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+    %2, %3 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit $exec
 ...
 # GCN-LABEL: name: cluster_sub_subb
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_SUB_I32_e64 %0, %1, implicit %exec
-# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_SUB_I32_e64 %0, %1, implicit $exec
+# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_SUBB_U32_e64 %6, %7, %3, implicit $exec
 name: cluster_sub_subb
 registers:
   - { id: 0, class: vgpr_32 }
@@ -138,19 +138,19 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2, %3 = V_SUB_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2, %3 = V_SUB_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_cmp_cndmask
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN-NEXT: %3:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %4:vgpr_32 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN-NEXT: %3:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %4:vgpr_32 = V_CNDMASK_B32_e64 %0, %1, %3, implicit $exec
 name: cluster_cmp_cndmask
 registers:
   - { id: 0, class: vgpr_32 }
@@ -164,17 +164,17 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_multi_use_cmp_cndmask
-# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 name: cluster_multi_use_cmp_cndmask
 registers:
   - { id: 0, class: vgpr_32 }
@@ -188,22 +188,22 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-
-    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+
+    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_multi_use_cmp_cndmask2
-# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-# GCN-NEXT: %3:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+# GCN-NEXT: %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 name: cluster_multi_use_cmp_cndmask2
 registers:
   - { id: 0, class: vgpr_32 }
@@ -217,11 +217,11 @@ registers:
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir Wed Jan 31 14:04:26 2018
@@ -65,8 +65,8 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr0_sgpr1' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -84,38 +84,38 @@ frameInfo:
 body:             |
   bb.0 (%ir-block.0):
     successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
  
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %vgpr1 = V_ASHRREV_I32_e32 31, %vgpr0, implicit %exec
-    %vgpr1_vgpr2 = V_LSHL_B64 %vgpr0_vgpr1, 3, implicit %exec
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 0
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec
+    $vgpr1_vgpr2 = V_LSHL_B64 $vgpr0_vgpr1, 3, implicit $exec
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 0
     S_WAITCNT 127
-    %vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed %vgpr1_vgpr2, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 8 from %ir.tid.gep)
-    %vgpr0 = V_XOR_B32_e32 1, killed %vgpr0, implicit %exec
-    V_CMP_NE_U32_e32 0, killed %vgpr0, implicit-def %vcc, implicit %exec
-    %sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-    %sgpr2_sgpr3 = S_XOR_B64 %exec, killed %sgpr2_sgpr3, implicit-def dead %scc
-    SI_MASK_BRANCH %bb.2.exit, implicit %exec
+    $vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed $vgpr1_vgpr2, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 8 from %ir.tid.gep)
+    $vgpr0 = V_XOR_B32_e32 1, killed $vgpr0, implicit $exec
+    V_CMP_NE_U32_e32 0, killed $vgpr0, implicit-def $vcc, implicit $exec
+    $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+    $sgpr2_sgpr3 = S_XOR_B64 $exec, killed $sgpr2_sgpr3, implicit-def dead $scc
+    SI_MASK_BRANCH %bb.2.exit, implicit $exec
  
   bb.1.atomic:
     successors: %bb.2.exit(0x80000000)
-    liveins: %sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, %sgpr0_sgpr1, %sgpr2_sgpr3, %vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
+    liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
  
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    dead %vgpr0 = V_MOV_B32_e32 -1, implicit %exec
-    dead %vgpr0 = V_MOV_B32_e32 61440, implicit %exec
-    %sgpr4_sgpr5 = S_MOV_B64 0
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
+    dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
+    $sgpr4_sgpr5 = S_MOV_B64 0
     S_WAITCNT 127
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr0, implicit %exec, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
     S_WAITCNT 3952
-    BUFFER_ATOMIC_SMAX_ADDR64 killed %vgpr0, killed %vgpr1_vgpr2, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit %exec :: (volatile load seq_cst 4 from %ir.gep)
+    BUFFER_ATOMIC_SMAX_ADDR64 killed $vgpr0, killed $vgpr1_vgpr2, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit $exec :: (volatile load seq_cst 4 from %ir.gep)
  
   bb.2.exit:
-    liveins: %sgpr2_sgpr3
+    liveins: $sgpr2_sgpr3
 
-    %exec = S_OR_B64 %exec, killed %sgpr2_sgpr3, implicit-def %scc
+    $exec = S_OR_B64 $exec, killed $sgpr2_sgpr3, implicit-def $scc
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir Wed Jan 31 14:04:26 2018
@@ -79,8 +79,8 @@ selected:        false
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -112,52 +112,52 @@ constants:
 body:             |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
+    liveins: $sgpr0_sgpr1, $sgpr3
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir Wed Jan 31 14:04:26 2018
@@ -66,7 +66,7 @@
 # CHECK-LABEL: name: multiple_mem_operands
 
 # CHECK-LABEL: bb.3.done:
-# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 1, 1, 0
+# CHECK: BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 1, 1, 0
 
 name:            multiple_mem_operands
 alignment:       0
@@ -77,8 +77,8 @@ selected:        false
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -110,52 +110,52 @@ constants:
 body:             |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
+    liveins: $sgpr0_sgpr1, $sgpr3
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir Wed Jan 31 14:04:26 2018
@@ -66,7 +66,7 @@
 # CHECK-LABEL: name: multiple_mem_operands
 
 # CHECK-LABEL: bb.3.done:
-# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0
+# CHECK: BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0
 
 name:            multiple_mem_operands
 alignment:       0
@@ -77,8 +77,8 @@ selected:        false
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -110,52 +110,52 @@ constants:
 body:             |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
+    liveins: $sgpr0_sgpr1, $sgpr3
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/merge-load-store-vreg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/merge-load-store-vreg.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/merge-load-store-vreg.mir Wed Jan 31 14:04:26 2018
@@ -3,7 +3,7 @@
 
 # If there's a base offset, check that SILoadStoreOptimizer creates
 # V_ADD_{I|U}32_e64 for that offset; _e64 uses a vreg for the carry (rather than
-# %vcc, which is used in _e32); this ensures that %vcc is not inadvertently
+# $vcc, which is used in _e32); this ensures that $vcc is not inadvertently
 # clobbered.
 
 # GCN-LABEL: name: kernel
@@ -46,15 +46,15 @@ body:             |
     S_ENDPGM
 
   bb.2:
-    %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0, 0, implicit %exec
-    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %1, implicit %exec
-    V_CMP_NE_U32_e32 1, %2, implicit-def %vcc, implicit %exec
-    DS_WRITE_B32 %0, %0, 1024, 0, implicit %m0, implicit %exec :: (store 4 into %ir.tmp)
-    %3:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-    DS_WRITE_B32 %0, %3, 1056, 0, implicit %m0, implicit %exec :: (store 4 into %ir.tmp1)
-    %4:vgpr_32 = DS_READ_B32 %3, 1088, 0, implicit %m0, implicit %exec :: (load 4 from %ir.tmp2)
-    %5:vgpr_32 = DS_READ_B32 %3, 1120, 0, implicit %m0, implicit %exec :: (load 4 from %ir.tmp3)
-    %vcc = S_AND_B64 %exec, %vcc, implicit-def %scc
-    S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+    %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0, 0, implicit $exec
+    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %1, implicit $exec
+    V_CMP_NE_U32_e32 1, %2, implicit-def $vcc, implicit $exec
+    DS_WRITE_B32 %0, %0, 1024, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp)
+    %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    DS_WRITE_B32 %0, %3, 1056, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp1)
+    %4:vgpr_32 = DS_READ_B32 %3, 1088, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp2)
+    %5:vgpr_32 = DS_READ_B32 %3, 1120, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp3)
+    $vcc = S_AND_B64 $exec, $vcc, implicit-def $scc
+    S_CBRANCH_VCCNZ %bb.1, implicit $vcc
     S_BRANCH %bb.1
 ...
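For reference, the _e32/_e64 distinction that the comment in this test relies
on, restated in the new notation (a hypothetical sketch, not taken from the
test):

    ; _e32: carry-out is the physical $vcc, which a later _e32 op could clobber
    %5:vgpr_32 = V_ADD_I32_e32 %0, %1, implicit-def $vcc, implicit $exec
    ; _e64: carry-out goes to a virtual 64-bit SGPR pair, leaving $vcc alone
    %6:vgpr_32, %7:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec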

Modified: llvm/trunk/test/CodeGen/AMDGPU/merge-load-store.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/merge-load-store.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/merge-load-store.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/merge-load-store.mir Wed Jan 31 14:04:26 2018
@@ -34,7 +34,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%1' }
+  - { reg: '$vgpr0', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -51,20 +51,20 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %1:vgpr_32 = COPY %vgpr0
-    %m0 = S_MOV_B32 -1
-    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.0)
-    DS_WRITE_B32 %1, killed %2, 64, 0, implicit %m0, implicit %exec :: (store 4 into %ir.ptr.64)
+    %1:vgpr_32 = COPY $vgpr0
+    $m0 = S_MOV_B32 -1
+    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.0)
+    DS_WRITE_B32 %1, killed %2, 64, 0, implicit $m0, implicit $exec :: (store 4 into %ir.ptr.64)
 
     ; Make this load unmergeable, to tempt SILoadStoreOptimizer into merging the
     ; other two loads.
-    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit %m0, implicit %exec :: (load 8 from %ir.ptr.64, align 4)
+    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit $m0, implicit $exec :: (load 8 from %ir.ptr.64, align 4)
     %3:vgpr_32 = COPY %6.sub0
-    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.4)
-    %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def %vcc, implicit %exec
-    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed %m0, implicit %exec :: (store 4 into %ir.ptr.0)
+    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.4)
+    %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def $vcc, implicit $exec
+    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed $m0, implicit $exec :: (store 4 into %ir.ptr.0)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/merge-m0.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/merge-m0.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/merge-m0.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/merge-m0.mir Wed Jan 31 14:04:26 2018
@@ -64,68 +64,68 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    S_CBRANCH_VCCZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
     successors: %bb.2
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
     successors: %bb.3
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
     successors: %bb.4, %bb.5
-    S_CBRANCH_VCCZ %bb.4, implicit undef %vcc
+    S_CBRANCH_VCCZ %bb.4, implicit undef $vcc
     S_BRANCH %bb.5
 
   bb.4:
     successors: %bb.6
-    SI_INIT_M0 3, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 4, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 3, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 4, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.6
 
   bb.5:
     successors: %bb.6
-    SI_INIT_M0 3, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 4, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 3, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 4, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.6
 
   bb.6:
     successors: %bb.0.entry, %bb.6
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     %2 = IMPLICIT_DEF
-    SI_INIT_M0 %2, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 %2, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    S_CBRANCH_VCCZ %bb.6, implicit undef %vcc
+    SI_INIT_M0 %2, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 %2, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    S_CBRANCH_VCCZ %bb.6, implicit undef $vcc
     S_BRANCH %bb.0.entry
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/misched-killflags.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/misched-killflags.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/misched-killflags.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/misched-killflags.mir Wed Jan 31 14:04:26 2018
@@ -5,41 +5,41 @@ name: func0
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3
+    liveins: $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3
 
-    %sgpr33 = S_MOV_B32 %sgpr7
-    %sgpr32 = S_MOV_B32 %sgpr33
-    %sgpr10 = S_MOV_B32 5
-    %sgpr9 = S_MOV_B32 4
-    %sgpr8 = S_MOV_B32 3
-    BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
-      %sgpr6_sgpr7 = S_GETPC_B64
-      %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
-      %sgpr7 = S_ADDC_U32 internal %sgpr7,0, implicit-def %scc, implicit internal %scc
+    $sgpr33 = S_MOV_B32 $sgpr7
+    $sgpr32 = S_MOV_B32 $sgpr33
+    $sgpr10 = S_MOV_B32 5
+    $sgpr9 = S_MOV_B32 4
+    $sgpr8 = S_MOV_B32 3
+    BUNDLE implicit-def $sgpr6_sgpr7, implicit-def $sgpr6, implicit-def $sgpr7, implicit-def $scc {
+      $sgpr6_sgpr7 = S_GETPC_B64
+      $sgpr6 = S_ADD_U32 internal $sgpr6, 0, implicit-def $scc
+      $sgpr7 = S_ADDC_U32 internal $sgpr7,0, implicit-def $scc, implicit internal $scc
     }
-    %sgpr4 = S_MOV_B32 %sgpr33
-    %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr3 = V_MOV_B32_e32 %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
-    S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+    $sgpr4 = S_MOV_B32 $sgpr33
+    $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr2 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr3 = V_MOV_B32_e32 $sgpr11, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec
+    S_NOP 0, implicit killed $sgpr6_sgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3
     S_ENDPGM
 ...
 # CHECK-LABEL: name: func0
-# CHECK: %sgpr10 = S_MOV_B32 5
-# CHECK: %sgpr9 = S_MOV_B32 4
-# CHECK: %sgpr8 = S_MOV_B32 3
-# CHECK: %sgpr33 = S_MOV_B32 killed %sgpr7
-# CHECK: %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
-# CHECK:   %sgpr6_sgpr7 = S_GETPC_B64
-# CHECK:   %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
-# CHECK:   %sgpr7 = S_ADDC_U32 internal %sgpr7, 0, implicit-def %scc, implicit internal %scc
+# CHECK: $sgpr10 = S_MOV_B32 5
+# CHECK: $sgpr9 = S_MOV_B32 4
+# CHECK: $sgpr8 = S_MOV_B32 3
+# CHECK: $sgpr33 = S_MOV_B32 killed $sgpr7
+# CHECK: $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: BUNDLE implicit-def $sgpr6_sgpr7, implicit-def $sgpr6, implicit-def $sgpr7, implicit-def $scc {
+# CHECK:   $sgpr6_sgpr7 = S_GETPC_B64
+# CHECK:   $sgpr6 = S_ADD_U32 internal $sgpr6, 0, implicit-def $scc
+# CHECK:   $sgpr7 = S_ADDC_U32 internal $sgpr7, 0, implicit-def $scc, implicit internal $scc
 # CHECK: }
-# CHECK: %sgpr4 = S_MOV_B32 %sgpr33
-# CHECK: %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: %vgpr3 = V_MOV_B32_e32 killed %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
-# CHECK: %sgpr32 = S_MOV_B32 killed %sgpr33
-# CHECK: S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+# CHECK: $sgpr4 = S_MOV_B32 $sgpr33
+# CHECK: $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: $vgpr2 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: $vgpr3 = V_MOV_B32_e32 killed $sgpr11, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec
+# CHECK: $sgpr32 = S_MOV_B32 killed $sgpr33
+# CHECK: S_NOP 0, implicit killed $sgpr6_sgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3
 # CHECK: S_ENDPGM

Modified: llvm/trunk/test/CodeGen/AMDGPU/movrels-bug.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/movrels-bug.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/movrels-bug.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/movrels-bug.mir Wed Jan 31 14:04:26 2018
@@ -20,12 +20,12 @@ name:            main
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %m0 = S_MOV_B32 undef %sgpr0
-    V_MOVRELD_B32_e32 undef %vgpr2, 0, implicit %m0, implicit %exec, implicit-def %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
-    %m0 = S_MOV_B32 undef %sgpr0
-    %vgpr1 = V_MOVRELS_B32_e32 undef %vgpr1, implicit %m0, implicit %exec, implicit killed %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-    %vgpr4 = V_MAC_F32_e32 undef %vgpr0, undef %vgpr0, undef %vgpr4, implicit %exec
-    EXP_DONE 15, undef %vgpr0, killed %vgpr1, killed %vgpr4, undef %vgpr0, 0, 0, 12, implicit %exec
+    $m0 = S_MOV_B32 undef $sgpr0
+    V_MOVRELD_B32_e32 undef $vgpr2, 0, implicit $m0, implicit $exec, implicit-def $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
+    $m0 = S_MOV_B32 undef $sgpr0
+    $vgpr1 = V_MOVRELS_B32_e32 undef $vgpr1, implicit $m0, implicit $exec, implicit killed $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
+    $vgpr4 = V_MAC_F32_e32 undef $vgpr0, undef $vgpr0, undef $vgpr4, implicit $exec
+    EXP_DONE 15, undef $vgpr0, killed $vgpr1, killed $vgpr4, undef $vgpr0, 0, 0, 12, implicit $exec
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir Wed Jan 31 14:04:26 2018
@@ -6,19 +6,19 @@
 # GCN:        %[[HI:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
 # GCN-NEXT:   %[[LO:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
 # GCN-NEXT:   %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[HI]], %subreg.sub1
-# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
+# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
 
 
 # GCN-LABEL: {{^}}name: const_to_sgpr_multiple_use{{$}}
 # GCN:        %[[HI:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
 # GCN-NEXT:   %[[LO:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
 # GCN-NEXT:   %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[HI]], %subreg.sub1
-# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
-# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
+# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
+# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
 
 # GCN-LABEL: {{^}}name: const_to_sgpr_subreg{{$}}
 # GCN:       %[[OP0:[0-9]+]]:vreg_64 = REG_SEQUENCE killed %{{[0-9]+}}, %subreg.sub0, killed %{{[0-9]+}}, %subreg.sub1
-# GCN-NEXT:  V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit %exec
+# GCN-NEXT:  V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit $exec
 
 --- |
   define amdgpu_kernel void @const_to_sgpr(i32 addrspace(1)* nocapture %arg, i64 %id) {
@@ -96,15 +96,15 @@ registers:
   - { id: 29, class: vgpr_32 }
   - { id: 30, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body:             |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %6 = COPY %7
@@ -115,32 +115,32 @@ body:             |
     %12 = COPY %10.sub1
     %13 = COPY %8.sub0
     %14 = COPY %8.sub1
-    %15 = S_ADD_U32 killed %11, killed %13, implicit-def %scc
-    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead %scc, implicit %scc
+    %15 = S_ADD_U32 killed %11, killed %13, implicit-def $scc
+    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead $scc, implicit $scc
     %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
     %18 = S_MOV_B32 0
     %19 = S_MOV_B32 1048576
     %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
     %22 = COPY killed %20
-    %21 = V_CMP_LT_U64_e64 killed %17, %22, implicit %exec
-    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %21 = V_CMP_LT_U64_e64 killed %17, %22, implicit $exec
+    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %23 = S_MOV_B32 2
-    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead %scc
+    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead $scc
     %25 = S_MOV_B32 61440
     %26 = S_MOV_B32 0
     %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
     %28 = REG_SEQUENCE %6, 17, killed %27, 18
-    %29 = V_MOV_B32_e32 0, implicit %exec
+    %29 = V_MOV_B32_e32 0, implicit $exec
     %30 = COPY %24
-    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
@@ -194,15 +194,15 @@ registers:
   - { id: 38, class: vgpr_32 }
   - { id: 39, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body:             |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %9 = S_LOAD_DWORDX2_IMM %3, 13, 0
@@ -214,39 +214,39 @@ body:             |
     %13 = COPY %11.sub1
     %14 = COPY %8.sub0
     %15 = COPY %8.sub1
-    %16 = S_ADD_U32 %12, killed %14, implicit-def %scc
-    %17 = S_ADDC_U32 %13, killed %15, implicit-def dead %scc, implicit %scc
+    %16 = S_ADD_U32 %12, killed %14, implicit-def $scc
+    %17 = S_ADDC_U32 %13, killed %15, implicit-def dead $scc, implicit $scc
     %18 = REG_SEQUENCE killed %16, %subreg.sub0, killed %17, %subreg.sub1
     %19 = COPY %9.sub0
     %20 = COPY %9.sub1
-    %21 = S_ADD_U32 %12, killed %19, implicit-def %scc
-    %22 = S_ADDC_U32 %13, killed %20, implicit-def dead %scc, implicit %scc
+    %21 = S_ADD_U32 %12, killed %19, implicit-def $scc
+    %22 = S_ADDC_U32 %13, killed %20, implicit-def dead $scc, implicit $scc
     %23 = REG_SEQUENCE killed %21, %subreg.sub0, killed %22, %subreg.sub1
     %24 = S_MOV_B32 0
     %25 = S_MOV_B32 1048576
     %26 = REG_SEQUENCE killed %25, %subreg.sub0, killed %24, %subreg.sub1
     %28 = COPY %26
-    %27 = V_CMP_LT_U64_e64 killed %18, %28, implicit %exec
-    %29 = V_CMP_LT_U64_e64 killed %23, %28, implicit %exec
-    %31 = S_AND_B64 killed %27, killed %29, implicit-def dead %scc
-    %1 = SI_IF killed %31, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %27 = V_CMP_LT_U64_e64 killed %18, %28, implicit $exec
+    %29 = V_CMP_LT_U64_e64 killed %23, %28, implicit $exec
+    %31 = S_AND_B64 killed %27, killed %29, implicit-def dead $scc
+    %1 = SI_IF killed %31, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %32 = S_MOV_B32 2
-    %33 = S_LSHL_B64 %0, killed %32, implicit-def dead %scc
+    %33 = S_LSHL_B64 %0, killed %32, implicit-def dead $scc
     %34 = S_MOV_B32 61440
     %35 = S_MOV_B32 0
     %36 = REG_SEQUENCE killed %35, %subreg.sub0, killed %34, %subreg.sub1
     %37 = REG_SEQUENCE %6, 17, killed %36, 18
-    %38 = V_MOV_B32_e32 0, implicit %exec
+    %38 = V_MOV_B32_e32 0, implicit $exec
     %39 = COPY %33
-    BUFFER_STORE_DWORD_ADDR64 killed %38, killed %39, killed %37, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %38, killed %39, killed %37, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
@@ -291,15 +291,15 @@ registers:
   - { id: 29, class: vgpr_32 }
   - { id: 30, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body:             |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %6 = COPY %7
@@ -310,32 +310,32 @@ body:             |
     %12 = COPY %10.sub1
     %13 = COPY %8.sub0
     %14 = COPY %8.sub1
-    %15 = S_ADD_U32 killed %11, killed %13, implicit-def %scc
-    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead %scc, implicit %scc
+    %15 = S_ADD_U32 killed %11, killed %13, implicit-def $scc
+    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead $scc, implicit $scc
     %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
     %18 = S_MOV_B32 12
     %19 = S_MOV_B32 1048576
     %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
     %22 = COPY killed %20.sub1
-    %21 = V_CMP_LT_U32_e64 killed %17.sub0, %22, implicit %exec
-    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %21 = V_CMP_LT_U32_e64 killed %17.sub0, %22, implicit $exec
+    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %23 = S_MOV_B32 2
-    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead %scc
+    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead $scc
     %25 = S_MOV_B32 61440
     %26 = S_MOV_B32 0
     %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
     %28 = REG_SEQUENCE %6, 17, killed %27, 18
-    %29 = V_MOV_B32_e32 0, implicit %exec
+    %29 = V_MOV_B32_e32 0, implicit $exec
     %30 = COPY %24
-    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir Wed Jan 31 14:04:26 2018
@@ -147,8 +147,8 @@
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_and_saveexec_xor
@@ -159,7 +159,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -176,37 +176,37 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_and_saveexec
@@ -217,7 +217,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -234,36 +234,36 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_or_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_or_saveexec
@@ -274,7 +274,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -291,39 +291,39 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_OR_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
 # CHECK-NEXT: SI_MASK_BRANCH
 name:            optimize_if_and_saveexec_xor_valu_middle
 alignment:       0
@@ -333,7 +333,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -350,41 +350,41 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY $sgpr0_sgpr1
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 name:            optimize_if_and_saveexec_xor_wrong_reg
 alignment:       0
 exposesReturnsTwice: false
@@ -393,7 +393,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -410,40 +410,40 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr6 = S_MOV_B32 -1
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term %sgpr0_sgpr1
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr6 = S_MOV_B32 -1
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term $sgpr0_sgpr1
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    liveins: $sgpr0_sgpr1 , $sgpr4_sgpr5_sgpr6_sgpr7
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7
+    liveins: $vgpr0, $sgpr0_sgpr1, $sgpr4_sgpr5_sgpr6_sgpr7
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_modify_copy_to_exec{{$}}
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 
 name:            optimize_if_and_saveexec_xor_modify_copy_to_exec
 alignment:       0
@@ -453,7 +453,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -470,42 +470,42 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr0 = S_MOV_B32 0
-    %sgpr1 = S_MOV_B32 1
-    %sgpr2 = S_MOV_B32 -1
-    %sgpr3 = S_MOV_B32 61440
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr0 = S_MOV_B32 0
+    $sgpr1 = S_MOV_B32 1
+    $sgpr2 = S_MOV_B32 -1
+    $sgpr3 = S_MOV_B32 61440
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}}
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY %sgpr2_sgpr3
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY $sgpr2_sgpr3
 # CHECK-NEXT: SI_MASK_BRANCH
 name:            optimize_if_and_saveexec_xor_live_out_setexec
 alignment:       0
@@ -515,7 +515,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -532,40 +532,40 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
-    S_SLEEP 0, implicit %sgpr2_sgpr3
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+    S_SLEEP 0, implicit $sgpr2_sgpr3
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 
 # CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = COPY %exec
-# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr0_sgpr1 = COPY $exec
+# CHECK: $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 
 name:            optimize_if_unknown_saveexec
 alignment:       0
@@ -575,7 +575,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -592,36 +592,36 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_andn2_saveexec
@@ -632,7 +632,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -649,38 +649,38 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_ANDN2_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
-# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 name:            optimize_if_andn2_saveexec_no_commute
 alignment:       0
 exposesReturnsTwice: false
@@ -689,7 +689,7 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -706,30 +706,30 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/readlane_exec0.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/readlane_exec0.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/readlane_exec0.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/readlane_exec0.mir Wed Jan 31 14:04:26 2018
@@ -10,23 +10,23 @@ name: readlane_exec0
 body:       |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: %vgpr1_vgpr2:0x00000001, %vgpr2_vgpr3:0x00000003
+    liveins: $vgpr1_vgpr2:0x00000001, $vgpr2_vgpr3:0x00000003
 
-    %vgpr4 = V_AND_B32_e32 1, %vgpr1, implicit %exec
-    V_CMP_EQ_U32_e32 1, killed %vgpr4, implicit-def %vcc, implicit %exec
-    %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 killed %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $vgpr4 = V_AND_B32_e32 1, $vgpr1, implicit $exec
+    V_CMP_EQ_U32_e32 1, killed $vgpr4, implicit-def $vcc, implicit $exec
+    $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
 
-   %sgpr10 = V_READFIRSTLANE_B32 %vgpr2, implicit %exec
-   %sgpr11 = V_READFIRSTLANE_B32 %vgpr3, implicit %exec
-   %sgpr10 = S_LOAD_DWORD_IMM killed %sgpr10_sgpr11, 0, 0
+   $sgpr10 = V_READFIRSTLANE_B32 $vgpr2, implicit $exec
+   $sgpr11 = V_READFIRSTLANE_B32 $vgpr3, implicit $exec
+   $sgpr10 = S_LOAD_DWORD_IMM killed $sgpr10_sgpr11, 0, 0
    S_WAITCNT 127
-   %vgpr0 = V_XOR_B32_e32 killed %sgpr10, killed %vgpr0, implicit %exec
+   $vgpr0 = V_XOR_B32_e32 killed $sgpr10, killed $vgpr0, implicit $exec
 
   bb.2:
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/reduce-saveexec.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/reduce-saveexec.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/reduce-saveexec.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/reduce-saveexec.mir Wed Jan 31 14:04:26 2018
@@ -2,146 +2,146 @@
 
 ---
 # GCN-LABEL: name: reduce_and_saveexec
-# GCN:      %exec = S_AND_B64 %exec, killed %vcc
+# GCN:      $exec = S_AND_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_and_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_and_saveexec_commuted
-# GCN:      %exec = S_AND_B64 killed %vcc, %exec
+# GCN:      $exec = S_AND_B64 killed $vcc, $exec
 # GCN-NEXT: S_ENDPGM
 name: reduce_and_saveexec_commuted
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 killed %vcc, %exec, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 killed $vcc, $exec, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_and_saveexec_liveout
-# GCN:      %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc
-# GCN-NEXT: %exec = COPY
+# GCN:      $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc
+# GCN-NEXT: $exec = COPY
 name: reduce_and_saveexec_liveout
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: and_saveexec
-# GCN:      %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc
+# GCN:      $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc
 # GCN-NEXT: S_ENDPGM
 name: and_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = COPY %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term %sgpr2_sgpr3
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = COPY $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term $sgpr2_sgpr3
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_or_saveexec
-# GCN:      %exec = S_OR_B64 %exec, killed %vcc
+# GCN:      $exec = S_OR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_or_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_xor_saveexec
-# GCN:      %exec = S_XOR_B64 %exec, killed %vcc
+# GCN:      $exec = S_XOR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_xor_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_andn2_saveexec
-# GCN:      %exec = S_ANDN2_B64 %exec, killed %vcc
+# GCN:      $exec = S_ANDN2_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_andn2_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_ANDN2_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_ANDN2_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_orn2_saveexec
-# GCN:      %exec = S_ORN2_B64 %exec, killed %vcc
+# GCN:      $exec = S_ORN2_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_orn2_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_ORN2_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_ORN2_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_nand_saveexec
-# GCN:      %exec = S_NAND_B64 %exec, killed %vcc
+# GCN:      $exec = S_NAND_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_nand_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_NAND_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_NAND_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_nor_saveexec
-# GCN:      %exec = S_NOR_B64 %exec, killed %vcc
+# GCN:      $exec = S_NOR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_nor_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_NOR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_NOR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_xnor_saveexec
-# GCN:      %exec = S_XNOR_B64 %exec, killed %vcc
+# GCN:      $exec = S_XNOR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_xnor_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_XNOR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_XNOR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---

Modified: llvm/trunk/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/regcoal-subrange-join.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/regcoal-subrange-join.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/regcoal-subrange-join.mir Wed Jan 31 14:04:26 2018
@@ -4,8 +4,8 @@
 # This test will provoke a subrange join (see annotations below) during simple register coalescing
 # Without a fix for PR33524, this causes an unreachable in SubRange Join
 #
-# GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY %sgpr5
-# GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY %sgpr2
+# GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY $sgpr5
+# GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY $sgpr2
 # GCN-DAG: %[[REG0]].sub1:sgpr_64 = S_MOV_B32 1
 # GCN-DAG: %[[REG1]].sub1:sgpr_64 = S_MOV_B32 1
 
@@ -82,14 +82,14 @@ registers:
   - { id: 60, class: sreg_32_xm0 }
   - { id: 61, class: vreg_128 }
 liveins:
-  - { reg: '%sgpr2', virtual-reg: '%12' }
-  - { reg: '%sgpr5', virtual-reg: '%15' }
+  - { reg: '$sgpr2', virtual-reg: '%12' }
+  - { reg: '$sgpr5', virtual-reg: '%15' }
 body:             |
   bb.0:
-    liveins: %sgpr2, %sgpr5
+    liveins: $sgpr2, $sgpr5
 
-    %15 = COPY killed %sgpr5
-    %12 = COPY killed %sgpr2
+    %15 = COPY killed $sgpr5
+    %12 = COPY killed $sgpr2
     %17 = S_MOV_B32 1
     undef %18.sub1 = COPY %17
     %0 = COPY %18
@@ -104,7 +104,7 @@ body:             |
     %1 = COPY killed %25
     %26 = S_LOAD_DWORDX2_IMM %0, 2, 0
     dead %27 = S_LOAD_DWORD_IMM killed %26, 0, 0
-    S_CBRANCH_SCC0 %bb.1, implicit undef %scc
+    S_CBRANCH_SCC0 %bb.1, implicit undef $scc
 
   bb.5:
     %58 = COPY killed %1
@@ -112,11 +112,11 @@ body:             |
     S_BRANCH %bb.2
 
   bb.1:
-    %30 = V_MOV_B32_e32 1036831949, implicit %exec
-    %31 = V_ADD_F32_e32 %30, %1.sub3, implicit %exec
-    %33 = V_ADD_F32_e32 %30, %1.sub2, implicit %exec
-    %35 = V_ADD_F32_e32 %30, %1.sub1, implicit %exec
-    %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit %exec
+    %30 = V_MOV_B32_e32 1036831949, implicit $exec
+    %31 = V_ADD_F32_e32 %30, %1.sub3, implicit $exec
+    %33 = V_ADD_F32_e32 %30, %1.sub2, implicit $exec
+    %35 = V_ADD_F32_e32 %30, %1.sub1, implicit $exec
+    %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit $exec
     undef %56.sub0 = COPY killed %37
     %56.sub1 = COPY killed %35
     %56.sub2 = COPY killed %33
@@ -131,7 +131,7 @@ body:             |
     %3 = COPY killed %58
     %39 = S_LOAD_DWORDX2_IMM killed %0, 6, 0
     %40 = S_LOAD_DWORD_IMM killed %39, 0, 0
-    %43 = V_MOV_B32_e32 -1102263091, implicit %exec
+    %43 = V_MOV_B32_e32 -1102263091, implicit $exec
     %60 = COPY killed %4
     %61 = COPY killed %3
 
@@ -140,23 +140,23 @@ body:             |
 
     %7 = COPY killed %61
     %6 = COPY killed %60
-    %8 = S_ADD_I32 killed %6, 1, implicit-def dead %scc
-    %44 = V_ADD_F32_e32 %43, %7.sub3, implicit %exec
-    %46 = V_ADD_F32_e32 %43, %7.sub2, implicit %exec
-    %48 = V_ADD_F32_e32 %43, %7.sub1, implicit %exec
-    %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit %exec
+    %8 = S_ADD_I32 killed %6, 1, implicit-def dead $scc
+    %44 = V_ADD_F32_e32 %43, %7.sub3, implicit $exec
+    %46 = V_ADD_F32_e32 %43, %7.sub2, implicit $exec
+    %48 = V_ADD_F32_e32 %43, %7.sub1, implicit $exec
+    %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit $exec
     undef %57.sub0 = COPY killed %50
     %57.sub1 = COPY killed %48
     %57.sub2 = COPY %46
     %57.sub3 = COPY killed %44
-    S_CMP_LT_I32 %8, %40, implicit-def %scc
+    S_CMP_LT_I32 %8, %40, implicit-def $scc
     %60 = COPY killed %8
     %61 = COPY killed %57
-    S_CBRANCH_SCC1 %bb.3, implicit killed %scc
+    S_CBRANCH_SCC1 %bb.3, implicit killed $scc
     S_BRANCH %bb.4
 
   bb.4:
-    EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit %exec
+    EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit $exec
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-dbg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-dbg.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-dbg.mir Wed Jan 31 14:04:26 2018
@@ -48,29 +48,29 @@ registers:
   - { id: 19, class: vreg_64 }
   - { id: 20, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY killed %vgpr0
-    %0 = COPY killed %sgpr0_sgpr1
+    %3 = COPY killed $vgpr0
+    %0 = COPY killed $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %18 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %18 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     undef %19.sub0 = COPY killed %3
     %19.sub1 = COPY killed %18
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
-    DBG_VALUE debug-use %11, debug-use %noreg, !1, !8, debug-location !9
+    DBG_VALUE debug-use %11, debug-use $noreg, !1, !8, debug-location !9
     undef %12.sub0 = COPY killed %11
     %12.sub1 = COPY killed %10
     undef %13.sub0_sub1 = COPY killed %4
     %13.sub2_sub3 = COPY killed %12
-    %20 = V_LSHL_B64 killed %19, 2, implicit %exec
+    %20 = V_LSHL_B64 killed %19, 2, implicit $exec
     %16 = COPY killed %5
-    BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+    BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-prune.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-prune.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-prune.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/regcoalesce-prune.mir Wed Jan 31 14:04:26 2018
@@ -10,9 +10,9 @@ name: func
 tracksRegLiveness: true
 body: |
   bb.0:
-    undef %5.sub1 = V_MOV_B32_e32 0, implicit %exec
+    undef %5.sub1 = V_MOV_B32_e32 0, implicit $exec
     %6 = COPY %5
-    S_CBRANCH_VCCZ %bb.2, implicit undef %vcc
+    S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
 
   bb.1:
     %1 : sreg_32_xm0 = S_MOV_B32 0
@@ -23,9 +23,9 @@ body: |
     %6 : vreg_64 = COPY killed %4
 
   bb.2:
-    %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit %exec
+    %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit $exec
 
   bb.3:
-    %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit %exec
+    %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit $exec
     S_ENDPGM
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir Wed Jan 31 14:04:26 2018
@@ -2,7 +2,7 @@
 ---
 
 # GCN-LABEL: name: mac_invalid_operands
-# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit %exec
+# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit $exec
 
 name:            mac_invalid_operands
 alignment:       0
@@ -34,14 +34,14 @@ body:             |
   bb.0:
     successors: %bb.2, %bb.1
 
-    %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit %exec
-    %vcc = COPY killed %7
-    S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
+    %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit $exec
+    $vcc = COPY killed %7
+    S_CBRANCH_VCCZ %bb.2, implicit killed $vcc
 
   bb.1:
     successors: %bb.3
 
-    %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec
+    %4 = V_ADD_F32_e32 undef %6, undef %5, implicit $exec
     undef %12.sub0 = COPY killed %4
     %17 = COPY killed %12
     S_BRANCH %bb.3
@@ -49,7 +49,7 @@ body:             |
   bb.2:
     successors: %bb.3
 
-    %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec
+    %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit $exec
     undef %13.sub0 = COPY %8
     %13.sub1 = COPY %8
     %13.sub2 = COPY killed %8
@@ -58,12 +58,12 @@ body:             |
 
   bb.3:
     %1 = COPY killed %17
-    FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit $exec, implicit $flat_scr
     %14 = COPY %1.sub1
     %16 = COPY killed %1.sub0
     undef %15.sub0 = COPY killed %16
     %15.sub1 = COPY killed %14
-    FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 
 ...
@@ -73,13 +73,13 @@ body:             |
 
 # GCN-LABEL: name: vreg_does_not_dominate
 
-# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit %exec
-# GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit %exec
+# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit $exec
+# GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
 # GCN: undef %9.sub2:vreg_128 = COPY %7.sub0
 
-# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit %exec
-# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
-# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit %exec
+# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit $exec
+# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
+# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit $exec
 
 # GCN: BUFFER_STORE_DWORD_OFFEN %6.sub3, %0,
 # GCN: BUFFER_STORE_DWORD_OFFEN %9.sub2, %0,
@@ -101,43 +101,43 @@ registers:
   - { id: 5, class: sreg_64, preferred-register: '' }
   - { id: 6, class: vreg_128, preferred-register: '' }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%0' }
-  - { reg: '%sgpr30_sgpr31', virtual-reg: '%5' }
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
 body:             |
   bb.0:
     successors: %bb.2, %bb.1
-    liveins: %vgpr0, %sgpr30_sgpr31, %sgpr5
+    liveins: $vgpr0, $sgpr30_sgpr31, $sgpr5
 
-    %5 = COPY %sgpr30_sgpr31
-    %0 = COPY %vgpr0
-    undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit %exec
-    %6.sub0 = V_MOV_B32_e32 0, implicit %exec
+    %5 = COPY $sgpr30_sgpr31
+    %0 = COPY $vgpr0
+    undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit $exec
+    %6.sub0 = V_MOV_B32_e32 0, implicit $exec
     %6.sub2 = COPY %6.sub0
-    S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
     S_BRANCH %bb.1
 
   bb.1:
     successors: %bb.2
 
-    %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec
-    %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
-    %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit %exec
+    %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit $exec
+    %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
+    %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit $exec
     %6.sub2 = COPY %6.sub0
 
   bb.2:
-    BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 12, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 8, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 4, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %sgpr30_sgpr31 = COPY %5
-    %sgpr5 = COPY %sgpr5
-    S_SETPC_B64_return %sgpr30_sgpr31, implicit %sgpr5
+    BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 12, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $sgpr5 = COPY $sgpr5
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $sgpr5
 
 ...
 
 # GCN-LABEL: name: inf_loop_tied_operand
 # GCN: bb.0:
-# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit %exec
+# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit $exec
 # GCN-NEXT: dead undef %3.sub1:vreg_128 = COPY %2.sub0
 
 name:            inf_loop_tied_operand
@@ -148,7 +148,7 @@ registers:
   - { id: 2, class: vreg_128, preferred-register: '' }
 body:             |
   bb.0:
-    %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit %exec
+    %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit $exec
     undef %2.sub0 = COPY %1
     %2.sub1 = COPY %1
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/rename-independent-subregs.mir Wed Jan 31 14:04:26 2018
@@ -50,7 +50,7 @@ registers:
 body: |
   bb.0:
     S_NOP 0, implicit-def undef %0.sub2
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:

Modified: llvm/trunk/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir Wed Jan 31 14:04:26 2018
@@ -56,7 +56,7 @@ tracksRegLiveness: false
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_ENDPGM
 ...
 ---
@@ -72,7 +72,7 @@ tracksRegLiveness: false
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_DCACHE_WB
     S_ENDPGM
 ...
@@ -91,7 +91,7 @@ tracksRegLiveness: false
 body: |
   bb.0:
     S_DCACHE_WB
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_ENDPGM
 ...
 ---
@@ -122,11 +122,11 @@ tracksRegLiveness: false
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_ENDPGM
 
   bb.1:
-    S_STORE_DWORD_SGPR undef %sgpr4, undef %sgpr6_sgpr7, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr4, undef $sgpr6_sgpr7, undef $m0, 0
     S_ENDPGM
 ...
 ...
@@ -152,7 +152,7 @@ body: |
     S_ENDPGM
 
   bb.1:
-    S_STORE_DWORD_SGPR undef %sgpr4, undef %sgpr6_sgpr7, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr4, undef $sgpr6_sgpr7, undef $m0, 0
     S_ENDPGM
 ...
 ---
@@ -168,6 +168,6 @@ tracksRegLiveness: false
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
-    SI_RETURN_TO_EPILOG undef %vgpr0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
+    SI_RETURN_TO_EPILOG undef $vgpr0
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir Wed Jan 31 14:04:26 2018
@@ -169,7 +169,7 @@
 ---
 
 # CHECK: name: sched_dbg_value_crash
-# CHECK: DBG_VALUE debug-use %99, debug-use %noreg, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8
+# CHECK: DBG_VALUE debug-use %99, debug-use $noreg, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8
 
 name:            sched_dbg_value_crash
 alignment:       0
@@ -179,11 +179,11 @@ regBankSelected: false
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%0' }
-  - { reg: '%vgpr1', virtual-reg: '%1' }
-  - { reg: '%vgpr2', virtual-reg: '%2' }
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%3' }
-  - { reg: '%sgpr6_sgpr7', virtual-reg: '%4' }
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%3' }
+  - { reg: '$sgpr6_sgpr7', virtual-reg: '%4' }
 fixedStack:
 stack:
   - { id: 0, name: tmp5, type: default, offset: 0, size: 128, alignment: 16,
@@ -192,104 +192,104 @@ stack:
 constants:
 body:             |
   bb.0.bb:
-    liveins: %vgpr0, %vgpr1, %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr32, %sgpr101
+    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr32, $sgpr101
 
-    %4:sgpr_64 = COPY %sgpr6_sgpr7
-    %3:sgpr_64 = COPY %sgpr4_sgpr5
-    %2:vgpr_32 = COPY %vgpr2
-    %1:vgpr_32 = COPY %vgpr1
-    %0:vgpr_32 = COPY %vgpr0
+    %4:sgpr_64 = COPY $sgpr6_sgpr7
+    %3:sgpr_64 = COPY $sgpr4_sgpr5
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
     %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %6:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %7:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 16, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 24, 0
     %9:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 32, 0
     %10:sreg_64_xexec = S_LOAD_DWORDX2_IMM %3, 4, 0
-    %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead %scc
+    %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead $scc
     %12:sreg_32_xm0 = S_MUL_I32 %11, %10.sub1
-    %13:vgpr_32 = V_MUL_LO_I32 0, %0, implicit %exec
-    %14:vgpr_32 = V_MUL_LO_I32 %1, %10.sub1, implicit %exec
-    %15:vgpr_32 = V_ADD_I32_e32 0, %13, implicit-def dead %vcc, implicit %exec
-    %16:vgpr_32 = V_ADD_I32_e32 0, %15, implicit-def dead %vcc, implicit %exec
+    %13:vgpr_32 = V_MUL_LO_I32 0, %0, implicit $exec
+    %14:vgpr_32 = V_MUL_LO_I32 %1, %10.sub1, implicit $exec
+    %15:vgpr_32 = V_ADD_I32_e32 0, %13, implicit-def dead $vcc, implicit $exec
+    %16:vgpr_32 = V_ADD_I32_e32 0, %15, implicit-def dead $vcc, implicit $exec
     %17:vgpr_32 = IMPLICIT_DEF
     %18:sreg_64 = S_MOV_B64 0
     %19:sreg_32_xm0_xexec = IMPLICIT_DEF
-    %20:vgpr_32 = V_ADD_I32_e32 %19, %0, implicit-def dead %vcc, implicit %exec
-    %21:vreg_64, dead %22:sreg_64 = V_MAD_I64_I32 %20, 12, %7, 0, implicit %exec
-    %23:vgpr_32 = GLOBAL_LOAD_DWORD %21, 4, 0, 0, implicit %exec
-    %24:vreg_64, dead %25:sreg_64 = V_MAD_I64_I32 %20, 48, %8, 0, implicit %exec
+    %20:vgpr_32 = V_ADD_I32_e32 %19, %0, implicit-def dead $vcc, implicit $exec
+    %21:vreg_64, dead %22:sreg_64 = V_MAD_I64_I32 %20, 12, %7, 0, implicit $exec
+    %23:vgpr_32 = GLOBAL_LOAD_DWORD %21, 4, 0, 0, implicit $exec
+    %24:vreg_64, dead %25:sreg_64 = V_MAD_I64_I32 %20, 48, %8, 0, implicit $exec
     %26:vreg_128 = IMPLICIT_DEF
     undef %27.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 0, 0
     %27.sub1:sreg_64_xexec = S_MOV_B32 0
-    %28:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead %scc
-    undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def %scc
-    %29.sub1:sreg_64 = S_ADDC_U32 %5.sub1, %28.sub1, implicit-def dead %scc, implicit killed %scc
+    %28:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead $scc
+    undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def $scc
+    %29.sub1:sreg_64 = S_ADDC_U32 %5.sub1, %28.sub1, implicit-def dead $scc, implicit killed $scc
     undef %30.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 4, 0
     %27.sub0:sreg_64_xexec = IMPLICIT_DEF
-    %31:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead %scc
-    %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def %scc
-    %33:sgpr_32 = S_ADDC_U32 %5.sub1, %31.sub1, implicit-def dead %scc, implicit killed %scc
+    %31:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead $scc
+    %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def $scc
+    %33:sgpr_32 = S_ADDC_U32 %5.sub1, %31.sub1, implicit-def dead $scc, implicit killed $scc
     %34:vgpr_32 = IMPLICIT_DEF
-    %35:vreg_64, dead %36:sreg_64 = V_MAD_I64_I32 %23, %34, 0, 0, implicit %exec
-    %37:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 32, 0, 0, implicit %exec
-    undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit %exec
+    %35:vreg_64, dead %36:sreg_64 = V_MAD_I64_I32 %23, %34, 0, 0, implicit $exec
+    %37:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 32, 0, 0, implicit $exec
+    undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit $exec
     %38.sub0:vreg_64 = COPY %37.sub0
-    %39:vreg_64 = V_LSHLREV_B64 3, %38, implicit %exec
-    undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit %exec
+    %39:vreg_64 = V_LSHLREV_B64 3, %38, implicit $exec
+    undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit $exec
     %42:vgpr_32 = COPY %33
-    %40.sub1:vreg_64, dead %43:sreg_64_xexec = V_ADDC_U32_e64 %42, %39.sub1, %41, implicit %exec
-    %44:vreg_64 = GLOBAL_LOAD_DWORDX2 %40, 0, 0, 0, implicit %exec :: (load 8 from %ir.tmp34)
+    %40.sub1:vreg_64, dead %43:sreg_64_xexec = V_ADDC_U32_e64 %42, %39.sub1, %41, implicit $exec
+    %44:vreg_64 = GLOBAL_LOAD_DWORDX2 %40, 0, 0, 0, implicit $exec :: (load 8 from %ir.tmp34)
     undef %45.sub1:vreg_64 = IMPLICIT_DEF
     %45.sub0:vreg_64 = COPY %37.sub1
-    %46:vreg_64 = V_LSHLREV_B64 3, %45, implicit %exec
-    undef %47.sub0:vreg_64, %48:sreg_64_xexec = V_ADD_I32_e64 %32, %46.sub0, implicit %exec
+    %46:vreg_64 = V_LSHLREV_B64 3, %45, implicit $exec
+    undef %47.sub0:vreg_64, %48:sreg_64_xexec = V_ADD_I32_e64 %32, %46.sub0, implicit $exec
     %49:vgpr_32 = COPY %33
-    %47.sub1:vreg_64, dead %50:sreg_64_xexec = V_ADDC_U32_e64 %49, %46.sub1, %48, implicit %exec
+    %47.sub1:vreg_64, dead %50:sreg_64_xexec = V_ADDC_U32_e64 %49, %46.sub1, %48, implicit $exec
     %51:vreg_64 = IMPLICIT_DEF
-    undef %52.sub0:vreg_64 = GLOBAL_LOAD_DWORD %35, 40, 0, 0, implicit %exec :: (load 4 from %ir.18 + 8)
+    undef %52.sub0:vreg_64 = GLOBAL_LOAD_DWORD %35, 40, 0, 0, implicit $exec :: (load 4 from %ir.18 + 8)
     %52.sub1:vreg_64 = IMPLICIT_DEF
-    %53:vreg_64 = V_LSHLREV_B64 3, %52, implicit %exec
-    undef %54.sub0:vreg_64, %55:sreg_64_xexec = V_ADD_I32_e64 0, %53.sub0, implicit %exec
+    %53:vreg_64 = V_LSHLREV_B64 3, %52, implicit $exec
+    undef %54.sub0:vreg_64, %55:sreg_64_xexec = V_ADD_I32_e64 0, %53.sub0, implicit $exec
     %56:vgpr_32 = COPY %33
-    %54.sub1:vreg_64, dead %57:sreg_64_xexec = V_ADDC_U32_e64 0, %53.sub1, %55, implicit %exec
+    %54.sub1:vreg_64, dead %57:sreg_64_xexec = V_ADDC_U32_e64 0, %53.sub1, %55, implicit $exec
     %58:vreg_64 = IMPLICIT_DEF
     %30.sub1:sreg_64_xexec = IMPLICIT_DEF
     %59:sreg_64 = IMPLICIT_DEF
-    %60:sreg_32_xm0 = S_ADD_U32 %5.sub0, %59.sub0, implicit-def %scc
-    %61:sgpr_32 = S_ADDC_U32 %5.sub1, %59.sub1, implicit-def dead %scc, implicit killed %scc
-    %62:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 0, 0, 0, implicit %exec :: (load 8 from %ir.20, align 4)
-    undef %63.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %62.sub0, implicit %exec
+    %60:sreg_32_xm0 = S_ADD_U32 %5.sub0, %59.sub0, implicit-def $scc
+    %61:sgpr_32 = S_ADDC_U32 %5.sub1, %59.sub1, implicit-def dead $scc, implicit killed $scc
+    %62:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 0, 0, 0, implicit $exec :: (load 8 from %ir.20, align 4)
+    undef %63.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %62.sub0, implicit $exec
     %63.sub0:vreg_64 = COPY %62.sub0
     %64:vreg_64 = IMPLICIT_DEF
-    undef %65.sub0:vreg_64, %66:sreg_64_xexec = V_ADD_I32_e64 %60, %64.sub0, implicit %exec
+    undef %65.sub0:vreg_64, %66:sreg_64_xexec = V_ADD_I32_e64 %60, %64.sub0, implicit $exec
     %67:vgpr_32 = COPY %61
-    %65.sub1:vreg_64, dead %68:sreg_64_xexec = V_ADDC_U32_e64 %67, %64.sub1, %66, implicit %exec
-    %69:vreg_128 = GLOBAL_LOAD_DWORDX4 %65, 0, 0, 0, implicit %exec :: (load 16 from %ir.tmp58)
+    %65.sub1:vreg_64, dead %68:sreg_64_xexec = V_ADDC_U32_e64 %67, %64.sub1, %66, implicit $exec
+    %69:vreg_128 = GLOBAL_LOAD_DWORDX4 %65, 0, 0, 0, implicit $exec :: (load 16 from %ir.tmp58)
     undef %70.sub1:vreg_64 = IMPLICIT_DEF
     %70.sub0:vreg_64 = IMPLICIT_DEF
     %71:vreg_64 = IMPLICIT_DEF
-    undef %72.sub0:vreg_64, %73:sreg_64_xexec = V_ADD_I32_e64 %60, %71.sub0, implicit %exec
+    undef %72.sub0:vreg_64, %73:sreg_64_xexec = V_ADD_I32_e64 %60, %71.sub0, implicit $exec
     %74:vgpr_32 = COPY %61
-    %72.sub1:vreg_64, dead %75:sreg_64_xexec = V_ADDC_U32_e64 0, %71.sub1, %73, implicit %exec
-    %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, implicit %exec
+    %72.sub1:vreg_64, dead %75:sreg_64_xexec = V_ADDC_U32_e64 0, %71.sub1, %73, implicit $exec
+    %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, implicit $exec
     %77:vgpr_32 = IMPLICIT_DEF
     %78:vgpr_32 = IMPLICIT_DEF
-    %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit %exec
+    %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit $exec
     %80:vgpr_32 = IMPLICIT_DEF
     %81:vgpr_32 = IMPLICIT_DEF
     %84:vgpr_32 = IMPLICIT_DEF
-    BUFFER_STORE_DWORD_OFFEN %84, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 108, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %81, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 104, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %80, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 100, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %78, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 96, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFEN %84, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 108, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %81, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 104, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %80, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 100, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %78, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 96, 0, 0, 0, implicit $exec
     %85:vgpr_32 = IMPLICIT_DEF
     %86:vgpr_32 = IMPLICIT_DEF
     %87:vgpr_32 = IMPLICIT_DEF
     %88:vgpr_32 = IMPLICIT_DEF
     %90:vgpr_32 = IMPLICIT_DEF
-    %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit %exec
-    %95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit %exec
-    %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit %exec
+    %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit $exec
+    %95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit $exec
+    %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit $exec
     %98:vgpr_32 = IMPLICIT_DEF
     %99:vgpr_32 = IMPLICIT_DEF
     %100:vgpr_32 = IMPLICIT_DEF
@@ -298,18 +298,18 @@ body:             |
     %103:vgpr_32 = IMPLICIT_DEF
     %104:vgpr_32 = IMPLICIT_DEF
     %105:vgpr_32 = IMPLICIT_DEF
-    %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit %exec
-    %108:vgpr_32 = V_RCP_F32_e32 0, implicit %exec
+    %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit $exec
+    %108:vgpr_32 = V_RCP_F32_e32 0, implicit $exec
     %109:vgpr_32 = IMPLICIT_DEF
-    %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit %exec
-    %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit %exec
-    %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit %exec
+    %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit $exec
+    %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit $exec
     %114:vgpr_32 = IMPLICIT_DEF
     %115:vgpr_32 = IMPLICIT_DEF
     %116:vgpr_32 = IMPLICIT_DEF
-    %vcc = IMPLICIT_DEF
-    %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed %vcc, implicit %exec
-    %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit %exec
+    $vcc = IMPLICIT_DEF
+    %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed $vcc, implicit $exec
+    %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit $exec
     %119:vgpr_32 = IMPLICIT_DEF
     %120:vgpr_32 = IMPLICIT_DEF
     %121:vgpr_32 = IMPLICIT_DEF
@@ -319,15 +319,15 @@ body:             |
     %125:vgpr_32 = IMPLICIT_DEF
     %126:vgpr_32 = IMPLICIT_DEF
     DBG_VALUE debug-use %103, debug-use _, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8
-    ADJCALLSTACKUP 0, 0, implicit-def %sgpr32, implicit %sgpr32
-    %127:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead %scc
-    %sgpr4 = COPY %sgpr101
-    %vgpr0 = COPY %124
-    %vgpr1_vgpr2 = IMPLICIT_DEF
-    %vgpr3 = COPY %126
-    dead %sgpr30_sgpr31 = SI_CALL %127, @func, csr_amdgpu_highregs, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit %vgpr0, implicit %vgpr1_vgpr2, implicit killed %vgpr3
-    ADJCALLSTACKDOWN 0, 0, implicit-def %sgpr32, implicit %sgpr32
-    %128:vreg_64, dead %129:sreg_64 = V_MAD_I64_I32 %20, %34, 0, 0, implicit %exec
+    ADJCALLSTACKUP 0, 0, implicit-def $sgpr32, implicit $sgpr32
+    %127:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    $sgpr4 = COPY $sgpr101
+    $vgpr0 = COPY %124
+    $vgpr1_vgpr2 = IMPLICIT_DEF
+    $vgpr3 = COPY %126
+    dead $sgpr30_sgpr31 = SI_CALL %127, @func, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $vgpr0, implicit $vgpr1_vgpr2, implicit killed $vgpr3
+    ADJCALLSTACKDOWN 0, 0, implicit-def $sgpr32, implicit $sgpr32
+    %128:vreg_64, dead %129:sreg_64 = V_MAD_I64_I32 %20, %34, 0, 0, implicit $exec
     S_ENDPGM
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/schedule-regpressure.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-regpressure.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/schedule-regpressure.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/schedule-regpressure.mir Wed Jan 31 14:04:26 2018
@@ -4,7 +4,7 @@
 # Check there is no SReg_32 pressure created by DS_* instructions because of M0 use
 
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit %m0, implicit %exec
+# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit $m0, implicit $exec
 # CHECK: Pressure Diff : {{$}}
 # CHECK: SU({{.*}} DS_WRITE_B32
 
@@ -27,7 +27,7 @@ registers:
   - { id: 7, class: vgpr_32 }
   - { id: 8, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -44,14 +44,14 @@ frameInfo:
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5
 
-    %1 = COPY %sgpr4_sgpr5
+    %1 = COPY $sgpr4_sgpr5
     %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %m0 = S_MOV_B32 -1
+    $m0 = S_MOV_B32 -1
     %7 = COPY %5
-    %6 = DS_READ_B32 %7, 0, 0, implicit %m0, implicit %exec
-    DS_WRITE_B32 %7, %6, 4, 0, implicit killed %m0, implicit %exec
+    %6 = DS_READ_B32 %7, 0, 0, implicit $m0, implicit $exec
+    DS_WRITE_B32 %7, %6, 4, 0, implicit killed $m0, implicit $exec
     S_ENDPGM
 
 ...
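
The comment in the hunk above states what this test guards: DS_READ_B32 and DS_WRITE_B32 take M0 as an implicit operand, and the scheduler's pressure diff should not count that implicit physical-register use as SReg_32 pressure. Since the CHECK lines match scheduler debug output ("ScheduleDAGMILive::schedule starting", "Pressure Diff"), a test of this shape is typically driven by a RUN line along these lines -- hypothetical here, as the test's actual RUN line sits outside the hunks shown:

    # REQUIRES: asserts
    # RUN: llc -march=amdgcn -run-pass=machine-scheduler -debug-only=machine-scheduler %s -o - 2>&1 | FileCheck %s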

Modified: llvm/trunk/test/CodeGen/AMDGPU/sdwa-gfx9.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdwa-gfx9.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sdwa-gfx9.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sdwa-gfx9.mir Wed Jan 31 14:04:26 2018
@@ -5,13 +5,13 @@
 # GCN-LABEL: {{^}}name: add_shr_i32
 # GCN: [[SMOV:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 123
 
-# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 [[SMOV]], killed [[SHIFT]], implicit-def %vcc, implicit %exec
+# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 [[SMOV]], killed [[SHIFT]], implicit-def $vcc, implicit $exec
 
-# VI: [[VMOV:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[SMOV]], implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[VMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def %vcc, implicit %exec
+# VI: [[VMOV:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[SMOV]], implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[VMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def $vcc, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[SMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[SMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def $vcc, implicit $exec
 
 ---
 name:            add_shr_i32
@@ -32,30 +32,30 @@ registers:
   - { id: 12, class: sreg_32_xm0 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %12 = S_MOV_B32 123
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_ADD_I32_e32 %12, killed %10, implicit-def %vcc, implicit %exec
-    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_ADD_I32_e32 %12, killed %10, implicit-def $vcc, implicit $exec
+    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
 
 ...
 
 # GCN-LABEL: {{^}}name: trunc_shr_f32
 
-# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def %vcc, implicit %exec
+# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec
 
-# VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def %vcc, implicit %exec
+# VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec
 
-#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit %exec
+#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit $exec
 
 ---
 name:            trunc_shr_f32
@@ -75,14 +75,14 @@ registers:
   - { id: 11, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def %vcc, implicit %exec
-    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def $vcc, implicit $exec
+    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

Modified: llvm/trunk/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir Wed Jan 31 14:04:26 2018
@@ -3,29 +3,29 @@
 
 # GFX89-LABEL: {{^}}name: vop1_instructions
 
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-
-
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-
-
-# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit %exec
-
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+
+
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+
+
+# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit $exec
+
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $exec
 
 
 ---
@@ -84,105 +84,105 @@ registers:
   - { id: 100, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
 
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_MOV_B32_e32 %10, implicit %exec
-    %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
-    %14 = V_FRACT_F32_e32 123, implicit %exec
-    %15 = V_LSHLREV_B32_e64 16, %14, implicit %exec
-    %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
-    %17 = V_SIN_F32_e32 %16, implicit %exec
-    %18 = V_LSHLREV_B32_e64 16, %17, implicit %exec
-    %19 = V_LSHRREV_B32_e64 16, %18, implicit %exec
-    %20 = V_CVT_U32_F32_e32 %19, implicit %exec
-    %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
-    %23 = V_CVT_F32_I32_e32 123, implicit %exec
-    %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
-
-    %25 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %26 = V_MOV_B32_e64 %25, implicit %exec
-    %26 = V_LSHLREV_B32_e64 16, %26, implicit %exec
-    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit %exec
-    %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
-    %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
-    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit %exec
-    %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
-    %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
-    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit %exec
-    %34 = V_LSHLREV_B32_e64 16, %33, implicit %exec
-    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit %exec
-    %36 = V_LSHLREV_B32_e64 16, %35, implicit %exec
-
-
-    %37 = V_LSHRREV_B32_e64 16, %36, implicit %exec
-    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit %exec
-    %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
-    %40 = V_LSHRREV_B32_e64 16, %39, implicit %exec
-    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit %exec
-    %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
-    %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
-    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit %exec
-    %45 = V_LSHLREV_B32_e64 16, %44, implicit %exec
-    %46 = V_LSHRREV_B32_e64 16, %45, implicit %exec
-    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit %exec
-    %48 = V_LSHLREV_B32_e64 16, %47, implicit %exec
-
-
-    %100 = V_MOV_B32_e32 %48, implicit %exec
-
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_MOV_B32_e32 %10, implicit $exec
+    %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
+    %14 = V_FRACT_F32_e32 123, implicit $exec
+    %15 = V_LSHLREV_B32_e64 16, %14, implicit $exec
+    %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
+    %17 = V_SIN_F32_e32 %16, implicit $exec
+    %18 = V_LSHLREV_B32_e64 16, %17, implicit $exec
+    %19 = V_LSHRREV_B32_e64 16, %18, implicit $exec
+    %20 = V_CVT_U32_F32_e32 %19, implicit $exec
+    %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
+    %23 = V_CVT_F32_I32_e32 123, implicit $exec
+    %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
+
+    %25 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %26 = V_MOV_B32_e64 %25, implicit $exec
+    %26 = V_LSHLREV_B32_e64 16, %26, implicit $exec
+    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $exec
+    %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
+    %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
+    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $exec
+    %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
+    %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
+    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $exec
+    %34 = V_LSHLREV_B32_e64 16, %33, implicit $exec
+    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $exec
+    %36 = V_LSHLREV_B32_e64 16, %35, implicit $exec
+
+
+    %37 = V_LSHRREV_B32_e64 16, %36, implicit $exec
+    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $exec
+    %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
+    %40 = V_LSHRREV_B32_e64 16, %39, implicit $exec
+    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $exec
+    %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
+    %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
+    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $exec
+    %45 = V_LSHLREV_B32_e64 16, %44, implicit $exec
+    %46 = V_LSHRREV_B32_e64 16, %45, implicit $exec
+    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $exec
+    %48 = V_LSHLREV_B32_e64 16, %47, implicit $exec
+
+
+    %100 = V_MOV_B32_e32 %48, implicit $exec
+
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
 
 ...
 ---
 # GCN-LABEL: {{^}}name: vop2_instructions
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
-
-
-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
-
-
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
-
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
+
+
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
+
+
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
+
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
 
 name:            vop2_instructions
 tracksRegLiveness: true
@@ -251,114 +251,114 @@ registers:
   - { id: 100, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
 
-    %11 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %12 = V_AND_B32_e32 %6, %11, implicit %exec
-    %13 = V_LSHLREV_B32_e64 16, %12, implicit %exec
-    %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
-    %15 = V_BFE_U32 %13, 8, 8, implicit %exec
-    %16 = V_ADD_F32_e32 %14, %15, implicit %exec
-    %17 = V_LSHLREV_B32_e64 16, %16, implicit %exec
-    %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
-    %19 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %20 = V_SUB_F16_e32 %18, %19, implicit %exec
-    %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
-    %22 = V_BFE_U32 %20, 8, 8, implicit %exec
-    %23 = V_MAC_F32_e32 %21, %22, %22, implicit %exec
-    %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
-    %25 = V_LSHRREV_B32_e64 16, %24, implicit %exec
-    %26 = V_BFE_U32 %24, 8, 8, implicit %exec
-    %27 = V_MAC_F16_e32 %25, %26, %26, implicit %exec
-    %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
-
-    %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
-    %30 = V_AND_B32_e64 23, %29, implicit %exec
-    %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
-    %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
-    %33 = V_BFE_U32 %31, 8, 8, implicit %exec
-    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit %exec
-    %35 = V_LSHLREV_B32_e64 16, %34, implicit %exec
-    %37 = V_BFE_U32 %35, 8, 8, implicit %exec
-    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit %exec
-    %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
-    %40 = V_BFE_U32 %39, 8, 8, implicit %exec
-    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit %exec
-    %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
-    %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
-    %44 = V_BFE_U32 %42, 8, 8, implicit %exec
-    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit %exec
-    %46 = V_LSHLREV_B32_e64 16, %45, implicit %exec
-
-    %47 = V_LSHRREV_B32_e64 16, %46, implicit %exec
-    %48 = V_BFE_U32 %46, 8, 8, implicit %exec
-    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit %exec
-    %50 = V_LSHLREV_B32_e64 16, %49, implicit %exec
-    %51 = V_BFE_U32 %50, 8, 8, implicit %exec
-    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit %exec
-    %53 = V_LSHLREV_B32_e64 16, %52, implicit %exec
-    %54 = V_BFE_U32 %53, 8, 8, implicit %exec
-    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit %exec
-    %56 = V_LSHLREV_B32_e64 16, %55, implicit %exec
-    %57 = V_LSHRREV_B32_e64 16, %56, implicit %exec
-    %58 = V_BFE_U32 %56, 8, 8, implicit %exec
-    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit %exec
-    %60 = V_LSHLREV_B32_e64 16, %59, implicit %exec
-
-    %100 = V_MOV_B32_e32 %60, implicit %exec
-
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %11 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %12 = V_AND_B32_e32 %6, %11, implicit $exec
+    %13 = V_LSHLREV_B32_e64 16, %12, implicit $exec
+    %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
+    %15 = V_BFE_U32 %13, 8, 8, implicit $exec
+    %16 = V_ADD_F32_e32 %14, %15, implicit $exec
+    %17 = V_LSHLREV_B32_e64 16, %16, implicit $exec
+    %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
+    %19 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %20 = V_SUB_F16_e32 %18, %19, implicit $exec
+    %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
+    %22 = V_BFE_U32 %20, 8, 8, implicit $exec
+    %23 = V_MAC_F32_e32 %21, %22, %22, implicit $exec
+    %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
+    %25 = V_LSHRREV_B32_e64 16, %24, implicit $exec
+    %26 = V_BFE_U32 %24, 8, 8, implicit $exec
+    %27 = V_MAC_F16_e32 %25, %26, %26, implicit $exec
+    %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
+
+    %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
+    %30 = V_AND_B32_e64 23, %29, implicit $exec
+    %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
+    %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
+    %33 = V_BFE_U32 %31, 8, 8, implicit $exec
+    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $exec
+    %35 = V_LSHLREV_B32_e64 16, %34, implicit $exec
+    %37 = V_BFE_U32 %35, 8, 8, implicit $exec
+    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $exec
+    %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
+    %40 = V_BFE_U32 %39, 8, 8, implicit $exec
+    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $exec
+    %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
+    %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
+    %44 = V_BFE_U32 %42, 8, 8, implicit $exec
+    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $exec
+    %46 = V_LSHLREV_B32_e64 16, %45, implicit $exec
+
+    %47 = V_LSHRREV_B32_e64 16, %46, implicit $exec
+    %48 = V_BFE_U32 %46, 8, 8, implicit $exec
+    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $exec
+    %50 = V_LSHLREV_B32_e64 16, %49, implicit $exec
+    %51 = V_BFE_U32 %50, 8, 8, implicit $exec
+    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $exec
+    %53 = V_LSHLREV_B32_e64 16, %52, implicit $exec
+    %54 = V_BFE_U32 %53, 8, 8, implicit $exec
+    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $exec
+    %56 = V_LSHLREV_B32_e64 16, %55, implicit $exec
+    %57 = V_LSHRREV_B32_e64 16, %56, implicit $exec
+    %58 = V_BFE_U32 %56, 8, 8, implicit $exec
+    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $exec
+    %60 = V_LSHLREV_B32_e64 16, %59, implicit $exec
+
+    %100 = V_MOV_B32_e32 %60, implicit $exec
+
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
 
 ...
 ---
 
 # GCN-LABEL: {{^}}name: vopc_instructions
 
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit %exec
-# GFX89: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX89: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX89: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX89: %vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-
-
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def %exec, implicit %exec
-
-# GFX9: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit %exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit %exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-
-
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def %exec, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+# GFX89: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX89: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX89: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX89: $vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+
+
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def $exec, implicit $exec
+
+# GFX9: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+
+
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def $exec, implicit $exec
 
 
 
@@ -396,52 +396,52 @@ registers:
   - { id: 100, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
 
-    %10 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMP_EQ_F32_e32 123, killed %10, implicit-def %vcc, implicit %exec
-    %11 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMPX_GT_F32_e32 123, killed %11, implicit-def %vcc, implicit-def %exec, implicit %exec
-    %12 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMP_LT_I32_e32 123, killed %12, implicit-def %vcc, implicit %exec
-    %13 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMPX_EQ_I32_e32 123, killed %13, implicit-def %vcc, implicit-def %exec, implicit %exec
-
-    %14 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit %exec
-    %15 = V_AND_B32_e64 %5, %3, implicit %exec
-    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def %exec, implicit %exec
-    %16 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit %exec
-    %17 = V_AND_B32_e64 %5, %3, implicit %exec
-    %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def %exec, implicit %exec
-
-    %20 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit %exec
-    %21 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def %exec, implicit %exec
-    %23 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit %exec
-    %24 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def %exec, implicit %exec
-    %25 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def %exec, implicit %exec
-    %26 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def %exec, implicit %exec
-    %27 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def %exec, implicit %exec
-
-
-    %100 = V_MOV_B32_e32 %vcc_lo, implicit %exec
-
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMP_EQ_F32_e32 123, killed %10, implicit-def $vcc, implicit $exec
+    %11 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMPX_GT_F32_e32 123, killed %11, implicit-def $vcc, implicit-def $exec, implicit $exec
+    %12 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMP_LT_I32_e32 123, killed %12, implicit-def $vcc, implicit $exec
+    %13 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMPX_EQ_I32_e32 123, killed %13, implicit-def $vcc, implicit-def $exec, implicit $exec
+
+    %14 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit $exec
+    %15 = V_AND_B32_e64 %5, %3, implicit $exec
+    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def $exec, implicit $exec
+    %16 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit $exec
+    %17 = V_AND_B32_e64 %5, %3, implicit $exec
+    %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def $exec, implicit $exec
+
+    %20 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit $exec
+    %21 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def $exec, implicit $exec
+    %23 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit $exec
+    %24 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def $exec, implicit $exec
+    %25 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def $exec, implicit $exec
+    %26 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def $exec, implicit $exec
+    %27 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def $exec, implicit $exec
+
+
+    %100 = V_MOV_B32_e32 $vcc_lo, implicit $exec
+
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

Modified: llvm/trunk/test/CodeGen/AMDGPU/sdwa-preserve.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdwa-preserve.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sdwa-preserve.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sdwa-preserve.mir Wed Jan 31 14:04:26 2018
@@ -31,26 +31,26 @@ registers:
   - { id: 13, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
   
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %4 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %4 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
-    %5 = V_AND_B32_e32 65535, %3, implicit %exec
-    %6 = V_LSHRREV_B32_e64 16, %4, implicit %exec
-    %7 = V_BFE_U32 %3, 8, 8, implicit %exec
-    %8 = V_LSHRREV_B32_e32 24, %4, implicit %exec
+    %5 = V_AND_B32_e32 65535, %3, implicit $exec
+    %6 = V_LSHRREV_B32_e64 16, %4, implicit $exec
+    %7 = V_BFE_U32 %3, 8, 8, implicit $exec
+    %8 = V_LSHRREV_B32_e32 24, %4, implicit $exec
 
-    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit %exec
-    %10 = V_LSHLREV_B16_e64 8, %9, implicit %exec
-    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit %exec
-    %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
+    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit $exec
+    %10 = V_LSHLREV_B16_e64 8, %9, implicit $exec
+    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit $exec
+    %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
 
-    %13 = V_OR_B32_e64 %10, %12, implicit %exec
+    %13 = V_OR_B32_e64 %10, %12, implicit $exec
 
-    FLAT_STORE_DWORD %0, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %13, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

Modified: llvm/trunk/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir Wed Jan 31 14:04:26 2018
@@ -183,7 +183,7 @@ registers:
   - { id: 82, class: vgpr_32 }
   - { id: 83, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -200,13 +200,13 @@ frameInfo:
 body:             |
   bb.0.bb:
     successors: %bb.2.bb2(0x80000000)
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5
 
-    %4 = COPY %sgpr4_sgpr5
+    %4 = COPY $sgpr4_sgpr5
     %9 = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8 = S_MOV_B64 0
     %7 = COPY %9
-    %30 = V_MOV_B32_e32 1, implicit %exec
+    %30 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.2.bb2
 
   bb.1.bb1:
@@ -217,36 +217,36 @@ body:             |
 
     %0 = PHI %8, %bb.0.bb, %1, %bb.2.bb2
     %13 = COPY %7.sub1
-    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def %scc
-    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead %scc, implicit %scc
+    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def $scc
+    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead $scc, implicit $scc
     %16 = REG_SEQUENCE %14, 1, %15, 2
     %18 = COPY %16
-    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.uglygep45)
-    %60 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %61 = V_LSHLREV_B32_e32 2, killed %60, implicit %exec
-    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def %vcc, implicit %exec
+    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.uglygep45)
+    %60 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %61 = V_LSHLREV_B32_e32 2, killed %60, implicit $exec
+    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def $vcc, implicit $exec
     %66 = COPY %13
-    %65 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %65 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %67 = REG_SEQUENCE %70, 1, killed %65, 2
-    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp9)
-    %37 = S_ADD_U32 %14, 4, implicit-def %scc
-    %38 = S_ADDC_U32 %15, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp9)
+    %37 = S_ADD_U32 %14, 4, implicit-def $scc
+    %38 = S_ADDC_U32 %15, 0, implicit-def dead $scc, implicit $scc
     %71 = COPY killed %37
     %72 = COPY killed %38
     %41 = REG_SEQUENCE killed %71, 1, killed %72, 2
-    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.scevgep)
-    %73 = V_BFE_U32 %40, 8, 8, implicit %exec
-    %74 = V_LSHLREV_B32_e32 2, killed %73, implicit %exec
-    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def %vcc, implicit %exec
-    %78 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.scevgep)
+    %73 = V_BFE_U32 %40, 8, 8, implicit $exec
+    %74 = V_LSHLREV_B32_e32 2, killed %73, implicit $exec
+    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def $vcc, implicit $exec
+    %78 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %80 = REG_SEQUENCE %83, 1, killed %78, 2
-    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp17)
-    %55 = S_ADD_U32 %0.sub0, 8, implicit-def %scc
-    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp17)
+    %55 = S_ADD_U32 %0.sub0, 8, implicit-def $scc
+    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead $scc, implicit $scc
     %57 = REG_SEQUENCE %55, 1, killed %56, 2
     %1 = COPY %57
-    S_CMPK_EQ_I32 %55, 4096, implicit-def %scc
-    S_CBRANCH_SCC1 %bb.1.bb1, implicit %scc
+    S_CMPK_EQ_I32 %55, 4096, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.1.bb1, implicit $scc
     S_BRANCH %bb.2.bb2
 
 ...
@@ -345,7 +345,7 @@ registers:
   - { id: 83, class: vgpr_32 }
   - { id: 84, class: sreg_32_xm0 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -362,13 +362,13 @@ frameInfo:
 body:             |
   bb.0.bb:
     successors: %bb.2.bb2(0x80000000)
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5
 
-    %4 = COPY %sgpr4_sgpr5
+    %4 = COPY $sgpr4_sgpr5
     %9 = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8 = S_MOV_B64 0
     %7 = COPY %9
-    %30 = V_MOV_B32_e32 1, implicit %exec
+    %30 = V_MOV_B32_e32 1, implicit $exec
     %84 = S_MOV_B32 2
     S_BRANCH %bb.2.bb2
 
@@ -380,36 +380,36 @@ body:             |
 
     %0 = PHI %8, %bb.0.bb, %1, %bb.2.bb2
     %13 = COPY %7.sub1
-    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def %scc
-    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead %scc, implicit %scc
+    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def $scc
+    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead $scc, implicit $scc
     %16 = REG_SEQUENCE %14, 1, %15, 2
     %18 = COPY %16
-    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.uglygep45)
-    %60 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %61 = V_LSHLREV_B32_e32 %84, killed %60, implicit %exec
-    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def %vcc, implicit %exec
+    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.uglygep45)
+    %60 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %61 = V_LSHLREV_B32_e32 %84, killed %60, implicit $exec
+    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def $vcc, implicit $exec
     %66 = COPY %13
-    %65 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %65 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %67 = REG_SEQUENCE %70, 1, killed %65, 2
-    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp9)
-    %37 = S_ADD_U32 %14, 4, implicit-def %scc
-    %38 = S_ADDC_U32 %15, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp9)
+    %37 = S_ADD_U32 %14, 4, implicit-def $scc
+    %38 = S_ADDC_U32 %15, 0, implicit-def dead $scc, implicit $scc
     %71 = COPY killed %37
     %72 = COPY killed %38
     %41 = REG_SEQUENCE killed %71, 1, killed %72, 2
-    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.scevgep)
-    %73 = V_BFE_U32 %40, 8, 8, implicit %exec
-    %74 = V_LSHLREV_B32_e32 %84, killed %73, implicit %exec
-    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def %vcc, implicit %exec
-    %78 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.scevgep)
+    %73 = V_BFE_U32 %40, 8, 8, implicit $exec
+    %74 = V_LSHLREV_B32_e32 %84, killed %73, implicit $exec
+    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def $vcc, implicit $exec
+    %78 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %80 = REG_SEQUENCE %83, 1, killed %78, 2
-    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp17)
-    %55 = S_ADD_U32 %0.sub0, 8, implicit-def %scc
-    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp17)
+    %55 = S_ADD_U32 %0.sub0, 8, implicit-def $scc
+    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead $scc, implicit $scc
     %57 = REG_SEQUENCE %55, 1, killed %56, 2
     %1 = COPY %57
-    S_CMPK_EQ_I32 %55, 4096, implicit-def %scc
-    S_CBRANCH_SCC1 %bb.1.bb1, implicit %scc
+    S_CMPK_EQ_I32 %55, 4096, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.1.bb1, implicit $scc
     S_BRANCH %bb.2.bb2
 
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir Wed Jan 31 14:04:26 2018
@@ -6,10 +6,10 @@
 
 # GCN-LABEL: {{^}}name: vop2_64bit
 
-# GCN: %{{[0-9]+}}:vgpr_32 = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:vgpr_32 = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:sgpr_32 = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:sgpr_32 = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def $vcc, implicit $exec
 
 ---
 name:            vop2_64bit
@@ -36,26 +36,26 @@ registers:
   - { id: 20, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
-    %12 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %13 = V_BCNT_U32_B32_e64 %3, killed %12, implicit-def %vcc, implicit %exec
+    %12 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %13 = V_BCNT_U32_B32_e64 %3, killed %12, implicit-def $vcc, implicit $exec
 
-    %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
-    %15 = V_BFM_B32_e64 %13, killed %14, implicit-def %vcc, implicit %exec
+    %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
+    %15 = V_BFM_B32_e64 %13, killed %14, implicit-def $vcc, implicit $exec
 
-    %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
-    %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def %vcc, implicit %exec
+    %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
+    %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def $vcc, implicit $exec
 
-    %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
-    %19 = V_READLANE_B32 killed %18, 0, implicit-def %vcc, implicit %exec
-    %20 = V_MOV_B32_e64 %19, implicit %exec
+    %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
+    %19 = V_READLANE_B32 killed %18, 0, implicit-def $vcc, implicit $exec
+    %20 = V_MOV_B32_e64 %19, implicit $exec
 
-    FLAT_STORE_DWORD %0, %20, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %20, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31

Modified: llvm/trunk/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir Wed Jan 31 14:04:26 2018
@@ -7,14 +7,14 @@
 name: m0_sendmsg
 body: |
   ; GCN-LABEL: name: m0_sendmsg
-  ; GCN:  %m0 = S_MOV_B32 -1
+  ; GCN:  $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_SENDMSG 3, implicit %exec, implicit %m0
+  ; GCN-NEXT: S_SENDMSG 3, implicit $exec, implicit $m0
 
   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_SENDMSG 3, implicit %exec, implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_SENDMSG 3, implicit $exec, implicit $m0
     S_ENDPGM
 ...
 ---
@@ -22,14 +22,14 @@ body: |
 name: m0_sendmsghalt
 body: |
   ; GCN-LABEL: name: m0_sendmsghalt
-  ; GCN:  %m0 = S_MOV_B32 -1
+  ; GCN:  $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_SENDMSGHALT 3, implicit %exec, implicit %m0
+  ; GCN-NEXT: S_SENDMSGHALT 3, implicit $exec, implicit $m0
 
   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_SENDMSGHALT 3, implicit %exec, implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_SENDMSGHALT 3, implicit $exec, implicit $m0
     S_ENDPGM
 ...
 ---
@@ -37,13 +37,13 @@ body: |
 name: m0_ttracedata
 body: |
   ; GCN-LABEL: name: m0_ttracedata
-  ; GCN:  %m0 = S_MOV_B32 -1
+  ; GCN:  $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_TTRACEDATA implicit %m0
+  ; GCN-NEXT: S_TTRACEDATA implicit $m0
 
   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_TTRACEDATA implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_TTRACEDATA implicit $m0
     S_ENDPGM
 ...

Modified: llvm/trunk/test/CodeGen/AMDGPU/shrink-carry.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/shrink-carry.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/shrink-carry.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/shrink-carry.mir Wed Jan 31 14:04:26 2018
@@ -1,7 +1,7 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -start-before si-shrink-instructions -stop-before si-insert-skips -o - %s | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: subbrev{{$}}
-# GCN:       V_SUBBREV_U32_e64 0, undef %vgpr0, killed %vcc, implicit %exec
+# GCN:       V_SUBBREV_U32_e64 0, undef $vgpr0, killed $vcc, implicit $exec
 
 ---
 name:            subbrev
@@ -19,13 +19,13 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_SUBBREV_U32_e64 0, %0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_SUBBREV_U32_e64 0, %0, %3, implicit $exec
 
 ...
 
 # GCN-LABEL: name: subb{{$}}
-# GCN:       V_SUBB_U32_e64 undef %vgpr0, 0, killed %vcc, implicit %exec
+# GCN:       V_SUBB_U32_e64 undef $vgpr0, 0, killed $vcc, implicit $exec
 
 ---
 name:            subb
@@ -43,13 +43,13 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_SUBB_U32_e64 %0, 0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_SUBB_U32_e64 %0, 0, %3, implicit $exec
 
 ...
 
 # GCN-LABEL: name: addc{{$}}
-# GCN:       V_ADDC_U32_e32 0, undef %vgpr0, implicit-def %vcc, implicit killed %vcc, implicit %exec
+# GCN:       V_ADDC_U32_e32 0, undef $vgpr0, implicit-def $vcc, implicit killed $vcc, implicit $exec
 
 ---
 name:            addc
@@ -67,13 +67,13 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_ADDC_U32_e64 0, %0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_ADDC_U32_e64 0, %0, %3, implicit $exec
 
 ...
 
 # GCN-LABEL: name: addc2{{$}}
-# GCN:       V_ADDC_U32_e32 0, undef %vgpr0, implicit-def %vcc, implicit killed %vcc, implicit %exec
+# GCN:       V_ADDC_U32_e32 0, undef $vgpr0, implicit-def $vcc, implicit killed $vcc, implicit $exec
 
 ---
 name:            addc2
@@ -91,7 +91,7 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_ADDC_U32_e64 %0, 0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_ADDC_U32_e64 %0, 0, %3, implicit $exec
 
 ...



