[llvm] [InstrEmitter] Use AddOperand in EmitCopyToRegClassNode. (PR #146637)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 1 22:52:37 PDT 2025


llvmbot wrote:


@llvm/pr-subscribers-backend-powerpc

@llvm/pr-subscribers-backend-arm

Author: Craig Topper (topperc)

<details>
<summary>Changes</summary>

This is an alternative to #145965 that allows RegisterSDNode to be handled without needing a special case.
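
For reviewers less familiar with InstrEmitter, here is a rough sketch (my paraphrase, not the actual LLVM code; the helper name `addCopySourceOperand` is invented for illustration) of why routing the source operand through `AddOperand` removes the need for a special case: `getVR` can only look up a virtual register that a previously emitted node recorded in `VRBaseMap`, whereas `AddOperand` also recognizes a `RegisterSDNode` operand and adds its register directly, computing kill flags along the way (which is why the regenerated tests below now show `killed` on the COPY sources).

```c++
// Illustrative sketch only -- a simplified paraphrase of the idea, not the
// real InstrEmitter implementation (which handles many more operand kinds).
void addCopySourceOperand(MachineInstrBuilder &MIB, SDValue Op,
                          InstrEmitter::VRBaseMapType &VRBaseMap) {
  if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
    // A RegisterSDNode carries its register directly; there is nothing to
    // look up in VRBaseMap. This is the case a getVR-based path cannot
    // handle without special-casing it.
    MIB.addReg(R->getReg());
    return;
  }
  // Otherwise the operand is the result of a previously emitted node, so use
  // the virtual register recorded for it -- the same value getVR returned.
  // (The real AddOperand/AddRegisterOperand path also computes kill flags
  // here, hence the `killed` markers in the updated test checks.)
  MIB.addReg(VRBaseMap.lookup(Op));
}
```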

---

Patch is 39.10 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/146637.diff


14 Files Affected:

- (modified) llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp (+5-4) 
- (modified) llvm/test/CodeGen/AArch64/bf16_fast_math.ll (+18-18) 
- (modified) llvm/test/CodeGen/AMDGPU/divergence-driven-ctpop.ll (+1-1) 
- (modified) llvm/test/CodeGen/AMDGPU/fneg-fabs-divergence-driven-isel.ll (+3-3) 
- (modified) llvm/test/CodeGen/ARM/fp16_fast_math.ll (+10-10) 
- (modified) llvm/test/CodeGen/PowerPC/aix32-vector-vararg-caller-split.ll (+4-4) 
- (modified) llvm/test/CodeGen/PowerPC/nofpexcept.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/apx/kmov-domain-assignment.ll (+4-4) 
- (modified) llvm/test/CodeGen/X86/fp-intrinsics-flags.ll (+1-1) 
- (modified) llvm/test/CodeGen/X86/sqrt-fastmath-mir.ll (+7-7) 
- (modified) llvm/test/CodeGen/X86/unpredictable-brcond.ll (+1-1) 
- (modified) llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics-flags.ll (+3-3) 
- (added) llvm/test/CodeGen/X86/x86-access-to-global.ll (+27) 


``````````diff
diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 4b7a9127b3fc3..03d3e8eab35d0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -631,16 +631,17 @@ void InstrEmitter::EmitSubregNode(SDNode *Node, VRBaseMapType &VRBaseMap,
 void
 InstrEmitter::EmitCopyToRegClassNode(SDNode *Node,
                                      VRBaseMapType &VRBaseMap) {
-  Register VReg = getVR(Node->getOperand(0), VRBaseMap);
-
   // Create the new VReg in the destination class and emit a copy.
   unsigned DstRCIdx = Node->getConstantOperandVal(1);
   const TargetRegisterClass *DstRC =
     TRI->getAllocatableClass(TRI->getRegClass(DstRCIdx));
   Register NewVReg = MRI->createVirtualRegister(DstRC);
-  BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TargetOpcode::COPY),
-    NewVReg).addReg(VReg);
+  const MCInstrDesc &II = TII->get(TargetOpcode::COPY);
+  MachineInstrBuilder MIB = BuildMI(*MF, Node->getDebugLoc(), II, NewVReg);
+  AddOperand(MIB, Node->getOperand(0), 1, &II, VRBaseMap, /*IsDebug=*/false,
+             /*IsClone=*/false, /*IsCloned*/ false);
 
+  MBB->insert(InsertPos, MIB);
   SDValue Op(Node, 0);
   bool isNew = VRBaseMap.insert(std::make_pair(Op, NewVReg)).second;
   (void)isNew; // Silence compiler warning.
diff --git a/llvm/test/CodeGen/AArch64/bf16_fast_math.ll b/llvm/test/CodeGen/AArch64/bf16_fast_math.ll
index 871ca12c9de77..e52c76fcc3f20 100644
--- a/llvm/test/CodeGen/AArch64/bf16_fast_math.ll
+++ b/llvm/test/CodeGen/AArch64/bf16_fast_math.ll
@@ -19,13 +19,13 @@ define bfloat @normal_fadd(bfloat %x, bfloat %y) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_1:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG1]]
   ; CHECK-NOBF16-NEXT:   [[COPY3:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_1]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr killed [[COPY3]], killed [[COPY2]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY [[FADDSrr]]
+  ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY4]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri]], [[COPY4]]
   ; CHECK-NOBF16-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 32767
   ; CHECK-NOBF16-NEXT:   [[ADDWrr1:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr]], killed [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr1]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri1]]
   ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr16 = COPY [[COPY5]].hsub
   ; CHECK-NOBF16-NEXT:   $h0 = COPY [[COPY6]]
   ; CHECK-NOBF16-NEXT:   RET_ReallyLR implicit $h0
@@ -65,13 +65,13 @@ define bfloat @fast_fadd(bfloat %x, bfloat %y) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_1:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG1]]
   ; CHECK-NOBF16-NEXT:   [[COPY3:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_1]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = nnan ninf nsz arcp contract afn reassoc nofpexcept FADDSrr killed [[COPY3]], killed [[COPY2]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY [[FADDSrr]]
+  ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY4]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri]], [[COPY4]]
   ; CHECK-NOBF16-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 32767
   ; CHECK-NOBF16-NEXT:   [[ADDWrr1:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr]], killed [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr1]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri1]]
   ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr16 = COPY [[COPY5]].hsub
   ; CHECK-NOBF16-NEXT:   $h0 = COPY [[COPY6]]
   ; CHECK-NOBF16-NEXT:   RET_ReallyLR implicit $h0
@@ -111,13 +111,13 @@ define bfloat @ninf_fadd(bfloat %x, bfloat %y) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_1:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG1]]
   ; CHECK-NOBF16-NEXT:   [[COPY3:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_1]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = ninf nofpexcept FADDSrr killed [[COPY3]], killed [[COPY2]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY [[FADDSrr]]
+  ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY4]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri]], [[COPY4]]
   ; CHECK-NOBF16-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 32767
   ; CHECK-NOBF16-NEXT:   [[ADDWrr1:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr]], killed [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr1]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri1]]
   ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr16 = COPY [[COPY5]].hsub
   ; CHECK-NOBF16-NEXT:   $h0 = COPY [[COPY6]]
   ; CHECK-NOBF16-NEXT:   RET_ReallyLR implicit $h0
@@ -161,13 +161,13 @@ define bfloat @normal_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_1:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG1]]
   ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_1]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr killed [[COPY4]], killed [[COPY3]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY [[FADDSrr]]
+  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY5]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri]], [[COPY5]]
   ; CHECK-NOBF16-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 32767
   ; CHECK-NOBF16-NEXT:   [[ADDWrr1:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr]], [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr1]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri1]]
   ; CHECK-NOBF16-NEXT:   [[COPY7:%[0-9]+]]:fpr16 = COPY [[COPY6]].hsub
   ; CHECK-NOBF16-NEXT:   [[SUBREG_TO_REG2:%[0-9]+]]:fpr64 = SUBREG_TO_REG 0, killed [[COPY7]], %subreg.hsub
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_2:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG2]]
@@ -176,12 +176,12 @@ define bfloat @normal_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_3:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG3]]
   ; CHECK-NOBF16-NEXT:   [[COPY9:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_3]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr1:%[0-9]+]]:fpr32 = nofpexcept FADDSrr killed [[COPY8]], killed [[COPY9]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY10:%[0-9]+]]:gpr32 = COPY [[FADDSrr1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY10:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr1]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri2:%[0-9]+]]:gpr32 = UBFMWri [[COPY10]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr2:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri2]], [[COPY10]]
   ; CHECK-NOBF16-NEXT:   [[ADDWrr3:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr2]], [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri3:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr3]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY11:%[0-9]+]]:fpr32 = COPY [[UBFMWri3]]
+  ; CHECK-NOBF16-NEXT:   [[COPY11:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri3]]
   ; CHECK-NOBF16-NEXT:   [[COPY12:%[0-9]+]]:fpr16 = COPY [[COPY11]].hsub
   ; CHECK-NOBF16-NEXT:   $h0 = COPY [[COPY12]]
   ; CHECK-NOBF16-NEXT:   RET_ReallyLR implicit $h0
@@ -232,13 +232,13 @@ define bfloat @nnan_ninf_contract_fadd_sequence(bfloat %x, bfloat %y, bfloat %z)
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_1:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG1]]
   ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_1]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FADDSrr killed [[COPY4]], killed [[COPY3]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY [[FADDSrr]]
+  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY5]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri]], [[COPY5]]
   ; CHECK-NOBF16-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 32767
   ; CHECK-NOBF16-NEXT:   [[ADDWrr1:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr]], [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr1]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri1]]
   ; CHECK-NOBF16-NEXT:   [[COPY7:%[0-9]+]]:fpr16 = COPY [[COPY6]].hsub
   ; CHECK-NOBF16-NEXT:   [[SUBREG_TO_REG2:%[0-9]+]]:fpr64 = SUBREG_TO_REG 0, killed [[COPY7]], %subreg.hsub
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_2:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG2]]
@@ -247,12 +247,12 @@ define bfloat @nnan_ninf_contract_fadd_sequence(bfloat %x, bfloat %y, bfloat %z)
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_3:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG3]]
   ; CHECK-NOBF16-NEXT:   [[COPY9:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_3]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr1:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FADDSrr killed [[COPY8]], killed [[COPY9]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY10:%[0-9]+]]:gpr32 = COPY [[FADDSrr1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY10:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr1]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri2:%[0-9]+]]:gpr32 = UBFMWri [[COPY10]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr2:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri2]], [[COPY10]]
   ; CHECK-NOBF16-NEXT:   [[ADDWrr3:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr2]], [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri3:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr3]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY11:%[0-9]+]]:fpr32 = COPY [[UBFMWri3]]
+  ; CHECK-NOBF16-NEXT:   [[COPY11:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri3]]
   ; CHECK-NOBF16-NEXT:   [[COPY12:%[0-9]+]]:fpr16 = COPY [[COPY11]].hsub
   ; CHECK-NOBF16-NEXT:   $h0 = COPY [[COPY12]]
   ; CHECK-NOBF16-NEXT:   RET_ReallyLR implicit $h0
@@ -299,13 +299,13 @@ define bfloat @ninf_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_1:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG1]]
   ; CHECK-NOBF16-NEXT:   [[COPY4:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_1]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr:%[0-9]+]]:fpr32 = ninf nofpexcept FADDSrr killed [[COPY4]], killed [[COPY3]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY [[FADDSrr]]
+  ; CHECK-NOBF16-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY5]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri]], [[COPY5]]
   ; CHECK-NOBF16-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 32767
   ; CHECK-NOBF16-NEXT:   [[ADDWrr1:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr]], [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr1]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY [[UBFMWri1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY6:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri1]]
   ; CHECK-NOBF16-NEXT:   [[COPY7:%[0-9]+]]:fpr16 = COPY [[COPY6]].hsub
   ; CHECK-NOBF16-NEXT:   [[SUBREG_TO_REG2:%[0-9]+]]:fpr64 = SUBREG_TO_REG 0, killed [[COPY7]], %subreg.hsub
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_2:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG2]]
@@ -314,12 +314,12 @@ define bfloat @ninf_fadd_sequence(bfloat %x, bfloat %y, bfloat %z) {
   ; CHECK-NOBF16-NEXT:   [[SHLLv4i16_3:%[0-9]+]]:fpr128 = SHLLv4i16 killed [[SUBREG_TO_REG3]]
   ; CHECK-NOBF16-NEXT:   [[COPY9:%[0-9]+]]:fpr32 = COPY [[SHLLv4i16_3]].ssub
   ; CHECK-NOBF16-NEXT:   [[FADDSrr1:%[0-9]+]]:fpr32 = ninf nofpexcept FADDSrr killed [[COPY8]], killed [[COPY9]], implicit $fpcr
-  ; CHECK-NOBF16-NEXT:   [[COPY10:%[0-9]+]]:gpr32 = COPY [[FADDSrr1]]
+  ; CHECK-NOBF16-NEXT:   [[COPY10:%[0-9]+]]:gpr32 = COPY killed [[FADDSrr1]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri2:%[0-9]+]]:gpr32 = UBFMWri [[COPY10]], 16, 16
   ; CHECK-NOBF16-NEXT:   [[ADDWrr2:%[0-9]+]]:gpr32 = ADDWrr killed [[UBFMWri2]], [[COPY10]]
   ; CHECK-NOBF16-NEXT:   [[ADDWrr3:%[0-9]+]]:gpr32 = ADDWrr killed [[ADDWrr2]], [[MOVi32imm]]
   ; CHECK-NOBF16-NEXT:   [[UBFMWri3:%[0-9]+]]:gpr32 = UBFMWri killed [[ADDWrr3]], 16, 31
-  ; CHECK-NOBF16-NEXT:   [[COPY11:%[0-9]+]]:fpr32 = COPY [[UBFMWri3]]
+  ; CHECK-NOBF16-NEXT:   [[COPY11:%[0-9]+]]:fpr32 = COPY killed [[UBFMWri3]]
   ; CHECK-NOBF16-NEXT:   [[COPY12:%[0-9]+]]:fpr16 = COPY [[COPY11]].hsub
   ; CHECK-NOBF16-NEXT:   $h0 = COPY [[COPY12]]
   ; CHECK-NOBF16-NEXT:   RET_ReallyLR implicit $h0
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-ctpop.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-ctpop.ll
index 4a3a44d09210e..ea28af4508382 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-ctpop.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-ctpop.ll
@@ -11,7 +11,7 @@ define amdgpu_kernel void @s_ctpop_i32(ptr addrspace(1) noalias %out, i32 %val)
 
 ; GCN-LABEL: name:            s_ctpop_i64
 ; GCN: %[[BCNT:[0-9]+]]:sreg_32 = S_BCNT1_I32_B64
-; GCN: %[[SREG1:[0-9]+]]:sreg_32 = COPY %[[BCNT]]
+; GCN: %[[SREG1:[0-9]+]]:sreg_32 = COPY killed %[[BCNT]]
 ; GCN: %[[SREG2:[0-9]+]]:sreg_32 = S_MOV_B32 0
 ; GCN: REG_SEQUENCE killed %[[SREG1]], %subreg.sub0, killed %[[SREG2]], %subreg.sub1
 define amdgpu_kernel void @s_ctpop_i64(ptr addrspace(1) noalias %out, i64 %val) nounwind {
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs-divergence-driven-isel.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs-divergence-driven-isel.ll
index d431503643d63..230b73a037221 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs-divergence-driven-isel.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs-divergence-driven-isel.ll
@@ -398,7 +398,7 @@ define amdgpu_kernel void @uniform_fneg_f64(ptr addrspace(1) %out, ptr addrspace
 ; GCN: %[[HI32:[0-9]+]]:sreg_32 = COPY %[[VREG64]].sub1
 ; GCN: %[[SREG_MASK:[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
 ; GCN: %[[XOR:[0-9]+]]:sreg_32 = S_XOR_B32 killed %[[HI32]], killed %[[SREG_MASK]]
-; GCN: %[[XOR_COPY:[0-9]+]]:sreg_32 = COPY %[[XOR]]
+; GCN: %[[XOR_COPY:[0-9]+]]:sreg_32 = COPY killed %[[XOR]]
 ; GCN: REG_SEQUENCE killed %[[LO32]], %subreg.sub0, killed %[[XOR_COPY]], %subreg.sub1
 
   %in.gep = getelementptr inbounds double, ptr addrspace(1) %in, i64 %idx
@@ -440,7 +440,7 @@ define amdgpu_kernel void @uniform_fabs_f64(ptr addrspace(1) %out, ptr addrspace
 ; GCN: %[[HI32:[0-9]+]]:sreg_32 = COPY %[[VREG64]].sub1
 ; GCN: %[[SREG_MASK:[0-9]+]]:sreg_32 = S_MOV_B32 2147483647
 ; GCN: %[[AND:[0-9]+]]:sreg_32 = S_AND_B32 killed %[[HI32]], killed %[[SREG_MASK]]
-; GCN: %[[AND_COPY:[0-9]+]]:sreg_32 = COPY %[[AND]]
+; GCN: %[[AND_COPY:[0-9]+]]:sreg_32 = COPY killed %[[AND]]
 ; GCN: REG_SEQUENCE killed %[[LO32]], %subreg.sub0, killed %[[AND_COPY]], %subreg.sub1
 
 
@@ -484,7 +484,7 @@ define amdgpu_kernel void @uniform_fneg_fabs_f64(ptr addrspace(1) %out, ptr addr
 ; GCN: %[[HI32:[0-9]+]]:sreg_32 = COPY %[[VREG64]].sub1
 ; GCN: %[[SREG_MASK:[0-9]+]]:sreg_32 = S_MOV_B32 -2147483648
 ; GCN: %[[OR:[0-9]+]]:sreg_32 = S_OR_B32 killed %[[HI32]], killed %[[SREG_MASK]]
-; GCN: %[[OR_COPY:[0-9]+]]:sreg_32 = COPY %[[OR]]
+; GCN: %[[OR_COPY:[0-9]+]]:sreg_32 = COPY killed %[[OR]]
 ; GCN: REG_SEQUENCE killed %[[LO32]], %subreg.sub0, killed %[[OR_COPY]], %subreg.sub1
 
 
diff --git a/llvm/test/CodeGen/ARM/fp16_fast_math.ll b/llvm/test/CodeGen/ARM/fp16_fast_math.ll
index 4c2e3ce4efcd5..165eb4b8af43e 100644
--- a/llvm/test/CodeGen/ARM/fp16_fast_math.ll
+++ b/llvm/test/CodeGen/ARM/fp16_fast_math.ll
@@ -21,7 +21,7 @@ define half @normal_fadd(half %x, half %y) {
   ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+  ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY4]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
   ;
@@ -55,7 +55,7 @@ define half @fast_fadd(half %x, half %y) {
   ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+  ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY4]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
   ;
@@ -89,7 +89,7 @@ define half @ninf_fadd(half %x, half %y) {
   ; CHECK-CVT-NEXT:   [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+  ; CHECK-CVT-NEXT:   [[COPY4:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY4]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
   ;
@@ -129,13 +129,13 @@ define half @normal_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
-  ; CHECK-CVT-NEXT:   [[COPY7:%[0-9]+]]:spr = COPY [[COPY6]]
+  ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
+  ; CHECK-CVT-NEXT:   [[COPY7:%[0-9]+]]:spr = COPY killed [[COPY6]]
   ; CHECK-CVT-NEXT:   [[VCVTBHS3:%[0-9]+]]:spr = VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF1:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH1:%[0-9]+]]:spr = VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY [[VCVTBSH1]]
+  ; CHECK-CVT-NEXT:   [[COPY8:%[0-9]+]]:gpr = COPY killed [[VCVTBSH1]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY8]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
   ;
@@ -177,7 +177,7 @@ define half @nnan_ninf_contract_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-CVT-NEXT:   [[VADDS1:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VADDS]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS1]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+  ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY killed [[VCVTBSH]]
   ; CHECK-CVT-NEXT:   $r0 = COPY [[COPY6]]
   ; CHECK-CVT-NEXT:   MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
   ;
@@ -218,13 +218,13 @@ define half @ninf_fadd_sequence(half %x, half %y, half %z) {
   ; CHECK-CVT-NEXT:   [[VCVTBHS2:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
   ; CHECK-CVT-NEXT:   [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
   ; CHECK-CVT-NEXT:   [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
-  ; CHECK-CVT-NEXT:   [[COPY6:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
-  ; CHECK-CVT-NEXT:   [[COPY7:%[0-9]+]]:sp...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/146637

