[llvm] 3f7b7e7 - [AMDGPU] Update SCC defs to VCC when uses are changed to VCC

Brendon Cahoon via llvm-commits llvm-commits at lists.llvm.org
Fri May 14 15:11:07 PDT 2021


Author: Brendon Cahoon
Date: 2021-05-14T18:05:05-04:00
New Revision: 3f7b7e7393f802da480c2e24463a5722da0016e7

URL: https://github.com/llvm/llvm-project/commit/3f7b7e7393f802da480c2e24463a5722da0016e7
DIFF: https://github.com/llvm/llvm-project/commit/3f7b7e7393f802da480c2e24463a5722da0016e7.diff

LOG: [AMDGPU] Update SCC defs to VCC when uses are changed to VCC

The FixSGPRCopies pass converts instructions to VALU when
removing illegal VGPR to SGPR copies. Instructions that use SCC
are changed to use VCC instead. When that happens, the pass must
also change instructions that define SCC to define VCC.

The pass was not updating the SCC definition when an ADDC was
converted because one of its inputs was a VGPR to SGPR copy, while
the initial ADD instruction, which defines SCC, was not converted.
This caused a compilation failure due to a use of an undefined
physical register.

This patch adds code that inserts the SCC-defining instruction into
the moveToVALU worklist when an SCC use is converted to a VCC use.
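
For illustration, the new change-scc-to-vcc.mir test exercises roughly
this pattern (condensed from the test), where the last S_ADDC_U32 is
moved to the VALU because its first input is a copy of a VGPR (%7):

  %8:sreg_32 = S_ADD_U32 killed %1, killed %2, implicit-def $scc
  %9:sreg_32 = S_ADDC_U32 killed %3, killed %4, implicit-def $scc, implicit $scc
  %10:sreg_32 = COPY %7
  %11:sreg_32 = S_ADDC_U32 killed %10, %6, implicit-def dead $scc, implicit $scc

Once %11 becomes a V_ADDC_U32_e32 that reads $vcc_lo, the instruction
that defines $scc for it must also be moved to the VALU so that
$vcc_lo is actually defined; the new addSCCDefsToVALUWorklist helper
walks backwards from the converted use and queues that defining
instruction.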

Differential Revision: https://reviews.llvm.org/D102111

Added: 
    llvm/test/CodeGen/AMDGPU/change-scc-to-vcc.mir

Modified: 
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.h
    llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 498c6f021db98..342667d566a4f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -5975,6 +5975,8 @@ MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst,
         // Only propagate through live-def of SCC.
         if (Op.isDef() && !Op.isDead())
           addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
+        if (Op.isUse())
+          addSCCDefsToVALUWorklist(Op, Worklist);
         Inst.RemoveOperand(i);
       }
     }
@@ -6810,6 +6812,32 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
   }
 }
 
+// Instructions that use SCC may be converted to VALU instructions. When that
+// happens, the SCC register is changed to VCC_LO. The instruction that defines
+// SCC must be changed to an instruction that defines VCC. This function makes
+// sure that the instruction that defines SCC is added to the moveToVALU
+// worklist.
+void SIInstrInfo::addSCCDefsToVALUWorklist(MachineOperand &Op,
+                                           SetVectorType &Worklist) const {
+  assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isUse());
+
+  MachineInstr *SCCUseInst = Op.getParent();
+  // Look for a preceding instruction that either defines VCC or SCC. If VCC
+  // then there is nothing to do because the defining instruction has been
+  // converted to a VALU already. If SCC then that instruction needs to be
+  // converted to a VALU.
+  for (MachineInstr &MI :
+       make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)),
+                  SCCUseInst->getParent()->rend())) {
+    if (MI.modifiesRegister(AMDGPU::VCC, &RI))
+      break;
+    if (MI.definesRegister(AMDGPU::SCC, &RI)) {
+      Worklist.insert(&MI);
+      break;
+    }
+  }
+}
+
 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
   const MachineInstr &Inst) const {
   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 6539d6baf9e61..768cfd888bf07 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -123,6 +123,8 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   void addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                     MachineInstr &SCCDefInst,
                                     SetVectorType &Worklist) const;
+  void addSCCDefsToVALUWorklist(MachineOperand &Op,
+                                SetVectorType &Worklist) const;
 
   const TargetRegisterClass *
   getDestEquivalentVGPRClass(const MachineInstr &Inst) const;

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 34206463db0c6..2721aba316b07 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7126,7 +7126,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
 ; GFX6-NEXT:    s_mov_b32 s10, -1
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v3, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
@@ -7169,7 +7169,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -7246,7 +7246,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v7, v1, v4
 ; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v7
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v4, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -7290,7 +7290,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; GFX9-NEXT:    s_mov_b32 s2, 0x976a7377
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -7504,7 +7504,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v8, v1, v4
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v6, v4, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v9, v2, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
@@ -7546,7 +7546,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX6-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GFX6-NEXT:    s_lshr_b64 s[2:3], s[8:9], 12
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v6, v2, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -7613,7 +7613,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v7, v8, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v8, v1, v3
 ; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v8, v4
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v8
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v3, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v9, v5, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -7656,7 +7656,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX9-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v6, v5, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -7798,7 +7798,7 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
 ; GFX6-NEXT:    s_movk_i32 s5, 0x11e
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v3, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
@@ -7842,7 +7842,7 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -7918,7 +7918,7 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v7, v1, v4
 ; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v7
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v4, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -7961,7 +7961,7 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v4, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -8247,7 +8247,7 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
 ; GFX6-NEXT:    s_mov_b32 s5, s9
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
@@ -8292,7 +8292,7 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_mul_lo_u32 v4, s1, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
 ; GFX6-NEXT:    s_mov_b32 s3, 0x12d8fb
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -8362,7 +8362,7 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v7, v8, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v8, v1, v4
 ; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v8, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v8
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v9, v5, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -8408,7 +8408,7 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_mul_lo_u32 v4, s1, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s1, v0
 ; GFX9-NEXT:    s_mov_b32 s3, 0x12d8fb
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v6, v5, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -8547,7 +8547,7 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
 ; GFX6-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
@@ -8594,7 +8594,7 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX6-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GFX6-NEXT:    s_mov_b32 s4, s8
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -8684,7 +8684,7 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX9-NEXT:    v_mul_hi_u32 v9, v1, v3
 ; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v8, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v5, v4
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v5
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v6, v7, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v9, v2, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v4, v3
@@ -8732,7 +8732,7 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v5, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v5, s7, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v5, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v5
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v4, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v7, v2, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -8898,7 +8898,7 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v3, v1, v3
 ; GFX6-NEXT:    s_mov_b32 s9, s8
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v3, vcc
 ; GFX6-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
@@ -8944,7 +8944,7 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
 ; GFX6-NEXT:    s_movk_i32 s9, 0xfff
 ; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -9022,7 +9022,7 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX9-NEXT:    v_mul_lo_u32 v7, v1, v5
 ; GFX9-NEXT:    v_mul_hi_u32 v5, v1, v5
 ; GFX9-NEXT:    s_ashr_i64 s[4:5], s[4:5], 12
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v7
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v5, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v8, v4, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -9069,7 +9069,7 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GFX9-NEXT:    v_mul_lo_u32 v5, s7, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; GFX9-NEXT:    s_movk_i32 s3, 0xfff
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v5, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v5
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -9177,7 +9177,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v5
 ; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v5
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
 ; GFX6-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
@@ -9228,7 +9228,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s9, v0
 ; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[16:17]
 ; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -9303,7 +9303,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v10, v5, v7
 ; GFX6-NEXT:    v_mul_hi_u32 v7, v5, v7
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v10, v8
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
 ; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, v9, v7, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v8, vcc, v11, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
@@ -9349,7 +9349,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_mul_lo_u32 v9, s11, v2
 ; GFX6-NEXT:    v_mul_hi_u32 v2, s11, v2
 ; GFX6-NEXT:    v_mov_b32_e32 v8, s3
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v9
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v2, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v10, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
@@ -9446,7 +9446,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v7, v1, v4
 ; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v7
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v4, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -9496,7 +9496,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s17, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    s_xor_b64 s[12:13], s[14:15], s[12:13]
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -9571,7 +9571,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_mul_hi_u32 v2, v4, v2
 ; GFX9-NEXT:    s_ashr_i32 s10, s7, 31
 ; GFX9-NEXT:    s_mov_b32 s11, s10
-; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v10, v8
+; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v8, v10
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v9, v2, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v11, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v7
@@ -9616,7 +9616,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_mul_hi_u32 v2, s7, v2
 ; GFX9-NEXT:    v_xor_b32_e32 v0, s12, v0
 ; GFX9-NEXT:    v_xor_b32_e32 v1, s13, v1
-; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v9, v4
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v9
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v2, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v10, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v3
@@ -9715,7 +9715,7 @@ define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
 ; GFX6-NEXT:    s_mov_b32 s5, s9
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
@@ -9760,7 +9760,7 @@ define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX6-NEXT:    v_mul_lo_u32 v4, s1, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s1, v0
 ; GFX6-NEXT:    s_mov_b32 s3, 0x12d8fb
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -9828,7 +9828,7 @@ define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, v7, v8, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v8, v1, v4
 ; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v8, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v8
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v6, v4, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v9, v5, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -9874,7 +9874,7 @@ define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GFX9-NEXT:    v_mul_lo_u32 v4, s1, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s1, v0
 ; GFX9-NEXT:    s_mov_b32 s3, 0x12d8fb
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v6, v5, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -10017,7 +10017,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
 ; GFX6-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
@@ -10062,7 +10062,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
 ; GFX6-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -10150,7 +10150,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX9-NEXT:    v_mul_hi_u32 v9, v1, v3
 ; GFX9-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v8, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v5, v4
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v5
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v6, v7, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, v9, v2, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v4, v3
@@ -10198,7 +10198,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v5, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v5, s7, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v5, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v5
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v4, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v7, v2, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -10389,7 +10389,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v5
 ; GFX6-NEXT:    s_addc_u32 s1, s9, s12
 ; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[12:13]
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
 ; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
 ; GFX6-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
@@ -10433,7 +10433,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_mul_lo_u32 v5, s9, v0
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s9, v0
 ; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -10509,7 +10509,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_mul_hi_u32 v7, v5, v7
 ; GFX6-NEXT:    v_xor_b32_e32 v0, s12, v0
 ; GFX6-NEXT:    v_xor_b32_e32 v1, s12, v1
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v10, v8
+; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
 ; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, v9, v7, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v8, vcc, v11, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
@@ -10553,7 +10553,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX6-NEXT:    v_mul_lo_u32 v9, s11, v2
 ; GFX6-NEXT:    v_mul_hi_u32 v2, s11, v2
 ; GFX6-NEXT:    v_mov_b32_e32 v8, s12
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
+; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v5, v9
 ; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v7, v2, vcc
 ; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, v10, v4, vcc
 ; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
@@ -10652,7 +10652,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v7, vcc
 ; GFX9-NEXT:    v_mul_lo_u32 v7, v1, v4
 ; GFX9-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
+; GFX9-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v7
 ; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v5, v4, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v3, v2
@@ -10697,7 +10697,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_mul_lo_u32 v4, s9, v0
 ; GFX9-NEXT:    v_mul_hi_u32 v0, s9, v0
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v4, v2
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
 ; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, v3, v0, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v1
@@ -10771,7 +10771,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_mul_hi_u32 v2, v4, v2
 ; GFX9-NEXT:    s_ashr_i32 s12, s11, 31
 ; GFX9-NEXT:    s_mov_b32 s13, s12
-; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v10, v8
+; GFX9-NEXT:    v_add_co_u32_e32 v8, vcc, v8, v10
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v9, v2, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v8, vcc, v11, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v7
@@ -10816,7 +10816,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GFX9-NEXT:    v_mul_hi_u32 v2, s11, v2
 ; GFX9-NEXT:    v_xor_b32_e32 v0, s6, v0
 ; GFX9-NEXT:    v_xor_b32_e32 v1, s6, v1
-; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v9, v4
+; GFX9-NEXT:    v_add_co_u32_e32 v4, vcc, v4, v9
 ; GFX9-NEXT:    v_addc_co_u32_e32 v2, vcc, v7, v2, vcc
 ; GFX9-NEXT:    v_addc_co_u32_e32 v4, vcc, v10, v6, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v3

diff  --git a/llvm/test/CodeGen/AMDGPU/change-scc-to-vcc.mir b/llvm/test/CodeGen/AMDGPU/change-scc-to-vcc.mir
new file mode 100644
index 0000000000000..3992c569ad77d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/change-scc-to-vcc.mir
@@ -0,0 +1,74 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -mcpu=gfx1030 -run-pass=si-fix-sgpr-copies -verify-machineinstrs -o - %s | FileCheck --check-prefix=GCN %s
+
+# Test that the Fix SGPR Copy pass changes scc definitions to vcc if the
+# instruction that uses the scc is changed to use vcc.
+
+---
+name: change_scc_def
+body:               |
+  bb.0:
+    ; GCN-LABEL: name: change_scc_def
+    ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 681
+    ; GCN: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF2:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 1
+    ; GCN: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+    ; GCN: [[V_MUL_HI_U32_U24_e64_:%[0-9]+]]:vgpr_32 = V_MUL_HI_U32_U24_e64 [[S_MOV_B32_1]], [[S_MOV_B32_]], implicit $exec
+    ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY killed [[DEF1]]
+    ; GCN: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 killed [[DEF]], [[COPY]], implicit-def $vcc_lo, implicit $exec
+    ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[DEF3]]
+    ; GCN: [[V_ADDC_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADDC_U32_e32 killed [[DEF2]], [[COPY1]], implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
+    ; GCN: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_2]]
+    ; GCN: [[V_ADDC_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADDC_U32_e32 [[V_MUL_HI_U32_U24_e64_]], [[COPY2]], implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
+    %0:sreg_32 = S_MOV_B32 681
+    %1:sreg_32 = IMPLICIT_DEF
+    %2:sreg_32 = IMPLICIT_DEF
+    %3:sreg_32 = IMPLICIT_DEF
+    %4:sreg_32 = IMPLICIT_DEF
+    %5:sreg_32 = S_MOV_B32 1
+    %6:sreg_32 = S_MOV_B32 0
+    %7:vgpr_32 = V_MUL_HI_U32_U24_e64 %5, %0, implicit $exec
+    %8:sreg_32 = S_ADD_U32 killed %1, killed %2, implicit-def $scc
+    %9:sreg_32 = S_ADDC_U32 killed %3, killed %4, implicit-def $scc, implicit $scc
+    %10:sreg_32 = COPY %7
+    %11:sreg_32 = S_ADDC_U32 killed %10, %6, implicit-def dead $scc, implicit $scc
+
+...
+
+# This case successfully converted scc definitions to vcc because the input
+# to the first add is a vgpr. This test checks that it still works correctly.
+---
+name: test-working-scc-def
+body:               |
+  bb.0:
+    ; GCN-LABEL: name: test-working-scc-def
+    ; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; GCN: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+    ; GCN: [[DEF2:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF3:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF4:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF5:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[DEF6:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 killed [[DEF2]], [[DEF]], implicit-def $vcc_lo, implicit $exec
+    ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY killed [[DEF4]]
+    ; GCN: [[V_ADDC_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADDC_U32_e32 killed [[DEF3]], [[COPY]], implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
+    ; GCN: [[DEF7:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+    ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[DEF5]]
+    ; GCN: [[V_ADDC_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADDC_U32_e32 killed [[V_ADDC_U32_e32_1]], [[COPY1]], implicit-def $vcc_lo, implicit $vcc_lo, implicit $exec
+    %0:vgpr_32 = IMPLICIT_DEF
+    %1:vgpr_32 = IMPLICIT_DEF
+    %2:sreg_32 = IMPLICIT_DEF
+    %3:sreg_32 = IMPLICIT_DEF
+    %4:sreg_32 = IMPLICIT_DEF
+    %5:sreg_32 = IMPLICIT_DEF
+    %6:sreg_32 = COPY %0
+    %7:sreg_32 = S_ADD_U32 killed %6, killed %2, implicit-def $scc
+    %8:sreg_32 = S_ADDC_U32 killed %3, killed %4, implicit-def $scc, implicit $scc
+    %9:sreg_32 = COPY %1
+    %10:sreg_32 = S_ADDC_U32 killed %10, %5, implicit-def dead $scc, implicit $scc
+
+...

diff  --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 8ae54e8d91ae4..f630ab9a3039c 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -46,7 +46,7 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v8, v2, v5
 ; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v8, v4
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v8
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
@@ -91,7 +91,7 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
@@ -1171,7 +1171,7 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4

diff  --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 4f11ae191b83c..a513d41caeb79 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -41,7 +41,7 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
@@ -81,7 +81,7 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v6, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v5, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
@@ -927,7 +927,7 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v8, v2, v5
 ; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v8, v4
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v8
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
@@ -972,7 +972,7 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_mul_lo_u32 v5, s15, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s15, v0
 ; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
@@ -1359,7 +1359,7 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index eefd825e8c02f..bc38031288e7e 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -39,7 +39,7 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
@@ -81,7 +81,7 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_mul_lo_u32 v6, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v5, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
@@ -746,7 +746,7 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_mul_lo_u32 v7, v2, v4
 ; GCN-NEXT:    v_mul_hi_u32 v4, v2, v4
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v7
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v5, v4, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v10, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
@@ -975,7 +975,7 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v8, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
@@ -1450,7 +1450,7 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
@@ -1490,7 +1490,7 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
@@ -1639,7 +1639,7 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
 ; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v10, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v7, vcc, v8, v7
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, v7, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v5, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v11, v9, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index f2ae6d0b384fa..d9c95cdc63e6f 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -41,7 +41,7 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
@@ -81,7 +81,7 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v6, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v5, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
@@ -785,7 +785,7 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v8, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
@@ -986,7 +986,7 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
@@ -1026,7 +1026,7 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1

