[llvm-branch-commits] [llvm] MachineUniformityAnalysis: Improve isConstantOrUndefValuePhi (PR #112866)
Petar Avramovic via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Nov 5 07:51:17 PST 2024
https://github.com/petar-avramovic updated https://github.com/llvm/llvm-project/pull/112866
>From a8d15f3f4854a364fc0b905544840112283b41a3 Mon Sep 17 00:00:00 2001
From: Petar Avramovic <Petar.Avramovic at amd.com>
Date: Thu, 31 Oct 2024 14:10:57 +0100
Subject: [PATCH] MachineUniformityAnalysis: Improve isConstantOrUndefValuePhi
Change the existing code for G_PHI to match what the LLVM-IR version does
via PHINode::hasConstantOrUndefValue. This is not safe for regular PHI,
since such a PHI may appear with an undef operand and getVRegDef can fail.
Most notably, this increases the number of values that can be allocated
to the sgpr register bank in AMDGPURegBankSelect.
The common case here is phis introduced by structurize-cfg lowering
for cycles with multiple exits:
the undef incoming value comes from the block that reached the cycle exit
condition; if the other incoming value is uniform, keep the phi uniform,
even though it joins values from a pair of blocks that are entered
via a divergent conditional branch.
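
For reference, the IR-level helper that the new G_PHI handling mirrors,
PHINode::hasConstantOrUndefValue(), is roughly equivalent to the sketch
below (paraphrased from LLVM's IR implementation; exact details may vary
between revisions):

// Sketch of PHINode::hasConstantOrUndefValue(): a phi is constant-or-undef
// if, after skipping self-references and undef operands, all remaining
// incoming values are the same.
bool PHINode::hasConstantOrUndefValue() const {
  Value *ConstantValue = nullptr;
  for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
    Value *Incoming = getIncomingValue(i);
    if (Incoming != this && !isa<UndefValue>(Incoming)) {
      if (ConstantValue && ConstantValue != Incoming)
        return false;
      ConstantValue = Incoming;
    }
  }
  return true;
}

The patch below applies the same rule to G_PHI, treating G_IMPLICIT_DEF /
IMPLICIT_DEF defs as the undef case.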
---
llvm/lib/CodeGen/MachineSSAContext.cpp | 27 +++++-
.../AMDGPU/MIR/hidden-diverge-gmir.mir | 28 +++----
.../AMDGPU/MIR/hidden-loop-diverge.mir | 4 +-
.../AMDGPU/MIR/uses-value-from-cycle.mir | 8 +-
.../GlobalISel/divergence-structurizer.mir | 80 ++++++++----------
.../regbankselect-mui-regbanklegalize.mir | 69 ++++++++-------
.../regbankselect-mui-regbankselect.mir | 18 ++--
.../AMDGPU/GlobalISel/regbankselect-mui.ll | 84 ++++++++++---------
.../AMDGPU/GlobalISel/regbankselect-mui.mir | 51 ++++++-----
9 files changed, 191 insertions(+), 178 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineSSAContext.cpp b/llvm/lib/CodeGen/MachineSSAContext.cpp
index e384187b6e8593..8e13c0916dd9e1 100644
--- a/llvm/lib/CodeGen/MachineSSAContext.cpp
+++ b/llvm/lib/CodeGen/MachineSSAContext.cpp
@@ -54,9 +54,34 @@ const MachineBasicBlock *MachineSSAContext::getDefBlock(Register value) const {
return F->getRegInfo().getVRegDef(value)->getParent();
}
+static bool isUndef(const MachineInstr &MI) {
+ return MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ MI.getOpcode() == TargetOpcode::IMPLICIT_DEF;
+}
+
+/// MachineInstr equivalent of PHINode::hasConstantOrUndefValue() for G_PHI.
template <>
bool MachineSSAContext::isConstantOrUndefValuePhi(const MachineInstr &Phi) {
- return Phi.isConstantValuePHI();
+ if (!Phi.isPHI())
+ return false;
+
+ // In later passes PHI may appear with an undef operand, getVRegDef can fail.
+ if (Phi.getOpcode() == TargetOpcode::PHI)
+ return Phi.isConstantValuePHI();
+
+ // For G_PHI we do equivalent of PHINode::hasConstantOrUndefValue().
+ const MachineRegisterInfo &MRI = Phi.getMF()->getRegInfo();
+ Register This = Phi.getOperand(0).getReg();
+ Register ConstantValue;
+ for (unsigned i = 1, e = Phi.getNumOperands(); i < e; i += 2) {
+ Register Incoming = Phi.getOperand(i).getReg();
+ if (Incoming != This && !isUndef(*MRI.getVRegDef(Incoming))) {
+ if (ConstantValue && ConstantValue != Incoming)
+ return false;
+ ConstantValue = Incoming;
+ }
+ }
+ return true;
}
template <>
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
index ce00edf3363f77..9694a340b5e906 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-diverge-gmir.mir
@@ -1,24 +1,24 @@
# RUN: llc -mtriple=amdgcn-- -run-pass=print-machine-uniformity -o - %s 2>&1 | FileCheck %s
# CHECK-LABEL: MachineUniformityInfo for function: hidden_diverge
# CHECK-LABEL: BLOCK bb.0
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.workitem.id.x)
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_ICMP intpred(slt)
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_XOR %{{[0-9]*}}:_, %{{[0-9]*}}:_
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
-# CHECK: DIVERGENT: G_BRCOND %{{[0-9]*}}:_(s1), %bb.1
-# CHECK: DIVERGENT: G_BR %bb.2
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.workitem.id.x)
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_ICMP intpred(slt)
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_XOR %{{[0-9]*}}:_, %{{[0-9]*}}:_
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
+# CHECK: DIVERGENT: G_BRCOND %{{[0-9]*}}:_(s1), %bb.1
+# CHECK: DIVERGENT: G_BR %bb.2
# CHECK-LABEL: BLOCK bb.1
# CHECK-LABEL: BLOCK bb.2
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.1, %{{[0-9]*}}:_(s32), %bb.0
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_PHI %{{[0-9]*}}:_(s1), %bb.1, %{{[0-9]*}}:_(s1), %bb.0
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
-# CHECK: DIVERGENT: G_BRCOND %{{[0-9]*}}:_(s1), %bb.3
-# CHECK: DIVERGENT: G_BR %bb.4
+# CHECK-NOT: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.1, %{{[0-9]*}}:_(s32), %bb.0
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_PHI %{{[0-9]*}}:_(s1), %bb.1, %{{[0-9]*}}:_(s1), %bb.0
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1), %{{[0-9]*}}:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.if)
+# CHECK: DIVERGENT: G_BRCOND %{{[0-9]*}}:_(s1), %bb.3
+# CHECK: DIVERGENT: G_BR %bb.4
# CHECK-LABEL: BLOCK bb.3
# CHECK-LABEL: BLOCK bb.4
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.2, %{{[0-9]*}}:_(s32), %bb.3
+# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.2, %{{[0-9]*}}:_(s32), %bb.3
---
name: hidden_diverge
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-loop-diverge.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-loop-diverge.mir
index 2f4dc588aa9936..2d01ab1269d61d 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-loop-diverge.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/hidden-loop-diverge.mir
@@ -22,10 +22,10 @@
# CHECK-NOT: DIVERGENT: G_BR %bb.5
# CHECK-LABEL: BLOCK bb.4
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.3, %{{[0-9]*}}:_(s32), %bb.2
+# CHECK-NOT: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.3, %{{[0-9]*}}:_(s32), %bb.2
# CHECK-LABEL: BLOCK bb.5
-# CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.3, %{{[0-9]*}}:_(s32), %bb.4
+# CHECK-NOT: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI %{{[0-9]*}}:_(s32), %bb.3, %{{[0-9]*}}:_(s32), %bb.4
---
name: hidden_loop_diverge
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/uses-value-from-cycle.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/uses-value-from-cycle.mir
index b7e0d5449d2e8b..c1acbb3a1575d5 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/uses-value-from-cycle.mir
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/uses-value-from-cycle.mir
@@ -40,10 +40,10 @@ body: |
bb.5:
successors: %bb.6(0x04000000), %bb.2(0x7c000000)
- ; CHECK: DIVERGENT: %{{[0-9]}}: %{{[0-9]}}:_(s32) = G_PHI
- ; CHECK: DIVERGENT: %{{[0-9]}}: %{{[0-9]}}:_(s32) = G_PHI
- ; CHECK: DIVERGENT: %{{[0-9]}}: %{{[0-9]}}:_(s32) = G_PHI
- ; CHECK-NOT: DIVERGENT: %{{[0-9]}}: %{{[0-9]}}:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break)
+ ; CHECK-NOT: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI
+ ; CHECK-NOT: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s32) = G_PHI
+ ; CHECK: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:_(s1) = G_PHI
+ ; CHECK-NOT: DIVERGENT: %{{[0-9]*}}: %{{[0-9]*}}:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break)
%19:_(s32) = G_PHI %18(s32), %bb.7, %25(s32), %bb.4
%20:_(s32) = G_PHI %6(s32), %bb.7, %25(s32), %bb.4
%21:_(s1) = G_PHI %34(s1), %bb.7, %33(s1), %bb.4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
index 1d291eeab8e9d7..39ebf66411cc65 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.mir
@@ -971,12 +971,10 @@ body: |
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[DEF]](s1)
; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY4]](s32), [[COPY1]]
; GFX10-NEXT: [[DEF1:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
; GFX10-NEXT: [[DEF2:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
- ; GFX10-NEXT: [[DEF3:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP]](s1)
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.1:
@@ -989,19 +987,18 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.7(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI %67(s1), %bb.6, %71(s1), %bb.7
- ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI %49(s1), %bb.6, %48(s1), %bb.7
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI %35(s1), %bb.6, %34(s1), %bb.7
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI %53(s1), %bb.6, %57(s1), %bb.7
+ ; GFX10-NEXT: [[PHI1:%[0-9]+]]:sreg_32(s1) = PHI %35(s1), %bb.6, %34(s1), %bb.7
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s1) = G_PHI %12(s1), %bb.6, [[DEF]](s1), %bb.7
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI2]](s1)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI1]](s1)
- ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
- ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[COPY10]](s1)
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %15(s32)
; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[COPY9]](s1), %17(s32)
; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY7]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[S_OR_B32_]](s1)
; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.4
; GFX10-NEXT: {{ $}}
@@ -1018,28 +1015,27 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32)
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY5]](s32), [[COPY]]
- ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; GFX10-NEXT: [[COPY11:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C2]](s1)
+ ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[C2]](s1)
; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP]], [[C2]]
; GFX10-NEXT: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP2]], [[XOR]]
; GFX10-NEXT: [[INT2:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[OR]](s1), %25(s32)
- ; GFX10-NEXT: [[DEF4:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
- ; GFX10-NEXT: [[DEF5:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
- ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %63(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY13]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[DEF3:%[0-9]+]]:sreg_32(s1) = IMPLICIT_DEF
+ ; GFX10-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %49(s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY11]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_AND_B32 $exec_lo, [[COPY12]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32_xm0_xexec(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; GFX10-NEXT: SI_LOOP [[INT2]](s32), %bb.7, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[INT2]](s32), %bb.4
- ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
+ ; GFX10-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_1]](s1)
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI4]](s32)
- ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY15]](s1), [[COPY3]], [[COPY2]]
+ ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[COPY13]](s1), [[COPY3]], [[COPY2]]
; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:_(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.readfirstlane), [[SELECT]](s32)
; GFX10-NEXT: $sgpr0 = COPY [[INTRINSIC_CONVERGENT]](s32)
; GFX10-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0
@@ -1049,42 +1045,32 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INT1]](s32), %bb.3
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
- ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
+ ; GFX10-NEXT: [[COPY14:%[0-9]+]]:sreg_32(s1) = COPY [[C3]](s1)
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
; GFX10-NEXT: [[S_ANDN2_B32_3:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %42(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY17]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_3:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY14]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_3:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_3]](s1), [[S_AND_B32_3]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 %56(s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY16]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_OR_B32_4:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_4]](s1), [[S_AND_B32_4]](s1), implicit-def $scc
- ; GFX10-NEXT: [[DEF6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = IMPLICIT_DEF
+ ; GFX10-NEXT: [[DEF4:%[0-9]+]]:sreg_32_xm0_xexec(s1) = IMPLICIT_DEF
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.7:
; GFX10-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY7]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.2, [[S_OR_B32_2]](s1), %bb.4
- ; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[DEF3]](s1), %bb.0, [[PHI7]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
- ; GFX10-NEXT: [[PHI8:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.0, [[PHI1]](s1), %bb.2, [[DEF5]](s1), %bb.4
- ; GFX10-NEXT: [[PHI9:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, [[PHI2]](s1), %bb.2, [[DEF4]](s1), %bb.4
- ; GFX10-NEXT: [[PHI10:%[0-9]+]]:_(s32) = G_PHI [[INT2]](s32), %bb.4, [[PHI10]](s32), %bb.2, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[PHI11:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.4, [[INT]](s32), %bb.2, [[C]](s32), %bb.0
- ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
- ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
- ; GFX10-NEXT: [[COPY20:%[0-9]+]]:sreg_32(s1) = COPY [[PHI8]](s1)
- ; GFX10-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[PHI9]](s1)
+ ; GFX10-NEXT: [[PHI6:%[0-9]+]]:sreg_32_xm0_xexec(s1) = PHI [[COPY6]](s1), %bb.0, [[S_OR_B32_]](s1), %bb.2, [[S_OR_B32_2]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI7:%[0-9]+]]:sreg_32(s1) = PHI [[DEF2]](s1), %bb.0, [[PHI7]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI8:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, [[PHI1]](s1), %bb.2, [[DEF3]](s1), %bb.4
+ ; GFX10-NEXT: [[PHI9:%[0-9]+]]:_(s32) = G_PHI [[INT2]](s32), %bb.4, [[PHI9]](s32), %bb.2, [[C]](s32), %bb.0
+ ; GFX10-NEXT: [[PHI10:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.4, [[INT]](s32), %bb.2, [[C]](s32), %bb.0
+ ; GFX10-NEXT: [[COPY15:%[0-9]+]]:sreg_32_xm0_xexec(s1) = COPY [[PHI6]](s1)
+ ; GFX10-NEXT: [[COPY16:%[0-9]+]]:sreg_32(s1) = COPY [[PHI7]](s1)
+ ; GFX10-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[PHI8]](s1)
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_5:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY21]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_5:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY6]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_OR_B32_5:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_5]](s1), [[S_AND_B32_5]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_5]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_6:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY20]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_6:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY22]](s1), implicit-def $scc
- ; GFX10-NEXT: [[S_OR_B32_6:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_6]](s1), [[S_AND_B32_6]](s1), implicit-def $scc
- ; GFX10-NEXT: [[COPY24:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_6]](s1)
- ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY18]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_4:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY17]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_4:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY18]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[S_OR_B32_4:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_4]](s1), [[S_AND_B32_4]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[S_OR_B32_4]](s1)
+ ; GFX10-NEXT: [[SI_IF:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[COPY15]](s1), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.1
bb.0:
successors: %bb.7(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbanklegalize.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbanklegalize.mir
index ad566097af81e4..5c33f50fe6d89a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbanklegalize.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbanklegalize.mir
@@ -740,15 +740,15 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %13(s1), %bb.3
; CHECK-NEXT: [[PHI1:%[0-9]+]]:sgpr(s32) = G_PHI %15(s32), %bb.3, [[C]](s32), %bb.0
- ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vgpr(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.3
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sgpr(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.3
; CHECK-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; CHECK-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 31
- ; CHECK-NEXT: [[ASHR:%[0-9]+]]:vgpr(s32) = G_ASHR [[PHI2]], [[C1]](s32)
- ; CHECK-NEXT: [[MV3:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[PHI2]](s32), [[ASHR]](s32)
+ ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 31
+ ; CHECK-NEXT: [[ASHR:%[0-9]+]]:sgpr(s32) = G_ASHR [[PHI2]], [[C1]](s32)
+ ; CHECK-NEXT: [[MV3:%[0-9]+]]:sgpr(s64) = G_MERGE_VALUES [[PHI2]](s32), [[ASHR]](s32)
; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[C2]](s32)
- ; CHECK-NEXT: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[MV3]], [[COPY7]](s32)
- ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV1]], [[SHL]](s64)
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:sgpr(s64) = G_SHL [[MV3]], [[C2]](s32)
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr(s64) = COPY [[SHL]](s64)
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV1]], [[COPY7]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; CHECK-NEXT: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY [[C3]](s32)
@@ -767,9 +767,9 @@ body: |
; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C5:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[COPY11:%[0-9]+]]:vgpr(s32) = COPY [[C5]](s32)
- ; CHECK-NEXT: [[SHL1:%[0-9]+]]:vgpr(s64) = G_SHL [[MV3]], [[COPY11]](s32)
- ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:sgpr(s64) = G_SHL [[MV3]], [[C5]](s32)
+ ; CHECK-NEXT: [[COPY11:%[0-9]+]]:vgpr(s64) = COPY [[SHL1]](s64)
+ ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV2]], [[COPY11]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; CHECK-NEXT: [[C6:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; CHECK-NEXT: [[COPY12:%[0-9]+]]:vgpr(s32) = COPY [[C6]](s32)
@@ -785,7 +785,7 @@ body: |
; CHECK-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %45(s1), %bb.5
- ; CHECK-NEXT: [[PHI4:%[0-9]+]]:vgpr(s32) = G_PHI %46(s32), %bb.5, [[DEF]](s32), %bb.1
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:sgpr(s32) = G_PHI %46(s32), %bb.5, [[DEF]](s32), %bb.1
; CHECK-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; CHECK-NEXT: [[COPY16:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF]](s32)
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY16]](s32)
@@ -799,35 +799,34 @@ body: |
; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C8:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[COPY19:%[0-9]+]]:vgpr(s32) = COPY [[C8]](s32)
- ; CHECK-NEXT: [[SHL2:%[0-9]+]]:vgpr(s64) = G_SHL [[MV3]], [[COPY19]](s32)
- ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV]], [[SHL2]](s64)
+ ; CHECK-NEXT: [[SHL2:%[0-9]+]]:sgpr(s64) = G_SHL [[MV3]], [[C8]](s32)
+ ; CHECK-NEXT: [[COPY19:%[0-9]+]]:vgpr(s64) = COPY [[SHL2]](s64)
+ ; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV]], [[COPY19]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32), addrspace 1)
; CHECK-NEXT: [[C9:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[COPY20:%[0-9]+]]:vgpr(s32) = COPY [[C9]](s32)
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[LOAD2]], [[COPY20]]
; CHECK-NEXT: G_STORE [[ADD]](s32), [[PTR_ADD2]](p1) :: (store (s32), addrspace 1)
- ; CHECK-NEXT: [[COPY21:%[0-9]+]]:vgpr(s32) = COPY [[C9]](s32)
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[PHI2]], [[COPY21]]
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[PHI2]], [[C9]]
; CHECK-NEXT: [[C10:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 100
- ; CHECK-NEXT: [[COPY22:%[0-9]+]]:vgpr(s32) = COPY [[C10]](s32)
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[COPY22]]
- ; CHECK-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(ult), [[PHI2]](s32), [[C10]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[ICMP2]], [[C9]]
+ ; CHECK-NEXT: [[COPY_VCC_SCC2:%[0-9]+]]:sreg_32(s1) = G_COPY_VCC_SCC [[AND]](s32)
; CHECK-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY14]](s1), $exec_lo, implicit-def $scc
- ; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY23]](s1), implicit-def $scc
+ ; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY_VCC_SCC2]](s1), implicit-def $scc
; CHECK-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.5:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY_VCC_SCC1]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
- ; CHECK-NEXT: [[PHI6:%[0-9]+]]:vgpr(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
- ; CHECK-NEXT: [[COPY24:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; CHECK-NEXT: [[COPY25:%[0-9]+]]:sreg_32(s1) = COPY [[COPY24]](s1)
- ; CHECK-NEXT: [[COPY26:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF1]](s32)
- ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY26]](s32)
+ ; CHECK-NEXT: [[PHI6:%[0-9]+]]:sgpr(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
+ ; CHECK-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; CHECK-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[COPY21]](s1)
+ ; CHECK-NEXT: [[COPY23:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF1]](s32)
+ ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY23]](s32)
; CHECK-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
- ; CHECK-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY25]](s1), implicit-def $scc
+ ; CHECK-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY22]](s1), implicit-def $scc
; CHECK-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; CHECK-NEXT: G_BR %bb.3
; CHECK-NEXT: {{ $}}
@@ -857,11 +856,11 @@ body: |
%12:sreg_32(s1) = PHI %11(s1), %bb.0, %13(s1), %bb.3
%14:sgpr(s32) = G_PHI %67(s32), %bb.3, %10(s32), %bb.0
- %16:vgpr(s32) = G_PHI %10(s32), %bb.0, %17(s32), %bb.3
+ %16:sgpr(s32) = G_PHI %10(s32), %bb.0, %17(s32), %bb.3
%18:sreg_32(s1) = COPY %12(s1)
- %19:vgpr(s64) = G_SEXT %16(s32)
+ %19:sgpr(s64) = G_SEXT %16(s32)
%20:sgpr(s32) = G_CONSTANT i32 2
- %21:vgpr(s64) = G_SHL %19, %20(s32)
+ %21:sgpr(s64) = G_SHL %19, %20(s32)
%22:vgpr(p1) = G_PTR_ADD %5, %21(s64)
%23:vgpr(s32) = G_LOAD %22(p1) :: (load (s32), addrspace 1)
%24:sgpr(s32) = G_CONSTANT i32 0
@@ -880,7 +879,7 @@ body: |
successors: %bb.4(0x40000000), %bb.5(0x40000000)
%33:sgpr(s32) = G_CONSTANT i32 2
- %34:vgpr(s64) = G_SHL %19, %33(s32)
+ %34:sgpr(s64) = G_SHL %19, %33(s32)
%35:vgpr(p1) = G_PTR_ADD %8, %34(s64)
%36:vgpr(s32) = G_LOAD %35(p1) :: (load (s32), addrspace 1)
%37:sgpr(s32) = G_CONSTANT i32 0
@@ -896,7 +895,7 @@ body: |
successors: %bb.6(0x04000000), %bb.1(0x7c000000)
%13:sreg_32(s1) = PHI %30(s1), %bb.1, %43(s1), %bb.5
- %17:vgpr(s32) = G_PHI %44(s32), %bb.5, %9(s32), %bb.1
+ %17:sgpr(s32) = G_PHI %44(s32), %bb.5, %9(s32), %bb.1
%45:sreg_32(s1) = COPY %13(s1)
%68:sgpr(s32) = COPY %32(s32)
G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %68(s32)
@@ -910,15 +909,15 @@ body: |
successors: %bb.5(0x80000000)
%46:sgpr(s32) = G_CONSTANT i32 2
- %47:vgpr(s64) = G_SHL %19, %46(s32)
+ %47:sgpr(s64) = G_SHL %19, %46(s32)
%48:vgpr(p1) = G_PTR_ADD %2, %47(s64)
%49:vgpr(s32) = G_LOAD %48(p1) :: (load (s32), addrspace 1)
%50:sgpr(s32) = G_CONSTANT i32 1
%51:vgpr(s32) = G_ADD %49, %50
G_STORE %51(s32), %48(p1) :: (store (s32), addrspace 1)
- %52:vgpr(s32) = G_ADD %16, %50
+ %52:sgpr(s32) = G_ADD %16, %50
%53:sgpr(s32) = G_CONSTANT i32 100
- %54:vcc(s1) = G_ICMP intpred(ult), %16(s32), %53
+ %54:sgpr(s1) = G_ICMP intpred(ult), %16(s32), %53
%55:sreg_32(s1) = COPY %54(s1)
%56:sreg_32(s1) = S_ANDN2_B32 %41(s1), $exec_lo, implicit-def $scc
%57:sreg_32(s1) = S_AND_B32 $exec_lo, %55(s1), implicit-def $scc
@@ -928,7 +927,7 @@ body: |
successors: %bb.3(0x80000000)
%59:sreg_32(s1) = PHI %40(s1), %bb.2, %58(s1), %bb.4
- %44:vgpr(s32) = G_PHI %52(s32), %bb.4, %9(s32), %bb.2
+ %44:sgpr(s32) = G_PHI %52(s32), %bb.4, %9(s32), %bb.2
%60:sreg_32(s1) = COPY %59(s1)
%61:sreg_32(s1) = COPY %60(s1)
%70:sgpr(s32) = COPY %42(s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbankselect.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbankselect.mir
index fbbe13615e7fd5..2a9d7f66a0408f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbankselect.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui-regbankselect.mir
@@ -678,11 +678,11 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %13(s1), %bb.3
; CHECK-NEXT: [[PHI1:%[0-9]+]]:sgpr(s32) = G_PHI %67(s32), %bb.3, [[C]](s32), %bb.0
- ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vgpr(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.3
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sgpr(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.3
; CHECK-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; CHECK-NEXT: [[SEXT:%[0-9]+]]:vgpr(s64) = G_SEXT [[PHI2]](s32)
+ ; CHECK-NEXT: [[SEXT:%[0-9]+]]:sgpr(s64) = G_SEXT [[PHI2]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[SEXT]], [[C1]](s32)
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:sgpr(s64) = G_SHL [[SEXT]], [[C1]](s32)
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV1]], [[SHL]](s64)
; CHECK-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; CHECK-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
@@ -701,7 +701,7 @@ body: |
; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C4:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[SHL1:%[0-9]+]]:vgpr(s64) = G_SHL [[SEXT]], [[C4]](s32)
+ ; CHECK-NEXT: [[SHL1:%[0-9]+]]:sgpr(s64) = G_SHL [[SEXT]], [[C4]](s32)
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
; CHECK-NEXT: [[LOAD1:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; CHECK-NEXT: [[C5:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
@@ -717,7 +717,7 @@ body: |
; CHECK-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %43(s1), %bb.5
- ; CHECK-NEXT: [[PHI4:%[0-9]+]]:vgpr(s32) = G_PHI %44(s32), %bb.5, [[DEF]](s32), %bb.1
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:sgpr(s32) = G_PHI %44(s32), %bb.5, [[DEF]](s32), %bb.1
; CHECK-NEXT: [[COPY13:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; CHECK-NEXT: [[COPY14:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF]](s32)
; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY14]](s32)
@@ -731,15 +731,15 @@ body: |
; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[C7:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; CHECK-NEXT: [[SHL2:%[0-9]+]]:vgpr(s64) = G_SHL [[SEXT]], [[C7]](s32)
+ ; CHECK-NEXT: [[SHL2:%[0-9]+]]:sgpr(s64) = G_SHL [[SEXT]], [[C7]](s32)
; CHECK-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV]], [[SHL2]](s64)
; CHECK-NEXT: [[LOAD2:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32), addrspace 1)
; CHECK-NEXT: [[C8:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[LOAD2]], [[C8]]
; CHECK-NEXT: G_STORE [[ADD]](s32), [[PTR_ADD2]](p1) :: (store (s32), addrspace 1)
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[PHI2]], [[C8]]
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[PHI2]], [[C8]]
; CHECK-NEXT: [[C9:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 100
- ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[C9]]
+ ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:sgpr(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[C9]]
; CHECK-NEXT: [[COPY17:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
; CHECK-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY12]](s1), $exec_lo, implicit-def $scc
; CHECK-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY17]](s1), implicit-def $scc
@@ -749,7 +749,7 @@ body: |
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY11]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
- ; CHECK-NEXT: [[PHI6:%[0-9]+]]:vgpr(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
+ ; CHECK-NEXT: [[PHI6:%[0-9]+]]:sgpr(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
; CHECK-NEXT: [[COPY18:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
; CHECK-NEXT: [[COPY19:%[0-9]+]]:sreg_32(s1) = COPY [[COPY18]](s1)
; CHECK-NEXT: [[COPY20:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF1]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
index 63dbf3a8d31645..191739b37672e2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
@@ -483,9 +483,6 @@ exit:
}
; Variables that handle the counter can be allocated to sgprs.
-; Machine uniformity analysis claims some of those registers are divergent while
-; LLVM-IR uniformity analysis claims corresponding values are uniform.
-; TODO: fix this in Machine uniformity analysis.
define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; OLD_RBS-LABEL: loop_with_2breaks:
; OLD_RBS: ; %bb.0: ; %entry
@@ -551,62 +548,69 @@ define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %
;
; NEW_RBS-LABEL: loop_with_2breaks:
; NEW_RBS: ; %bb.0: ; %entry
+; NEW_RBS-NEXT: s_mov_b32 s4, 0
; NEW_RBS-NEXT: s_mov_b32 s0, 0
-; NEW_RBS-NEXT: ; implicit-def: $sgpr1
-; NEW_RBS-NEXT: v_mov_b32_e32 v6, s0
+; NEW_RBS-NEXT: ; implicit-def: $sgpr5
; NEW_RBS-NEXT: s_branch .LBB16_3
; NEW_RBS-NEXT: .LBB16_1: ; %Flow3
; NEW_RBS-NEXT: ; in Loop: Header=BB16_3 Depth=1
; NEW_RBS-NEXT: s_waitcnt_depctr 0xffe3
-; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s4
-; NEW_RBS-NEXT: s_andn2_b32 s1, s1, exec_lo
-; NEW_RBS-NEXT: s_and_b32 s3, exec_lo, s3
-; NEW_RBS-NEXT: s_or_b32 s1, s1, s3
+; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s7
+; NEW_RBS-NEXT: s_andn2_b32 s2, s5, exec_lo
+; NEW_RBS-NEXT: s_and_b32 s3, exec_lo, s6
+; NEW_RBS-NEXT: s_or_b32 s5, s2, s3
; NEW_RBS-NEXT: .LBB16_2: ; %Flow
; NEW_RBS-NEXT: ; in Loop: Header=BB16_3 Depth=1
-; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s2
-; NEW_RBS-NEXT: s_and_b32 s2, exec_lo, s1
-; NEW_RBS-NEXT: s_or_b32 s0, s2, s0
-; NEW_RBS-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
+; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; NEW_RBS-NEXT: s_and_b32 s1, exec_lo, s5
+; NEW_RBS-NEXT: s_or_b32 s4, s1, s4
+; NEW_RBS-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; NEW_RBS-NEXT: s_cbranch_execz .LBB16_6
; NEW_RBS-NEXT: .LBB16_3: ; %A
; NEW_RBS-NEXT: ; =>This Inner Loop Header: Depth=1
-; NEW_RBS-NEXT: v_ashrrev_i32_e32 v7, 31, v6
-; NEW_RBS-NEXT: s_andn2_b32 s1, s1, exec_lo
-; NEW_RBS-NEXT: s_and_b32 s2, exec_lo, exec_lo
-; NEW_RBS-NEXT: s_or_b32 s1, s1, s2
-; NEW_RBS-NEXT: v_lshlrev_b64 v[7:8], 2, v[6:7]
-; NEW_RBS-NEXT: v_add_co_u32 v9, vcc_lo, v2, v7
-; NEW_RBS-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v3, v8, vcc_lo
-; NEW_RBS-NEXT: global_load_dword v9, v[9:10], off
+; NEW_RBS-NEXT: s_ashr_i32 s1, s0, 31
+; NEW_RBS-NEXT: s_lshl_b64 s[2:3], s[0:1], 2
+; NEW_RBS-NEXT: s_andn2_b32 s1, s5, exec_lo
+; NEW_RBS-NEXT: v_mov_b32_e32 v7, s3
+; NEW_RBS-NEXT: v_mov_b32_e32 v6, s2
+; NEW_RBS-NEXT: s_and_b32 s5, exec_lo, exec_lo
+; NEW_RBS-NEXT: s_or_b32 s5, s1, s5
+; NEW_RBS-NEXT: v_add_co_u32 v6, vcc_lo, v2, v6
+; NEW_RBS-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v3, v7, vcc_lo
+; NEW_RBS-NEXT: global_load_dword v6, v[6:7], off
; NEW_RBS-NEXT: s_waitcnt vmcnt(0)
-; NEW_RBS-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9
-; NEW_RBS-NEXT: s_and_saveexec_b32 s2, vcc_lo
+; NEW_RBS-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; NEW_RBS-NEXT: s_and_saveexec_b32 s1, vcc_lo
; NEW_RBS-NEXT: s_cbranch_execz .LBB16_2
; NEW_RBS-NEXT: ; %bb.4: ; %B
; NEW_RBS-NEXT: ; in Loop: Header=BB16_3 Depth=1
-; NEW_RBS-NEXT: v_add_co_u32 v9, vcc_lo, v4, v7
-; NEW_RBS-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v5, v8, vcc_lo
-; NEW_RBS-NEXT: s_mov_b32 s3, exec_lo
-; NEW_RBS-NEXT: global_load_dword v9, v[9:10], off
+; NEW_RBS-NEXT: v_mov_b32_e32 v7, s3
+; NEW_RBS-NEXT: v_mov_b32_e32 v6, s2
+; NEW_RBS-NEXT: s_mov_b32 s6, exec_lo
+; NEW_RBS-NEXT: v_add_co_u32 v6, vcc_lo, v4, v6
+; NEW_RBS-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v5, v7, vcc_lo
+; NEW_RBS-NEXT: global_load_dword v6, v[6:7], off
; NEW_RBS-NEXT: s_waitcnt vmcnt(0)
-; NEW_RBS-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9
-; NEW_RBS-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; NEW_RBS-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6
+; NEW_RBS-NEXT: s_and_saveexec_b32 s7, vcc_lo
; NEW_RBS-NEXT: s_cbranch_execz .LBB16_1
; NEW_RBS-NEXT: ; %bb.5: ; %loop.body
; NEW_RBS-NEXT: ; in Loop: Header=BB16_3 Depth=1
-; NEW_RBS-NEXT: v_add_co_u32 v7, vcc_lo, v0, v7
-; NEW_RBS-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, v1, v8, vcc_lo
-; NEW_RBS-NEXT: v_add_nc_u32_e32 v10, 1, v6
-; NEW_RBS-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x64, v6
-; NEW_RBS-NEXT: s_andn2_b32 s3, s3, exec_lo
-; NEW_RBS-NEXT: global_load_dword v9, v[7:8], off
-; NEW_RBS-NEXT: v_mov_b32_e32 v6, v10
-; NEW_RBS-NEXT: s_and_b32 s5, exec_lo, vcc_lo
-; NEW_RBS-NEXT: s_or_b32 s3, s3, s5
+; NEW_RBS-NEXT: v_mov_b32_e32 v7, s3
+; NEW_RBS-NEXT: v_mov_b32_e32 v6, s2
+; NEW_RBS-NEXT: s_add_i32 s2, s0, 1
+; NEW_RBS-NEXT: s_cmpk_lt_u32 s0, 0x64
+; NEW_RBS-NEXT: s_cselect_b32 s0, exec_lo, 0
+; NEW_RBS-NEXT: v_add_co_u32 v6, vcc_lo, v0, v6
+; NEW_RBS-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v1, v7, vcc_lo
+; NEW_RBS-NEXT: s_andn2_b32 s3, s6, exec_lo
+; NEW_RBS-NEXT: s_and_b32 s0, exec_lo, s0
+; NEW_RBS-NEXT: s_or_b32 s6, s3, s0
+; NEW_RBS-NEXT: global_load_dword v8, v[6:7], off
+; NEW_RBS-NEXT: s_mov_b32 s0, s2
; NEW_RBS-NEXT: s_waitcnt vmcnt(0)
-; NEW_RBS-NEXT: v_add_nc_u32_e32 v9, 1, v9
-; NEW_RBS-NEXT: global_store_dword v[7:8], v9, off
+; NEW_RBS-NEXT: v_add_nc_u32_e32 v8, 1, v8
+; NEW_RBS-NEXT: global_store_dword v[6:7], v8, off
; NEW_RBS-NEXT: s_branch .LBB16_1
; NEW_RBS-NEXT: .LBB16_6: ; %exit
; NEW_RBS-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.mir
index a064b2d2c103f9..415eb723638042 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.mir
@@ -1194,15 +1194,15 @@ body: |
; NEW_RBS-NEXT: {{ $}}
; NEW_RBS-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[DEF1]](s1), %bb.0, %13(s1), %bb.3
; NEW_RBS-NEXT: [[PHI1:%[0-9]+]]:sgpr(s32) = G_PHI %67(s32), %bb.3, [[C]](s32), %bb.0
- ; NEW_RBS-NEXT: [[PHI2:%[0-9]+]]:vgpr(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.3
+ ; NEW_RBS-NEXT: [[PHI2:%[0-9]+]]:sgpr(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.3
; NEW_RBS-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
- ; NEW_RBS-NEXT: [[C1:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 31
- ; NEW_RBS-NEXT: [[ASHR:%[0-9]+]]:vgpr(s32) = G_ASHR [[PHI2]], [[C1]](s32)
- ; NEW_RBS-NEXT: [[MV3:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[PHI2]](s32), [[ASHR]](s32)
+ ; NEW_RBS-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 31
+ ; NEW_RBS-NEXT: [[ASHR:%[0-9]+]]:sgpr(s32) = G_ASHR [[PHI2]], [[C1]](s32)
+ ; NEW_RBS-NEXT: [[MV3:%[0-9]+]]:sgpr(s64) = G_MERGE_VALUES [[PHI2]](s32), [[ASHR]](s32)
; NEW_RBS-NEXT: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; NEW_RBS-NEXT: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[C2]](s32)
- ; NEW_RBS-NEXT: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[MV3]], [[COPY7]](s32)
- ; NEW_RBS-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV1]], [[SHL]](s64)
+ ; NEW_RBS-NEXT: [[SHL:%[0-9]+]]:sgpr(s64) = G_SHL [[MV3]], [[C2]](s32)
+ ; NEW_RBS-NEXT: [[COPY7:%[0-9]+]]:vgpr(s64) = COPY [[SHL]](s64)
+ ; NEW_RBS-NEXT: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV1]], [[COPY7]](s64)
; NEW_RBS-NEXT: [[LOAD:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32), addrspace 1)
; NEW_RBS-NEXT: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; NEW_RBS-NEXT: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY [[C3]](s32)
@@ -1221,9 +1221,9 @@ body: |
; NEW_RBS-NEXT: successors: %bb.4(0x40000000), %bb.5(0x40000000)
; NEW_RBS-NEXT: {{ $}}
; NEW_RBS-NEXT: [[C5:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; NEW_RBS-NEXT: [[COPY11:%[0-9]+]]:vgpr(s32) = COPY [[C5]](s32)
- ; NEW_RBS-NEXT: [[SHL1:%[0-9]+]]:vgpr(s64) = G_SHL [[MV3]], [[COPY11]](s32)
- ; NEW_RBS-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV2]], [[SHL1]](s64)
+ ; NEW_RBS-NEXT: [[SHL1:%[0-9]+]]:sgpr(s64) = G_SHL [[MV3]], [[C5]](s32)
+ ; NEW_RBS-NEXT: [[COPY11:%[0-9]+]]:vgpr(s64) = COPY [[SHL1]](s64)
+ ; NEW_RBS-NEXT: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV2]], [[COPY11]](s64)
; NEW_RBS-NEXT: [[LOAD1:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32), addrspace 1)
; NEW_RBS-NEXT: [[C6:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0
; NEW_RBS-NEXT: [[COPY12:%[0-9]+]]:vgpr(s32) = COPY [[C6]](s32)
@@ -1239,7 +1239,7 @@ body: |
; NEW_RBS-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
; NEW_RBS-NEXT: {{ $}}
; NEW_RBS-NEXT: [[PHI3:%[0-9]+]]:sreg_32(s1) = PHI [[S_OR_B32_]](s1), %bb.1, %43(s1), %bb.5
- ; NEW_RBS-NEXT: [[PHI4:%[0-9]+]]:vgpr(s32) = G_PHI %44(s32), %bb.5, [[DEF]](s32), %bb.1
+ ; NEW_RBS-NEXT: [[PHI4:%[0-9]+]]:sgpr(s32) = G_PHI %44(s32), %bb.5, [[DEF]](s32), %bb.1
; NEW_RBS-NEXT: [[COPY15:%[0-9]+]]:sreg_32(s1) = COPY [[PHI3]](s1)
; NEW_RBS-NEXT: [[COPY16:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF]](s32)
; NEW_RBS-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY16]](s32)
@@ -1253,35 +1253,34 @@ body: |
; NEW_RBS-NEXT: successors: %bb.5(0x80000000)
; NEW_RBS-NEXT: {{ $}}
; NEW_RBS-NEXT: [[C8:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 2
- ; NEW_RBS-NEXT: [[COPY19:%[0-9]+]]:vgpr(s32) = COPY [[C8]](s32)
- ; NEW_RBS-NEXT: [[SHL2:%[0-9]+]]:vgpr(s64) = G_SHL [[MV3]], [[COPY19]](s32)
- ; NEW_RBS-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV]], [[SHL2]](s64)
+ ; NEW_RBS-NEXT: [[SHL2:%[0-9]+]]:sgpr(s64) = G_SHL [[MV3]], [[C8]](s32)
+ ; NEW_RBS-NEXT: [[COPY19:%[0-9]+]]:vgpr(s64) = COPY [[SHL2]](s64)
+ ; NEW_RBS-NEXT: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[MV]], [[COPY19]](s64)
; NEW_RBS-NEXT: [[LOAD2:%[0-9]+]]:vgpr(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32), addrspace 1)
; NEW_RBS-NEXT: [[C9:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
; NEW_RBS-NEXT: [[COPY20:%[0-9]+]]:vgpr(s32) = COPY [[C9]](s32)
; NEW_RBS-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[LOAD2]], [[COPY20]]
; NEW_RBS-NEXT: G_STORE [[ADD]](s32), [[PTR_ADD2]](p1) :: (store (s32), addrspace 1)
- ; NEW_RBS-NEXT: [[COPY21:%[0-9]+]]:vgpr(s32) = COPY [[C9]](s32)
- ; NEW_RBS-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[PHI2]], [[COPY21]]
+ ; NEW_RBS-NEXT: [[ADD1:%[0-9]+]]:sgpr(s32) = G_ADD [[PHI2]], [[C9]]
; NEW_RBS-NEXT: [[C10:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 100
- ; NEW_RBS-NEXT: [[COPY22:%[0-9]+]]:vgpr(s32) = COPY [[C10]](s32)
- ; NEW_RBS-NEXT: [[ICMP2:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ult), [[PHI2]](s32), [[COPY22]]
- ; NEW_RBS-NEXT: [[COPY23:%[0-9]+]]:sreg_32(s1) = COPY [[ICMP2]](s1)
+ ; NEW_RBS-NEXT: [[ICMP2:%[0-9]+]]:sgpr(s32) = G_ICMP intpred(ult), [[PHI2]](s32), [[C10]]
+ ; NEW_RBS-NEXT: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[ICMP2]], [[C9]]
+ ; NEW_RBS-NEXT: [[COPY_VCC_SCC2:%[0-9]+]]:sreg_32(s1) = G_COPY_VCC_SCC [[AND]](s32)
; NEW_RBS-NEXT: [[S_ANDN2_B32_1:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY14]](s1), $exec_lo, implicit-def $scc
- ; NEW_RBS-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY23]](s1), implicit-def $scc
+ ; NEW_RBS-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY_VCC_SCC2]](s1), implicit-def $scc
; NEW_RBS-NEXT: [[S_OR_B32_1:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_1]](s1), [[S_AND_B32_1]](s1), implicit-def $scc
; NEW_RBS-NEXT: {{ $}}
; NEW_RBS-NEXT: bb.5:
; NEW_RBS-NEXT: successors: %bb.3(0x80000000)
; NEW_RBS-NEXT: {{ $}}
; NEW_RBS-NEXT: [[PHI5:%[0-9]+]]:sreg_32(s1) = PHI [[COPY_VCC_SCC1]](s1), %bb.2, [[S_OR_B32_1]](s1), %bb.4
- ; NEW_RBS-NEXT: [[PHI6:%[0-9]+]]:vgpr(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
- ; NEW_RBS-NEXT: [[COPY24:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
- ; NEW_RBS-NEXT: [[COPY25:%[0-9]+]]:sreg_32(s1) = COPY [[COPY24]](s1)
- ; NEW_RBS-NEXT: [[COPY26:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF1]](s32)
- ; NEW_RBS-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY26]](s32)
+ ; NEW_RBS-NEXT: [[PHI6:%[0-9]+]]:sgpr(s32) = G_PHI [[ADD1]](s32), %bb.4, [[DEF]](s32), %bb.2
+ ; NEW_RBS-NEXT: [[COPY21:%[0-9]+]]:sreg_32(s1) = COPY [[PHI5]](s1)
+ ; NEW_RBS-NEXT: [[COPY22:%[0-9]+]]:sreg_32(s1) = COPY [[COPY21]](s1)
+ ; NEW_RBS-NEXT: [[COPY23:%[0-9]+]]:sgpr(s32) = COPY [[SI_IF1]](s32)
+ ; NEW_RBS-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[COPY23]](s32)
; NEW_RBS-NEXT: [[S_ANDN2_B32_2:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY10]](s1), $exec_lo, implicit-def $scc
- ; NEW_RBS-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY25]](s1), implicit-def $scc
+ ; NEW_RBS-NEXT: [[S_AND_B32_2:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY22]](s1), implicit-def $scc
; NEW_RBS-NEXT: [[S_OR_B32_2:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_2]](s1), [[S_AND_B32_2]](s1), implicit-def $scc
; NEW_RBS-NEXT: G_BR %bb.3
; NEW_RBS-NEXT: {{ $}}