[llvm-branch-commits] [llvm] AMDGPU/GlobalISel: RBSelect (PR #112863)
Matt Arsenault via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sat Oct 19 07:45:40 PDT 2024
================
@@ -63,4 +70,189 @@ char &llvm::AMDGPURBSelectID = AMDGPURBSelect::ID;
FunctionPass *llvm::createAMDGPURBSelectPass() { return new AMDGPURBSelect(); }
-bool AMDGPURBSelect::runOnMachineFunction(MachineFunction &MF) { return true; }
+bool shouldRBSelect(MachineInstr &MI) { // true if MI needs a register bank assigned
+  if (isTargetSpecificOpcode(MI.getOpcode()) && !MI.isPreISelOpcode())
+    return false; // already a target instruction, not a generic opcode
+
+  if (MI.getOpcode() == AMDGPU::PHI || MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
+    return false; // PHIs and implicit defs are skipped here
+
+  if (MI.isInlineAsm())
+    return false; // inline asm operands are not rb-selected
+
+  return true;
+}
+
+void setRB(MachineInstr &MI, MachineOperand &DefOP, MachineIRBuilder B,
+           MachineRegisterInfo &MRI, const RegisterBank &RB) {
+  Register Reg = DefOP.getReg();
+  // A register that already has a register class was assigned one during
+  // pre-inst-selection of another instruction. Maybe a cross-bank copy was
+  // required, so we insert a copy that can be removed later. This simplifies
+  // the post-rb-legalize artifact combiner and avoids special-casing patterns.
+  if (MRI.getRegClassOrNull(Reg)) {
+    LLT Ty = MRI.getType(Reg);
+    Register NewReg = MRI.createVirtualRegister({&RB, Ty});
+    DefOP.setReg(NewReg); // MI now defines the bank-annotated register
+
+    auto &MBB = *MI.getParent();
+    B.setInsertPt(MBB, MI.isPHI() ? MBB.getFirstNonPHI()
+                                  : std::next(MI.getIterator()));
+    B.buildCopy(Reg, NewReg); // old class-annotated reg = COPY new reg
+
+    // The problem was discovered for a uniform S1 that was used both as a
+    // lane mask (vcc) and as a regular sgpr S1.
+    // - The lane-mask (vcc) use was by si_if; this use is divergent and needs
+    //   a non-trivial sgpr-S1-to-vcc copy. But pre-inst-selection of si_if
+    //   sets sreg_64_xexec(S1) on the def of the uniform S1, making it a mask.
+    // - The regular sgpr S1 (uniform) instruction is now broken since it uses
+    //   sreg_64_xexec(S1), which is divergent.
+
+    // "Clear" register classes from uses on generic instructions and put
+    // register banks on them instead.
+    for (auto &UseMI : MRI.use_instructions(Reg)) {
+      if (shouldRBSelect(UseMI)) {
+        for (MachineOperand &Op : UseMI.operands()) {
+          if (Op.isReg() && Op.isUse() && Op.getReg() == Reg)
+            Op.setReg(NewReg); // rewrite generic uses to the banked register
+        }
+      }
+    }
+
+  } else {
+    MRI.setRegBank(Reg, RB); // no class assigned yet: just set the bank
+  }
+}
+
+void setRBUse(MachineInstr &MI, MachineOperand &UseOP, MachineIRBuilder B,
+              MachineRegisterInfo &MRI, const RegisterBank &RB) {
+  Register Reg = UseOP.getReg();
+
+  LLT Ty = MRI.getType(Reg);
+  Register NewReg = MRI.createVirtualRegister({&RB, Ty}); // fresh vreg with bank RB
+  UseOP.setReg(NewReg); // MI now uses the bank-annotated register
+
+  if (MI.isPHI()) { // for a PHI use, insert the copy right after Reg's def,
+    auto DefMI = MRI.getVRegDef(Reg)->getIterator(); // past PHIs/labels there
+    MachineBasicBlock *DefMBB = DefMI->getParent();
+    B.setInsertPt(*DefMBB, DefMBB->SkipPHIsAndLabels(std::next(DefMI)));
+  } else {
+    B.setInstr(MI); // otherwise insert the copy directly before MI
+  }
+
+  B.buildCopy(NewReg, Reg); // NewReg = COPY Reg
+}
+
+// Temporal divergence copy: a COPY to vgpr with an implicit use of $exec
+// inside the cycle.
+// Note: uniformity analysis does not consider registers with a vgpr def to be
+// divergent (you can have a uniform value in a vgpr).
+// - TODO: an implicit use of $exec could be implemented as an indicator that
+//   the instruction is divergent.
+bool isTemporalDivergenceCopy(Register Reg, MachineRegisterInfo &MRI) {
+ MachineInstr *MI = MRI.getVRegDef(Reg);
+ if (MI->getOpcode() == AMDGPU::COPY) {
+ for (auto Op : MI->implicit_operands()) {
+ if (!Op.isReg())
+ continue;
+ Register Reg = Op.getReg();
+ if (Reg == AMDGPU::EXEC) {
----------------
arsenm wrote:
What about wave32? This compares only against `AMDGPU::EXEC` (the wave64 exec mask); in wave32 mode the exec mask is `EXEC_LO`, so that case would be missed here.
https://github.com/llvm/llvm-project/pull/112863
More information about the llvm-branch-commits
mailing list