[llvm] Co-issue packed instructions by unpacking (PR #151704)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 4 14:47:39 PDT 2025
================
@@ -225,6 +247,313 @@ bool GCNPreRAOptimizationsImpl::processReg(Register Reg) {
return true;
}
+bool GCNPreRAOptimizationsImpl::isUnpackingSupportedInstr(MachineInstr &MI) const {
+  // Only the packed FP32 add/mul VOP3P instructions are handled by the
+  // unpacking transform at the moment.
+  const unsigned Opc = MI.getOpcode();
+  return Opc == AMDGPU::V_PK_ADD_F32 || Opc == AMDGPU::V_PK_MUL_F32;
+}
+
+uint16_t GCNPreRAOptimizationsImpl::mapToUnpackedOpcode(MachineInstr &I) {
+  // Use the 64-bit (VOP3) encodings so VOP3P source modifiers can be
+  // translated; the e32 forms are VOP2 and do not accept source modifiers.
+  switch (I.getOpcode()) {
+  case AMDGPU::V_PK_ADD_F32:
+    return AMDGPU::V_ADD_F32_e64;
+  case AMDGPU::V_PK_MUL_F32:
+    return AMDGPU::V_MUL_F32_e64;
+  default:
+    // Sentinel: no unpacked equivalent for this opcode.
+    return std::numeric_limits<uint16_t>::max();
+  }
+}
+
+SmallVector<MachineInstr *, 2>
+GCNPreRAOptimizationsImpl::copyToVregAndInsertMI(MachineInstr &I,
+                                                 unsigned SGPRSrcPos) {
+  // Split the 64-bit source register at operand \p SGPRSrcPos into two
+  // 32-bit COPYs targeting the sub0/sub1 halves of a fresh 64-bit VGPR
+  // pair, inserted before \p I. Returns the two new COPYs (lo half first),
+  // already registered with LiveIntervals.
+  SmallVector<MachineInstr *, 2> MIList;
+
+  MachineBasicBlock &MBB = *I.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  const DebugLoc &DL = I.getDebugLoc();
+
+  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass);
+
+  // Lo half. The first write to TmpReg defines only sub0, so the def must
+  // carry the undef flag for the untouched lanes. Passing the subreg index
+  // directly to addDef replaces the previous no-op setReg(getReg()) plus
+  // compose-with-NoSubRegister dance.
+  MachineInstr *CopySGPR1 =
+      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY))
+          .addDef(TmpReg, RegState::Undef, AMDGPU::sub0)
+          .addReg(I.getOperand(SGPRSrcPos).getReg(), 0, AMDGPU::sub0);
+  LIS->InsertMachineInstrInMaps(*CopySGPR1);
+  MIList.push_back(CopySGPR1);
+
+  // Hi half: TmpReg is now live, so no undef flag on this def.
+  MachineInstr *CopySGPR2 =
+      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY))
+          .addDef(TmpReg, 0, AMDGPU::sub1)
+          .addReg(I.getOperand(SGPRSrcPos).getReg(), 0, AMDGPU::sub1);
+  LIS->InsertMachineInstrInMaps(*CopySGPR2);
+  MIList.push_back(CopySGPR2);
+  return MIList;
+}
+
+bool GCNPreRAOptimizationsImpl::createListOfPackedInstr(
+    MachineInstr &BeginMI, DenseSet<MachineInstr *> &instrsToUnpack) {
+  // Scan forward from the MFMA at \p BeginMI and record in
+  // \p instrsToUnpack the packed instructions that can be co-issued while
+  // the MFMA occupies its functional unit. Returns false if a terminator is
+  // reached or the scanned work no longer fits in the MFMA's issue window.
+  auto *BB = BeginMI.getParent();
+  auto E = BB->end();
+
+  auto SchedModel = TII->getSchedModel();
+  const MCSchedClassDesc *SCD = SchedModel.resolveSchedClass(&BeginMI);
+  // Number of cycles the MFMA holds its resource; this bounds how many
+  // cycles of other instructions we may overlap with it.
+  const int NumMFMACycles =
+      SchedModel.getWriteProcResBegin(SCD)->ReleaseAtCycle;
+
+  int TotalCyclesBetweenCandidates = 0;
+  for (auto I = std::next(BeginMI.getIterator()); I != E; ++I) {
+    MachineInstr &Instr = *I;
+
+    // Meta instructions (debug values etc.) issue no cycles; skip them
+    // BEFORE accumulating so they do not shrink the co-issue window.
+    if (Instr.isMetaInstruction())
+      continue;
+
+    if (Instr.isTerminator())
+      return false;
+
+    const MCSchedClassDesc *InstrSCD = SchedModel.resolveSchedClass(&Instr);
+    TotalCyclesBetweenCandidates +=
+        SchedModel.getWriteProcResBegin(InstrSCD)->ReleaseAtCycle;
+    if (TotalCyclesBetweenCandidates > NumMFMACycles)
+      return false;
+
+    if (isUnpackingSupportedInstr(Instr) && TII->isNeverCoissue(Instr)) {
+      // Unpacking turns one packed op into two; budget the extra issue slot.
+      TotalCyclesBetweenCandidates += 1;
+      instrsToUnpack.insert(&Instr);
+    }
+  }
+  return true;
+}
+
+SmallVector<MachineInstr *, 2> GCNPreRAOptimizationsImpl::insertUnpackedMI(
+ MachineInstr &I, MachineOperand &DstMO, MachineOperand &LoSrcMO1, MachineOperand &LoSrcMO2,
+ MachineOperand &HiSrcMO1, MachineOperand &HiSrcMO2, bool isVreg_64) {
+
+ SmallVector<MachineInstr *, 2> MIList;
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ MachineFunction &MF = *MBB.getParent();
+ const DebugLoc &DL = I.getDebugLoc();
+ Register DstReg = DstMO.getReg();
+
+ unsigned SrcSubIdx1 =
+ TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub0);
+ unsigned SrcSubIdx2 =
+ TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub0);
+ unsigned DestSubIdx =
+ TRI->composeSubRegIndices(DstMO.getSubReg(), AMDGPU::sub0);
+
+ const MCInstrDesc instrDesc = I.getDesc();
+
+ int clampIdx = AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::clamp);
+ int64_t clampVal = I.getOperand(clampIdx).getImm();
+
+ int src0_modifiers_Idx = AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src0_modifiers);
+ int src1_modifiers_Idx = AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src1_modifiers);
+ unsigned src0_Mods = I.getOperand(src0_modifiers_Idx).getImm();
+ unsigned src1_Mods = I.getOperand(src1_modifiers_Idx).getImm();
+
+ //don't worry about abs values. Packed instructions (VOP3P) do not support them
+ unsigned Lo_src0_mods = 0;
+ unsigned Lo_src1_mods = 0;
+ uint16_t unpackedOpcode = mapToUnpackedOpcode(I);
+ MachineInstrBuilder Op0L_Op1L = BuildMI(MBB, I, DL, TII->get(unpackedOpcode));
+ Op0L_Op1L.addDef(DstReg, 0, DestSubIdx); //vdst
+ if (src0_Mods & SISrcMods::OP_SEL_0) {
+ if (src0_Mods & SISrcMods::NEG) {
+ Lo_src0_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src0_mods); //src0_modifiers
+ unsigned Src0SubIdx = TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(LoSrcMO1.getReg(), 0, Src0SubIdx); //src0
+ }
+ else {
+ Op0L_Op1L.addImm(Lo_src0_mods); //src0_modifiers
+ unsigned Src0SubIdx = TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub0);
+ Op0L_Op1L.addReg(LoSrcMO1.getReg(), 0, Src0SubIdx); //src0 //if op_sel == 0, select register 0 of reg:sub0_sub1
+ }
+
+ if (src1_Mods & SISrcMods::OP_SEL_0) {
+ if (src1_Mods & SISrcMods::NEG) {
+ Lo_src1_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src1_mods); //src0_modifiers
+ unsigned Src1SubIdx = TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(LoSrcMO2.getReg(), 0, Src1SubIdx); //src0
+ }
+ else {
+ Op0L_Op1L.addImm(Lo_src1_mods); //src0_modifiers
+ unsigned Src1SubIdx = TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub0);
+ Op0L_Op1L.addReg(LoSrcMO2.getReg(), 0, Src1SubIdx); //src0 //if op_sel_hi == 0, select register 0 of reg:sub0_sub1
+ }
+ Op0L_Op1L.addImm(clampVal); //clamp
+ //packed instructions do not support output modifiers. safe to assign them 0 for this use case
+ Op0L_Op1L.addImm(0); //omod
+
+ if (isVreg_64) {
+ Op0L_Op1L->getOperand(0).setIsUndef();
+ }
+ else {
----------------
bcahoon wrote:
} else if {
https://github.com/llvm/llvm-project/pull/151704
More information about the llvm-commits
mailing list