[llvm] Co-issue packed instructions by unpacking (PR #151704)
Jeffrey Byrnes via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 25 11:48:15 PDT 2025
================
@@ -225,6 +254,712 @@ bool GCNPreRAOptimizationsImpl::processReg(Register Reg) {
return true;
}
+bool GCNPreRAOptimizationsImpl::isUnpackingSupportedInstr(
+ MachineInstr &MI) const {
+ unsigned Opcode = MI.getOpcode();
+ switch (Opcode) {
+ case AMDGPU::V_PK_ADD_F32:
+ case AMDGPU::V_PK_MUL_F32:
+ case AMDGPU::V_PK_MUL_F16:
+ case AMDGPU::V_PK_ADD_F16:
+ case AMDGPU::V_PK_FMA_F32:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+uint16_t GCNPreRAOptimizationsImpl::mapToUnpackedOpcode(MachineInstr &I) {
+ unsigned Opcode = I.getOpcode();
+ // Use the 64-bit encoding so that VOP3 instructions can be used.
+ // VOP3 instructions allow VOP3P source modifiers to be translated to VOP3;
+ // e32 instructions are VOP2 and do not allow source modifiers.
+ switch (Opcode) {
+ case AMDGPU::V_PK_ADD_F32:
+ return AMDGPU::V_ADD_F32_e64;
+ case AMDGPU::V_PK_MUL_F32:
+ return AMDGPU::V_MUL_F32_e64;
+ case AMDGPU::V_PK_ADD_F16:
+ return AMDGPU::V_ADD_F16_e64;
+ case AMDGPU::V_PK_MUL_F16:
+ return AMDGPU::V_MUL_F16_e64;
+ case AMDGPU::V_PK_FMA_F32:
+ return AMDGPU::V_FMA_F32_e64;
+ default:
+ return std::numeric_limits<uint16_t>::max();
+ }
+}
+
+bool GCNPreRAOptimizationsImpl::createListOfPackedInstr(
+ MachineInstr &BeginMI, SetVector<MachineInstr *> &InstrsToUnpack,
+ uint16_t NumMFMACycles) {
+ auto *BB = BeginMI.getParent();
+ auto *MF = BB->getParent();
+ int NumInst = 0;
+
+ auto E = BB->end();
+
+ int TotalCyclesBetweenCandidates = 0;
+ auto SchedModel = TII->getSchedModel();
+ for (auto I = std::next(BeginMI.getIterator()); I != E; ++I) {
+ MachineInstr &Instr = *I;
+ const MCSchedClassDesc *InstrSchedClassDesc =
+ SchedModel.resolveSchedClass(&Instr);
+ TotalCyclesBetweenCandidates +=
+ SchedModel.getWriteProcResBegin(InstrSchedClassDesc)->ReleaseAtCycle;
+ if (Instr.isMetaInstruction())
+ continue;
+
+ if (Instr.isTerminator())
+ return false;
+
+ if (TotalCyclesBetweenCandidates > NumMFMACycles)
+ return false;
+
+ if ((isUnpackingSupportedInstr(Instr)) && TII->isNeverCoissue(Instr)) {
+ if ((Instr.getOpcode() == AMDGPU::V_PK_MUL_F16) ||
+ (Instr.getOpcode() == AMDGPU::V_PK_ADD_F16)) {
+ // Unpacking packed F16 instructions requires multiple instructions:
+ // instructions are issued to extract the lower and upper 16 bits of each
+ // operand, then two unpacked arithmetic instructions are issued, followed
+ // by additional instructions to write the results back into the original
+ // destination register. The following sequence of instructions is issued:
+
+ // The next two are needed to move the masks into VGPRs. Ideally,
+ // immediates would be used, but if one of the source operands is an
+ // SGPR, immediates are not allowed; hence the need to move the masks
+ // into VGPRs.
+
+ // vgpr_32 = V_MOV_B32_e32 65535
+ // vgpr_32 = V_MOV_B32_e32 16
+
+ // vgpr_32 = V_AND_B32_e32 sub1:sreg_64, vgpr_32
+ // vgpr_32 = V_LSHRREV_B32_e64 vgpr_32, sub1:sreg_64
+ // vgpr_32 = V_AND_B32_e32 vgpr_32, vgpr_32
+ // vgpr_32 = V_LSHRREV_B32_e64 vgpr_32, vgpr_32
+ // vgpr_32 = V_MUL_F16_e64 0, killed vgpr_32, 0, killed vgpr_32, 0, 0
+ // vgpr_32 = V_MUL_F16_e64 0, killed vgpr_32, 0, killed vgpr_32, 0, 0
+ // vgpr_32 = V_LSHLREV_B32_e64 vgpr_32, vgpr_32
+ // dst_reg = V_OR_B32_e64 vgpr_32, vgpr_32
+
+ // The MOV instructions above only need to be issued once. Once they are
+ // issued, the IsF16MaskSet flag is set, and subsequent unpacking only
+ // needs to issue the remaining instructions. The latency of each
+ // instruction above is 1 cycle; this is hard coded here to reduce code
+ // complexity.
+ if (IsF16MaskSet)
+ TotalCyclesBetweenCandidates += 7;
+ else
+ TotalCyclesBetweenCandidates += 9;
+ } else
+ TotalCyclesBetweenCandidates += 1;
+
+ if (TotalCyclesBetweenCandidates <= NumMFMACycles)
+ InstrsToUnpack.insert(&Instr);
+ }
+ }
+ return true;
+}
+
+void GCNPreRAOptimizationsImpl::insertUnpackedF32MI(
+ MachineInstr &I, MachineOperand &DstMO, MachineOperand &LoSrcMO1,
+ MachineOperand &LoSrcMO2, MachineOperand &HiSrcMO1,
+ MachineOperand &HiSrcMO2, bool IsVreg_64) {
+
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ MachineFunction &MF = *MBB.getParent();
+ const DebugLoc &DL = I.getDebugLoc();
+ Register DstReg = DstMO.getReg();
+
+ unsigned SrcSubIdx1 =
+ TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub0);
+ unsigned SrcSubIdx2 =
+ TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub0);
+ unsigned DestSubIdx =
+ TRI->composeSubRegIndices(DstMO.getSubReg(), AMDGPU::sub0);
+
+ const MCInstrDesc InstrDesc = I.getDesc();
+
+ int ClampIdx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::clamp);
+ int64_t ClampVal = I.getOperand(ClampIdx).getImm();
+
+ int Src0_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src0_modifiers);
+ int Src1_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src1_modifiers);
+ unsigned Src0_Mods = I.getOperand(Src0_modifiers_Idx).getImm();
+ unsigned Src1_Mods = I.getOperand(Src1_modifiers_Idx).getImm();
+
+ // Packed instructions (VOP3P) do not support abs. It is okay to ignore them.
+ unsigned Lo_src0_mods = 0;
+ unsigned Lo_src1_mods = 0;
+ uint16_t UnpackedOpcode = mapToUnpackedOpcode(I);
+ if (UnpackedOpcode == std::numeric_limits<uint16_t>::max())
+ return;
+
+ MachineInstrBuilder Op0L_Op1L = BuildMI(MBB, I, DL, TII->get(UnpackedOpcode));
+ Op0L_Op1L.addDef(DstReg, 0, DestSubIdx); // vdst
+ if (Src0_Mods & SISrcMods::NEG) {
+ Lo_src0_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src0_mods); // src0_modifiers
+ if (Src0_Mods & SISrcMods::OP_SEL_0) {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(LoSrcMO1.getReg(), 0, Src0SubIdx); // src0
+ } else {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub0);
+ // src0: if op_sel == 0, select sub0 of reg:sub0_sub1.
+ Op0L_Op1L.addReg(LoSrcMO1.getReg(), 0, Src0SubIdx);
+ }
+ if (Src1_Mods & SISrcMods::NEG) {
+ Lo_src1_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src1_mods); // src1_modifiers
+ if (Src1_Mods & SISrcMods::OP_SEL_0) {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(LoSrcMO2.getReg(), 0, Src1SubIdx); // src1
+ } else {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub0);
+ // src1: if op_sel == 0, select sub0 of reg:sub0_sub1.
+ Op0L_Op1L.addReg(LoSrcMO2.getReg(), 0, Src1SubIdx);
+ }
+ Op0L_Op1L.addImm(ClampVal); // clamp
+ // Packed instructions do not support output modifiers; it is safe to
+ // assign 0 here.
+ Op0L_Op1L.addImm(0); // omod
+
+ if (I.getOperand(0).isUndef()) {
+ Op0L_Op1L->getOperand(0).setIsUndef();
+ }
+ LIS->InsertMachineInstrInMaps(*Op0L_Op1L);
+ SrcSubIdx1 = TRI->composeSubRegIndices(LoSrcMO1.getSubReg(), AMDGPU::sub1);
+ SrcSubIdx2 = TRI->composeSubRegIndices(LoSrcMO2.getSubReg(), AMDGPU::sub1);
+ DestSubIdx = TRI->composeSubRegIndices(DstMO.getSubReg(), AMDGPU::sub1);
+
+ // Packed instructions (VOP3P) do not support abs. It is okay to ignore them.
+ unsigned Hi_src0_mods = 0;
+ unsigned Hi_src1_mods = 0;
+ MachineInstrBuilder Op0H_Op1H = BuildMI(MBB, I, DL, TII->get(UnpackedOpcode));
+ Op0H_Op1H.addDef(DstReg, 0, DestSubIdx); // vdst
+ if (Src0_Mods & SISrcMods::NEG_HI) {
+ Hi_src0_mods |= SISrcMods::NEG_HI;
+ }
+ Op0H_Op1H.addImm(Hi_src0_mods); // src0_modifiers
+ if (Src0_Mods & SISrcMods::OP_SEL_1) {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(HiSrcMO1.getSubReg(), AMDGPU::sub1);
+ Op0H_Op1H.addReg(HiSrcMO1.getReg(), 0, Src0SubIdx); // src0
+ } else {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(HiSrcMO1.getSubReg(), AMDGPU::sub0);
+ // src0: if op_sel_hi == 0, select sub0 of reg:sub0_sub1.
+ Op0H_Op1H.addReg(HiSrcMO1.getReg(), 0, Src0SubIdx);
+ }
+ if (Src1_Mods & SISrcMods::NEG_HI) {
+ Hi_src1_mods |= SISrcMods::NEG_HI;
+ }
+ Op0H_Op1H.addImm(Hi_src1_mods); // src1_modifiers
+ if (Src1_Mods & SISrcMods::OP_SEL_1) {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(HiSrcMO2.getSubReg(), AMDGPU::sub1);
+ Op0H_Op1H.addReg(HiSrcMO2.getReg(), 0, Src1SubIdx); // src1
+ } else {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(HiSrcMO2.getSubReg(), AMDGPU::sub0);
+ // src1: if op_sel_hi == 0, select sub0 of reg:sub0_sub1.
+ Op0H_Op1H.addReg(HiSrcMO2.getReg(), 0, Src1SubIdx);
+ }
+ Op0H_Op1H.addImm(ClampVal); // clamp
+ // Packed instructions do not support output modifiers; it is safe to
+ // assign 0 here.
+ Op0H_Op1H.addImm(0); // omod
+ LIS->InsertMachineInstrInMaps(*Op0H_Op1H);
+
+ if (I.getFlag(MachineInstr::MIFlag::NoFPExcept)) {
+ Op0L_Op1L->setFlag(MachineInstr::MIFlag::NoFPExcept);
+ Op0H_Op1H->setFlag(MachineInstr::MIFlag::NoFPExcept);
+ }
+ LIS->RemoveMachineInstrFromMaps(I);
+ I.eraseFromParent();
+ LIS->removeInterval(DstReg);
+ LIS->createAndComputeVirtRegInterval(DstReg);
+ return;
+}
+
+void GCNPreRAOptimizationsImpl::processFMAF32Unpacking(MachineInstr &I) {
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ MachineFunction &MF = *MBB.getParent();
+
+ Register DstReg = I.getOperand(0).getReg();
+ Register SrcReg1 = I.getOperand(2).getReg();
+ Register SrcReg2 = I.getOperand(4).getReg();
+ Register SrcReg3 = I.getOperand(6).getReg();
+ MachineOperand &DstMO = I.getOperand(0);
+ MachineOperand &SrcMO1 = I.getOperand(2);
+ MachineOperand &SrcMO2 = I.getOperand(4);
+ MachineOperand &SrcMO3 = I.getOperand(6);
+
+ const DebugLoc &DL = I.getDebugLoc();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(I.getOperand(0).getReg());
+ const TargetRegisterClass *Src0RC = MRI.getRegClass(I.getOperand(2).getReg());
+ const TargetRegisterClass *Src1RC = MRI.getRegClass(I.getOperand(4).getReg());
+ const TargetRegisterClass *Src2RC = MRI.getRegClass(I.getOperand(6).getReg());
+
+ bool IsVReg64 = (DstRC->getID() == AMDGPU::VReg_64_Align2RegClassID);
+
+ unsigned SrcSubIdx1 =
+ TRI->composeSubRegIndices(SrcMO1.getSubReg(), AMDGPU::sub0);
+ unsigned SrcSubIdx2 =
+ TRI->composeSubRegIndices(SrcMO2.getSubReg(), AMDGPU::sub0);
+ unsigned SrcSubIdx3 =
+ TRI->composeSubRegIndices(SrcMO3.getSubReg(), AMDGPU::sub0);
+ unsigned DestSubIdx =
+ TRI->composeSubRegIndices(DstMO.getSubReg(), AMDGPU::sub0);
+
+ const MCInstrDesc InstrDesc = I.getDesc();
+ int ClampIdx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::clamp);
+ int64_t ClampVal = I.getOperand(ClampIdx).getImm();
+ int Src0_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src0_modifiers);
+ int Src1_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src1_modifiers);
+ int Src2_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src2_modifiers);
+ unsigned Src0_Mods = I.getOperand(Src0_modifiers_Idx).getImm();
+ unsigned Src1_Mods = I.getOperand(Src1_modifiers_Idx).getImm();
+ unsigned Src2_Mods = I.getOperand(Src2_modifiers_Idx).getImm();
+
+ // Packed instructions (VOP3P) do not support abs. It is okay to ignore them.
+ unsigned Lo_src0_mods = 0;
+ unsigned Lo_src1_mods = 0;
+ unsigned Lo_src2_mods = 0;
+ uint16_t UnpackedOpcode = mapToUnpackedOpcode(I);
+ if (UnpackedOpcode == std::numeric_limits<uint16_t>::max())
+ return;
+
+ MachineInstrBuilder Op0L_Op1L = BuildMI(MBB, I, DL, TII->get(UnpackedOpcode));
+ Op0L_Op1L.addDef(DstReg, 0, DestSubIdx); // vdst
+ if (Src0_Mods & SISrcMods::NEG) {
+ Lo_src0_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src0_mods); // src0_modifiers
+ if (Src0_Mods & SISrcMods::OP_SEL_0) {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(SrcMO1.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(SrcMO1.getReg(), 0, Src0SubIdx); // src0
+ } else {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(SrcMO1.getSubReg(), AMDGPU::sub0);
+ // if op_sel == 0, select register 0 of reg:sub0_sub1
+ Op0L_Op1L.addReg(SrcMO1.getReg(), 0, Src0SubIdx);
+ }
+
+ if (Src1_Mods & SISrcMods::NEG) {
+ Lo_src1_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src1_mods); // src1_modifiers
+ if (Src1_Mods & SISrcMods::OP_SEL_0) {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(SrcMO2.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(SrcMO2.getReg(), 0, Src1SubIdx); // src1
+ } else {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(SrcMO2.getSubReg(), AMDGPU::sub0);
+ // src1: if op_sel == 0, select sub0 of reg:sub0_sub1.
+ Op0L_Op1L.addReg(SrcMO2.getReg(), 0, Src1SubIdx);
+ }
+
+ if (Src2_Mods & SISrcMods::NEG) {
+ Lo_src2_mods |= SISrcMods::NEG;
+ }
+ Op0L_Op1L.addImm(Lo_src2_mods); // src2_modifiers
+ if (Src2_Mods & SISrcMods::OP_SEL_0) {
+ unsigned Src2SubIdx =
+ TRI->composeSubRegIndices(SrcMO3.getSubReg(), AMDGPU::sub1);
+ Op0L_Op1L.addReg(SrcMO3.getReg(), 0, Src2SubIdx);
+ } else {
+ unsigned Src2SubIdx =
+ TRI->composeSubRegIndices(SrcMO3.getSubReg(), AMDGPU::sub0);
+ // if op_sel_hi == 0, select register 0 of reg:sub0_sub1
+ Op0L_Op1L.addReg(SrcMO3.getReg(), 0, Src2SubIdx);
+ }
+ Op0L_Op1L.addImm(ClampVal); // clamp
+ // Packed instructions do not support output modifiers; it is safe to
+ // assign 0 here.
+ Op0L_Op1L.addImm(0); // omod
+
+ if (I.getOperand(0).isUndef()) {
+ Op0L_Op1L->getOperand(0).setIsUndef();
+ }
+
+ LIS->InsertMachineInstrInMaps(*Op0L_Op1L);
+
+ SrcSubIdx1 = TRI->composeSubRegIndices(SrcMO1.getSubReg(), AMDGPU::sub1);
+ SrcSubIdx2 = TRI->composeSubRegIndices(SrcMO2.getSubReg(), AMDGPU::sub1);
+ SrcSubIdx3 = TRI->composeSubRegIndices(SrcMO3.getSubReg(), AMDGPU::sub1);
+ DestSubIdx = TRI->composeSubRegIndices(DstMO.getSubReg(), AMDGPU::sub1);
+
+ // Packed instructions (VOP3P) do not support abs. It is safe to ignore them.
+ unsigned Hi_src0_mods = 0;
+ unsigned Hi_src1_mods = 0;
+ unsigned Hi_src2_mods = 0;
+
+ MachineInstrBuilder Op0H_Op1H = BuildMI(MBB, I, DL, TII->get(UnpackedOpcode));
+ Op0H_Op1H.addDef(DstReg, 0, DestSubIdx); // vdst
+ if (Src0_Mods & SISrcMods::NEG_HI) {
+ Hi_src0_mods |= SISrcMods::NEG_HI;
+ }
+ Op0H_Op1H.addImm(Hi_src0_mods); // src0_modifiers
+ if (Src0_Mods & SISrcMods::OP_SEL_1) {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(SrcMO1.getSubReg(), AMDGPU::sub1);
+ Op0H_Op1H.addReg(SrcMO1.getReg(), 0, Src0SubIdx); // src0
+ } else {
+ unsigned Src0SubIdx =
+ TRI->composeSubRegIndices(SrcMO1.getSubReg(), AMDGPU::sub0);
+ // src0: if op_sel_hi == 0, select sub0 of reg:sub0_sub1.
+ Op0H_Op1H.addReg(SrcMO1.getReg(), 0, Src0SubIdx);
+ }
+
+ if (Src1_Mods & SISrcMods::NEG_HI) {
+ Hi_src1_mods |= SISrcMods::NEG_HI;
+ }
+ Op0H_Op1H.addImm(Hi_src1_mods); // src1_modifiers
+
+ if (Src1_Mods & SISrcMods::OP_SEL_1) {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(SrcMO2.getSubReg(), AMDGPU::sub1);
+ Op0H_Op1H.addReg(SrcMO2.getReg(), 0, Src1SubIdx); // src1
+ } else {
+ unsigned Src1SubIdx =
+ TRI->composeSubRegIndices(SrcMO2.getSubReg(), AMDGPU::sub0);
+ // src1: if op_sel_hi == 0, select sub0 of reg:sub0_sub1.
+ Op0H_Op1H.addReg(SrcMO2.getReg(), 0, Src1SubIdx);
+ }
+
+ if (Src2_Mods & SISrcMods::NEG_HI) {
+ Hi_src2_mods |= SISrcMods::NEG_HI;
+ }
+ Op0H_Op1H.addImm(Hi_src2_mods); // src2_modifiers
+
+ if (Src2_Mods & SISrcMods::OP_SEL_1) {
+ unsigned Src2SubIdx =
+ TRI->composeSubRegIndices(SrcMO3.getSubReg(), AMDGPU::sub1);
+ Op0H_Op1H.addReg(SrcMO3.getReg(), 0, Src2SubIdx); // src2
+ } else {
+ unsigned Src2SubIdx =
+ TRI->composeSubRegIndices(SrcMO3.getSubReg(), AMDGPU::sub0);
+ // src2: if op_sel_hi == 0, select sub0 of reg:sub0_sub1.
+ Op0H_Op1H.addReg(SrcMO3.getReg(), 0, Src2SubIdx);
+ }
+ Op0H_Op1H.addImm(ClampVal); // clamp
+ // Packed instructions do not support output modifiers; it is safe to
+ // assign 0 here.
+ Op0H_Op1H.addImm(0); // omod
+ LIS->InsertMachineInstrInMaps(*Op0H_Op1H);
+
+ if (I.getFlag(MachineInstr::MIFlag::NoFPExcept)) {
+ Op0L_Op1L->setFlag(MachineInstr::MIFlag::NoFPExcept);
+ Op0H_Op1H->setFlag(MachineInstr::MIFlag::NoFPExcept);
+ }
+ LIS->RemoveMachineInstrFromMaps(I);
+ I.eraseFromParent();
+ LIS->removeInterval(DstReg);
+ LIS->createAndComputeVirtRegInterval(DstReg);
+ return;
+}
+
+void GCNPreRAOptimizationsImpl::processF32Unpacking(MachineInstr &I) {
+ if (I.getOpcode() == AMDGPU::V_PK_FMA_F32) {
+ processFMAF32Unpacking(I);
+ return;
+ }
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ MachineFunction &MF = *MBB.getParent();
+
+ Register DstReg = I.getOperand(0).getReg();
+ Register SrcReg1 = I.getOperand(2).getReg();
+ Register SrcReg2 = I.getOperand(4).getReg();
+ MachineOperand &DstMO = I.getOperand(0);
+ MachineOperand &SrcMO1 = I.getOperand(2);
+ MachineOperand &SrcMO2 = I.getOperand(4);
+
+ const DebugLoc &DL = I.getDebugLoc();
+ const TargetRegisterClass *DstRC = MRI.getRegClass(I.getOperand(0).getReg());
+ const TargetRegisterClass *Src0RC = MRI.getRegClass(I.getOperand(2).getReg());
+ const TargetRegisterClass *Src1RC = MRI.getRegClass(I.getOperand(4).getReg());
+
+ bool IsVReg64 = (DstRC->getID() == AMDGPU::VReg_64_Align2RegClassID);
+ insertUnpackedF32MI(I, DstMO, SrcMO1, SrcMO2, SrcMO1, SrcMO2, IsVReg64);
+ return;
+}
+
+void GCNPreRAOptimizationsImpl::processF16Unpacking(MachineInstr &I,
+ uint16_t AvailableBudget) {
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ MachineOperand &DstMO = I.getOperand(0);
+ MachineOperand &SrcMO0 = I.getOperand(2);
+ MachineOperand &SrcMO1 = I.getOperand(4);
+ Register DstReg = DstMO.getReg();
+ Register SrcReg0 = SrcMO0.getReg();
+ Register SrcReg1 = SrcMO1.getReg();
+ const DebugLoc &DL = I.getDebugLoc();
+
+ const TargetRegisterClass *RC = &AMDGPU::VGPR_32RegClass;
+ auto SchedModel = TII->getSchedModel();
+
+ auto BuildImm = [&](uint32_t Val) -> std::pair<Register, uint16_t> {
+ Register ImmReg = MRI.createVirtualRegister(RC);
+ auto NewMI = BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), ImmReg)
+ .addImm(Val);
+ LIS->InsertMachineInstrInMaps(*NewMI);
+ const MCSchedClassDesc *SchedClassDesc =
+ SchedModel.resolveSchedClass(NewMI);
+ uint16_t LatencyCycles =
+ SchedModel.getWriteProcResBegin(SchedClassDesc)->ReleaseAtCycle;
+ return {ImmReg, LatencyCycles};
+ };
+
+ if (!IsF16MaskSet) {
+ std::pair<Register, uint16_t> RegAndLatency = BuildImm(0x0000FFFF);
+ MaskLo = RegAndLatency.first; // mask for lower 16 bits
+ RegAndLatency = BuildImm(16);
+ ShiftAmt = RegAndLatency.first; // shift amount to access the upper 16 bits
+ IsF16MaskSet = true;
+ }
+
+ Register Src0_Lo = MRI.createVirtualRegister(RC);
+ Register Src1_Lo = MRI.createVirtualRegister(RC);
+ Register Src0_Hi = MRI.createVirtualRegister(RC);
+ Register Src1_Hi = MRI.createVirtualRegister(RC);
+
+ unsigned SubRegID = 0;
+ if (SrcMO0.getSubReg())
+ SubRegID = SrcMO0.getSubReg();
+
+ int Src0_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src0_modifiers);
+ int Src1_modifiers_Idx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::src1_modifiers);
+ unsigned Src0_Mods = I.getOperand(Src0_modifiers_Idx).getImm();
+ unsigned Src1_Mods = I.getOperand(Src1_modifiers_Idx).getImm();
+ int ClampIdx =
+ AMDGPU::getNamedOperandIdx(I.getOpcode(), AMDGPU::OpName::clamp);
+ int64_t ClampVal = I.getOperand(ClampIdx).getImm();
+
+ // handle op_sel for src0
+ if (Src0_Mods & SISrcMods::OP_SEL_0) {
+ // if op_sel is set, select higher 16 bits and copy into lower 16 bits of
+ // new vgpr
+ MachineInstrBuilder LoInput0_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), Src0_Lo)
+ .addReg(ShiftAmt);
+ if (SubRegID)
+ LoInput0_MI.addReg(SrcReg0, 0, SubRegID);
+ else
+ LoInput0_MI.addReg(SrcReg0);
+ LIS->InsertMachineInstrInMaps(*LoInput0_MI);
+ } else {
+ // if op_sel is not set, select lower 16 bits and copy into lower 16 bits of
+ // new vgpr
+ MachineInstrBuilder LoInput0_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_AND_B32_e32), Src0_Lo);
+ if (SubRegID)
+ LoInput0_MI.addReg(SrcReg0, 0, SubRegID);
+ else
+ LoInput0_MI.addReg(SrcReg0);
+ LoInput0_MI.addReg(MaskLo);
+ LIS->InsertMachineInstrInMaps(*LoInput0_MI);
+ }
+
+ // handle op_sel_hi for src0
+ if (Src0_Mods & SISrcMods::OP_SEL_1) {
+ // if op_sel_hi is set, select higher 16 bits and copy into lower 16 bits of
+ // new vgpr
+ MachineInstrBuilder HiInput0_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), Src0_Hi)
+ .addReg(ShiftAmt);
+ if (SubRegID)
+ HiInput0_MI.addReg(SrcReg0, 0, SubRegID);
+ else
+ HiInput0_MI.addReg(SrcReg0);
+ LIS->InsertMachineInstrInMaps(*HiInput0_MI);
+ } else {
+ // if op_sel_hi is not set, select lower 16 bits and copy into lower 16 bits
+ // of new vgpr
+ MachineInstrBuilder HiInput0_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_AND_B32_e32), Src0_Hi);
+ if (SubRegID)
+ HiInput0_MI.addReg(SrcReg0, 0, SubRegID);
+ else
+ HiInput0_MI.addReg(SrcReg0);
+ HiInput0_MI.addReg(MaskLo);
+ LIS->InsertMachineInstrInMaps(*HiInput0_MI);
+ }
+
+ SubRegID = 0;
+ if (SrcMO1.getSubReg())
+ SubRegID = SrcMO1.getSubReg();
+ // handle op_sel for src1
+ if (Src1_Mods & SISrcMods::OP_SEL_0) {
+ // if op_sel is set, select higher 16 bits and copy into lower 16 bits of
+ // new vgpr
+ MachineInstrBuilder LoInput1_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), Src1_Lo)
+ .addReg(ShiftAmt);
+ if (SubRegID)
+ LoInput1_MI.addReg(SrcReg1, 0, SubRegID);
+ else
+ LoInput1_MI.addReg(SrcReg1);
+ LIS->InsertMachineInstrInMaps(*LoInput1_MI);
+ } else {
+ // if op_sel is not set, select lower 16 bits and copy into lower 16 bits of
+ // new vgpr
+ MachineInstrBuilder LoInput1_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_AND_B32_e32), Src1_Lo);
+ if (SubRegID)
+ LoInput1_MI.addReg(SrcReg1, 0, SubRegID);
+ else
+ LoInput1_MI.addReg(SrcReg1);
+ LoInput1_MI.addReg(MaskLo);
+ LIS->InsertMachineInstrInMaps(*LoInput1_MI);
+ }
+
+ // handle op_sel_hi for src1
+ if (Src1_Mods & SISrcMods::OP_SEL_1) {
+ // if op_sel_hi is set, select higher 16 bits and copy into lower 16 bits of
+ // new vgpr
+ MachineInstrBuilder HiInput1_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), Src1_Hi)
+ .addReg(ShiftAmt);
+ if (SubRegID)
+ HiInput1_MI.addReg(SrcReg1, 0, SubRegID);
+ else
+ HiInput1_MI.addReg(SrcReg1);
+ LIS->InsertMachineInstrInMaps(*HiInput1_MI);
+ } else {
+ // if op_sel_hi is not set, select lower 16 bits and copy into lower 16 bits
+ // of new vgpr
+ MachineInstrBuilder HiInput1_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_AND_B32_e32), Src1_Hi);
+ if (SubRegID)
+ HiInput1_MI.addReg(SrcReg1, 0, SubRegID);
+ else
+ HiInput1_MI.addReg(SrcReg1);
+ HiInput1_MI.addReg(MaskLo);
+ LIS->InsertMachineInstrInMaps(*HiInput1_MI);
+ }
+
+ Register LoMul = MRI.createVirtualRegister(RC);
+ Register HiMul = MRI.createVirtualRegister(RC);
+
+ unsigned Lo_src0_mods = 0;
+ unsigned Lo_src1_mods = 0;
+ uint16_t UnpackedOpcode = mapToUnpackedOpcode(I);
+
+ if (UnpackedOpcode == std::numeric_limits<uint16_t>::max())
+ return;
+ // Unpacked instructions
+ MachineInstrBuilder LoMul_MI =
+ BuildMI(MBB, I, DL, TII->get(UnpackedOpcode), LoMul);
+
+ if (Src0_Mods & SISrcMods::NEG)
+ Lo_src0_mods |= SISrcMods::NEG;
+
+ LoMul_MI.addImm(Lo_src0_mods); // src0_modifiers
+ LoMul_MI.addReg(Src0_Lo, RegState::Kill); // src0
+
+ if (Src1_Mods & SISrcMods::NEG)
+ Lo_src1_mods |= SISrcMods::NEG;
+
+ LoMul_MI.addImm(Lo_src1_mods); // src1_modifiers
+ LoMul_MI.addReg(Src1_Lo, RegState::Kill); // src1
+ LoMul_MI.addImm(ClampVal); // clamp
+ // Packed instructions do not support output modifiers; it is safe to
+ // assign 0 here.
+ LoMul_MI.addImm(0); // omod
+
+ // unpacked instruction with VOP3 encoding for Hi bits
+ unsigned Hi_src0_mods = 0;
+ unsigned Hi_src1_mods = 0;
+
+ MachineInstrBuilder HiMul_MI =
+ BuildMI(MBB, I, DL, TII->get(UnpackedOpcode), HiMul);
+ if (Src0_Mods & SISrcMods::NEG_HI)
+ Hi_src0_mods |= SISrcMods::NEG_HI;
+
+ HiMul_MI.addImm(Hi_src0_mods); // src0_modifiers
+ HiMul_MI.addReg(Src0_Hi,
+ RegState::Kill); // select higher 16 bits if op_sel_hi is set
+
+ if (Src1_Mods & SISrcMods::NEG_HI)
+ Hi_src1_mods |= SISrcMods::NEG_HI;
+
+ HiMul_MI.addImm(Hi_src1_mods); // src1_modifiers
+ HiMul_MI.addReg(
+ Src1_Hi,
+ RegState::Kill); // select higher 16 bits from src1 if op_sel_hi is set
+ HiMul_MI.addImm(ClampVal); // clamp
+ // Packed instructions do not support output modifiers; it is safe to
+ // assign 0 here.
+ HiMul_MI.addImm(0); // omod
+
+ // Shift HiMul left by 16
+ Register HiMulShifted = MRI.createVirtualRegister(RC);
+ MachineInstrBuilder HiMulShifted_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_LSHLREV_B32_e64), HiMulShifted)
+ .addReg(ShiftAmt)
+ .addReg(HiMul);
+
+ SubRegID = 0;
+ if (DstMO.getSubReg())
+ SubRegID = DstMO.getSubReg();
+ // OR LoMul | (HiMul << 16)
+ MachineInstrBuilder RewriteBackToDst_MI =
+ BuildMI(MBB, I, DL, TII->get(AMDGPU::V_OR_B32_e64));
+ if (SubRegID) {
+ if (DstMO.isUndef()) {
+ RewriteBackToDst_MI.addDef(DstReg, RegState::Undef, SubRegID);
+ } else {
+ RewriteBackToDst_MI.addDef(DstReg, 0, SubRegID);
+ }
+ } else {
+ if (DstMO.isUndef()) {
+ RewriteBackToDst_MI.addDef(DstReg, RegState::Undef);
+ } else {
+ RewriteBackToDst_MI.addDef(DstReg);
+ }
+ }
+ RewriteBackToDst_MI.addReg(LoMul);
+ RewriteBackToDst_MI.addReg(HiMulShifted);
+
+ LIS->InsertMachineInstrInMaps(*LoMul_MI);
+ LIS->InsertMachineInstrInMaps(*HiMul_MI);
+ LIS->InsertMachineInstrInMaps(*HiMulShifted_MI);
+ LIS->InsertMachineInstrInMaps(*RewriteBackToDst_MI);
+ LIS->RemoveMachineInstrFromMaps(I);
+ I.eraseFromParent();
+ LIS->removeInterval(DstReg);
+ LIS->createAndComputeVirtRegInterval(DstReg);
----------------
jrbyrnes wrote:
Are you also [re]computing the virtRegInterval for the use regs -- these may shift slightly due to SlotIndex numbering. Or, in the case where we are creating new regs to hold temporary results for 16 bit unpack, we'll need LiveIntervals for all those.
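A minimal sketch of one way to handle that, not code from this patch: assume `LIS` is the pass's LiveIntervals member and reuse the register names created in `processF16Unpacking` (the exact set of temporaries to cover is an assumption here).

```cpp
// Sketch only: recompute intervals for the packed instruction's source
// registers and create intervals for the temporaries introduced by the F16
// unpacking. createAndComputeVirtRegInterval expects no existing interval,
// hence any existing interval is removed first.
auto RecomputeLiveInterval = [&](Register Reg) {
  if (!Reg.isVirtual())
    return;
  if (LIS->hasInterval(Reg))
    LIS->removeInterval(Reg);
  LIS->createAndComputeVirtRegInterval(Reg);
};

// Source registers of the erased V_PK_* instruction; their SlotIndex
// positions may have shifted after the rewrite.
RecomputeLiveInterval(SrcReg0);
RecomputeLiveInterval(SrcReg1);

// Temporaries created for the 16-bit unpack.
for (Register Tmp : {MaskLo, ShiftAmt, Src0_Lo, Src0_Hi, Src1_Lo, Src1_Hi,
                     LoMul, HiMul, HiMulShifted})
  RecomputeLiveInterval(Tmp);
```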
https://github.com/llvm/llvm-project/pull/151704