[llvm] [AArch64] Optimize when storing symmetry constants (PR #93717)
David Green via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 25 07:22:24 PDT 2024
================
@@ -2252,6 +2260,155 @@ MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
return E;
}
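+// Returns true if MI is one of the instructions that materialize a symmetric
+// constant into BaseReg: a MOVZXi/MOVKXi defining BaseReg, or an ORRXrs that
+// copies the lower 32 bits of BaseReg into its upper 32 bits.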
+static bool isSymmetricLoadCandidate(MachineInstr &MI, Register BaseReg) {
+  auto MatchBaseReg = [&](unsigned Count) {
+    for (unsigned I = 0; I < Count; I++) {
+      auto OpI = MI.getOperand(I);
+      if (OpI.isReg() && OpI.getReg() != BaseReg)
+        return false;
+    }
+    return true;
+  };
+
+  unsigned Opc = MI.getOpcode();
+  switch (Opc) {
+  default:
+    return false;
+  case AArch64::MOVZXi:
+    return MatchBaseReg(1);
+  case AArch64::MOVKXi:
+    return MatchBaseReg(2);
+  case AArch64::ORRXrs:
+    MachineOperand &Imm = MI.getOperand(3);
+    // The fourth operand of the ORR must be 32, which means this is a
+    // 32-bit symmetric constant load.
+    // e.g. renamable $x8 = ORRXrs $x8, $x8, 32
+    if (MatchBaseReg(3) && Imm.isImm() && Imm.getImm() == 32)
+      return true;
+  }
+
+  return false;
+}
+
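+// Erase the instructions in MIs that only built the (now redundant) upper
+// half of the constant, and replace the 64-bit STRXui with an STPWi that
+// stores the 32-bit sub-register twice at the equivalent offset.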
+MachineBasicBlock::iterator AArch64LoadStoreOpt::doFoldSymmetryConstantLoad(
+    MachineInstr &MI, SmallVectorImpl<MachineBasicBlock::iterator> &MIs,
+    int UpperLoadIdx, int Accumulated) {
+  MachineBasicBlock::iterator I = MI.getIterator();
+  MachineBasicBlock::iterator E = I->getParent()->end();
+  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
+  MachineBasicBlock *MBB = MI.getParent();
+
+  if (!UpperLoadIdx) {
+    // The ORR means the preceding instructions load only the lower 32 bits of
+    // the constant, so only the ORR needs to be removed.
+    (*MIs.begin())->eraseFromParent();
+  } else {
+    // We need to remove the MOVs for the upper 32 bits because we know these
+    // instructions are part of the symmetric constant.
+    int Index = 0;
+    for (auto MI = MIs.begin(); Index < UpperLoadIdx; ++MI, Index++) {
+      (*MI)->eraseFromParent();
+    }
+  }
+
+  Register BaseReg = getLdStRegOp(MI).getReg();
+  const MachineOperand MO = AArch64InstrInfo::getLdStBaseOp(MI);
+  Register DstRegW = TRI->getSubReg(BaseReg, AArch64::sub_32);
+  unsigned DstRegState = getRegState(MI.getOperand(0));
+  int Offset = AArch64InstrInfo::getLdStOffsetOp(MI).getImm();
+  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(AArch64::STPWi))
+      .addReg(DstRegW, DstRegState)
+      .addReg(DstRegW, DstRegState)
+      .addReg(MO.getReg(), getRegState(MO))
+      .addImm(Offset * 2)
+      .setMemRefs(MI.memoperands())
+      .setMIFlags(MI.getFlags());
+  I->eraseFromParent();
+  return NextI;
+}
+
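+// Try to fold the materialization of a symmetric 64-bit constant (upper and
+// lower 32 bits identical) followed by a 64-bit store into a 32-bit store
+// pair. For example (an illustrative sketch):
+//   $x8 = MOVZXi 42, 0
+//   $x8 = ORRXrs $x8, $x8, 32
+//   STRXui $x8, $x0, 0
+// =>
+//   $x8 = MOVZXi 42, 0
+//   STPWi $w8, $w8, $x0, 0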
+bool AArch64LoadStoreOpt::tryFoldSymmetryConstantLoad(
+    MachineBasicBlock::iterator &I, unsigned Limit) {
+  MachineInstr &MI = *I;
+  if (MI.getOpcode() != AArch64::STRXui)
+    return false;
+
+  MachineBasicBlock::iterator MBBI = I;
+  MachineBasicBlock::iterator B = I->getParent()->begin();
+  if (MBBI == B)
+    return false;
+
+  TypeSize Scale(0U, false), Width(0U, false);
+  int64_t MinOffset, MaxOffset;
+  if (!AArch64InstrInfo::getMemOpInfo(AArch64::STPWi, Scale, Width, MinOffset,
+                                      MaxOffset))
+    return false;
+
+  // We replace the STRX instruction, which stores 64 bits, with the STPW
+  // instruction, which stores two consecutive 32 bits. therefore, we compare
----------------
davemgreen wrote:
therefore -> Therefore
https://github.com/llvm/llvm-project/pull/93717
More information about the llvm-commits
mailing list