[llvm-commits] [PATCH] More Spill Annotations
David Greene
dag at cray.com
Mon Nov 23 07:01:47 PST 2009
On Friday 20 November 2009 16:22, you wrote:
> This patch adds information to spill/reload comments indicating whether
> the spilled or reloaded value is vector or scalar. This is helpful when
> doing static analysis of performance issues in generated code. It's only
> implemented for X86; experts on other architectures will have to fill in
> the hooks for their targets.
>
> Please review. Thanks!
Ping!
-Dave
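
For reviewers who'd rather see the effect than read the diff: with the patch
applied, spill/reload comments in the generated x86 assembly look roughly like
the lines below. The instructions and stack offsets are made up for
illustration; only the comment text is what the patch emits.

    movaps  %xmm0, 16(%rsp)        # Vector Spill
    movss   (%rsp), %xmm1          # Scalar Reload
    addps   32(%rsp), %xmm2        # Vector Folded Reload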
> Index: include/llvm/Target/TargetInstrInfo.h
> ===================================================================
> --- include/llvm/Target/TargetInstrInfo.h (revision 89484)
> +++ include/llvm/Target/TargetInstrInfo.h (working copy)
> @@ -142,6 +142,23 @@
> return false;
> }
>
> + /// isVectorInstr - Return true if the instruction is a vector
> + /// operation.
> + virtual bool isVectorInstr(const MachineInstr &MI) const {
> + return false;
> + }
> +
> + /// isVectorOperand - Return true if the operand is of vector type.
> + virtual bool isVectorOperand(const MachineInstr &MI,
> + const MachineOperand *MO) const {
> + return false;
> + }
> +
> + /// isVectorOperand - Return true if the mem operand is of vector type.
> + virtual bool isVectorOperand(const MachineInstr &MI,
> + const MachineMemOperand *MMO) const {
> + return false;
> + }
> +
> /// isIdentityCopy - Return true if the instruction is a copy (or
> /// extract_subreg, insert_subreg, subreg_to_reg) where the source and
> /// destination registers are the same.
> @@ -182,11 +199,13 @@
>
> /// hasLoadFromStackSlot - If the specified machine instruction has
> /// a load from a stack slot, return true along with the FrameIndex
> - /// of the loaded stack slot. If not, return false. Unlike
> + /// of the loaded stack slot and the machine mem operand containing
> + /// the reference. If not, return false. Unlike
> /// isLoadFromStackSlot, this returns true for any instructions that
> /// loads from the stack. This is just a hint, as some cases may be
> /// missed.
> virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
> + const MachineMemOperand *&MMO,
> int &FrameIndex) const {
> return 0;
> }
> @@ -205,17 +224,18 @@
> /// stack locations as well. This uses a heuristic so it isn't
> /// reliable for correctness.
> virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
> - int &FrameIndex) const {
> + int &FrameIndex) const {
> return 0;
> }
>
> /// hasStoreToStackSlot - If the specified machine instruction has a
> /// store to a stack slot, return true along with the FrameIndex of
> - /// the loaded stack slot. If not, return false. Unlike
> - /// isStoreToStackSlot, this returns true for any instructions that
> - /// loads from the stack. This is just a hint, as some cases may be
> - /// missed.
> + /// the stored stack slot and the machine mem operand containing the
> + /// reference. If not, return false. Unlike isStoreToStackSlot,
> + /// this returns true for any instruction that stores to the
> + /// stack. This is just a hint, as some cases may be missed.
> virtual bool hasStoreToStackSlot(const MachineInstr *MI,
> + const MachineMemOperand *&MMO,
> int &FrameIndex) const {
> return 0;
> }
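
To make the new out-parameter concrete, here is a minimal sketch of how a
caller threads the MachineMemOperand through these hooks. It mirrors the
AsmPrinter change below; the helper name isVectorReload is made up for
illustration and assumes the usual CodeGen headers are included.

    // Classify a stack reload as vector or scalar using the new hooks.
    static bool isVectorReload(const TargetInstrInfo *TII,
                               const MachineFrameInfo *FrameInfo,
                               const MachineInstr &MI) {
      const MachineMemOperand *MMO = 0;
      int FI;
      // hasLoadFromStackSlot now also hands back the mem operand that
      // references the stack slot.
      if (TII->hasLoadFromStackSlot(&MI, MMO, FI) &&
          FrameInfo->isSpillSlotObjectIndex(FI))
        return TII->isVectorOperand(MI, MMO);
      return false;
    }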
> Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp
> ===================================================================
> --- lib/CodeGen/AsmPrinter/AsmPrinter.cpp (revision 89484)
> +++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp (working copy)
> @@ -1854,35 +1854,46 @@
>
> // We assume a single instruction only has a spill or reload, not
> // both.
> + const MachineMemOperand *MMO;
> if (TM.getInstrInfo()->isLoadFromStackSlotPostFE(&MI, FI)) {
> if (FrameInfo->isSpillSlotObjectIndex(FI)) {
> + MMO = *MI.memoperands_begin();
> + bool isVector = TM.getInstrInfo()->isVectorOperand(MI, MMO);
> if (Newline) O << '\n';
> O.PadToColumn(MAI->getCommentColumn());
> - O << MAI->getCommentString() << " Reload";
> + O << MAI->getCommentString() << (isVector? " Vector" : " Scalar")
> + << " Reload";
> Newline = true;
> }
> }
> - else if (TM.getInstrInfo()->hasLoadFromStackSlot(&MI, FI)) {
> + else if (TM.getInstrInfo()->hasLoadFromStackSlot(&MI, MMO, FI)) {
> if (FrameInfo->isSpillSlotObjectIndex(FI)) {
> + bool isVector = TM.getInstrInfo()->isVectorOperand(MI, MMO);
> if (Newline) O << '\n';
> O.PadToColumn(MAI->getCommentColumn());
> - O << MAI->getCommentString() << " Folded Reload";
> + O << MAI->getCommentString() << (isVector? " Vector" : " Scalar")
> + << " Folded Reload";
> Newline = true;
> }
> }
> else if (TM.getInstrInfo()->isStoreToStackSlotPostFE(&MI, FI)) {
> if (FrameInfo->isSpillSlotObjectIndex(FI)) {
> + MMO = *MI.memoperands_begin();
> + bool isVector = TM.getInstrInfo()->isVectorOperand(MI, MMO);
> if (Newline) O << '\n';
> O.PadToColumn(MAI->getCommentColumn());
> - O << MAI->getCommentString() << " Spill";
> + O << MAI->getCommentString() << (isVector? " Vector" : " Scalar")
> + << " Spill";
> Newline = true;
> }
> }
> - else if (TM.getInstrInfo()->hasStoreToStackSlot(&MI, FI)) {
> + else if (TM.getInstrInfo()->hasStoreToStackSlot(&MI, MMO, FI)) {
> if (FrameInfo->isSpillSlotObjectIndex(FI)) {
> + bool isVector = TM.getInstrInfo()->isVectorOperand(MI, MMO);
> if (Newline) O << '\n';
> O.PadToColumn(MAI->getCommentColumn());
> - O << MAI->getCommentString() << " Folded Spill";
> + O << MAI->getCommentString() << (isVector? " Vector" : " Scalar")
> + << " Folded Spill";
> Newline = true;
> }
> }
> @@ -1892,9 +1903,11 @@
> if (TM.getInstrInfo()->isMoveInstr(MI, SrcReg, DstReg,
> SrcSubIdx, DstSubIdx)) {
> if (MI.getAsmPrinterFlag(ReloadReuse)) {
> + bool isVector = TM.getInstrInfo()->isVectorInstr(MI);
> if (Newline) O << '\n';
> O.PadToColumn(MAI->getCommentColumn());
> - O << MAI->getCommentString() << " Reload Reuse";
> + O << MAI->getCommentString() << (isVector? " Vector" : " Scalar")
> + << " Reload Reuse";
> Newline = true;
> }
> }
> Index: lib/Target/X86/X86InstrInfo.cpp
> ===================================================================
> --- lib/Target/X86/X86InstrInfo.cpp (revision 89484)
> +++ lib/Target/X86/X86InstrInfo.cpp (working copy)
> @@ -34,6 +34,7 @@
> #include "llvm/MC/MCAsmInfo.h"
>
> #include <limits>
> +#include <cstring>
>
> using namespace llvm;
>
> @@ -711,6 +712,393 @@
> }
> }
>
> +bool X86InstrInfo::isVectorInstr(const MachineInstr &MI) const {
> + // Handle special cases here.
> + switch(MI.getOpcode()) {
> + case X86::MOVDDUPrr:
> + case X86::MOVDDUPrm:
> + case X86::MOVSHDUPrr:
> + case X86::MOVSHDUPrm:
> + case X86::MOVSLDUPrr:
> + case X86::MOVSLDUPrm:
> + case X86::MPSADBWrri: // Name contains "PS" only by luck; be explicit.
> + case X86::MPSADBWrmi:
> + return true;
> + case X86::MMX_MOVQ2DQrr:
> + return false;
> + }
> +
> + // Look for the common cases.
> + const TargetInstrDesc &InstrDesc = get(MI.getOpcode());
> + const char *Name = InstrDesc.getName();
> + if (std::strstr(Name, "PS") != 0 // SSE packed single
> + || std::strstr(Name, "PD") != 0 // SSE packed double
> + || std::strstr(Name, "DQ") != 0 // SSE packed integer
> + || Name[0] == 'P' // MMX/SSE packed integer
> + || (Name[0] == 'V' && Name[1] == 'P')) // AVX packed integer
> + return true;
> +
> + return false;
> +}
> +
> +bool X86InstrInfo::isVectorOperand(const MachineInstr &MI,
> + const MachineOperand *MO) const {
> + // Handle special cases here. These are for mixed vector/scalar
> + // instructions.
> + if (MO->getType() != MachineOperand::MO_Register
> + && MO->getType() != MachineOperand::MO_FrameIndex
> + && MO->getType() != MachineOperand::MO_ExternalSymbol
> + && MO->getType() != MachineOperand::MO_GlobalAddress)
> + return false;
> +
> + // Operands that are part of memory addresses are never vector.
> + // Come Larrabee, we will need to handle vector address operands so
> + // this will get more complicated.
> + for (unsigned OpNum = 0; OpNum < MI.getNumOperands(); ++OpNum) {
> + if (&MI.getOperand(OpNum) == MO) {
> + switch(MI.getOpcode()) {
> + case X86::EXTRACTPSmr:
> + case X86::EXTRACTPSrr:
> + return OpNum == MI.getNumExplicitOperands() - 1;
> + case X86::INSERTPSrm:
> + case X86::INSERTPSrr:
> + return OpNum == 0;
> + case X86::MOVDDUPrm:
> + case X86::MOVDDUPrr:
> + return OpNum == 0;
> + case X86::MOVHPDmr:
> + return OpNum == MI.getNumExplicitOperands() - 1;
> + case X86::MOVHPDrm:
> + // Address operands are never vector.
> + return false;
> + case X86::MOVLPDmr:
> + return OpNum == MI.getNumExplicitOperands() - 1;
> + case X86::MOVLPDrr:
> + case X86::MOVLPDrm:
> + return OpNum == 0;
> + case X86::MOVMSKPDrr:
> + case X86::MOVMSKPSrr:
> + return OpNum == 1;
> + case X86::PBLENDVBrr0:
> + case X86::PBLENDVBrm0:
> + return !(MO->isReg() && MO->isImplicit());
> + case X86::PCMPESTRIrr:
> + case X86::PCMPESTRIrm:
> + case X86::PCMPESTRIArr:
> + case X86::PCMPESTRIArm:
> + case X86::PCMPESTRICrr:
> + case X86::PCMPESTRICrm:
> + case X86::PCMPESTRIOrr:
> + case X86::PCMPESTRIOrm:
> + case X86::PCMPESTRISrr:
> + case X86::PCMPESTRISrm:
> + case X86::PCMPESTRIZrr:
> + case X86::PCMPESTRIZrm:
> + case X86::PCMPESTRM128MEM:
> + case X86::PCMPESTRM128REG:
> + case X86::PCMPESTRM128rr:
> + case X86::PCMPESTRM128rm:
> + case X86::PCMPISTRIrr:
> + case X86::PCMPISTRIrm:
> + case X86::PCMPISTRIArr:
> + case X86::PCMPISTRIArm:
> + case X86::PCMPISTRICrr:
> + case X86::PCMPISTRICrm:
> + case X86::PCMPISTRIOrr:
> + case X86::PCMPISTRIOrm:
> + case X86::PCMPISTRISrr:
> + case X86::PCMPISTRISrm:
> + case X86::PCMPISTRIZrr:
> + case X86::PCMPISTRIZrm:
> + case X86::PCMPISTRM128MEM:
> + case X86::PCMPISTRM128REG:
> + case X86::PCMPISTRM128rr:
> + case X86::PCMPISTRM128rm:
> + return !(MO->isReg() && MO->isImplicit());
> + case X86::PEXTRBrr:
> + case X86::MMX_PEXTRWri:
> + case X86::PEXTRWri:
> + case X86::PEXTRDrr:
> + case X86::PEXTRQrr:
> + case X86::PEXTRBmr:
> + case X86::PEXTRWmr:
> + case X86::PEXTRDmr:
> + case X86::PEXTRQmr:
> + // Account for the immediate operand.
> + return OpNum == MI.getNumExplicitOperands() - 2;
> + case X86::PINSRBrr:
> + case X86::PINSRBrm:
> + case X86::MMX_PINSRWrri:
> + case X86::PINSRWrri:
> + case X86::MMX_PINSRWrmi:
> + case X86::PINSRWrmi:
> + case X86::PINSRDrr:
> + case X86::PINSRDrm:
> + case X86::PINSRQrr:
> + case X86::PINSRQrm:
> + return OpNum == 0;
> + case X86::PMOVMSKBrr:
> + return OpNum == 1;
> + case X86::MMX_PSLLWrr:
> + case X86::MMX_PSLLWri:
> + case X86::MMX_PSLLWrm:
> + case X86::PSLLWrr:
> + case X86::PSLLWri:
> + case X86::PSLLWrm:
> + case X86::MMX_PSLLDrr:
> + case X86::MMX_PSLLDri:
> + case X86::MMX_PSLLDrm:
> + case X86::PSLLDrr:
> + case X86::PSLLDri:
> + case X86::PSLLDrm:
> + case X86::MMX_PSLLQrr:
> + case X86::MMX_PSLLQri:
> + case X86::MMX_PSLLQrm:
> + case X86::PSLLQrr:
> + case X86::PSLLQri:
> + case X86::PSLLQrm:
> + case X86::MMX_PSRAWrr:
> + case X86::MMX_PSRAWri:
> + case X86::MMX_PSRAWrm:
> + case X86::PSRAWrr:
> + case X86::PSRAWri:
> + case X86::PSRAWrm:
> + case X86::MMX_PSRADrr:
> + case X86::MMX_PSRADri:
> + case X86::MMX_PSRADrm:
> + case X86::PSRADrr:
> + case X86::PSRADri:
> + case X86::PSRADrm:
> + case X86::MMX_PSRLWrr:
> + case X86::MMX_PSRLWri:
> + case X86::MMX_PSRLWrm:
> + case X86::PSRLWrr:
> + case X86::PSRLWri:
> + case X86::PSRLWrm:
> + case X86::MMX_PSRLDrr:
> + case X86::MMX_PSRLDri:
> + case X86::MMX_PSRLDrm:
> + case X86::PSRLDrr:
> + case X86::PSRLDri:
> + case X86::PSRLDrm:
> + case X86::MMX_PSRLQrr:
> + case X86::MMX_PSRLQri:
> + case X86::MMX_PSRLQrm:
> + case X86::PSRLQrr:
> + case X86::PSRLQri:
> + case X86::PSRLQrm:
> + return OpNum == 0;
> + case X86::PTESTrr:
> + case X86::PTESTrm:
> + return !(MO->isReg() && MO->isImplicit());
> + case X86::UNPCKLPDrr:
> + case X86::UNPCKLPDrm:
> + return OpNum == 0;
> + }
> + return isVectorInstr(MI);
> + }
> + }
> +
> + assert(0 && "Did not find operand in instruction!");
> +
> + return false;
> +}
> +
> +bool X86InstrInfo::isVectorOperand(const MachineInstr &MI,
> + const MachineMemOperand *MMO) const {
> + bool found = false;
> + for (MachineInstr::mmo_iterator m = MI.memoperands_begin(),
> + mend = MI.memoperands_end();
> + m != mend;
> + ++m) {
> + if (*m == MMO)
> + found = true;
> + }
> +
> + assert(found && "Wrong machine mem operands for instruction!");
> +
> + // Handle special cases here. These are for mixed vector/scalar
> + // instructions.
> + switch(MI.getOpcode()) {
> + case X86::EXTRACTPSrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::EXTRACTPSmr:
> + assert(MMO->isStore() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::INSERTPSrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::INSERTPSrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVDDUPrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVDDUPrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVHPDmr:
> + assert(MMO->isStore() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVHPDrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVLPDmr:
> + assert(MMO->isStore() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVLPDrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVLPDrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MOVMSKPDrr:
> + case X86::MOVMSKPSrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PBLENDVBrr0:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PBLENDVBrm0:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return true;
> + case X86::PCMPESTRIrm:
> + case X86::PCMPESTRIArm:
> + case X86::PCMPESTRICrm:
> + case X86::PCMPESTRIOrm:
> + case X86::PCMPESTRISrm:
> + case X86::PCMPESTRIZrm:
> + case X86::PCMPESTRM128MEM:
> + case X86::PCMPESTRM128rm:
> + case X86::PCMPISTRIrm:
> + case X86::PCMPISTRIArm:
> + case X86::PCMPISTRICrm:
> + case X86::PCMPISTRIOrm:
> + case X86::PCMPISTRISrm:
> + case X86::PCMPISTRIZrm:
> + case X86::PCMPISTRM128MEM:
> + case X86::PCMPISTRM128rm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PCMPESTRIrr:
> + case X86::PCMPESTRIArr:
> + case X86::PCMPESTRICrr:
> + case X86::PCMPESTRIOrr:
> + case X86::PCMPESTRISrr:
> + case X86::PCMPESTRIZrr:
> + case X86::PCMPESTRM128REG:
> + case X86::PCMPESTRM128rr:
> + case X86::PCMPISTRIrr:
> + case X86::PCMPISTRIArr:
> + case X86::PCMPISTRICrr:
> + case X86::PCMPISTRIOrr:
> + case X86::PCMPISTRISrr:
> + case X86::PCMPISTRIZrr:
> + case X86::PCMPISTRM128REG:
> + case X86::PCMPISTRM128rr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PEXTRBrr:
> + case X86::MMX_PEXTRWri:
> + case X86::PEXTRWri:
> + case X86::PEXTRDrr:
> + case X86::PEXTRQrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PEXTRBmr:
> + case X86::PEXTRWmr:
> + case X86::PEXTRDmr:
> + case X86::PEXTRQmr:
> + assert(MMO->isStore() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PINSRBrr:
> + case X86::MMX_PINSRWrri:
> + case X86::PINSRWrri:
> + case X86::PINSRDrr:
> + case X86::PINSRQrr:
> + assert(MMO->isStore() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PINSRBrm:
> + case X86::MMX_PINSRWrmi:
> + case X86::PINSRWrmi:
> + case X86::PINSRDrm:
> + case X86::PINSRQrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PMOVMSKBrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MMX_PSLLWrm:
> + case X86::PSLLWrm:
> + case X86::MMX_PSLLDrm:
> + case X86::PSLLDrm:
> + case X86::MMX_PSLLQrm:
> + case X86::PSLLQrm:
> + case X86::MMX_PSRAWrm:
> + case X86::PSRAWrm:
> + case X86::MMX_PSRADrm:
> + case X86::PSRADrm:
> + case X86::MMX_PSRLWrm:
> + case X86::PSRLWrm:
> + case X86::MMX_PSRLDrm:
> + case X86::PSRLDrm:
> + case X86::MMX_PSRLQrm:
> + case X86::PSRLQrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::MMX_PSLLWrr:
> + case X86::MMX_PSLLWri:
> + case X86::PSLLWrr:
> + case X86::PSLLWri:
> + case X86::MMX_PSLLDrr:
> + case X86::MMX_PSLLDri:
> + case X86::PSLLDrr:
> + case X86::PSLLDri:
> + case X86::MMX_PSLLQrr:
> + case X86::MMX_PSLLQri:
> + case X86::PSLLQrr:
> + case X86::PSLLQri:
> + case X86::MMX_PSRAWrr:
> + case X86::MMX_PSRAWri:
> + case X86::PSRAWrr:
> + case X86::PSRAWri:
> + case X86::MMX_PSRADrr:
> + case X86::MMX_PSRADri:
> + case X86::PSRADrr:
> + case X86::PSRADri:
> + case X86::MMX_PSRLWrr:
> + case X86::MMX_PSRLWri:
> + case X86::PSRLWrr:
> + case X86::PSRLWri:
> + case X86::MMX_PSRLDrr:
> + case X86::MMX_PSRLDri:
> + case X86::PSRLDrr:
> + case X86::PSRLDri:
> + case X86::MMX_PSRLQrr:
> + case X86::MMX_PSRLQri:
> + case X86::PSRLQrr:
> + case X86::PSRLQri:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PTESTrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::PTESTrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::UNPCKLPDrr:
> + assert(0 && "Wrong machine mem operand for instruction!");
> + return false;
> + case X86::UNPCKLPDrm:
> + assert(MMO->isLoad() && "Wrong machine mem operand for instruction!");
> + return false;
> + }
> +
> + return isVectorInstr(MI);
> +}
> +
> /// isFrameOperand - Return true and the FrameIndex if the specified
> /// operand and follow operands form a reference to the stack frame.
> bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
> @@ -783,12 +1171,14 @@
> if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
> return Reg;
> // Check for post-frame index elimination operations
> - return hasLoadFromStackSlot(MI, FrameIndex);
> + const MachineMemOperand *Dummy;
> + return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
> }
> return 0;
> }
>
> bool X86InstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
> + const MachineMemOperand *&MMO,
> int &FrameIndex) const {
> for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
> oe = MI->memoperands_end();
> @@ -798,6 +1188,7 @@
> if (const FixedStackPseudoSourceValue *Value =
> dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
> FrameIndex = Value->getFrameIndex();
> + MMO = *o;
> return true;
> }
> }
> @@ -819,12 +1210,14 @@
> if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
> return Reg;
> // Check for post-frame index elimination operations
> - return hasStoreToStackSlot(MI, FrameIndex);
> + const MachineMemOperand *Dummy;
> + return hasStoreToStackSlot(MI, Dummy, FrameIndex);
> }
> return 0;
> }
>
> bool X86InstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
> + const MachineMemOperand *&MMO,
> int &FrameIndex) const {
> for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
> oe = MI->memoperands_end();
> @@ -834,6 +1227,7 @@
> if (const FixedStackPseudoSourceValue *Value =
> dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
> FrameIndex = Value->getFrameIndex();
> + MMO = *o;
> return true;
> }
> }
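
For what it's worth, the fallback name heuristic in isVectorInstr above can
be read in isolation as the sketch below (same logic, just extracted; the
function name looksLikeVectorName is made up for illustration):

    #include <cstring>

    // Substring test over the TableGen instruction name, as in the
    // fallback path of X86InstrInfo::isVectorInstr.
    static bool looksLikeVectorName(const char *Name) {
      return std::strstr(Name, "PS") != 0          // SSE packed single
          || std::strstr(Name, "PD") != 0          // SSE packed double
          || std::strstr(Name, "DQ") != 0          // SSE packed integer
          || Name[0] == 'P'                        // MMX/SSE packed integer
          || (Name[0] == 'V' && Name[1] == 'P');   // AVX packed integer
    }

    // e.g. looksLikeVectorName("ADDPSrr") and looksLikeVectorName("PADDBrr")
    // are true; looksLikeVectorName("ADDSSrr") and
    // looksLikeVectorName("MOV32rm") are false.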
> Index: lib/Target/X86/X86InstrInfo.h
> ===================================================================
> --- lib/Target/X86/X86InstrInfo.h (revision 89484)
> +++ lib/Target/X86/X86InstrInfo.h (working copy)
> @@ -448,6 +448,17 @@
> unsigned &SrcReg, unsigned &DstReg,
> unsigned &SrcSubIdx, unsigned &DstSubIdx)
> const;
>
> + /// isVectorInstr - Return true if the instruction is a vector
> + /// operation.
> + virtual bool isVectorInstr(const MachineInstr &MI) const;
> +
> + /// isVectorOperand - Return true if the operand is of vector type.
> + virtual bool isVectorOperand(const MachineInstr &MI,
> + const MachineOperand *MO) const;
> +
> + /// isVectorOperand - Return true if the mem operand is of vector type.
> + virtual bool isVectorOperand(const MachineInstr &MI,
> + const MachineMemOperand *MMO) const;
> +
> unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
> /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
> /// stack locations as well. This uses a heuristic so it isn't
> @@ -457,11 +468,14 @@
>
> /// hasLoadFromStackSlot - If the specified machine instruction has
> /// a load from a stack slot, return true along with the FrameIndex
> - /// of the loaded stack slot. If not, return false. Unlike
> + /// of the loaded stack slot and the machine mem operand containing
> + /// the reference. If not, return false. Unlike
> /// isLoadFromStackSlot, this returns true for any instructions that
> /// loads from the stack. This is a hint only and may not catch all
> /// cases.
> - bool hasLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
> + bool hasLoadFromStackSlot(const MachineInstr *MI,
> + const MachineMemOperand *&MMO,
> + int &FrameIndex) const;
>
> unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
> /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
> @@ -472,11 +486,13 @@
>
> /// hasStoreToStackSlot - If the specified machine instruction has a
> /// store to a stack slot, return true along with the FrameIndex of
> - /// the loaded stack slot. If not, return false. Unlike
> - /// isStoreToStackSlot, this returns true for any instructions that
> - /// loads from the stack. This is a hint only and may not catch all
> - /// cases.
> - bool hasStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
> + /// the stored stack slot and the machine mem operand containing the
> + /// reference. If not, return false. Unlike isStoreToStackSlot,
> + /// this returns true for any instruction that stores to the
> + /// stack. This is a hint only and may not catch all cases.
> + bool hasStoreToStackSlot(const MachineInstr *MI,
> + const MachineMemOperand *&MMO,
> + int &FrameIndex) const;
>
> bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
> AliasAnalysis *AA) const;
> Index: test/CodeGen/X86/2009-11-20-VectorSpillComments.ll
> ===================================================================
> --- test/CodeGen/X86/2009-11-20-VectorSpillComments.ll (revision 0)
> +++ test/CodeGen/X86/2009-11-20-VectorSpillComments.ll (revision 0)
> @@ -0,0 +1,19 @@
> +; RUN: llc < %s -march=x86-64 | FileCheck %s
> +; CHECK: Vector Spill
> +; CHECK: Vector Reload
> +; CHECK: Vector Folded Reload
> +; CHECK: Scalar Spill
> +; CHECK: Scalar Folded Reload
> +
> +define <8 x i32> @foo(<8 x i32> %t, <8 x i32> %u) {
> + %m = srem <8 x i32> %t, %u
> + ret <8 x i32> %m
> +}
> +define <8 x i32> @bar(<8 x i32> %t, <8 x i32> %u) {
> + %m = urem <8 x i32> %t, %u
> + ret <8 x i32> %m
> +}
> +define <8 x float> @qux(<8 x float> %t, <8 x float> %u) {
> + %m = frem <8 x float> %t, %u
> + ret <8 x float> %m
> +}