[llvm-commits] [llvm] r46893 - in /llvm/trunk: include/llvm/Target/ lib/Target/ARM/ lib/Target/Alpha/ lib/Target/CellSPU/ lib/Target/Mips/ lib/Target/PowerPC/ lib/Target/Sparc/ lib/Target/X86/ test/CodeGen/X86/

Andrew Lenharth andrewl at lenharth.org
Fri Feb 8 13:58:35 PST 2008


This breaks the build.  I think you forgot some files in lib/CodeGen.

Andrew

On 2/8/08, Evan Cheng <evan.cheng at apple.com> wrote:
> Author: evancheng
> Date: Fri Feb  8 15:20:40 2008
> New Revision: 46893
>
> URL: http://llvm.org/viewvc/llvm-project?rev=46893&view=rev
> Log:
> It's not always safe to fold movsd into xorpd, etc. Check the alignment of the load address first to make sure it's 16-byte aligned.
>
> Added:
>     llvm/trunk/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll
> Modified:
>     llvm/trunk/include/llvm/Target/TargetInstrInfo.h
>     llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp
>     llvm/trunk/lib/Target/ARM/ARMInstrInfo.h
>     llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.cpp
>     llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.h
>     llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.cpp
>     llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.h
>     llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp
>     llvm/trunk/lib/Target/Mips/MipsInstrInfo.h
>     llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp
>     llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h
>     llvm/trunk/lib/Target/Sparc/SparcInstrInfo.cpp
>     llvm/trunk/lib/Target/Sparc/SparcInstrInfo.h
>     llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
>     llvm/trunk/lib/Target/X86/X86InstrInfo.h
>
> Modified: llvm/trunk/include/llvm/Target/TargetInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Target/TargetInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/include/llvm/Target/TargetInstrInfo.h (original)
> +++ llvm/trunk/include/llvm/Target/TargetInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -262,7 +262,8 @@
>    /// operand folded, otherwise NULL is returned. The client is responsible for
>    /// removing the old instruction and adding the new one in the instruction
>    /// stream.
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const {
>      return 0;
> @@ -271,7 +272,8 @@
>    /// foldMemoryOperand - Same as the previous version except it allows folding
>    /// of any load and store from / to any address, not just from a specific
>    /// stack slot.
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            MachineInstr* LoadMI) const {
>      return 0;
>
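
Aside for anyone else reading the patch: the reason foldMemoryOperand now
takes a MachineFunction is so a target can ask the frame info how well
aligned the spill slot is before committing to a fold.  What the X86
override further down does with it boils down to roughly the sketch below
(opcodeNeedsAlignedLoad is a name I made up here; it just stands in for the
explicit Fs* opcode switch in the patch, everything else mirrors the change):

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  // The memory forms of the scalar SSE logical ops (FsANDPD and friends)
  // perform a packed, 16-byte-aligned load, so refuse the fold when the
  // slot's alignment can't guarantee that and leave the movsd in place.
  // opcodeNeedsAlignedLoad: hypothetical helper, standing in for the
  // explicit switch over X86::FsANDNPDrr .. X86::FsXORPSrr in the patch.
  if (Alignment < 16 && opcodeNeedsAlignedLoad(MI->getOpcode()))
    return NULL;
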
> Modified: llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -640,9 +640,10 @@
>    return true;
>  }
>
> -MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineInstr *MI,
> -                                                 SmallVectorImpl<unsigned> &Ops,
> -                                                 int FI) const {
> +MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                              MachineInstr *MI,
> +                                              SmallVectorImpl<unsigned> &Ops,
> +                                              int FI) const {
>    if (Ops.size() != 1) return NULL;
>
>    unsigned OpNum = Ops[0];
> @@ -721,7 +722,7 @@
>  }
>
>  bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
> -                                         SmallVectorImpl<unsigned> &Ops) const {
> +                                        SmallVectorImpl<unsigned> &Ops) const {
>    if (Ops.size() != 1) return false;
>
>    unsigned OpNum = Ops[0];
>
> Modified: llvm/trunk/lib/Target/ARM/ARMInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/ARM/ARMInstrInfo.h (original)
> +++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -191,11 +191,13 @@
>                                             MachineBasicBlock::iterator MI,
>                                   const std::vector<CalleeSavedInfo> &CSI) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            MachineInstr* LoadMI) const {
>      return 0;
>
> Modified: llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -250,9 +250,10 @@
>    NewMIs.push_back(MIB);
>  }
>
> -MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineInstr *MI,
> -                                                 SmallVectorImpl<unsigned> &Ops,
> -                                                 int FrameIndex) const {
> +MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                                MachineInstr *MI,
> +                                                SmallVectorImpl<unsigned> &Ops,
> +                                                int FrameIndex) const {
>     if (Ops.size() != 1) return NULL;
>
>     // Make sure this is a reg-reg copy.
>
> Modified: llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.h (original)
> +++ llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -67,11 +67,13 @@
>                                 const TargetRegisterClass *RC,
>                                 SmallVectorImpl<MachineInstr*> &NewMIs) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            MachineInstr* LoadMI) const {
>      return 0;
>
> Modified: llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -391,9 +391,10 @@
>  /// foldMemoryOperand - SPU, like PPC, can only fold spills into
>  /// copy instructions, turning them into load/store instructions.
>  MachineInstr *
> -SPUInstrInfo::foldMemoryOperand(MachineInstr *MI,
> -                                   SmallVectorImpl<unsigned> &Ops,
> -                                   int FrameIndex) const
> +SPUInstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                MachineInstr *MI,
> +                                SmallVectorImpl<unsigned> &Ops,
> +                                int FrameIndex) const
>  {
>  #if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
>    if (Ops.size() != 1) return NULL;
>
> Modified: llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.h (original)
> +++ llvm/trunk/lib/Target/CellSPU/SPUInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -77,12 +77,14 @@
>                                   SmallVectorImpl<MachineInstr*> &NewMIs) const;
>
>      //! Fold spills into load/store instructions
> -    virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                            MachineInstr* MI,
>                                              SmallVectorImpl<unsigned> &Ops,
>                                              int FrameIndex) const;
>
>      //! Fold any load/store to an operand
> -    virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                            MachineInstr* MI,
>                                              SmallVectorImpl<unsigned> &Ops,
>                                              MachineInstr* LoadMI) const {
>        return 0;
>
> Modified: llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -370,7 +370,8 @@
>  }
>
>  MachineInstr *MipsInstrInfo::
> -foldMemoryOperand(MachineInstr* MI,
> +foldMemoryOperand(MachineFunction &MF,
> +                  MachineInstr* MI,
>                    SmallVectorImpl<unsigned> &Ops, int FI) const
>  {
>    if (Ops.size() != 1) return NULL;
>
> Modified: llvm/trunk/lib/Target/Mips/MipsInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/Mips/MipsInstrInfo.h (original)
> +++ llvm/trunk/lib/Target/Mips/MipsInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -106,11 +106,13 @@
>                                 const TargetRegisterClass *RC,
>                                 SmallVectorImpl<MachineInstr*> &NewMIs) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            MachineInstr* LoadMI) const {
>      return 0;
>
> Modified: llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -536,7 +536,8 @@
>
>  /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
>  /// copy instructions, turning them into load/store instructions.
> -MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineInstr *MI,
> +MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                              MachineInstr *MI,
>                                                SmallVectorImpl<unsigned> &Ops,
>                                                int FrameIndex) const {
>    if (Ops.size() != 1) return NULL;
> @@ -594,7 +595,7 @@
>  }
>
>  bool PPCInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
> -                                         SmallVectorImpl<unsigned> &Ops) const {
> +                                        SmallVectorImpl<unsigned> &Ops) const {
>    if (Ops.size() != 1) return false;
>
>    // Make sure this is a reg-reg copy.  Note that we can't handle MCRF, because
>
> Modified: llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h (original)
> +++ llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -131,11 +131,13 @@
>
>    /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
>    /// copy instructions, turning them into load/store instructions.
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            MachineInstr* LoadMI) const {
>      return 0;
>
> Modified: llvm/trunk/lib/Target/Sparc/SparcInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcInstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/Sparc/SparcInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/Sparc/SparcInstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -222,9 +222,10 @@
>    return;
>  }
>
> -MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineInstr* MI,
> -                                                 SmallVectorImpl<unsigned> &Ops,
> -                                                 int FI) const {
> +MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                                MachineInstr* MI,
> +                                                SmallVectorImpl<unsigned> &Ops,
> +                                                int FI) const {
>    if (Ops.size() != 1) return NULL;
>
>    unsigned OpNum = Ops[0];
>
> Modified: llvm/trunk/lib/Target/Sparc/SparcInstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcInstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/Sparc/SparcInstrInfo.h (original)
> +++ llvm/trunk/lib/Target/Sparc/SparcInstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -94,11 +94,13 @@
>                                 const TargetRegisterClass *RC,
>                                 SmallVectorImpl<MachineInstr*> &NewMIs) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const;
>
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            MachineInstr* LoadMI) const {
>      return 0;
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.cpp?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp Fri Feb  8 15:20:40 2008
> @@ -1670,7 +1670,7 @@
>
>  MachineInstr*
>  X86InstrInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
> -                                   SmallVector<MachineOperand,4> &MOs) const {
> +                                SmallVector<MachineOperand,4> &MOs) const {
>    const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
>    bool isTwoAddrFold = false;
>    unsigned NumOps = MI->getDesc().getNumOperands();
> @@ -1730,12 +1730,33 @@
>  }
>
>
> -MachineInstr* X86InstrInfo::foldMemoryOperand(MachineInstr *MI,
> +MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                              MachineInstr *MI,
>                                                SmallVectorImpl<unsigned> &Ops,
>                                                int FrameIndex) const {
>    // Check switch flag
>    if (NoFusing) return NULL;
>
> +  const MachineFrameInfo *MFI = MF.getFrameInfo();
> +  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
> +  // FIXME: Move alignment requirement into tables?
> +  if (Alignment < 16) {
> +    switch (MI->getOpcode()) {
> +    default: break;
> +    // Not always safe to fold movsd into these instructions since their load
> +    // folding variants expect the address to be 16-byte aligned.
> +    case X86::FsANDNPDrr:
> +    case X86::FsANDNPSrr:
> +    case X86::FsANDPDrr:
> +    case X86::FsANDPSrr:
> +    case X86::FsORPDrr:
> +    case X86::FsORPSrr:
> +    case X86::FsXORPDrr:
> +    case X86::FsXORPSrr:
> +      return NULL;
> +    }
> +  }
> +
>    if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
>      unsigned NewOpc = 0;
>      switch (MI->getOpcode()) {
> @@ -1756,12 +1777,39 @@
>    return foldMemoryOperand(MI, Ops[0], MOs);
>  }
>
> -MachineInstr* X86InstrInfo::foldMemoryOperand(MachineInstr *MI,
> +MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
> +                                              MachineInstr *MI,
>                                                SmallVectorImpl<unsigned> &Ops,
>                                                MachineInstr *LoadMI) const {
>    // Check switch flag
>    if (NoFusing) return NULL;
>
> +  unsigned Alignment = 0;
> +  for (unsigned i = 0, e = LoadMI->getNumMemOperands(); i != e; ++i) {
> +    const MemOperand &MRO = LoadMI->getMemOperand(i);
> +    unsigned Align = MRO.getAlignment();
> +    if (Align > Alignment)
> +      Alignment = Align;
> +  }
> +
> +  // FIXME: Move alignment requirement into tables?
> +  if (Alignment < 16) {
> +    switch (MI->getOpcode()) {
> +    default: break;
> +    // Not always safe to fold movsd into these instructions since their load
> +    // folding variants expect the address to be 16-byte aligned.
> +    case X86::FsANDNPDrr:
> +    case X86::FsANDNPSrr:
> +    case X86::FsANDPDrr:
> +    case X86::FsANDPSrr:
> +    case X86::FsORPDrr:
> +    case X86::FsORPSrr:
> +    case X86::FsXORPDrr:
> +    case X86::FsXORPSrr:
> +      return NULL;
> +    }
> +  }
> +
>    if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
>      unsigned NewOpc = 0;
>      switch (MI->getOpcode()) {
>
> Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.h?rev=46893&r1=46892&r2=46893&view=diff
>
> ==============================================================================
> --- llvm/trunk/lib/Target/X86/X86InstrInfo.h (original)
> +++ llvm/trunk/lib/Target/X86/X86InstrInfo.h Fri Feb  8 15:20:40 2008
> @@ -324,14 +324,16 @@
>    /// folding and return true, otherwise it should return false.  If it folds
>    /// the instruction, it is likely that the MachineInstruction the iterator
>    /// references has been changed.
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                            SmallVectorImpl<unsigned> &Ops,
>                                            int FrameIndex) const;
>
>    /// foldMemoryOperand - Same as the previous version except it allows folding
>    /// of any load and store from / to any address, not just from a specific
>    /// stack slot.
> -  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
> +  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
> +                                          MachineInstr* MI,
>                                    SmallVectorImpl<unsigned> &Ops,
>                                    MachineInstr* LoadMI) const;
>
>
> Added: llvm/trunk/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll?rev=46893&view=auto
>
> ==============================================================================
> --- llvm/trunk/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll (added)
> +++ llvm/trunk/test/CodeGen/X86/2008-02-08-LoadFoldingBug.ll Fri Feb  8 15:20:40 2008
> @@ -0,0 +1,99 @@
> +; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep andpd | not grep esp
> +
> +declare double @llvm.sqrt.f64(double) nounwind readnone
> +
> +declare fastcc void @ApplyGivens(double**, double, double, i32, i32, i32, i32) nounwind
> +
> +declare double @fabs(double)
> +
> +define void @main_bb114_2E_outer_2E_i_bb3_2E_i27(double** %tmp12.sub.i.i, [51 x double*]* %tmp12.i.i.i, i32 %i.0.reg2mem.0.ph.i, i32 %tmp11688.i, i32 %tmp19.i, i32 %tmp24.i, [51 x double*]* %tmp12.i.i) {
> +newFuncRoot:
> +       br label %bb3.i27
> +
> +bb111.i77.bb121.i_crit_edge.exitStub:          ; preds = %bb111.i77
> +       ret void
> +
> +bb3.i27:               ; preds = %bb111.i77.bb3.i27_crit_edge, %newFuncRoot
> +       %indvar94.i = phi i32 [ 0, %newFuncRoot ], [ %tmp113.i76, %bb111.i77.bb3.i27_crit_edge ]                ; <i32> [#uses=6]
> +       %tmp6.i20 = getelementptr [51 x double*]* %tmp12.i.i, i32 0, i32 %indvar94.i            ; <double**> [#uses=1]
> +       %tmp7.i21 = load double** %tmp6.i20, align 4            ; <double*> [#uses=2]
> +       %tmp10.i = add i32 %indvar94.i, %i.0.reg2mem.0.ph.i             ; <i32> [#uses=5]
> +       %tmp11.i22 = getelementptr double* %tmp7.i21, i32 %tmp10.i              ; <double*> [#uses=1]
> +       %tmp12.i23 = load double* %tmp11.i22, align 8           ; <double> [#uses=4]
> +       %tmp20.i24 = add i32 %tmp19.i, %indvar94.i              ; <i32> [#uses=3]
> +       %tmp21.i = getelementptr double* %tmp7.i21, i32 %tmp20.i24              ; <double*> [#uses=1]
> +       %tmp22.i25 = load double* %tmp21.i, align 8             ; <double> [#uses=3]
> +       %tmp1.i.i26 = fcmp oeq double %tmp12.i23, 0.000000e+00          ; <i1> [#uses=1]
> +       br i1 %tmp1.i.i26, label %bb3.i27.Givens.exit.i49_crit_edge, label %bb5.i.i31
> +
> +bb5.i.i31:             ; preds = %bb3.i27
> +       %tmp7.i.i28 = call double @fabs( double %tmp12.i23 ) nounwind           ; <double> [#uses=1]
> +       %tmp9.i.i29 = call double @fabs( double %tmp22.i25 ) nounwind           ; <double> [#uses=1]
> +       %tmp10.i.i30 = fcmp ogt double %tmp7.i.i28, %tmp9.i.i29         ; <i1> [#uses=1]
> +       br i1 %tmp10.i.i30, label %bb13.i.i37, label %bb30.i.i43
> +
> +bb13.i.i37:            ; preds = %bb5.i.i31
> +       %tmp15.i.i32 = sub double -0.000000e+00, %tmp22.i25             ; <double> [#uses=1]
> +       %tmp17.i.i33 = fdiv double %tmp15.i.i32, %tmp12.i23             ; <double> [#uses=3]
> +       %tmp20.i4.i = mul double %tmp17.i.i33, %tmp17.i.i33             ; <double> [#uses=1]
> +       %tmp21.i.i34 = add double %tmp20.i4.i, 1.000000e+00             ; <double> [#uses=1]
> +       %tmp22.i.i35 = call double @llvm.sqrt.f64( double %tmp21.i.i34 ) nounwind               ; <double> [#uses=1]
> +       %tmp23.i5.i = fdiv double 1.000000e+00, %tmp22.i.i35            ; <double> [#uses=2]
> +       %tmp28.i.i36 = mul double %tmp23.i5.i, %tmp17.i.i33             ; <double> [#uses=1]
> +       br label %Givens.exit.i49
> +
> +bb30.i.i43:            ; preds = %bb5.i.i31
> +       %tmp32.i.i38 = sub double -0.000000e+00, %tmp12.i23             ; <double> [#uses=1]
> +       %tmp34.i.i39 = fdiv double %tmp32.i.i38, %tmp22.i25             ; <double> [#uses=3]
> +       %tmp37.i6.i = mul double %tmp34.i.i39, %tmp34.i.i39             ; <double> [#uses=1]
> +       %tmp38.i.i40 = add double %tmp37.i6.i, 1.000000e+00             ; <double> [#uses=1]
> +       %tmp39.i7.i = call double @llvm.sqrt.f64( double %tmp38.i.i40 ) nounwind                ; <double> [#uses=1]
> +       %tmp40.i.i41 = fdiv double 1.000000e+00, %tmp39.i7.i            ; <double> [#uses=2]
> +       %tmp45.i.i42 = mul double %tmp40.i.i41, %tmp34.i.i39            ; <double> [#uses=1]
> +       br label %Givens.exit.i49
> +
> +Givens.exit.i49:               ; preds = %bb3.i27.Givens.exit.i49_crit_edge, %bb30.i.i43, %bb13.i.i37
> +       %s.0.i44 = phi double [ %tmp45.i.i42, %bb30.i.i43 ], [ %tmp23.i5.i, %bb13.i.i37 ], [ 0.000000e+00, %bb3.i27.Givens.exit.i49_crit_edge ]         ; <double> [#uses=2]
> +       %c.0.i45 = phi double [ %tmp40.i.i41, %bb30.i.i43 ], [ %tmp28.i.i36, %bb13.i.i37 ], [ 1.000000e+00, %bb3.i27.Givens.exit.i49_crit_edge ]                ; <double> [#uses=2]
> +       %tmp26.i46 = add i32 %tmp24.i, %indvar94.i              ; <i32> [#uses=2]
> +       %tmp27.i47 = icmp slt i32 %tmp26.i46, 51                ; <i1> [#uses=1]
> +       %min.i48 = select i1 %tmp27.i47, i32 %tmp26.i46, i32 50         ; <i32> [#uses=1]
> +       call fastcc void @ApplyGivens( double** %tmp12.sub.i.i, double %s.0.i44, double %c.0.i45, i32 %tmp20.i24, i32 %tmp10.i, i32 %indvar94.i, i32 %min.i48 ) nounwind
> +       br label %codeRepl
> +
> +codeRepl:              ; preds = %Givens.exit.i49
> +       call void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb_2E_i48_2E_i( i32 %tmp10.i, i32 %tmp20.i24, double %s.0.i44, double %c.0.i45, [51 x double*]* %tmp12.i.i.i )
> +       br label %ApplyRGivens.exit49.i
> +
> +ApplyRGivens.exit49.i:         ; preds = %codeRepl
> +       %tmp10986.i = icmp sgt i32 %tmp11688.i, %tmp10.i                ; <i1> [#uses=1]
> +       br i1 %tmp10986.i, label %ApplyRGivens.exit49.i.bb52.i57_crit_edge, label %ApplyRGivens.exit49.i.bb111.i77_crit_edge
> +
> +codeRepl1:             ; preds = %ApplyRGivens.exit49.i.bb52.i57_crit_edge
> +       call void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb52_2E_i57( i32 %tmp10.i, double** %tmp12.sub.i.i, [51 x double*]* %tmp12.i.i.i, i32 %i.0.reg2mem.0.ph.i, i32 %tmp11688.i, i32 %tmp19.i, i32 %tmp24.i, [51 x double*]* %tmp12.i.i )
> +       br label %bb105.i.bb111.i77_crit_edge
> +
> +bb111.i77:             ; preds = %bb105.i.bb111.i77_crit_edge, %ApplyRGivens.exit49.i.bb111.i77_crit_edge
> +       %tmp113.i76 = add i32 %indvar94.i, 1            ; <i32> [#uses=2]
> +       %tmp118.i = icmp sgt i32 %tmp11688.i, %tmp113.i76               ; <i1> [#uses=1]
> +       br i1 %tmp118.i, label %bb111.i77.bb3.i27_crit_edge, label %bb111.i77.bb121.i_crit_edge.exitStub
> +
> +bb3.i27.Givens.exit.i49_crit_edge:             ; preds = %bb3.i27
> +       br label %Givens.exit.i49
> +
> +ApplyRGivens.exit49.i.bb52.i57_crit_edge:              ; preds = %ApplyRGivens.exit49.i
> +       br label %codeRepl1
> +
> +ApplyRGivens.exit49.i.bb111.i77_crit_edge:             ; preds = %ApplyRGivens.exit49.i
> +       br label %bb111.i77
> +
> +bb105.i.bb111.i77_crit_edge:           ; preds = %codeRepl1
> +       br label %bb111.i77
> +
> +bb111.i77.bb3.i27_crit_edge:           ; preds = %bb111.i77
> +       br label %bb3.i27
> +}
> +
> +declare void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb_2E_i48_2E_i(i32, i32, double, double, [51 x double*]*)
> +
> +declare void @main_bb114_2E_outer_2E_i_bb3_2E_i27_bb52_2E_i57(i32, double**, [51 x double*]*, i32, i32, i32, i32, [51 x double*]*)
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>
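
Same caveat as my aside above (sketch only, reusing the made-up
opcodeNeedsAlignedLoad helper): for the second overload there is no frame
index to ask about, so the patch instead takes the best alignment recorded
on the folded load's memory operands and applies the same opcode check:

  // Take the largest alignment the load is known to have.
  unsigned Alignment = 0;
  for (unsigned i = 0, e = LoadMI->getNumMemOperands(); i != e; ++i) {
    const MemOperand &MRO = LoadMI->getMemOperand(i);
    unsigned Align = MRO.getAlignment();
    if (Align > Alignment)
      Alignment = Align;
  }
  // Bail out of the fold if 16-byte alignment can't be proven and the
  // instruction's memory form would require it (the Fs* list again).
  if (Alignment < 16 && opcodeNeedsAlignedLoad(MI->getOpcode()))
    return NULL;

As I read the new test's RUN line, it checks exactly this: the generated
andpd must not take its operand through esp, i.e. the value whose alignment
can't be proven gets its own movsd load instead of being folded into andpd.
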


