[llvm-branch-commits] [llvm-branch] r107465 [1/2] - in /llvm/branches/wendling/eh: ./ autoconf/ bindings/ada/llvm/ cmake/ docs/ docs/tutorial/ examples/Kaleidoscope/Chapter3/ examples/Kaleidoscope/Chapter5/ examples/Kaleidoscope/Chapter6/ examples/Kaleidoscope/Chapter7/ include/llvm-c/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/CodeGen/ include/llvm/Config/ include/llvm/MC/ include/llvm/MC/MCParser/ include/llvm/Support/ include/llvm/System/ include/llvm/Target/ include/llvm/Transforms/ include/llvm/T...

Bill Wendling isanbard at gmail.com
Fri Jul 2 02:57:15 PDT 2010


Author: void
Date: Fri Jul  2 04:57:13 2010
New Revision: 107465

URL: http://llvm.org/viewvc/llvm-project?rev=107465&view=rev
Log:
Another merge to ToT.

Added:
    llvm/branches/wendling/eh/include/llvm/Support/COFF.h
      - copied unchanged from r107464, llvm/trunk/include/llvm/Support/COFF.h
    llvm/branches/wendling/eh/lib/CodeGen/InlineSpiller.cpp
      - copied unchanged from r107464, llvm/trunk/lib/CodeGen/InlineSpiller.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb2HazardRecognizer.cpp
      - copied unchanged from r107464, llvm/trunk/lib/Target/ARM/Thumb2HazardRecognizer.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb2HazardRecognizer.h
      - copied unchanged from r107464, llvm/trunk/lib/Target/ARM/Thumb2HazardRecognizer.h
    llvm/branches/wendling/eh/lib/Transforms/Hello/Hello.exports
      - copied unchanged from r107464, llvm/trunk/lib/Transforms/Hello/Hello.exports
    llvm/branches/wendling/eh/test/Analysis/BasicAA/args-rets-allocas-loads.ll
      - copied unchanged from r107464, llvm/trunk/test/Analysis/BasicAA/args-rets-allocas-loads.ll
    llvm/branches/wendling/eh/test/Analysis/BasicAA/interprocedural.ll
      - copied unchanged from r107464, llvm/trunk/test/Analysis/BasicAA/interprocedural.ll
    llvm/branches/wendling/eh/test/Analysis/BasicAA/unreachable-block.ll
      - copied unchanged from r107464, llvm/trunk/test/Analysis/BasicAA/unreachable-block.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-28-DAGCombineUndef.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/2010-06-28-DAGCombineUndef.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/flag-crash.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/flag-crash.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/ifcvt6.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/ifcvt6.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/va_arg.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/ARM/va_arg.ll
    llvm/branches/wendling/eh/test/CodeGen/CellSPU/shuffles.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/CellSPU/shuffles.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/Thumb/2010-06-18-SibCallCrash.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-07-01-FuncAlign.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/Thumb/2010-07-01-FuncAlign.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-24-g-constraint-crash.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/2010-06-24-g-constraint-crash.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/2010-06-28-FastAllocTiedOperand.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/crash-O0.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/crash-O0.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-loads.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/fast-isel-loads.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/sse-commute.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/sse-commute.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/switch-bt.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/switch-bt.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/v-binop-widen.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/v-binop-widen.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/v-binop-widen2.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/v-binop-widen2.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/x86-64-tls-1.ll
      - copied unchanged from r107464, llvm/trunk/test/CodeGen/X86/x86-64-tls-1.ll
    llvm/branches/wendling/eh/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll
      - copied unchanged from r107464, llvm/trunk/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll
    llvm/branches/wendling/eh/test/Feature/linker_private_linkages.ll
      - copied unchanged from r107464, llvm/trunk/test/Feature/linker_private_linkages.ll
    llvm/branches/wendling/eh/test/FrontendC++/2010-06-21-LocalVarDbg.cpp
      - copied unchanged from r107464, llvm/trunk/test/FrontendC++/2010-06-21-LocalVarDbg.cpp
    llvm/branches/wendling/eh/test/FrontendC++/2010-06-22-BitfieldInit.cpp
      - copied unchanged from r107464, llvm/trunk/test/FrontendC++/2010-06-22-BitfieldInit.cpp
    llvm/branches/wendling/eh/test/FrontendC++/2010-06-22-ZeroBitfield.cpp
      - copied unchanged from r107464, llvm/trunk/test/FrontendC++/2010-06-22-ZeroBitfield.cpp
    llvm/branches/wendling/eh/test/FrontendC++/thunk-linkonce-odr.cpp
      - copied unchanged from r107464, llvm/trunk/test/FrontendC++/thunk-linkonce-odr.cpp
    llvm/branches/wendling/eh/test/FrontendC/2010-06-28-DbgEntryPC.c
      - copied unchanged from r107464, llvm/trunk/test/FrontendC/2010-06-28-DbgEntryPC.c
    llvm/branches/wendling/eh/test/FrontendC/2010-06-28-DbgLocalVar.c
      - copied unchanged from r107464, llvm/trunk/test/FrontendC/2010-06-28-DbgLocalVar.c
    llvm/branches/wendling/eh/test/FrontendC/2010-06-28-nowarn.c
      - copied unchanged from r107464, llvm/trunk/test/FrontendC/2010-06-28-nowarn.c
    llvm/branches/wendling/eh/test/LLVMC/C++/filelist.cpp
      - copied unchanged from r107464, llvm/trunk/test/LLVMC/C++/filelist.cpp
    llvm/branches/wendling/eh/test/TableGen/usevalname.td
      - copied unchanged from r107464, llvm/trunk/test/TableGen/usevalname.td
    llvm/branches/wendling/eh/test/Transforms/StripSymbols/2010-06-30-StripDebug.ll
      - copied unchanged from r107464, llvm/trunk/test/Transforms/StripSymbols/2010-06-30-StripDebug.ll
    llvm/branches/wendling/eh/test/Transforms/StripSymbols/2010-07-01-DeadDbgInfo.ll
      - copied unchanged from r107464, llvm/trunk/test/Transforms/StripSymbols/2010-07-01-DeadDbgInfo.ll
    llvm/branches/wendling/eh/test/Transforms/TailCallElim/2010-06-26-MultipleReturnValues.ll
      - copied unchanged from r107464, llvm/trunk/test/Transforms/TailCallElim/2010-06-26-MultipleReturnValues.ll
Removed:
    llvm/branches/wendling/eh/test/CodeGen/ARM/ifcvt6-tc.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/insn-sched1-tc.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/ldm-tc.ll
    llvm/branches/wendling/eh/test/CodeGen/Generic/v-binop-widen.ll
    llvm/branches/wendling/eh/test/CodeGen/Generic/v-binop-widen2.ll
    llvm/branches/wendling/eh/test/CodeGen/PowerPC/2008-03-06-KillInfo.ll
    llvm/branches/wendling/eh/test/CodeGen/PowerPC/2010-02-26-FoldFloats.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/sign_extend_inreg.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2007-11-14-Coalescer-Bug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2008-10-16-SpillerBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2009-01-12-CoalescerBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2009-09-07-CoalescerBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2009-09-19-SchedCustomLoweringBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2009-12-12-CoalescerBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/imp-def-copies.ll
    llvm/branches/wendling/eh/test/FrontendC++/thunk-weak-odr.cpp
Modified:
    llvm/branches/wendling/eh/   (props changed)
    llvm/branches/wendling/eh/CMakeLists.txt
    llvm/branches/wendling/eh/Makefile
    llvm/branches/wendling/eh/Makefile.rules
    llvm/branches/wendling/eh/autoconf/configure.ac
    llvm/branches/wendling/eh/bindings/ada/llvm/llvm.ads
    llvm/branches/wendling/eh/cmake/config-ix.cmake
    llvm/branches/wendling/eh/configure
    llvm/branches/wendling/eh/docs/AliasAnalysis.html
    llvm/branches/wendling/eh/docs/DebuggingJITedCode.html
    llvm/branches/wendling/eh/docs/LangRef.html
    llvm/branches/wendling/eh/docs/TableGenFundamentals.html
    llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html
    llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html
    llvm/branches/wendling/eh/docs/tutorial/OCamlLangImpl6.html
    llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter3/toy.cpp
    llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp
    llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp
    llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp
    llvm/branches/wendling/eh/include/llvm-c/Core.h
    llvm/branches/wendling/eh/include/llvm/ADT/SmallPtrSet.h
    llvm/branches/wendling/eh/include/llvm/ADT/Statistic.h
    llvm/branches/wendling/eh/include/llvm/ADT/ValueMap.h
    llvm/branches/wendling/eh/include/llvm/AbstractTypeUser.h
    llvm/branches/wendling/eh/include/llvm/Analysis/AliasAnalysis.h
    llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h
    llvm/branches/wendling/eh/include/llvm/Analysis/MemoryBuiltins.h
    llvm/branches/wendling/eh/include/llvm/Analysis/Passes.h
    llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h
    llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpander.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/AsmPrinter.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadata.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadataPrinter.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/LiveIntervalAnalysis.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/MachineBasicBlock.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/MachineLoopInfo.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/RuntimeLibcalls.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h
    llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h
    llvm/branches/wendling/eh/include/llvm/Config/config.h.in
    llvm/branches/wendling/eh/include/llvm/GlobalValue.h
    llvm/branches/wendling/eh/include/llvm/Instructions.h
    llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h
    llvm/branches/wendling/eh/include/llvm/LinkAllPasses.h
    llvm/branches/wendling/eh/include/llvm/MC/MCContext.h
    llvm/branches/wendling/eh/include/llvm/MC/MCParser/AsmParser.h
    llvm/branches/wendling/eh/include/llvm/MC/MCSection.h
    llvm/branches/wendling/eh/include/llvm/MC/MCSectionCOFF.h
    llvm/branches/wendling/eh/include/llvm/MC/SectionKind.h
    llvm/branches/wendling/eh/include/llvm/Module.h
    llvm/branches/wendling/eh/include/llvm/Pass.h
    llvm/branches/wendling/eh/include/llvm/PassAnalysisSupport.h
    llvm/branches/wendling/eh/include/llvm/PassManagers.h
    llvm/branches/wendling/eh/include/llvm/PassSupport.h
    llvm/branches/wendling/eh/include/llvm/Support/CallSite.h
    llvm/branches/wendling/eh/include/llvm/Support/IRBuilder.h
    llvm/branches/wendling/eh/include/llvm/Support/MemoryBuffer.h
    llvm/branches/wendling/eh/include/llvm/System/DataTypes.h.cmake
    llvm/branches/wendling/eh/include/llvm/Target/Target.td
    llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
    llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
    llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
    llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h
    llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h
    llvm/branches/wendling/eh/include/llvm/Type.h
    llvm/branches/wendling/eh/include/llvm/Value.h
    llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp
    llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp
    llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp
    llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp
    llvm/branches/wendling/eh/lib/Analysis/MemoryBuiltins.cpp
    llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp
    llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp
    llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp
    llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
    llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp
    llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp
    llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp
    llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp
    llvm/branches/wendling/eh/lib/AsmParser/LLToken.h
    llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp
    llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp
    llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DIE.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h
    llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
    llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp
    llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.h
    llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt
    llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp
    llvm/branches/wendling/eh/lib/CodeGen/ELFCodeEmitter.cpp
    llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp
    llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp
    llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp
    llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp
    llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp
    llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp
    llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp
    llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp
    llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
    llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAG.cpp
    llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/branches/wendling/eh/lib/CodeGen/ShadowStackGC.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h
    llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp
    llvm/branches/wendling/eh/lib/CodeGen/SlotIndexes.cpp
    llvm/branches/wendling/eh/lib/CodeGen/Spiller.cpp
    llvm/branches/wendling/eh/lib/CodeGen/Spiller.h
    llvm/branches/wendling/eh/lib/CodeGen/StrongPHIElimination.cpp
    llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp
    llvm/branches/wendling/eh/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
    llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp
    llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp
    llvm/branches/wendling/eh/lib/CompilerDriver/Tool.cpp
    llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp
    llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp
    llvm/branches/wendling/eh/lib/MC/MCContext.cpp
    llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp
    llvm/branches/wendling/eh/lib/MC/MCSectionCOFF.cpp
    llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp
    llvm/branches/wendling/eh/lib/Support/FoldingSet.cpp
    llvm/branches/wendling/eh/lib/Support/MemoryBuffer.cpp
    llvm/branches/wendling/eh/lib/Support/PrettyStackTrace.cpp
    llvm/branches/wendling/eh/lib/Support/SmallPtrSet.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h
    llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantIslandPass.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h
    llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrFormats.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb2.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/ARMMachineFunctionInfo.h
    llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA8.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA9.td
    llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/CMakeLists.txt
    llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb1RegisterInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h
    llvm/branches/wendling/eh/lib/Target/ARM/Thumb2SizeReduction.cpp
    llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrFormats.td
    llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.td
    llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.td
    llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.h
    llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/CppBackend/CPPBackend.cpp
    llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.td
    llvm/branches/wendling/eh/lib/Target/Mangler.cpp
    llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.td
    llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.td
    llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
    llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
    llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.h
    llvm/branches/wendling/eh/lib/Target/README.txt
    llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.td
    llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrFP.td
    llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.td
    llvm/branches/wendling/eh/lib/Target/TargetRegisterInfo.cpp
    llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmLexer.cpp
    llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmParser.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86CodeEmitter.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86FloatingPoint.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.h
    llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrFPStack.td
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrFormats.td
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.td
    llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
    llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
    llvm/branches/wendling/eh/lib/Target/XCore/AsmPrinter/XCoreAsmPrinter.cpp
    llvm/branches/wendling/eh/lib/Target/XCore/XCoreISelLowering.cpp
    llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.td
    llvm/branches/wendling/eh/lib/Transforms/Hello/Hello.cpp
    llvm/branches/wendling/eh/lib/Transforms/Hello/Makefile
    llvm/branches/wendling/eh/lib/Transforms/IPO/GlobalOpt.cpp
    llvm/branches/wendling/eh/lib/Transforms/IPO/LowerSetJmp.cpp
    llvm/branches/wendling/eh/lib/Transforms/IPO/MergeFunctions.cpp
    llvm/branches/wendling/eh/lib/Transforms/IPO/PartialInlining.cpp
    llvm/branches/wendling/eh/lib/Transforms/IPO/PartialSpecialization.cpp
    llvm/branches/wendling/eh/lib/Transforms/IPO/StripSymbols.cpp
    llvm/branches/wendling/eh/lib/Transforms/IPO/StructRetPromotion.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombine.h
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineCompares.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineShifts.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
    llvm/branches/wendling/eh/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/branches/wendling/eh/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
    llvm/branches/wendling/eh/lib/Transforms/Instrumentation/ProfilingUtils.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/ADCE.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/CodeGenPrepare.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/DeadStoreElimination.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/GVN.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/LoopDeletion.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/LoopIndexSplit.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/LoopRotation.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/LoopUnswitch.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/MemCpyOptimizer.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/ScalarReplAggregates.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp
    llvm/branches/wendling/eh/lib/Transforms/Scalar/TailRecursionElimination.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/AddrModeMatcher.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/BuildLibCalls.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/CloneFunction.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/CloneLoop.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/CloneModule.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/InlineFunction.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/LoopSimplify.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/LoopUnroll.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/LowerInvoke.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/ValueMapper.cpp
    llvm/branches/wendling/eh/lib/Transforms/Utils/ValueMapper.h
    llvm/branches/wendling/eh/lib/VMCore/AsmWriter.cpp
    llvm/branches/wendling/eh/lib/VMCore/AutoUpgrade.cpp
    llvm/branches/wendling/eh/lib/VMCore/ConstantFold.cpp
    llvm/branches/wendling/eh/lib/VMCore/Core.cpp
    llvm/branches/wendling/eh/lib/VMCore/Instruction.cpp
    llvm/branches/wendling/eh/lib/VMCore/Instructions.cpp
    llvm/branches/wendling/eh/lib/VMCore/IntrinsicInst.cpp
    llvm/branches/wendling/eh/lib/VMCore/Module.cpp
    llvm/branches/wendling/eh/lib/VMCore/Pass.cpp
    llvm/branches/wendling/eh/lib/VMCore/PassManager.cpp
    llvm/branches/wendling/eh/lib/VMCore/Value.cpp
    llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp
    llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/scev-aa.ll
    llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/trip-count10.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-23-linkerprivate.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/arm-returnaddr.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/call-tc.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/crash-O0.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/ifcvt2.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/long_shift.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-code-insertion.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll
    llvm/branches/wendling/eh/test/CodeGen/ARM/vget_lane.ll
    llvm/branches/wendling/eh/test/CodeGen/CellSPU/call.ll
    llvm/branches/wendling/eh/test/CodeGen/CellSPU/call_indirect.ll
    llvm/branches/wendling/eh/test/CodeGen/CellSPU/jumptable.ll
    llvm/branches/wendling/eh/test/CodeGen/CellSPU/loads.ll
    llvm/branches/wendling/eh/test/CodeGen/PowerPC/2009-08-23-linkerprivate.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb/push.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/crash.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-call-tc.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cbnz.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-eor.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-select_xform.ll
    llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-uxtb.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2006-11-17-IllegalMove.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2007-01-08-InstrSched.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2007-11-04-rip-immediate-constant.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2008-03-18-CoalescerBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-09-BranchFolding.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2008-08-23-X86-64AsmBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2009-08-23-linkerprivate.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/break-sse-dep.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-gep.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/fast-isel-shift-imm.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/ins_subreg_coalesce-3.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/loop-strength-reduce6.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/lsr-reuse.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/optimize-max-3.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/pic.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/pr2659.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/sse3.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/stack-align.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/tailcallstack64.ll
    llvm/branches/wendling/eh/test/CodeGen/X86/vec_shuffle-6.ll
    llvm/branches/wendling/eh/test/DebugInfo/2010-05-28-Crash.ll
    llvm/branches/wendling/eh/test/FrontendC/2010-06-17-asmcrash.c
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-new-encoder.s
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-new-encoder.s
    llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-operands.s
    llvm/branches/wendling/eh/test/TableGen/defmclass.td
    llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/tripcount_compute.ll
    llvm/branches/wendling/eh/test/Transforms/InstCombine/icmp.ll
    llvm/branches/wendling/eh/test/Transforms/LoopRotate/phi-duplicate.ll
    llvm/branches/wendling/eh/test/Transforms/PartialSpecialize/two-specializations.ll
    llvm/branches/wendling/eh/test/lit.cfg
    llvm/branches/wendling/eh/tools/bugpoint/BugDriver.h
    llvm/branches/wendling/eh/tools/bugpoint/CrashDebugger.cpp
    llvm/branches/wendling/eh/tools/bugpoint/ExtractFunction.cpp
    llvm/branches/wendling/eh/tools/bugpoint/Miscompilation.cpp
    llvm/branches/wendling/eh/tools/edis/EDDisassembler.cpp
    llvm/branches/wendling/eh/tools/gold/gold-plugin.cpp
    llvm/branches/wendling/eh/tools/llvm-extract/llvm-extract.cpp
    llvm/branches/wendling/eh/tools/llvm-mc/llvm-mc.cpp
    llvm/branches/wendling/eh/tools/llvm-nm/llvm-nm.cpp
    llvm/branches/wendling/eh/tools/llvmc/plugins/Base/Base.td.in
    llvm/branches/wendling/eh/utils/TableGen/ARMDecoderEmitter.cpp
    llvm/branches/wendling/eh/utils/TableGen/CMakeLists.txt
    llvm/branches/wendling/eh/utils/TableGen/ClangAttrEmitter.cpp
    llvm/branches/wendling/eh/utils/TableGen/CodeGenInstruction.cpp
    llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp
    llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.cpp
    llvm/branches/wendling/eh/utils/TableGen/Record.cpp
    llvm/branches/wendling/eh/utils/TableGen/TGParser.cpp
    llvm/branches/wendling/eh/utils/buildit/build_llvm
    llvm/branches/wendling/eh/utils/unittest/googletest/README.LLVM
    llvm/branches/wendling/eh/utils/unittest/googletest/include/gtest/internal/gtest-port.h

Propchange: llvm/branches/wendling/eh/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jul  2 04:57:13 2010
@@ -1 +1 @@
-/llvm/trunk:104459-106308
+/llvm/trunk:104459-107464

Modified: llvm/branches/wendling/eh/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/CMakeLists.txt?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/CMakeLists.txt Fri Jul  2 04:57:13 2010
@@ -4,7 +4,7 @@
 cmake_minimum_required(VERSION 2.6.1)
 
 set(PACKAGE_NAME llvm)
-set(PACKAGE_VERSION 2.7svn)
+set(PACKAGE_VERSION 2.8svn)
 set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
 set(PACKAGE_BUGREPORT "llvmbugs at cs.uiuc.edu")
 

Modified: llvm/branches/wendling/eh/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/Makefile?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/Makefile (original)
+++ llvm/branches/wendling/eh/Makefile Fri Jul  2 04:57:13 2010
@@ -64,7 +64,7 @@
 
 ifeq ($(MAKECMDGOALS),install-clang)
   DIRS := tools/clang/tools/driver tools/clang/lib/Headers \
-          tools/clang/lib/Runtime tools/clang/docs
+          tools/clang/runtime tools/clang/docs
   OPTIONAL_DIRS :=
   NO_INSTALL = 1
 endif

Modified: llvm/branches/wendling/eh/Makefile.rules
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/Makefile.rules?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/Makefile.rules (original)
+++ llvm/branches/wendling/eh/Makefile.rules Fri Jul  2 04:57:13 2010
@@ -935,7 +935,7 @@
 endif
 
 ###############################################################################
-# Set up variables for building libararies
+# Set up variables for building libraries
 ###############################################################################
 
 #---------------------------------------------------------
@@ -1131,7 +1131,7 @@
 	  $(ProjLibsOptions) $(LLVMLibsOptions) $(LIBS)
 else
 $(LibName.SO): $(ObjectsO) $(LibDir)/.dir
-	$(Echo) Linking $(BuildMode) Shared Library $(LIBRARYNAME)$(SHLIBEXT)
+	$(Echo) Linking $(BuildMode) Shared Library $(basename $@)
 	$(Verb) $(Link) $(SharedLinkOptions) -o $@ $(ObjectsO)
 endif
 

Modified: llvm/branches/wendling/eh/autoconf/configure.ac
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/autoconf/configure.ac?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/autoconf/configure.ac (original)
+++ llvm/branches/wendling/eh/autoconf/configure.ac Fri Jul  2 04:57:13 2010
@@ -1284,6 +1284,9 @@
   AC_CHECK_HEADERS([ffi.h ffi/ffi.h])
 fi
 
+dnl Try to find Darwin specific crash reporting library.
+AC_CHECK_HEADERS([CrashReporterClient.h])
+
 dnl===-----------------------------------------------------------------------===
 dnl===
 dnl=== SECTION 7: Check for types and structures

Modified: llvm/branches/wendling/eh/bindings/ada/llvm/llvm.ads
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/bindings/ada/llvm/llvm.ads?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/bindings/ada/llvm/llvm.ads (original)
+++ llvm/branches/wendling/eh/bindings/ada/llvm/llvm.ads Fri Jul  2 04:57:13 2010
@@ -316,7 +316,8 @@
       LLVMExternalWeakLinkage,
       LLVMGhostLinkage,
       LLVMCommonLinkage,
-      LLVMLinkerPrivateLinkage);
+      LLVMLinkerPrivateLinkage,
+      LLVMLinkerPrivateWeakLinkage);
 
    for LLVMLinkage use
      (LLVMExternalLinkage            => 0,
@@ -333,7 +334,8 @@
       LLVMExternalWeakLinkage        => 11,
       LLVMGhostLinkage               => 12,
       LLVMCommonLinkage              => 13,
-      LLVMLinkerPrivateLinkage       => 14);
+      LLVMLinkerPrivateLinkage       => 14,
+      LLVMLinkerPrivateWeakLinkage   => 15);
 
    pragma Convention (C, LLVMLinkage);
 

Modified: llvm/branches/wendling/eh/cmake/config-ix.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/cmake/config-ix.cmake?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/cmake/config-ix.cmake (original)
+++ llvm/branches/wendling/eh/cmake/config-ix.cmake Fri Jul  2 04:57:13 2010
@@ -4,7 +4,7 @@
 include(CheckFunctionExists)
 include(CheckCXXSourceCompiles)
 
-if( UNIX )
+if( UNIX AND NOT BEOS )
   # Used by check_symbol_exists:
   set(CMAKE_REQUIRED_LIBRARIES m)
 endif()

Modified: llvm/branches/wendling/eh/configure
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/configure?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/configure (original)
+++ llvm/branches/wendling/eh/configure Fri Jul  2 04:57:13 2010
@@ -16950,6 +16950,176 @@
 fi
 
 
+for ac_header in CrashReporterClient.h
+do
+as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+  { echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+	       { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+else
+  # Is the header compilable?
+{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
+echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+$ac_includes_default
+#include <$ac_header>
+_ACEOF
+rm -f conftest.$ac_objext
+if { (ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_compile") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } &&
+	 { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
+  { (case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; } &&
+	 { ac_try='test -s conftest.$ac_objext'
+  { (case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; }; then
+  ac_header_compiler=yes
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_header_compiler=no
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
+echo "${ECHO_T}$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
+echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
+cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+#include <$ac_header>
+_ACEOF
+if { (ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } >/dev/null; then
+  if test -s conftest.err; then
+    ac_cpp_err=$ac_c_preproc_warn_flag
+    ac_cpp_err=$ac_cpp_err$ac_c_werror_flag
+  else
+    ac_cpp_err=
+  fi
+else
+  ac_cpp_err=yes
+fi
+if test -z "$ac_cpp_err"; then
+  ac_header_preproc=yes
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  ac_header_preproc=no
+fi
+
+rm -f conftest.err conftest.$ac_ext
+{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
+echo "${ECHO_T}$ac_header_preproc" >&6; }
+
+# So?  What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
+  yes:no: )
+    { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
+echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
+    ac_header_preproc=yes
+    ;;
+  no:yes:* )
+    { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
+echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header:     check for missing prerequisite headers?" >&5
+echo "$as_me: WARNING: $ac_header:     check for missing prerequisite headers?" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
+echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&5
+echo "$as_me: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
+echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
+    { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
+echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
+    ( cat <<\_ASBOX
+## ----------------------------------- ##
+## Report this to llvmbugs at cs.uiuc.edu ##
+## ----------------------------------- ##
+_ASBOX
+     ) | sed "s/^/$as_me: WARNING:     /" >&2
+    ;;
+esac
+{ echo "$as_me:$LINENO: checking for $ac_header" >&5
+echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
+if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  eval "$as_ac_Header=\$ac_header_preproc"
+fi
+ac_res=`eval echo '${'$as_ac_Header'}'`
+	       { echo "$as_me:$LINENO: result: $ac_res" >&5
+echo "${ECHO_T}$ac_res" >&6; }
+
+fi
+if test `eval echo '${'$as_ac_Header'}'` = yes; then
+  cat >>confdefs.h <<_ACEOF
+#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+
 
 
   { echo "$as_me:$LINENO: checking for HUGE_VAL sanity" >&5

Modified: llvm/branches/wendling/eh/docs/AliasAnalysis.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/AliasAnalysis.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/AliasAnalysis.html (original)
+++ llvm/branches/wendling/eh/docs/AliasAnalysis.html Fri Jul  2 04:57:13 2010
@@ -31,6 +31,7 @@
     <li><a href="#chaining"><tt>AliasAnalysis</tt> chaining behavior</a></li>
     <li><a href="#updating">Updating analysis results for transformations</a></li>
     <li><a href="#implefficiency">Efficiency Issues</a></li>
+    <li><a href="#passmanager">Pass Manager Issues</a></li>
     </ul>
   </li>
 
@@ -502,6 +503,45 @@
 
 </div>
 
+<!-- ======================================================================= -->
+<div class="doc_subsection">
+  <a name="passmanager">Pass Manager Issues</a>
+</div>
+
+<div class="doc_text">
+
+<p>PassManager support for alternative AliasAnalysis implementation
+has some issues.</p>
+
+<p>There is no way to override the default alias analysis. It would
+be very useful to be able to do something like "opt -my-aa -O2" and
+have it use -my-aa for all passes which need AliasAnalysis, but there
+is currently no support for that, short of changing the source code
+and recompiling. Similarly, there is also no way of setting a chain
+of analyses as the default.</p>
+
+<p>There is no way for transform passes to declare that they preserve
+<tt>AliasAnalysis</tt> implementations. The <tt>AliasAnalysis</tt>
+interface includes <tt>deleteValue</tt> and <tt>copyValue</tt> methods
+which are intended to allow a pass to keep an AliasAnalysis consistent,
+however there's no way for a pass to declare in its
+<tt>getAnalysisUsage</tt> that it does so. Some passes attempt to use
+<tt>AU.addPreserved<AliasAnalysis></tt>, however this doesn't
+actually have any effect.</tt>
+
+<p><tt>AliasAnalysisCounter</tt> (<tt>-count-aa</tt>) and <tt>AliasDebugger</tt>
+(<tt>-debug-aa</tt>) are implemented as <tt>ModulePass</tt> classes, so if your
+alias analysis uses <tt>FunctionPass</tt>, it won't be able to use
+these utilities. If you try to use them, the pass manager will
+silently route alias analysis queries directly to
+<tt>BasicAliasAnalysis</tt> instead.</p>
+
+<p>Similarly, the <tt>opt -p</tt> option introduces <tt>ModulePass</tt>
+passes between each pass, which prevents the use of <tt>FunctionPass</tt>
+alias analysis passes.</p>
+
+</div>
+
 <!-- *********************************************************************** -->
 <div class="doc_section">
   <a name="using">Using alias analysis results</a>
@@ -749,6 +789,19 @@
 
 </div>
 
+<!-- _______________________________________________________________________ -->
+<div class="doc_subsubsection">
+  <a name="scev-aa">The <tt>-scev-aa</tt> pass</a>
+</div>
+
+<div class="doc_text">
+
+<p>The <tt>-scev-aa</tt> pass implements AliasAnalysis queries by
+translating them into ScalarEvolution queries. This gives it a
+more complete understanding of <tt>getelementptr</tt> instructions
+and loop induction variables than other alias analyses have.</p>
+
+</div>
 
 <!-- ======================================================================= -->
 <div class="doc_subsection">

Modified: llvm/branches/wendling/eh/docs/DebuggingJITedCode.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/DebuggingJITedCode.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/DebuggingJITedCode.html (original)
+++ llvm/branches/wendling/eh/docs/DebuggingJITedCode.html Fri Jul  2 04:57:13 2010
@@ -28,7 +28,7 @@
 
 <p>Depending on the architecture, this can impact the debugging experience in
 different ways.  For example, on most 32-bit x86 architectures, you can simply
-compile with -fno-omit-framepointer for GCC and -fdisable-fp-elim for LLVM.
+compile with -fno-omit-frame-pointer for GCC and -disable-fp-elim for LLVM.
 When GDB creates a backtrace, it can properly unwind the stack, but the stack
 frames owned by JITed code have ??'s instead of the appropriate symbol name.
 However, on Linux x86_64 in particular, GDB relies on the DWARF CFA debug

Modified: llvm/branches/wendling/eh/docs/LangRef.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/LangRef.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/LangRef.html (original)
+++ llvm/branches/wendling/eh/docs/LangRef.html Fri Jul  2 04:57:13 2010
@@ -24,6 +24,7 @@
         <ol>
           <li><a href="#linkage_private">'<tt>private</tt>' Linkage</a></li>
           <li><a href="#linkage_linker_private">'<tt>linker_private</tt>' Linkage</a></li>
+          <li><a href="#linkage_linker_private_weak">'<tt>linker_private_weak</tt>' Linkage</a></li>
           <li><a href="#linkage_internal">'<tt>internal</tt>' Linkage</a></li>
           <li><a href="#linkage_available_externally">'<tt>available_externally</tt>' Linkage</a></li>
           <li><a href="#linkage_linkonce">'<tt>linkonce</tt>' Linkage</a></li>
@@ -546,20 +547,24 @@
 
 <dl>
   <dt><tt><b><a name="linkage_private">private</a></b></tt></dt>
-  <dd>Global values with private linkage are only directly accessible by objects
-      in the current module.  In particular, linking code into a module with an
-      private global value may cause the private to be renamed as necessary to
-      avoid collisions.  Because the symbol is private to the module, all
-      references can be updated. This doesn't show up in any symbol table in the
-      object file.</dd>
+  <dd>Global values with "<tt>private</tt>" linkage are only directly accessible
+      by objects in the current module. In particular, linking code into a
+      module with an private global value may cause the private to be renamed as
+      necessary to avoid collisions.  Because the symbol is private to the
+      module, all references can be updated. This doesn't show up in any symbol
+      table in the object file.</dd>
 
   <dt><tt><b><a name="linkage_linker_private">linker_private</a></b></tt></dt>
-  <dd>Similar to private, but the symbol is passed through the assembler and
-      removed by the linker after evaluation.  Note that (unlike private
-      symbols) linker_private symbols are subject to coalescing by the linker:
-      weak symbols get merged and redefinitions are rejected.  However, unlike
-      normal strong symbols, they are removed by the linker from the final
-      linked image (executable or dynamic library).</dd>
+  <dd>Similar to <tt>private</tt>, but the symbol is passed through the
+      assembler and evaluated by the linker. Unlike normal strong symbols, they
+      are removed by the linker from the final linked image (executable or
+      dynamic library).</dd>
+
+  <dt><tt><b><a name="linkage_linker_private_weak">linker_private_weak</a></b></tt></dt>
+  <dd>Similar to "<tt>linker_private</tt>", but the symbol is weak. Note that
+      <tt>linker_private_weak</tt> symbols are subject to coalescing by the
+      linker. The symbols are removed by the linker from the final linked image
+      (executable or dynamic library).</dd>
 
   <dt><tt><b><a name="linkage_internal">internal</a></b></tt></dt>
   <dd>Similar to private, but the value shows as a local symbol
@@ -623,8 +628,8 @@
   <dt><tt><b><a name="linkage_weak_odr">weak_odr</a></b></tt></dt>
   <dd>Some languages allow differing globals to be merged, such as two functions
       with different semantics.  Other languages, such as <tt>C++</tt>, ensure
-      that only equivalent globals are ever merged (the "one definition rule" -
-      "ODR").  Such languages can use the <tt>linkonce_odr</tt>
+      that only equivalent globals are ever merged (the "one definition rule"
+      — "ODR").  Such languages can use the <tt>linkonce_odr</tt>
       and <tt>weak_odr</tt> linkage types to indicate that the global will only
       be merged with equivalent globals.  These linkage types are otherwise the
       same as their non-<tt>odr</tt> versions.</dd>

Modified: llvm/branches/wendling/eh/docs/TableGenFundamentals.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/TableGenFundamentals.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/TableGenFundamentals.html (original)
+++ llvm/branches/wendling/eh/docs/TableGenFundamentals.html Fri Jul  2 04:57:13 2010
@@ -144,7 +144,6 @@
   <b>bit</b> mayLoad = 0;
   <b>bit</b> mayStore = 0;
   <b>bit</b> isImplicitDef = 0;
-  <b>bit</b> isTwoAddress = 1;
   <b>bit</b> isConvertibleToThreeAddress = 1;
   <b>bit</b> isCommutable = 1;
   <b>bit</b> isTerminator = 0;

Modified: llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html (original)
+++ llvm/branches/wendling/eh/docs/WritingAnLLVMPass.html Fri Jul  2 04:57:13 2010
@@ -189,11 +189,6 @@
 # dlopen/dlsym on the resulting library.
 LOADABLE_MODULE = 1
 
-# Tell the build system which LLVM libraries your pass needs. You'll probably
-# need at least LLVMSystem.a, LLVMSupport.a, LLVMCore.a but possibly several
-# others too.
-LLVMLIBS = LLVMCore.a LLVMSupport.a LLVMSystem.a
-
 # Include the makefile implementation stuff
 include $(LEVEL)/Makefile.common
 </pre></div>

Modified: llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html Fri Jul  2 04:57:13 2010
@@ -531,7 +531,7 @@
 def unary-(v)
   0-v;
 
-# Define > with the same precedence as >.
+# Define > with the same precedence as <.
 def binary> 10 (LHS RHS)
   RHS < LHS;
 

Modified: llvm/branches/wendling/eh/docs/tutorial/OCamlLangImpl6.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/OCamlLangImpl6.html?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/OCamlLangImpl6.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/OCamlLangImpl6.html Fri Jul  2 04:57:13 2010
@@ -512,7 +512,7 @@
 def unary-(v)
   0-v;
 
-# Define > with the same precedence as >.
+# Define > with the same precedence as <.
 def binary> 10 (LHS RHS)
   RHS < LHS;
 

Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter3/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter3/toy.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter3/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter3/toy.cpp Fri Jul  2 04:57:13 2010
@@ -367,9 +367,9 @@
   if (L == 0 || R == 0) return 0;
   
   switch (Op) {
-  case '+': return Builder.CreateAdd(L, R, "addtmp");
-  case '-': return Builder.CreateSub(L, R, "subtmp");
-  case '*': return Builder.CreateMul(L, R, "multmp");
+  case '+': return Builder.CreateFAdd(L, R, "addtmp");
+  case '-': return Builder.CreateFSub(L, R, "subtmp");
+  case '*': return Builder.CreateFMul(L, R, "multmp");
   case '<':
     L = Builder.CreateFCmpULT(L, R, "cmptmp");
     // Convert bool 0/1 to double 0.0 or 1.0

Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp Fri Jul  2 04:57:13 2010
@@ -615,7 +615,7 @@
     StepVal = ConstantFP::get(getGlobalContext(), APFloat(1.0));
   }
   
-  Value *NextVar = Builder.CreateAdd(Variable, StepVal, "nextvar");
+  Value *NextVar = Builder.CreateFAdd(Variable, StepVal, "nextvar");
 
   // Compute the end condition.
   Value *EndCond = End->Codegen();

Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp Fri Jul  2 04:57:13 2010
@@ -719,7 +719,7 @@
     StepVal = ConstantFP::get(getGlobalContext(), APFloat(1.0));
   }
   
-  Value *NextVar = Builder.CreateAdd(Variable, StepVal, "nextvar");
+  Value *NextVar = Builder.CreateFAdd(Variable, StepVal, "nextvar");
 
   // Compute the end condition.
   Value *EndCond = End->Codegen();

Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp Fri Jul  2 04:57:13 2010
@@ -828,7 +828,7 @@
   // Reload, increment, and restore the alloca.  This handles the case where
   // the body of the loop mutates the variable.
   Value *CurVar = Builder.CreateLoad(Alloca, VarName.c_str());
-  Value *NextVar = Builder.CreateAdd(CurVar, StepVal, "nextvar");
+  Value *NextVar = Builder.CreateFAdd(CurVar, StepVal, "nextvar");
   Builder.CreateStore(NextVar, Alloca);
   
   // Convert condition to a bool by comparing equal to 0.0.

Modified: llvm/branches/wendling/eh/include/llvm-c/Core.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm-c/Core.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm-c/Core.h (original)
+++ llvm/branches/wendling/eh/include/llvm-c/Core.h Fri Jul  2 04:57:13 2010
@@ -226,7 +226,8 @@
   LLVMExternalWeakLinkage,/**< ExternalWeak linkage description */
   LLVMGhostLinkage,       /**< Obsolete */
   LLVMCommonLinkage,      /**< Tentative definitions */
-  LLVMLinkerPrivateLinkage /**< Like Private, but linker removes. */
+  LLVMLinkerPrivateLinkage, /**< Like Private, but linker removes. */
+  LLVMLinkerPrivateWeakLinkage /**< Like LinkerPrivate, but is weak. */
 } LLVMLinkage;
 
 typedef enum {

Modified: llvm/branches/wendling/eh/include/llvm/ADT/SmallPtrSet.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/SmallPtrSet.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/SmallPtrSet.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/SmallPtrSet.h Fri Jul  2 04:57:13 2010
@@ -46,8 +46,10 @@
 class SmallPtrSetImpl {
   friend class SmallPtrSetIteratorImpl;
 protected:
-  /// CurArray - This is the current set of buckets.  If it points to
-  /// SmallArray, then the set is in 'small mode'.
+  /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+  const void **SmallArray;
+  /// CurArray - This is the current set of buckets.  If equal to SmallArray,
+  /// then the set is in 'small mode'.
   const void **CurArray;
   /// CurArraySize - The allocated size of CurArray, always a power of two.
   /// Note that CurArray points to an array that has CurArraySize+1 elements in
@@ -57,15 +59,13 @@
   // If small, this is # elts allocated consequtively
   unsigned NumElements;
   unsigned NumTombstones;
-  const void *SmallArray[1];  // Must be last ivar.
 
   // Helper to copy construct a SmallPtrSet.
-  SmallPtrSetImpl(const SmallPtrSetImpl& that);
-  explicit SmallPtrSetImpl(unsigned SmallSize) {
+  SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl& that);
+  explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize) :
+    SmallArray(SmallStorage), CurArray(SmallStorage), CurArraySize(SmallSize) {
     assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
            "Initial size must be a power of two!");
-    CurArray = &SmallArray[0];
-    CurArraySize = SmallSize;
     // The end pointer, always valid, is set to a valid element to help the
     // iterator.
     CurArray[SmallSize] = 0;
@@ -123,7 +123,7 @@
   }
 
 private:
-  bool isSmall() const { return CurArray == &SmallArray[0]; }
+  bool isSmall() const { return CurArray == SmallArray; }
 
   unsigned Hash(const void *Ptr) const {
     return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1));
@@ -199,29 +199,29 @@
   }
 };
 
-/// NextPowerOfTwo - This is a helper template that rounds N up to the next
-/// power of two.
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
 template<unsigned N>
-struct NextPowerOfTwo;
+struct RoundUpToPowerOfTwo;
 
-/// NextPowerOfTwoH - If N is not a power of two, increase it.  This is a helper
-/// template used to implement NextPowerOfTwo.
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it.  This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
 template<unsigned N, bool isPowerTwo>
-struct NextPowerOfTwoH {
+struct RoundUpToPowerOfTwoH {
   enum { Val = N };
 };
 template<unsigned N>
-struct NextPowerOfTwoH<N, false> {
+struct RoundUpToPowerOfTwoH<N, false> {
   enum {
     // We could just use NextVal = N+1, but this converges faster.  N|(N-1) sets
     // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
-    Val = NextPowerOfTwo<(N|(N-1)) + 1>::Val
+    Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
   };
 };
 
 template<unsigned N>
-struct NextPowerOfTwo {
-  enum { Val = NextPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+struct RoundUpToPowerOfTwo {
+  enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
 };
   
 
@@ -232,16 +232,17 @@
 template<class PtrType, unsigned SmallSize>
 class SmallPtrSet : public SmallPtrSetImpl {
   // Make sure that SmallSize is a power of two, round up if not.
-  enum { SmallSizePowTwo = NextPowerOfTwo<SmallSize>::Val };
-  void *SmallArray[SmallSizePowTwo];
+  enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+  /// SmallStorage - Fixed size storage used in 'small mode'.  The extra element
+  /// ensures that the end iterator actually points to valid memory.
+  const void *SmallStorage[SmallSizePowTwo+1];
   typedef PointerLikeTypeTraits<PtrType> PtrTraits;
 public:
-  SmallPtrSet() : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {}
-  SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(that) {}
+  SmallPtrSet() : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {}
+  SmallPtrSet(const SmallPtrSet &that) : SmallPtrSetImpl(SmallStorage, that) {}
 
   template<typename It>
-  SmallPtrSet(It I, It E)
-    : SmallPtrSetImpl(NextPowerOfTwo<SmallSizePowTwo>::Val) {
+  SmallPtrSet(It I, It E) : SmallPtrSetImpl(SmallStorage, SmallSizePowTwo) {
     insert(I, E);
   }
 

Modified: llvm/branches/wendling/eh/include/llvm/ADT/Statistic.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/Statistic.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/Statistic.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/Statistic.h Fri Jul  2 04:57:13 2010
@@ -56,6 +56,10 @@
   }
   
   const Statistic &operator++() {
+    // FIXME: This function and all those that follow carefully use an
+    // atomic operation to update the value safely in the presence of
+    // concurrent accesses, but not to read the return value, so the
+    // return value is not thread safe.
     sys::AtomicIncrement(&Value);
     return init();
   }

Modified: llvm/branches/wendling/eh/include/llvm/ADT/ValueMap.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/ValueMap.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/ValueMap.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/ValueMap.h Fri Jul  2 04:57:13 2010
@@ -59,16 +59,16 @@
   struct ExtraData {};
 
   template<typename ExtraDataT>
-  static void onRAUW(const ExtraDataT &Data, KeyT Old, KeyT New) {}
+  static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
   template<typename ExtraDataT>
-  static void onDelete(const ExtraDataT &Data, KeyT Old) {}
+  static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
 
   /// Returns a mutex that should be acquired around any changes to the map.
   /// This is only acquired from the CallbackVH (and held around calls to onRAUW
   /// and onDelete) and not inside other ValueMap methods.  NULL means that no
   /// mutex is necessary.
   template<typename ExtraDataT>
-  static sys::Mutex *getMutex(const ExtraDataT &Data) { return NULL; }
+  static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
 };
 
 /// See the file comment.

Modified: llvm/branches/wendling/eh/include/llvm/AbstractTypeUser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/AbstractTypeUser.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/AbstractTypeUser.h (original)
+++ llvm/branches/wendling/eh/include/llvm/AbstractTypeUser.h Fri Jul  2 04:57:13 2010
@@ -146,6 +146,7 @@
   mutable const Type *Ty;
   void destroy();
 public:
+  PATypeHolder() : Ty(0) {}
   PATypeHolder(const Type *ty) : Ty(ty) {
     addRef();
   }
@@ -153,7 +154,7 @@
     addRef();
   }
 
-  ~PATypeHolder() { if (Ty) dropRef(); }
+  ~PATypeHolder() { dropRef(); }
 
   operator Type *() const { return get(); }
   Type *get() const;

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/AliasAnalysis.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/AliasAnalysis.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/AliasAnalysis.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/AliasAnalysis.h Fri Jul  2 04:57:13 2010
@@ -344,10 +344,10 @@
 /// identifiable object.  This returns true for:
 ///    Global Variables and Functions (but not Global Aliases)
 ///    Allocas and Mallocs
-///    ByVal and NoAlias Arguments
-///    NoAlias returns
+///    ByVal and NoAlias Arguments, if Interprocedural is false
+///    NoAlias returns, if Interprocedural is false
 ///
-bool isIdentifiedObject(const Value *V);
+bool isIdentifiedObject(const Value *V, bool Interprocedural = false);
 
 } // End llvm namespace
 

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/LoopInfo.h Fri Jul  2 04:57:13 2010
@@ -256,6 +256,27 @@
   ///
   BlockT *getLoopPreheader() const {
     // Keep track of nodes outside the loop branching to the header...
+    BlockT *Out = getLoopPredecessor();
+    if (!Out) return 0;
+
+    // Make sure there is only one exit out of the preheader.
+    typedef GraphTraits<BlockT*> BlockTraits;
+    typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+    ++SI;
+    if (SI != BlockTraits::child_end(Out))
+      return 0;  // Multiple exits from the block, must not be a preheader.
+
+    // The predecessor has exactly one successor, so it is a preheader.
+    return Out;
+  }
+
+  /// getLoopPredecessor - If the given loop's header has exactly one unique
+  /// predecessor outside the loop, return it. Otherwise return null.
+  /// This is less strict than the loop "preheader" concept, which requires
+  /// the predecessor to have exactly one successor.
+  ///
+  BlockT *getLoopPredecessor() const {
+    // Keep track of nodes outside the loop branching to the header...
     BlockT *Out = 0;
 
     // Loop over the predecessors of the header node...
@@ -273,13 +294,6 @@
 
     // Make sure there is only one exit out of the preheader.
     assert(Out && "Header of loop has no predecessors from outside loop?");
-    typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
-    ++SI;
-    if (SI != BlockTraits::child_end(Out))
-      return 0;  // Multiple exits from the block, must not be a preheader.
-
-    // If there is exactly one preheader, return it.  If there was zero, then
-    // Out is still null.
     return Out;
   }
 

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/MemoryBuiltins.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/MemoryBuiltins.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/MemoryBuiltins.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/MemoryBuiltins.h Fri Jul  2 04:57:13 2010
@@ -72,8 +72,8 @@
 //  free Call Utility Functions.
 //
 
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool isFreeCall(const Value *I);
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *isFreeCall(const Value *I);
 
 } // End llvm namespace
 

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/Passes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/Passes.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/Passes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/Passes.h Fri Jul  2 04:57:13 2010
@@ -53,6 +53,13 @@
 
   //===--------------------------------------------------------------------===//
   //
+  // createInterproceduralAAEvalPass - This pass implements a simple
+  // N^2 interprocedural alias analysis accuracy evaluator.
+  //
+  Pass *createInterproceduralAAEvalPass();
+
+  //===--------------------------------------------------------------------===//
+  //
   // createNoAAPass - This pass implements a "I don't know" alias analysis.
   //
   ImmutablePass *createNoAAPass();
@@ -66,6 +73,14 @@
 
   //===--------------------------------------------------------------------===//
   //
+  // createInterproceduralBasicAliasAnalysisPass - This pass is similar to
+  // basicaa, except that it properly supports queries to values which live
+  // in different functions.
+  //
+  ImmutablePass *createInterproceduralBasicAliasAnalysisPass();
+
+  //===--------------------------------------------------------------------===//
+  //
   /// createLibCallAliasAnalysisPass - Create an alias analysis pass that knows
   /// about the semantics of a set of libcalls specified by LCI.  The newly
   /// constructed pass takes ownership of the pointer that is provided.

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h Fri Jul  2 04:57:13 2010
@@ -343,10 +343,6 @@
     BackedgeTakenInfo HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                        const Loop *L, bool isSigned);
 
-    /// getLoopPredecessor - If the given loop's header has exactly one unique
-    /// predecessor outside the loop, return it. Otherwise return null.
-    BasicBlock *getLoopPredecessor(const Loop *L);
-
     /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
     /// (which may not be an immediate predecessor) which has exactly one
     /// successor from which BB is reachable, or null if no such block is

Modified: llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpander.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpander.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpander.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpander.h Fri Jul  2 04:57:13 2010
@@ -128,6 +128,14 @@
     /// of work to avoid inserting an obviously redundant operation.
     Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
 
+    /// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+    /// reusing an existing cast if a suitable one exists, moving an existing
+    /// cast if a suitable one exists but isn't in the right place, or
+    /// creating a new one.
+    Value *ReuseOrCreateCast(Value *V, const Type *Ty,
+                             Instruction::CastOps Op,
+                             BasicBlock::iterator IP);
+
     /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
     /// which must be possible with a noop cast, doing what we can to
     /// share the casts.

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/AsmPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/AsmPrinter.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/AsmPrinter.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/AsmPrinter.h Fri Jul  2 04:57:13 2010
@@ -64,7 +64,7 @@
     /// Target machine description.
     ///
     TargetMachine &TM;
-    
+
     /// Target Asm Printer information.
     ///
     const MCAsmInfo *MAI;
@@ -73,13 +73,13 @@
     /// streaming.  This owns all of the global MC-related objects for the
     /// generated translation unit.
     MCContext &OutContext;
-    
+
     /// OutStreamer - This is the MCStreamer object for the file we are
     /// generating.  This contains the transient state for the current
     /// translation unit that we are generating (such as the current section
     /// etc).
     MCStreamer &OutStreamer;
-    
+
     /// The current machine function.
     const MachineFunction *MF;
 
@@ -94,30 +94,30 @@
     /// beginning of each call to runOnMachineFunction().
     ///
     MCSymbol *CurrentFnSym;
-    
+
   private:
     // GCMetadataPrinters - The garbage collection metadata printer table.
     void *GCMetadataPrinters;  // Really a DenseMap.
-    
+
     /// VerboseAsm - Emit comments in assembly output if this is true.
     ///
     bool VerboseAsm;
     static char ID;
-    
+
     /// If VerboseAsm is set, a pointer to the loop info for this
     /// function.
     MachineLoopInfo *LI;
 
     /// DD - If the target supports dwarf debug info, this pointer is non-null.
     DwarfDebug *DD;
-    
+
     /// DE - If the target supports dwarf exception info, this pointer is
     /// non-null.
     DwarfException *DE;
-    
+
   protected:
     explicit AsmPrinter(TargetMachine &TM, MCStreamer &Streamer);
-    
+
   public:
     virtual ~AsmPrinter();
 
@@ -128,7 +128,7 @@
     /// getFunctionNumber - Return a unique ID for the current function.
     ///
     unsigned getFunctionNumber() const;
-    
+
     /// getObjFileLowering - Return information about object file lowering.
     const TargetLoweringObjectFile &getObjFileLowering() const;
 
@@ -137,16 +137,16 @@
 
     /// getCurrentSection() - Return the current section we are emitting to.
     const MCSection *getCurrentSection() const;
-    
-    
+
+
     //===------------------------------------------------------------------===//
     // MachineFunctionPass Implementation.
     //===------------------------------------------------------------------===//
-    
+
     /// getAnalysisUsage - Record analysis usage.
-    /// 
+    ///
     void getAnalysisUsage(AnalysisUsage &AU) const;
-    
+
     /// doInitialization - Set up the AsmPrinter when we are working on a new
     /// module.  If your pass overrides this, it must make sure to explicitly
     /// call this implementation.
@@ -155,7 +155,7 @@
     /// doFinalization - Shut down the asmprinter.  If you override this in your
     /// pass, you must make sure to call it explicitly.
     bool doFinalization(Module &M);
-    
+
     /// runOnMachineFunction - Emit the specified function out to the
     /// OutStreamer.
     virtual bool runOnMachineFunction(MachineFunction &MF) {
@@ -163,20 +163,20 @@
       EmitFunctionHeader();
       EmitFunctionBody();
       return false;
-    }      
-    
+    }
+
     //===------------------------------------------------------------------===//
     // Coarse grained IR lowering routines.
     //===------------------------------------------------------------------===//
-    
+
     /// SetupMachineFunction - This should be called when a new MachineFunction
     /// is being processed from runOnMachineFunction.
     void SetupMachineFunction(MachineFunction &MF);
-    
+
     /// EmitFunctionHeader - This method emits the header for the current
     /// function.
     void EmitFunctionHeader();
-    
+
     /// EmitFunctionBody - This method emits the body and trailer for a
     /// function.
     void EmitFunctionBody();
@@ -187,15 +187,15 @@
     /// the code generator.
     ///
     virtual void EmitConstantPool();
-    
-    /// EmitJumpTableInfo - Print assembly representations of the jump tables 
-    /// used by the current function to the current output stream.  
+
+    /// EmitJumpTableInfo - Print assembly representations of the jump tables
+    /// used by the current function to the current output stream.
     ///
     void EmitJumpTableInfo();
-    
+
     /// EmitGlobalVariable - Emit the specified global variable to the .s file.
     virtual void EmitGlobalVariable(const GlobalVariable *GV);
-    
+
     /// EmitSpecialLLVMGlobal - Check to see if the specified global is a
     /// special global used by LLVM.  If so, emit it and return true, otherwise
     /// do nothing and return false.
@@ -208,54 +208,54 @@
     /// if required for correctness.
     ///
     void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
-    
+
     /// EmitBasicBlockStart - This method prints the label for the specified
     /// MachineBasicBlock, an alignment (if present) and a comment describing
     /// it if appropriate.
     void EmitBasicBlockStart(const MachineBasicBlock *MBB) const;
-    
+
     /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
     void EmitGlobalConstant(const Constant *CV, unsigned AddrSpace = 0);
-    
-    
+
+
     //===------------------------------------------------------------------===//
     // Overridable Hooks
     //===------------------------------------------------------------------===//
-    
+
     // Targets can, or in the case of EmitInstruction, must implement these to
     // customize output.
-    
+
     /// EmitStartOfAsmFile - This virtual method can be overridden by targets
     /// that want to emit something at the start of their file.
     virtual void EmitStartOfAsmFile(Module &) {}
-    
+
     /// EmitEndOfAsmFile - This virtual method can be overridden by targets that
     /// want to emit something at the end of their file.
     virtual void EmitEndOfAsmFile(Module &) {}
-    
+
     /// EmitFunctionBodyStart - Targets can override this to emit stuff before
     /// the first basic block in the function.
     virtual void EmitFunctionBodyStart() {}
-    
+
     /// EmitFunctionBodyEnd - Targets can override this to emit stuff after
     /// the last basic block in the function.
     virtual void EmitFunctionBodyEnd() {}
-    
+
     /// EmitInstruction - Targets should implement this to emit instructions.
     virtual void EmitInstruction(const MachineInstr *) {
       assert(0 && "EmitInstruction not implemented");
     }
-    
+
     virtual void EmitFunctionEntryLabel();
-    
+
     virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
-    
+
     /// isBlockOnlyReachableByFallthough - Return true if the basic block has
     /// exactly one predecessor and the control transfer mechanism between
     /// the predecessor and this block is a fall-through.
     virtual bool
     isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
-    
+
     //===------------------------------------------------------------------===//
     // Symbol Lowering Routines.
     //===------------------------------------------------------------------===//
@@ -264,23 +264,23 @@
     /// GetTempSymbol - Return the MCSymbol corresponding to the assembler
     /// temporary label with the specified stem and unique ID.
     MCSymbol *GetTempSymbol(StringRef Name, unsigned ID) const;
-    
+
     /// GetTempSymbol - Return an assembler temporary label with the specified
     /// stem.
     MCSymbol *GetTempSymbol(StringRef Name) const;
-    
-    
+
+
     /// GetSymbolWithGlobalValueBase - Return the MCSymbol for a symbol with
     /// global value name as its base, with the specified suffix, and where the
     /// symbol is forced to have private linkage if ForcePrivate is true.
     MCSymbol *GetSymbolWithGlobalValueBase(const GlobalValue *GV,
                                            StringRef Suffix,
                                            bool ForcePrivate = true) const;
-    
+
     /// GetExternalSymbolSymbol - Return the MCSymbol for the specified
     /// ExternalSymbol.
     MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
-    
+
     /// GetCPISymbol - Return the symbol for the specified constant pool entry.
     MCSymbol *GetCPISymbol(unsigned CPID) const;
 
@@ -302,42 +302,42 @@
   public:
     /// printOffset - This is just convenient handler for printing offsets.
     void printOffset(int64_t Offset, raw_ostream &OS) const;
-    
+
     /// EmitInt8 - Emit a byte directive and value.
     ///
     void EmitInt8(int Value) const;
-    
+
     /// EmitInt16 - Emit a short directive and value.
     ///
     void EmitInt16(int Value) const;
-    
+
     /// EmitInt32 - Emit a long directive and value.
     ///
     void EmitInt32(int Value) const;
-    
+
     /// EmitLabelDifference - Emit something like ".long Hi-Lo" where the size
     /// in bytes of the directive is specified by Size and Hi/Lo specify the
     /// labels.  This implicitly uses .set if it is available.
     void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
                              unsigned Size) const;
-    
-    /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo" 
+
+    /// EmitLabelOffsetDifference - Emit something like ".long Hi+Offset-Lo"
     /// where the size in bytes of the directive is specified by Size and Hi/Lo
     /// specify the labels.  This implicitly uses .set if it is available.
     void EmitLabelOffsetDifference(const MCSymbol *Hi, uint64_t Offset,
                                    const MCSymbol *Lo, unsigned Size) const;
-    
+
     //===------------------------------------------------------------------===//
     // Dwarf Emission Helper Routines
     //===------------------------------------------------------------------===//
-    
+
     /// EmitSLEB128 - emit the specified signed leb128 value.
     void EmitSLEB128(int Value, const char *Desc = 0) const;
-    
+
     /// EmitULEB128 - emit the specified unsigned leb128 value.
     void EmitULEB128(unsigned Value, const char *Desc = 0,
                      unsigned PadTo = 0) const;
-    
+
     /// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
     void EmitCFAByte(unsigned Val) const;
 
@@ -346,15 +346,15 @@
     /// describing the encoding.  Desc is a string saying what the encoding is
     /// specifying (e.g. "LSDA").
     void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;
-    
+
     /// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
     unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
-    
+
     /// EmitReference - Emit a reference to a label with a specified encoding.
     ///
     void EmitReference(const MCSymbol *Sym, unsigned Encoding) const;
     void EmitReference(const GlobalValue *GV, unsigned Encoding) const;
-    
+
     /// EmitSectionOffset - Emit the 4-byte offset of Label from the start of
     /// its section.  This can be done with a special directive if the target
     /// supports it (e.g. cygwin) or by emitting it as an offset from a label at
@@ -372,20 +372,20 @@
     //===------------------------------------------------------------------===//
     // Dwarf Lowering Routines
     //===------------------------------------------------------------------===//
-    
+
     /// EmitFrameMoves - Emit frame instructions to describe the layout of the
     /// frame.
-    void EmitFrameMoves(const std::vector<MachineMove> &Moves, 
+    void EmitFrameMoves(const std::vector<MachineMove> &Moves,
                         MCSymbol *BaseLabel, bool isEH) const;
-    
-    
+
+
     //===------------------------------------------------------------------===//
     // Inline Asm Support
     //===------------------------------------------------------------------===//
   public:
     // These are hooks that targets can override to implement inline asm
     // support.  These should probably be moved out of AsmPrinter someday.
-    
+
     /// PrintSpecial - Print information related to the specified machine instr
     /// that is independent of the operand, and may be independent of the instr
     /// itself.  This can be useful for portably encoding the comment character
@@ -394,7 +394,7 @@
     /// for their own strange codes.
     virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
                               const char *Code) const;
-    
+
     /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
     /// instruction, using the specified assembler variant.  Targets should
     /// override this to format as appropriate.  This method can return true if
@@ -402,16 +402,16 @@
     virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                  unsigned AsmVariant, const char *ExtraCode,
                                  raw_ostream &OS);
-    
+
     /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
     /// instruction, using the specified assembler variant as an address.
     /// Targets should override this to format as appropriate.  This method can
     /// return true if the operand is erroneous.
     virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
-                                       unsigned AsmVariant, 
+                                       unsigned AsmVariant,
                                        const char *ExtraCode,
                                        raw_ostream &OS);
-    
+
   private:
     /// Private state for PrintSpecial()
     // Assign a unique ID to this machine instruction.
@@ -422,7 +422,7 @@
 
     /// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
     void EmitInlineAsm(StringRef Str, unsigned LocCookie) const;
-    
+
     /// EmitInlineAsm - This method formats and emits the specified machine
     /// instruction that is an inline asm.
     void EmitInlineAsm(const MachineInstr *MI) const;
@@ -430,13 +430,13 @@
     //===------------------------------------------------------------------===//
     // Internal Implementation Details
     //===------------------------------------------------------------------===//
-    
+
     /// EmitVisibility - This emits visibility information about symbol, if
     /// this is suported by the target.
     void EmitVisibility(MCSymbol *Sym, unsigned Visibility) const;
-    
+
     void EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const;
-    
+
     void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
                             const MachineBasicBlock *MBB,
                             unsigned uid) const;

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h Fri Jul  2 04:57:13 2010
@@ -36,6 +36,7 @@
 class TargetLowering;
 class TargetMachine;
 class TargetRegisterClass;
+class TargetRegisterInfo;
 
 /// FastISel - This is a fast-path instruction selection class that
 /// generates poor code and doesn't support illegal types or non-trivial
@@ -60,6 +61,7 @@
   const TargetData &TD;
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
+  const TargetRegisterInfo &TRI;
   bool IsBottomUp;
 
 public:
@@ -305,6 +307,8 @@
   }
 
 private:
+  bool SelectLoad(const User *I);
+
   bool SelectBinaryOp(const User *I, unsigned ISDOpcode);
 
   bool SelectFNeg(const User *I);

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadata.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadata.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadata.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadata.h Fri Jul  2 04:57:13 2010
@@ -1,4 +1,4 @@
-//===-- GCMetadata.h - Garbage collector metadata -------------------------===//
+//===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -14,7 +14,7 @@
 //
 // The GCFunctionInfo class logs the data necessary to build a type accurate
 // stack map. The code generator outputs:
-// 
+//
 //   - Safe points as specified by the GCStrategy's NeededSafePoints.
 //   - Stack offsets for GC roots, as specified by calls to llvm.gcroot
 //
@@ -42,10 +42,10 @@
   class GCStrategy;
   class Constant;
   class MCSymbol;
-  
+
   namespace GC {
     /// PointKind - The type of a collector-safe point.
-    /// 
+    ///
     enum PointKind {
       Loop,    //< Instr is a loop (backwards branch).
       Return,  //< Instr is a return instruction.
@@ -53,138 +53,138 @@
       PostCall //< Instr is the return address of a call.
     };
   }
-  
+
   /// GCPoint - Metadata for a collector-safe point in machine code.
-  /// 
+  ///
   struct GCPoint {
     GC::PointKind Kind; //< The kind of the safe point.
     MCSymbol *Label;    //< A label.
-    
+
     GCPoint(GC::PointKind K, MCSymbol *L) : Kind(K), Label(L) {}
   };
-  
+
   /// GCRoot - Metadata for a pointer to an object managed by the garbage
   /// collector.
   struct GCRoot {
     int Num;            //< Usually a frame index.
     int StackOffset;    //< Offset from the stack pointer.
     const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
-    
+
     GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
   };
-  
-  
+
+
   /// GCFunctionInfo - Garbage collection metadata for a single function.
-  /// 
+  ///
   class GCFunctionInfo {
   public:
     typedef std::vector<GCPoint>::iterator iterator;
     typedef std::vector<GCRoot>::iterator roots_iterator;
     typedef std::vector<GCRoot>::const_iterator live_iterator;
-    
+
   private:
     const Function &F;
     GCStrategy &S;
     uint64_t FrameSize;
     std::vector<GCRoot> Roots;
     std::vector<GCPoint> SafePoints;
-    
+
     // FIXME: Liveness. A 2D BitVector, perhaps?
-    // 
+    //
     //   BitVector Liveness;
-    //   
+    //
     //   bool islive(int point, int root) =
     //     Liveness[point * SafePoints.size() + root]
-    // 
+    //
     // The bit vector is the more compact representation where >3.2% of roots
     // are live per safe point (1.5% on 64-bit hosts).
-    
+
   public:
     GCFunctionInfo(const Function &F, GCStrategy &S);
     ~GCFunctionInfo();
-    
+
     /// getFunction - Return the function to which this metadata applies.
-    /// 
+    ///
     const Function &getFunction() const { return F; }
-    
+
     /// getStrategy - Return the GC strategy for the function.
-    /// 
+    ///
     GCStrategy &getStrategy() { return S; }
-    
+
     /// addStackRoot - Registers a root that lives on the stack. Num is the
     ///                stack object ID for the alloca (if the code generator is
     //                 using  MachineFrameInfo).
     void addStackRoot(int Num, const Constant *Metadata) {
       Roots.push_back(GCRoot(Num, Metadata));
     }
-    
+
     /// addSafePoint - Notes the existence of a safe point. Num is the ID of the
-    /// label just prior to the safe point (if the code generator is using 
+    /// label just prior to the safe point (if the code generator is using
     /// MachineModuleInfo).
     void addSafePoint(GC::PointKind Kind, MCSymbol *Label) {
       SafePoints.push_back(GCPoint(Kind, Label));
     }
-    
+
     /// getFrameSize/setFrameSize - Records the function's frame size.
-    /// 
+    ///
     uint64_t getFrameSize() const { return FrameSize; }
     void setFrameSize(uint64_t S) { FrameSize = S; }
-    
+
     /// begin/end - Iterators for safe points.
-    /// 
+    ///
     iterator begin() { return SafePoints.begin(); }
     iterator end()   { return SafePoints.end();   }
     size_t size() const { return SafePoints.size(); }
-    
+
     /// roots_begin/roots_end - Iterators for all roots in the function.
-    /// 
+    ///
     roots_iterator roots_begin() { return Roots.begin(); }
     roots_iterator roots_end  () { return Roots.end();   }
     size_t roots_size() const { return Roots.size(); }
-    
+
     /// live_begin/live_end - Iterators for live roots at a given safe point.
-    /// 
+    ///
     live_iterator live_begin(const iterator &p) { return roots_begin(); }
     live_iterator live_end  (const iterator &p) { return roots_end();   }
     size_t live_size(const iterator &p) const { return roots_size(); }
   };
-  
-  
+
+
   /// GCModuleInfo - Garbage collection metadata for a whole module.
-  /// 
+  ///
   class GCModuleInfo : public ImmutablePass {
     typedef StringMap<GCStrategy*> strategy_map_type;
     typedef std::vector<GCStrategy*> list_type;
     typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
-    
+
     strategy_map_type StrategyMap;
     list_type StrategyList;
     finfo_map_type FInfoMap;
-    
+
     GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name);
-    
+
   public:
     typedef list_type::const_iterator iterator;
-    
+
     static char ID;
-    
+
     GCModuleInfo();
     ~GCModuleInfo();
-    
+
     /// clear - Resets the pass. The metadata deleter pass calls this.
-    /// 
+    ///
     void clear();
-    
+
     /// begin/end - Iterators for used strategies.
-    /// 
+    ///
     iterator begin() const { return StrategyList.begin(); }
     iterator end()   const { return StrategyList.end();   }
-    
+
     /// get - Look up function metadata.
-    /// 
+    ///
     GCFunctionInfo &getFunctionInfo(const Function &F);
   };
-  
+
 }
 
 #endif

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadataPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadataPrinter.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadataPrinter.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/GCMetadataPrinter.h Fri Jul  2 04:57:13 2010
@@ -25,49 +25,49 @@
 #include "llvm/Support/Registry.h"
 
 namespace llvm {
-  
+
   class GCMetadataPrinter;
-  
+
   /// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
   /// defaults from Registry.
   typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
-  
+
   /// GCMetadataPrinter - Emits GC metadata as assembly code.
-  /// 
+  ///
   class GCMetadataPrinter {
   public:
     typedef GCStrategy::list_type list_type;
     typedef GCStrategy::iterator iterator;
-    
+
   private:
     GCStrategy *S;
-    
+
     friend class AsmPrinter;
-    
+
   protected:
     // May only be subclassed.
     GCMetadataPrinter();
-    
+
     // Do not implement.
     GCMetadataPrinter(const GCMetadataPrinter &);
     GCMetadataPrinter &operator=(const GCMetadataPrinter &);
-    
+
   public:
     GCStrategy &getStrategy() { return *S; }
     const Module &getModule() const { return S->getModule(); }
-    
+
     /// begin/end - Iterate over the collected function metadata.
     iterator begin() { return S->begin(); }
     iterator end()   { return S->end();   }
-    
+
     /// beginAssembly/finishAssembly - Emit module metadata as assembly code.
     virtual void beginAssembly(AsmPrinter &AP);
-    
+
     virtual void finishAssembly(AsmPrinter &AP);
-    
+
     virtual ~GCMetadataPrinter();
   };
-  
+
 }
 
 #endif

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h Fri Jul  2 04:57:13 2010
@@ -508,8 +508,9 @@
     CALLSEQ_START,  // Beginning of a call sequence
     CALLSEQ_END,    // End of a call sequence
 
-    // VAARG - VAARG has three operands: an input chain, a pointer, and a
-    // SRCVALUE.  It returns a pair of values: the vaarg value and a new chain.
+    // VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+    // and the alignment. It returns a pair of values: the vaarg value and a
+    // new chain.
     VAARG,
 
     // VACOPY - VACOPY has five operands: an input chain, a destination pointer,

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h Fri Jul  2 04:57:13 2010
@@ -53,7 +53,7 @@
   class VNInfo {
   private:
     enum {
-      HAS_PHI_KILL    = 1,                         
+      HAS_PHI_KILL    = 1,
       REDEF_BY_EC     = 1 << 1,
       IS_PHI_DEF      = 1 << 2,
       IS_UNUSED       = 1 << 3,
@@ -67,22 +67,14 @@
     } cr;
 
   public:
-    typedef SpecificBumpPtrAllocator<VNInfo> Allocator;
-    typedef SmallVector<SlotIndex, 4> KillSet;
+    typedef BumpPtrAllocator Allocator;
 
     /// The ID number of this value.
     unsigned id;
-    
+
     /// The index of the defining instruction (if isDefAccurate() returns true).
     SlotIndex def;
 
-    KillSet kills;
-
-    /*
-    VNInfo(LiveIntervals &li_)
-      : defflags(IS_UNUSED), id(~1U) { cr.copy = 0; }
-    */
-
     /// VNInfo constructor.
     /// d is presumed to point to the actual defining instr. If it doesn't
     /// setIsDefAccurate(false) should be called after construction.
@@ -91,7 +83,7 @@
 
     /// VNInfo construtor, copies values from orig, except for the value number.
     VNInfo(unsigned i, const VNInfo &orig)
-      : flags(orig.flags), cr(orig.cr), id(i), def(orig.def), kills(orig.kills)
+      : flags(orig.flags), cr(orig.cr), id(i), def(orig.def)
     { }
 
     /// Copy from the parameter into this VNInfo.
@@ -99,7 +91,6 @@
       flags = src.flags;
       cr = src.cr;
       def = src.def;
-      kills = src.kills;
     }
 
     /// Used for copying value number info.
@@ -114,7 +105,7 @@
     /// This method should not be called on stack intervals as it may lead to
     /// undefined behavior.
     void setCopy(MachineInstr *c) { cr.copy = c; }
-    
+
     /// For a stack interval, returns the reg which this stack interval was
     /// defined from.
     /// For a register interval the behaviour of this method is undefined. 
@@ -144,7 +135,7 @@
       else
         flags &= ~REDEF_BY_EC;
     }
-   
+
     /// Returns true if this value is defined by a PHI instruction (or was,
     /// PHI instrucions may have been eliminated).
     bool isPHIDef() const { return flags & IS_PHI_DEF; }
@@ -172,49 +163,9 @@
     void setIsDefAccurate(bool defAccurate) {
       if (defAccurate)
         flags |= IS_DEF_ACCURATE;
-      else 
+      else
         flags &= ~IS_DEF_ACCURATE;
     }
-
-    /// Returns true if the given index is a kill of this value.
-    bool isKill(SlotIndex k) const {
-      KillSet::const_iterator
-        i = std::lower_bound(kills.begin(), kills.end(), k);
-      return (i != kills.end() && *i == k);
-    }
-
-    /// addKill - Add a kill instruction index to the specified value
-    /// number.
-    void addKill(SlotIndex k) {
-      if (kills.empty()) {
-        kills.push_back(k);
-      } else {
-        KillSet::iterator
-          i = std::lower_bound(kills.begin(), kills.end(), k);
-        kills.insert(i, k);
-      }
-    }
-
-    /// Remove the specified kill index from this value's kills list.
-    /// Returns true if the value was present, otherwise returns false.
-    bool removeKill(SlotIndex k) {
-      KillSet::iterator i = std::lower_bound(kills.begin(), kills.end(), k);
-      if (i != kills.end() && *i == k) {
-        kills.erase(i);
-        return true;
-      }
-      return false;
-    }
-
-    /// Remove all kills in the range [s, e).
-    void removeKills(SlotIndex s, SlotIndex e) {
-      KillSet::iterator
-        si = std::lower_bound(kills.begin(), kills.end(), s),
-        se = std::upper_bound(kills.begin(), kills.end(), e);
-
-      kills.erase(si, se);
-    }
-
   };
 
   /// LiveRange structure - This represents a simple register range in the
@@ -368,8 +319,8 @@
     /// the instruction that defines the value number.
     VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
                        bool isDefAccurate, VNInfo::Allocator &VNInfoAllocator) {
-      VNInfo *VNI = VNInfoAllocator.Allocate();
-      new (VNI) VNInfo((unsigned)valnos.size(), def, CopyMI);
+      VNInfo *VNI =
+        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def, CopyMI);
       VNI->setIsDefAccurate(isDefAccurate);
       valnos.push_back(VNI);
       return VNI;
@@ -379,23 +330,12 @@
     /// for the Value number.
     VNInfo *createValueCopy(const VNInfo *orig,
                             VNInfo::Allocator &VNInfoAllocator) {
-      VNInfo *VNI = VNInfoAllocator.Allocate();
-      new (VNI) VNInfo((unsigned)valnos.size(), *orig);
+      VNInfo *VNI =
+        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
       valnos.push_back(VNI);
       return VNI;
     }
 
-    /// addKills - Add a number of kills into the VNInfo kill vector. If this
-    /// interval is live at a kill point, then the kill is not added.
-    void addKills(VNInfo *VNI, const VNInfo::KillSet &kills) {
-      for (unsigned i = 0, e = static_cast<unsigned>(kills.size());
-           i != e; ++i) {
-        if (!liveBeforeAndAt(kills[i])) {
-          VNI->addKill(kills[i]);
-        }
-      }
-    }
-
     /// isOnlyLROfValNo - Return true if the specified live range is the only
     /// one defined by the its val#.
     bool isOnlyLROfValNo(const LiveRange *LR) {
@@ -474,6 +414,17 @@
     // range.If it does, then check if the previous live range ends at index-1.
     bool liveBeforeAndAt(SlotIndex index) const;
 
+    /// killedAt - Return true if a live range ends at index. Note that the kill
+    /// point is not contained in the half-open live range. It is usually the
+    /// getDefIndex() slot following its last use.
+    bool killedAt(SlotIndex index) const;
+
+    /// killedInRange - Return true if the interval has kills in [Start,End).
+    /// Note that the kill point is considered the end of a live range, so it is
+    /// not contained in the live range. If a live range ends at End, it won't
+    /// be counted as a kill by this method.
+    bool killedInRange(SlotIndex Start, SlotIndex End) const;
+
     /// getLiveRangeContaining - Return the live range that contains the
     /// specified index, or null if there is none.
     const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/LiveIntervalAnalysis.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/LiveIntervalAnalysis.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/LiveIntervalAnalysis.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/LiveIntervalAnalysis.h Fri Jul  2 04:57:13 2010
@@ -133,10 +133,9 @@
     bool conflictsWithPhysReg(const LiveInterval &li, VirtRegMap &vrm,
                               unsigned reg);
 
-    /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
-    /// it checks for sub-register reference and it can check use as well.
-    bool conflictsWithSubPhysRegRef(LiveInterval &li, unsigned Reg,
-                                    bool CheckUse,
+    /// conflictsWithAliasRef - Similar to conflictsWithPhysRegRef except
+    /// it checks for alias uses and defs.
+    bool conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                    SmallPtrSet<MachineInstr*,32> &JoinedCopies);
 
     // Interval creation
@@ -229,10 +228,6 @@
 
     VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
 
-    /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
-    /// copy field and returns the source register that defines it.
-    unsigned getVNInfoSourceReg(const VNInfo *VNI) const;
-
     virtual void getAnalysisUsage(AnalysisUsage &AU) const;
     virtual void releaseMemory();
 
@@ -249,12 +244,6 @@
     addIntervalsForSpills(const LiveInterval& i,
                           SmallVectorImpl<LiveInterval*> &SpillIs,
                           const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
-    
-    /// addIntervalsForSpillsFast - Quickly create new intervals for spilled
-    /// defs / uses without remat or splitting.
-    std::vector<LiveInterval*>
-    addIntervalsForSpillsFast(const LiveInterval &li,
-                              const MachineLoopInfo *loopInfo, VirtRegMap &vrm);
 
     /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
     /// around all defs and uses of the specified interval. Return true if it

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineBasicBlock.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineBasicBlock.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineBasicBlock.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineBasicBlock.h Fri Jul  2 04:57:13 2010
@@ -19,6 +19,7 @@
 
 namespace llvm {
 
+class Pass;
 class BasicBlock;
 class MachineFunction;
 class MCSymbol;
@@ -281,6 +282,14 @@
   /// it returns end()
   iterator getFirstTerminator();
 
+  /// SplitCriticalEdge - Split the critical edge from this block to the
+  /// given successor block, and return the newly created block, or null
+  /// if splitting is not possible.
+  ///
+  /// This function updates LiveVariables, MachineDominatorTree, and
+  /// MachineLoopInfo, as applicable.
+  MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
+
   void pop_front() { Insts.pop_front(); }
   void pop_back() { Insts.pop_back(); }
   void push_back(MachineInstr *MI) { Insts.push_back(MI); }

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineInstr.h Fri Jul  2 04:57:13 2010
@@ -364,6 +364,11 @@
   void addRegisterDefined(unsigned IncomingReg,
                           const TargetRegisterInfo *RegInfo = 0);
 
+  /// setPhysRegsDeadExcept - Mark every physreg used by this instruction as dead
+  /// except those in the UsedRegs list.
+  void setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+                             const TargetRegisterInfo &TRI);
+
   /// isSafeToMove - Return true if it is safe to move this instruction. If
   /// SawStore is set to true, it means that there is a store (or call) between
   /// the instruction's location and its intended destination.

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineLoopInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineLoopInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineLoopInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineLoopInfo.h Fri Jul  2 04:57:13 2010
@@ -64,13 +64,13 @@
   void operator=(const MachineLoopInfo &);  // do not implement
   MachineLoopInfo(const MachineLoopInfo &); // do not implement
 
-  LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
-
 public:
   static char ID; // Pass identification, replacement for typeid
 
   MachineLoopInfo() : MachineFunctionPass(&ID) {}
 
+  LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+
   /// iterator/begin/end - The interface to the top-level loops in the current
   /// function.
   ///

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h Fri Jul  2 04:57:13 2010
@@ -363,7 +363,18 @@
     defusechain_iterator operator++(int) {        // Postincrement
       defusechain_iterator tmp = *this; ++*this; return tmp;
     }
-    
+
+    /// skipInstruction - move forward until reaching a different instruction.
+    /// Return the skipped instruction that is no longer pointed to, or NULL if
+    /// already pointing to end().
+    MachineInstr *skipInstruction() {
+      if (!Op) return 0;
+      MachineInstr *MI = Op->getParent();
+      do ++*this;
+      while (Op && Op->getParent() == MI);
+      return MI;
+    }
+
     MachineOperand &getOperand() const {
       assert(Op && "Cannot dereference end iterator!");
       return *Op;

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h Fri Jul  2 04:57:13 2010
@@ -165,9 +165,15 @@
     /// virtual register.
     unsigned subIdx_;
 
+    /// origDstReg_ - dstReg_ without subreg adjustments.
+    unsigned origDstReg_;
+
     /// partial_ - True when the original copy was a partial subregister copy.
     bool partial_;
 
+    /// crossClass_ - True when both regs are virtual, and newRC is constrained.
+    bool crossClass_;
+
     /// flipped_ - True when DstReg and SrcReg are reversed from the oriignal copy
     /// instruction.
     bool flipped_;
@@ -186,7 +192,8 @@
   public:
     CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
       : tii_(tii), tri_(tri), dstReg_(0), srcReg_(0), subIdx_(0),
-        partial_(false), flipped_(false), newRC_(0) {}
+        origDstReg_(0), partial_(false), crossClass_(false), flipped_(false),
+        newRC_(0) {}
 
     /// setRegisters - set registers to match the copy instruction MI. Return
     /// false if MI is not a coalescable copy instruction.
@@ -207,6 +214,9 @@
     /// full register, but was a subreg operation.
     bool isPartial() const { return partial_; }
 
+    /// isCrossClass - Return true if DstReg is virtual and NewRC is a smaller register class than DstReg's.
+    bool isCrossClass() const { return crossClass_; }
+
     /// isFlipped - Return true when getSrcReg is the register being defined by
     /// the original copy instruction.
     bool isFlipped() const { return flipped_; }
@@ -222,6 +232,10 @@
     /// coalesced into, or 0.
     unsigned getSubIdx() const { return subIdx_; }
 
+    /// getOrigDstReg - Return DstReg as it appeared in the original copy
+    /// instruction before any subreg adjustments.
+    unsigned getOrigDstReg() const { return isPhys() ? origDstReg_ : dstReg_; }
+
     /// getNewRC - Return the register class of the coalesced register.
     const TargetRegisterClass *getNewRC() const { return newRC_; }
   };

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/RuntimeLibcalls.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/RuntimeLibcalls.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/RuntimeLibcalls.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/RuntimeLibcalls.h Fri Jul  2 04:57:13 2010
@@ -247,6 +247,40 @@
     // EXCEPTION HANDLING
     UNWIND_RESUME,
 
+    // Family ATOMICs
+    SYNC_VAL_COMPARE_AND_SWAP_1,
+    SYNC_VAL_COMPARE_AND_SWAP_2,
+    SYNC_VAL_COMPARE_AND_SWAP_4,
+    SYNC_VAL_COMPARE_AND_SWAP_8,
+    SYNC_LOCK_TEST_AND_SET_1,
+    SYNC_LOCK_TEST_AND_SET_2,
+    SYNC_LOCK_TEST_AND_SET_4,
+    SYNC_LOCK_TEST_AND_SET_8,
+    SYNC_FETCH_AND_ADD_1,
+    SYNC_FETCH_AND_ADD_2,
+    SYNC_FETCH_AND_ADD_4,
+    SYNC_FETCH_AND_ADD_8,
+    SYNC_FETCH_AND_SUB_1,
+    SYNC_FETCH_AND_SUB_2,
+    SYNC_FETCH_AND_SUB_4,
+    SYNC_FETCH_AND_SUB_8,
+    SYNC_FETCH_AND_AND_1,
+    SYNC_FETCH_AND_AND_2,
+    SYNC_FETCH_AND_AND_4,
+    SYNC_FETCH_AND_AND_8,
+    SYNC_FETCH_AND_OR_1,
+    SYNC_FETCH_AND_OR_2,
+    SYNC_FETCH_AND_OR_4,
+    SYNC_FETCH_AND_OR_8,
+    SYNC_FETCH_AND_XOR_1,
+    SYNC_FETCH_AND_XOR_2,
+    SYNC_FETCH_AND_XOR_4,
+    SYNC_FETCH_AND_XOR_8,
+    SYNC_FETCH_AND_NAND_1,
+    SYNC_FETCH_AND_NAND_2,
+    SYNC_FETCH_AND_NAND_4,
+    SYNC_FETCH_AND_NAND_8,
+
     UNKNOWN_LIBCALL
   };
 

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h Fri Jul  2 04:57:13 2010
@@ -582,7 +582,7 @@
   /// getVAArg - VAArg produces a result and token chain, and takes a pointer
   /// and a source value as input.
   SDValue getVAArg(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
-                   SDValue SV);
+                   SDValue SV, unsigned Align = 0);
 
   /// getAtomic - Gets a node for an atomic op, produces result and chain and
   /// takes 3 operands

Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h Fri Jul  2 04:57:13 2010
@@ -549,6 +549,15 @@
     return FoundNode;
   }
 
+  /// getFlaggedUser - If this node has a flag value with a user, return
+  /// the user (there is at most one). Otherwise return NULL.
+  SDNode *getFlaggedUser() const {
+    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+      if (UI.getUse().get().getValueType() == MVT::Flag)
+        return *UI;
+    return 0;
+  }
+
   /// getNumValues - Return the number of values defined/returned by this
   /// operator.
   ///
@@ -1563,6 +1572,8 @@
   struct OutputArg {
     ArgFlagsTy Flags;
     SDValue Val;
+
+    /// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...".
     bool IsFixed;
 
     OutputArg() : IsFixed(false) {}

Modified: llvm/branches/wendling/eh/include/llvm/Config/config.h.in
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Config/config.h.in?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Config/config.h.in (original)
+++ llvm/branches/wendling/eh/include/llvm/Config/config.h.in Fri Jul  2 04:57:13 2010
@@ -63,6 +63,9 @@
 /* Define to 1 if you have the `closedir' function. */
 #undef HAVE_CLOSEDIR
 
+/* Define to 1 if you have the <CrashReporterClient.h> header file. */
+#undef HAVE_CRASHREPORTERCLIENT_H
+
 /* Define to 1 if you have the <ctype.h> header file. */
 #undef HAVE_CTYPE_H
 

Modified: llvm/branches/wendling/eh/include/llvm/GlobalValue.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/GlobalValue.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/GlobalValue.h (original)
+++ llvm/branches/wendling/eh/include/llvm/GlobalValue.h Fri Jul  2 04:57:13 2010
@@ -40,6 +40,7 @@
     InternalLinkage,    ///< Rename collisions when linking (static functions).
     PrivateLinkage,     ///< Like Internal, but omit from symbol table.
     LinkerPrivateLinkage, ///< Like Private, but linker removes.
+    LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak.
     DLLImportLinkage,   ///< Function to be imported from DLL
     DLLExportLinkage,   ///< Function to be accessible from DLL.
     ExternalWeakLinkage,///< ExternalWeak linkage description.
@@ -132,11 +133,14 @@
     return Linkage == PrivateLinkage;
   }
   static bool isLinkerPrivateLinkage(LinkageTypes Linkage) {
-    return Linkage==LinkerPrivateLinkage;
+    return Linkage == LinkerPrivateLinkage;
+  }
+  static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) {
+    return Linkage == LinkerPrivateWeakLinkage;
   }
   static bool isLocalLinkage(LinkageTypes Linkage) {
     return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) ||
-      isLinkerPrivateLinkage(Linkage);
+      isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage);
   }
   static bool isDLLImportLinkage(LinkageTypes Linkage) {
     return Linkage == DLLImportLinkage;
@@ -158,7 +162,8 @@
     return (Linkage == WeakAnyLinkage ||
             Linkage == LinkOnceAnyLinkage ||
             Linkage == CommonLinkage ||
-            Linkage == ExternalWeakLinkage);
+            Linkage == ExternalWeakLinkage ||
+            Linkage == LinkerPrivateWeakLinkage);
   }
 
   /// isWeakForLinker - Whether the definition of this global may be replaced at
@@ -170,7 +175,8 @@
             Linkage == LinkOnceAnyLinkage ||
             Linkage == LinkOnceODRLinkage ||
             Linkage == CommonLinkage ||
-            Linkage == ExternalWeakLinkage);
+            Linkage == ExternalWeakLinkage ||
+            Linkage == LinkerPrivateWeakLinkage);
   }
 
   bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
@@ -187,6 +193,9 @@
   bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
   bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
   bool hasLinkerPrivateLinkage() const { return isLinkerPrivateLinkage(Linkage); }
+  bool hasLinkerPrivateWeakLinkage() const {
+    return isLinkerPrivateWeakLinkage(Linkage);
+  }
   bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
   bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); }
   bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); }

Modified: llvm/branches/wendling/eh/include/llvm/Instructions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Instructions.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Instructions.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Instructions.h Fri Jul  2 04:57:13 2010
@@ -235,6 +235,9 @@
 
   void setAlignment(unsigned Align);
 
+  Value *getValueOperand() { return getOperand(0); }
+  const Value *getValueOperand() const { return getOperand(0); }
+  
   Value *getPointerOperand() { return getOperand(1); }
   const Value *getPointerOperand() const { return getOperand(1); }
   static unsigned getPointerOperandIndex() { return 1U; }
@@ -940,8 +943,21 @@
   /// Provide fast operand accessors
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
 
+  enum { ArgOffset = 1 }; ///< temporary, do not use for new code!
   unsigned getNumArgOperands() const { return getNumOperands() - 1; }
-  Value *getArgOperand(unsigned i) const { return getOperand(i + 1); }
+  Value *getArgOperand(unsigned i) const { return getOperand(i + ArgOffset); }
+  void setArgOperand(unsigned i, Value *v) { setOperand(i + ArgOffset, v); }
+
+  /// Provide compile-time errors for accessing operand 0
+  /// @deprecated these will go away soon
+  /// @detail see below comments and update your code to high-level interfaces
+  ///    - getOperand(0)  --->  getCalledValue()
+  ///    - setOperand(0, V)  --->  setCalledFunction(V)
+  ///
+private:
+  void getOperand(void*); // NO IMPL ---> use getCalledValue (or possibly getCalledFunction) instead
+  void setOperand(void*, Value*); // NO IMPL ---> use setCalledFunction instead
+public:
 
   /// getCallingConv/setCallingConv - Get or set the calling convention of this
   /// function call.
@@ -1034,17 +1050,17 @@
   /// indirect function invocation.
   ///
   Function *getCalledFunction() const {
-    return dyn_cast<Function>(Op<0>());
+    return dyn_cast<Function>(Op<ArgOffset -1>());
   }
 
   /// getCalledValue - Get a pointer to the function that is invoked by this
   /// instruction.
-  const Value *getCalledValue() const { return Op<0>(); }
-        Value *getCalledValue()       { return Op<0>(); }
+  const Value *getCalledValue() const { return Op<ArgOffset -1>(); }
+        Value *getCalledValue()       { return Op<ArgOffset -1>(); }
 
   /// setCalledFunction - Set the function called.
   void setCalledFunction(Value* Fn) {
-    Op<0>() = Fn;
+    Op<ArgOffset -1>() = Fn;
   }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
@@ -1074,7 +1090,7 @@
                                    ->getElementType())->getReturnType(),
                 Instruction::Call,
                 OperandTraits<CallInst>::op_end(this) - (ArgEnd - ArgBegin + 1),
-                (unsigned)(ArgEnd - ArgBegin + 1), InsertAtEnd) {
+                unsigned(ArgEnd - ArgBegin + 1), InsertAtEnd) {
   init(Func, ArgBegin, ArgEnd, NameStr,
        typename std::iterator_traits<InputIterator>::iterator_category());
 }
@@ -2459,6 +2475,7 @@
 
   unsigned getNumArgOperands() const { return getNumOperands() - 3; }
   Value *getArgOperand(unsigned i) const { return getOperand(i); }
+  void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
 
   /// getCallingConv/setCallingConv - Get or set the calling convention of this
   /// function call.

Modified: llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h (original)
+++ llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h Fri Jul  2 04:57:13 2010
@@ -43,7 +43,7 @@
     Intrinsic::ID getIntrinsicID() const {
       return (Intrinsic::ID)getCalledFunction()->getIntrinsicID();
     }
-    
+
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const IntrinsicInst *) { return true; }
     static inline bool classof(const CallInst *I) {
@@ -74,7 +74,7 @@
     static inline bool classof(const Value *V) {
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
     }
-    
+
     static Value *StripCast(Value *C);
   };
 
@@ -83,7 +83,7 @@
   class DbgDeclareInst : public DbgInfoIntrinsic {
   public:
     Value *getAddress() const;
-    MDNode *getVariable() const { return cast<MDNode>(getOperand(2)); }
+    MDNode *getVariable() const { return cast<MDNode>(getArgOperand(1)); }
 
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const DbgDeclareInst *) { return true; }
@@ -103,9 +103,9 @@
     Value *getValue();
     uint64_t getOffset() const {
       return cast<ConstantInt>(
-                             const_cast<Value*>(getOperand(2)))->getZExtValue();
+                             const_cast<Value*>(getArgOperand(1)))->getZExtValue();
     }
-    MDNode *getVariable() const { return cast<MDNode>(getOperand(3)); }
+    MDNode *getVariable() const { return cast<MDNode>(getArgOperand(2)); }
 
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const DbgValueInst *) { return true; }
@@ -121,19 +121,19 @@
   ///
   class MemIntrinsic : public IntrinsicInst {
   public:
-    Value *getRawDest() const { return const_cast<Value*>(getOperand(1)); }
+    Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); }
 
-    Value *getLength() const { return const_cast<Value*>(getOperand(3)); }
+    Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); }
     ConstantInt *getAlignmentCst() const {
-      return cast<ConstantInt>(const_cast<Value*>(getOperand(4)));
+      return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3)));
     }
-    
+
     unsigned getAlignment() const {
       return getAlignmentCst()->getZExtValue();
     }
 
     ConstantInt *getVolatileCst() const {
-      return cast<ConstantInt>(const_cast<Value*>(getOperand(5)));
+      return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4)));
     }
     bool isVolatile() const {
       return !getVolatileCst()->isZero();
@@ -149,27 +149,27 @@
     void setDest(Value *Ptr) {
       assert(getRawDest()->getType() == Ptr->getType() &&
              "setDest called with pointer of wrong type!");
-      setOperand(1, Ptr);
+      setArgOperand(0, Ptr);
     }
 
     void setLength(Value *L) {
       assert(getLength()->getType() == L->getType() &&
              "setLength called with value of wrong type!");
-      setOperand(3, L);
+      setArgOperand(2, L);
     }
-    
+
     void setAlignment(Constant* A) {
-      setOperand(4, A);
+      setArgOperand(3, A);
     }
 
     void setVolatile(Constant* V) {
-      setOperand(5, V);
+      setArgOperand(4, V);
     }
 
     const Type *getAlignmentType() const {
-      return getOperand(4)->getType();
+      return getArgOperand(3)->getType();
     }
-    
+
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const MemIntrinsic *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
@@ -192,14 +192,14 @@
   public:
     /// get* - Return the arguments to the instruction.
     ///
-    Value *getValue() const { return const_cast<Value*>(getOperand(2)); }
-    
+    Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+
     void setValue(Value *Val) {
       assert(getValue()->getType() == Val->getType() &&
-             "setSource called with pointer of wrong type!");
-      setOperand(2, Val);
+             "setValue called with value of wrong type!");
+      setArgOperand(1, Val);
     }
-    
+
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const MemSetInst *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
@@ -209,26 +209,26 @@
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
     }
   };
-  
+
   /// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
   ///
   class MemTransferInst : public MemIntrinsic {
   public:
     /// get* - Return the arguments to the instruction.
     ///
-    Value *getRawSource() const { return const_cast<Value*>(getOperand(2)); }
-    
+    Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); }
+
     /// getSource - This is just like getRawSource, but it strips off any cast
     /// instructions that feed it, giving the original input.  The returned
     /// value is guaranteed to be a pointer.
     Value *getSource() const { return getRawSource()->stripPointerCasts(); }
-    
+
     void setSource(Value *Ptr) {
       assert(getRawSource()->getType() == Ptr->getType() &&
              "setSource called with pointer of wrong type!");
-      setOperand(2, Ptr);
+      setArgOperand(1, Ptr);
     }
-    
+
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const MemTransferInst *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
@@ -239,8 +239,8 @@
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
     }
   };
-  
-  
+
+
   /// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
   ///
   class MemCpyInst : public MemTransferInst {
@@ -282,7 +282,7 @@
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
     }
   };
-  
+
   /// MemoryUseIntrinsic - This is the common base class for the memory use
   /// marker intrinsics.
   ///

Modified: llvm/branches/wendling/eh/include/llvm/LinkAllPasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/LinkAllPasses.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/LinkAllPasses.h (original)
+++ llvm/branches/wendling/eh/include/llvm/LinkAllPasses.h Fri Jul  2 04:57:13 2010
@@ -113,6 +113,7 @@
       (void) llvm::createSingleLoopExtractorPass();
       (void) llvm::createStripSymbolsPass();
       (void) llvm::createStripNonDebugSymbolsPass();
+      (void) llvm::createStripDeadDebugInfoPass();
       (void) llvm::createStripDeadPrototypesPass();
       (void) llvm::createTailCallEliminationPass();
       (void) llvm::createTailDuplicationPass();

Modified: llvm/branches/wendling/eh/include/llvm/MC/MCContext.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/MCContext.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/MCContext.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/MCContext.h Fri Jul  2 04:57:13 2010
@@ -14,6 +14,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
 
 namespace llvm {
   class MCAsmInfo;
@@ -54,6 +55,17 @@
     /// for the LocalLabelVal and adds it to the map if needed.
     unsigned GetInstance(int64_t LocalLabelVal);
     
+    /// The file name of the log file from the environment variable
+    /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
+    /// directive is used or it is an error.
+    char *SecureLogFile;
+    /// The stream that gets written to for the .secure_log_unique directive.
+    raw_ostream *SecureLog;
+    /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
+    /// catch errors if .secure_log_unique appears twice without
+    /// .secure_log_reset appearing between them.
+    bool SecureLogUsed;
+
     /// Allocator - Allocator object used for creating machine code objects.
     ///
     /// We use a bump pointer allocator to avoid the need to track all allocated
@@ -127,6 +139,16 @@
     
     /// @}
 
+    char *getSecureLogFile() { return SecureLogFile; }
+    raw_ostream *getSecureLog() { return SecureLog; }
+    bool getSecureLogUsed() { return SecureLogUsed; }
+    void setSecureLog(raw_ostream *Value) {
+      SecureLog = Value;
+    }
+    void setSecureLogUsed(bool Value) {
+      SecureLogUsed = Value;
+    }
+
     void *Allocate(unsigned Size, unsigned Align = 8) {
       return Allocator.Allocate(Size, Align);
     }

Modified: llvm/branches/wendling/eh/include/llvm/MC/MCParser/AsmParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/MCParser/AsmParser.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/MCParser/AsmParser.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/MCParser/AsmParser.h Fri Jul  2 04:57:13 2010
@@ -36,6 +36,8 @@
 class Twine;
 
 class AsmParser : public MCAsmParser {
+  AsmParser(const AsmParser &);   // DO NOT IMPLEMENT
+  void operator=(const AsmParser &);  // DO NOT IMPLEMENT
 private:
   AsmLexer Lexer;
   MCContext &Ctx;
@@ -56,7 +58,7 @@
   /// in the directive name and the location of the directive keyword.
   StringMap<bool(AsmParser::*)(StringRef, SMLoc)> DirectiveMap;
 public:
-  AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
+  AsmParser(const Target &T, SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
             const MCAsmInfo &MAI);
   ~AsmParser();
 
@@ -143,6 +145,10 @@
   bool ParseDirectiveDarwinSubsectionsViaSymbols();
   // Darwin specific .dump and .load
   bool ParseDirectiveDarwinDumpOrLoad(SMLoc IDLoc, bool IsDump);
+  // Darwin specific .secure_log_unique
+  bool ParseDirectiveDarwinSecureLogUnique(SMLoc IDLoc);
+  // Darwin specific .secure_log_reset
+  bool ParseDirectiveDarwinSecureLogReset(SMLoc IDLoc);
 
   bool ParseDirectiveAbort(); // ".abort"
   bool ParseDirectiveInclude(); // ".include"

Modified: llvm/branches/wendling/eh/include/llvm/MC/MCSection.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/MCSection.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/MCSection.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/MCSection.h Fri Jul  2 04:57:13 2010
@@ -23,7 +23,7 @@
   class MCContext;
   class MCAsmInfo;
   class raw_ostream;
-  
+
   /// MCSection - Instances of this class represent a uniqued identifier for a
   /// section in the current translation unit.  The MCContext class uniques and
   /// creates these.
@@ -49,7 +49,7 @@
     SectionKind getKind() const { return Kind; }
 
     SectionVariant getVariant() const { return Variant; }
-    
+
     virtual void PrintSwitchToSection(const MCAsmInfo &MAI,
                                       raw_ostream &OS) const = 0;
 
@@ -63,7 +63,7 @@
 
     static bool classof(const MCSection *) { return true; }
   };
-  
+
 } // end namespace llvm
 
 #endif

Modified: llvm/branches/wendling/eh/include/llvm/MC/MCSectionCOFF.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/MCSectionCOFF.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/MCSectionCOFF.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/MCSectionCOFF.h Fri Jul  2 04:57:13 2010
@@ -16,6 +16,8 @@
 
 #include "llvm/MC/MCSection.h"
 
+#include "llvm/Support/COFF.h"
+
 namespace llvm {
   
 /// MCSectionCOFF - This represents a section on Windows
@@ -47,56 +49,6 @@
     /// should be printed before the section name
     bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
 
-    //FIXME: all COFF enumerations/flags should be standardized into one place...
-    // Target/X86COFF.h doesn't seem right as COFF can be used for other targets,
-    // MC/WinCOFF.h maybe right as it isn't target or entity specific, and it is
-    //   pretty low on the dependancy graph (is there any need to support non
-    //   windows COFF?)
-    // here is good for section stuff, but others should go elsewhere
-
-    /// Valid section flags.
-    enum {
-      IMAGE_SCN_TYPE_NO_PAD                     = 0x00000008,
-      IMAGE_SCN_CNT_CODE                        = 0x00000020,
-      IMAGE_SCN_CNT_INITIALIZED_DATA            = 0x00000040,
-      IMAGE_SCN_CNT_UNINITIALIZED_DATA          = 0x00000080,
-      IMAGE_SCN_LNK_OTHER                       = 0x00000100,
-      IMAGE_SCN_LNK_INFO                        = 0x00000200,
-      IMAGE_SCN_LNK_REMOVE                      = 0x00000800,
-      IMAGE_SCN_LNK_COMDAT                      = 0x00001000,
-      IMAGE_SCN_MEM_FARDATA                     = 0x00008000,
-      IMAGE_SCN_MEM_PURGEABLE                   = 0x00020000,
-      IMAGE_SCN_MEM_16BIT                       = 0x00020000,
-      IMAGE_SCN_MEM_LOCKED                      = 0x00040000,
-      IMAGE_SCN_MEM_PRELOAD                     = 0x00080000,
-      /* these are handled elsewhere
-      IMAGE_SCN_ALIGN_1BYTES                    = 0x00100000,
-      IMAGE_SCN_ALIGN_2BYTES                    = 0x00200000,
-      IMAGE_SCN_ALIGN_4BYTES                    = 0x00300000,
-      IMAGE_SCN_ALIGN_8BYTES                    = 0x00400000,
-      IMAGE_SCN_ALIGN_16BYTES                   = 0x00500000,
-      IMAGE_SCN_ALIGN_32BYTES                   = 0x00600000,
-      IMAGE_SCN_ALIGN_64BYTES                   = 0x00700000,
-      */
-      IMAGE_SCN_LNK_NRELOC_OVFL                 = 0x01000000,
-      IMAGE_SCN_MEM_DISCARDABLE                 = 0x02000000,
-      IMAGE_SCN_MEM_NOT_CACHED                  = 0x04000000,
-      IMAGE_SCN_MEM_NOT_PAGED                   = 0x08000000,
-      IMAGE_SCN_MEM_SHARED                      = 0x10000000,
-      IMAGE_SCN_MEM_EXECUTE                     = 0x20000000,
-      IMAGE_SCN_MEM_READ                        = 0x40000000,
-      IMAGE_SCN_MEM_WRITE                       = 0x80000000
-    };
-
-    enum {
-      IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
-      IMAGE_COMDAT_SELECT_ANY,
-      IMAGE_COMDAT_SELECT_SAME_SIZE,
-      IMAGE_COMDAT_SELECT_EXACT_MATCH,
-      IMAGE_COMDAT_SELECT_ASSOCIATIVE,
-      IMAGE_COMDAT_SELECT_LARGEST
-    };
-
     StringRef getSectionName() const { return SectionName; }
     unsigned getCharacteristics() const { return Characteristics; }
     int getSelection () const { return Selection; }

Modified: llvm/branches/wendling/eh/include/llvm/MC/SectionKind.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/SectionKind.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/SectionKind.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/SectionKind.h Fri Jul  2 04:57:13 2010
@@ -29,10 +29,10 @@
   enum Kind {
     /// Metadata - Debug info sections or other metadata.
     Metadata,
-    
+
     /// Text - Text section, used for functions and other executable code.
     Text,
-    
+
     /// ReadOnly - Data that is never written to at program runtime by the
     /// program or the dynamic linker.  Things in the top-level readonly
     /// SectionKind are not mergeable.
@@ -45,7 +45,7 @@
 
            /// Mergeable1ByteCString - 1 byte mergable, null terminated, string.
            Mergeable1ByteCString,
-    
+
            /// Mergeable2ByteCString - 2 byte mergable, null terminated, string.
            Mergeable2ByteCString,
 
@@ -56,11 +56,11 @@
         /// constants together.  For example, this can be used to unique
         /// constant pool entries etc.
         MergeableConst,
-    
+
             /// MergeableConst4 - This is a section used by 4-byte constants,
             /// for example, floats.
             MergeableConst4,
-    
+
             /// MergeableConst8 - This is a section used by 8-byte constants,
             /// for example, doubles.
             MergeableConst8,
@@ -68,33 +68,33 @@
             /// MergeableConst16 - This is a section used by 16-byte constants,
             /// for example, vectors.
             MergeableConst16,
-    
+
     /// Writeable - This is the base of all segments that need to be written
     /// to during program runtime.
-    
+
        /// ThreadLocal - This is the base of all TLS segments.  All TLS
        /// objects must be writeable, otherwise there is no reason for them to
        /// be thread local!
-    
+
            /// ThreadBSS - Zero-initialized TLS data objects.
            ThreadBSS,
-    
+
            /// ThreadData - Initialized TLS data objects.
            ThreadData,
-    
+
        /// GlobalWriteableData - Writeable data that is global (not thread
        /// local).
-    
+
            /// BSS - Zero initialized writeable data.
            BSS,
-    
+
                /// BSSLocal - This is BSS (zero initialized and writable) data
                /// which has local linkage.
                BSSLocal,
-    
+
                /// BSSExtern - This is BSS data with normal external linkage.
                BSSExtern,
-    
+
            /// Common - Data with common linkage.  These represent tentative
            /// definitions, which always have a zero initializer and are never
            /// marked 'constant'.
@@ -123,20 +123,20 @@
            /// mark the pages these globals end up on as read-only after it is
            /// done with its relocation phase.
            ReadOnlyWithRel,
-    
+
                /// ReadOnlyWithRelLocal - This is data that is readonly by the
                /// program, but must be writeable so that the dynamic linker
                /// can perform relocations in it.  This is used when we know
                /// that all the relocations are to globals in this final
                /// linked image.
                ReadOnlyWithRelLocal
-    
+
   } K : 8;
 public:
-  
+
   bool isMetadata() const { return K == Metadata; }
   bool isText() const { return K == Text; }
-  
+
   bool isReadOnly() const {
     return K == ReadOnly || isMergeableCString() ||
            isMergeableConst();
@@ -149,7 +149,7 @@
   bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
   bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
   bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }
-  
+
   bool isMergeableConst() const {
     return K == MergeableConst || K == MergeableConst4 ||
            K == MergeableConst8 || K == MergeableConst16;
@@ -157,38 +157,38 @@
   bool isMergeableConst4() const { return K == MergeableConst4; }
   bool isMergeableConst8() const { return K == MergeableConst8; }
   bool isMergeableConst16() const { return K == MergeableConst16; }
-  
+
   bool isWriteable() const {
     return isThreadLocal() || isGlobalWriteableData();
   }
-  
+
   bool isThreadLocal() const {
     return K == ThreadData || K == ThreadBSS;
   }
-  
-  bool isThreadBSS() const { return K == ThreadBSS; } 
-  bool isThreadData() const { return K == ThreadData; } 
+
+  bool isThreadBSS() const { return K == ThreadBSS; }
+  bool isThreadData() const { return K == ThreadData; }
 
   bool isGlobalWriteableData() const {
     return isBSS() || isCommon() || isDataRel() || isReadOnlyWithRel();
   }
-  
+
   bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
   bool isBSSLocal() const { return K == BSSLocal; }
   bool isBSSExtern() const { return K == BSSExtern; }
-  
+
   bool isCommon() const { return K == Common; }
-  
+
   bool isDataRel() const {
     return K == DataRel || K == DataRelLocal || K == DataNoRel;
   }
-  
+
   bool isDataRelLocal() const {
     return K == DataRelLocal || K == DataNoRel;
   }
 
   bool isDataNoRel() const { return K == DataNoRel; }
-  
+
   bool isReadOnlyWithRel() const {
     return K == ReadOnlyWithRel || K == ReadOnlyWithRelLocal;
   }
@@ -196,14 +196,14 @@
   bool isReadOnlyWithRelLocal() const {
     return K == ReadOnlyWithRelLocal;
   }
-private: 
+private:
   static SectionKind get(Kind K) {
     SectionKind Res;
     Res.K = K;
     return Res;
   }
 public:
-  
+
   static SectionKind getMetadata() { return get(Metadata); }
   static SectionKind getText() { return get(Text); }
   static SectionKind getReadOnly() { return get(ReadOnly); }
@@ -234,7 +234,7 @@
     return get(ReadOnlyWithRelLocal);
   }
 };
-  
+
 } // end namespace llvm
 
 #endif

Modified: llvm/branches/wendling/eh/include/llvm/Module.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Module.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Module.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Module.h Fri Jul  2 04:57:13 2010
@@ -197,11 +197,11 @@
   /// Get any module-scope inline assembly blocks.
   /// @returns a string containing the module-scope inline assembly blocks.
   const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
-  
+
 /// @}
 /// @name Module Level Mutators
 /// @{
-  
+
   /// Set the module identifier.
   void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
 
@@ -235,12 +235,12 @@
   /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
   /// This ID is uniqued across modules in the current LLVMContext.
   unsigned getMDKindID(StringRef Name) const;
-  
+
   /// getMDKindNames - Populate client supplied SmallVector with the name for
   /// custom metadata IDs registered in this LLVMContext.   ID #0 is not used,
   /// so it is filled in as an empty string.
   void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
-  
+
 /// @}
 /// @name Function Accessors
 /// @{
@@ -277,7 +277,7 @@
   Constant *getOrInsertTargetIntrinsic(StringRef Name,
                                        const FunctionType *Ty,
                                        AttrListPtr AttributeList);
-  
+
   /// getFunction - Look up the specified function in the module symbol table.
   /// If it does not exist, return null.
   Function *getFunction(StringRef Name) const;
@@ -321,15 +321,14 @@
 /// @}
 /// @name Named Metadata Accessors
 /// @{
-  
+
   /// getNamedMetadata - Return the first NamedMDNode in the module with the
-  /// specified name. This method returns null if a NamedMDNode with the 
+  /// specified name. This method returns null if a NamedMDNode with the
   /// specified name is not found.
-  NamedMDNode *getNamedMetadata(StringRef Name) const;
-  NamedMDNode *getNamedMetadataUsingTwine(Twine Name) const;
+  NamedMDNode *getNamedMetadata(const Twine &Name) const;
 
-  /// getOrInsertNamedMetadata - Return the first named MDNode in the module 
-  /// with the specified name. This method returns a new NamedMDNode if a 
+  /// getOrInsertNamedMetadata - Return the first named MDNode in the module
+  /// with the specified name. This method returns a new NamedMDNode if a
   /// NamedMDNode with the specified name is not found.
   NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
 
@@ -516,15 +515,16 @@
   const_named_metadata_iterator named_metadata_begin() const {
     return NamedMDList.begin();
   }
-  
+
   /// Get an iterator to the last named metadata.
   named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
   /// Get a constant iterator to the last named metadata.
   const_named_metadata_iterator named_metadata_end() const {
     return NamedMDList.end();
   }
-  
-  /// Determine how many NamedMDNodes are in the Module's list of named metadata.
+
+  /// Determine how many NamedMDNodes are in the Module's list of named
+  /// metadata.
   size_t named_metadata_size() const { return NamedMDList.size();  }
   /// Determine if the list of named metadata is empty.
   bool named_metadata_empty() const { return NamedMDList.empty(); }
@@ -536,7 +536,7 @@
 
   /// Print the module to an output stream with AssemblyAnnotationWriter.
   void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW) const;
-  
+
   /// Dump the module to stderr (for debugging).
   void dump() const;
   /// This function causes all the subinstructions to "let go" of all references

Modified: llvm/branches/wendling/eh/include/llvm/Pass.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Pass.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Pass.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Pass.h Fri Jul  2 04:57:13 2010
@@ -31,7 +31,6 @@
 
 #include "llvm/System/DataTypes.h"
 
-#include <cassert>
 #include <string>
 #include <utility>
 #include <vector>
@@ -89,13 +88,8 @@
   Pass(const Pass &);           // DO NOT IMPLEMENT
   
 public:
-  explicit Pass(PassKind K, intptr_t pid) : Resolver(0), PassID(pid), Kind(K) {
-    assert(pid && "pid cannot be 0");
-  }
-  explicit Pass(PassKind K, const void *pid)
-    : Resolver(0), PassID((intptr_t)pid), Kind(K) {
-    assert(pid && "pid cannot be 0"); 
-  }
+  explicit Pass(PassKind K, intptr_t pid);
+  explicit Pass(PassKind K, const void *pid);
   virtual ~Pass();
 
   
@@ -138,13 +132,8 @@
   virtual PassManagerType getPotentialPassManagerType() const;
 
   // Access AnalysisResolver
-  inline void setResolver(AnalysisResolver *AR) { 
-    assert(!Resolver && "Resolver is already set");
-    Resolver = AR; 
-  }
-  inline AnalysisResolver *getResolver() { 
-    return Resolver; 
-  }
+  void setResolver(AnalysisResolver *AR);
+  AnalysisResolver *getResolver() const { return Resolver; }
 
   /// getAnalysisUsage - This function should be overriden by passes that need
   /// analysis information to do their job.  If a pass specifies that it uses a
@@ -170,11 +159,9 @@
   /// an analysis interface through multiple inheritance.  If needed, it should
   /// override this to adjust the this pointer as needed for the specified pass
   /// info.
-  virtual void *getAdjustedAnalysisPointer(const PassInfo *) {
-    return this;
-  }
-  virtual ImmutablePass *getAsImmutablePass() { return 0; }
-  virtual PMDataManager *getAsPMDataManager() { return 0; }
+  virtual void *getAdjustedAnalysisPointer(const PassInfo *);
+  virtual ImmutablePass *getAsImmutablePass();
+  virtual PMDataManager *getAsPMDataManager();
   
   /// verifyAnalysis() - This member can be implemented by a analysis pass to
   /// check state of analysis information. 

Modified: llvm/branches/wendling/eh/include/llvm/PassAnalysisSupport.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/PassAnalysisSupport.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/PassAnalysisSupport.h (original)
+++ llvm/branches/wendling/eh/include/llvm/PassAnalysisSupport.h Fri Jul  2 04:57:13 2010
@@ -49,22 +49,13 @@
   // addRequired - Add the specified ID to the required set of the usage info
   // for a pass.
   //
-  AnalysisUsage &addRequiredID(AnalysisID ID) {
-    assert(ID && "Pass class not registered!");
-    Required.push_back(ID);
-    return *this;
-  }
+  AnalysisUsage &addRequiredID(AnalysisID ID);
   template<class PassClass>
   AnalysisUsage &addRequired() {
     return addRequiredID(Pass::getClassPassInfo<PassClass>());
   }
 
-  AnalysisUsage &addRequiredTransitiveID(AnalysisID ID) {
-    assert(ID && "Pass class not registered!");
-    Required.push_back(ID);
-    RequiredTransitive.push_back(ID);
-    return *this;
-  }
+  AnalysisUsage &addRequiredTransitiveID(AnalysisID ID);
   template<class PassClass>
   AnalysisUsage &addRequiredTransitive() {
     AnalysisID ID = Pass::getClassPassInfo<PassClass>();

Modified: llvm/branches/wendling/eh/include/llvm/PassManagers.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/PassManagers.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/PassManagers.h (original)
+++ llvm/branches/wendling/eh/include/llvm/PassManagers.h Fri Jul  2 04:57:13 2010
@@ -302,10 +302,7 @@
   /// through getAnalysis interface.
   virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
 
-  virtual Pass * getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F) {
-    assert (0 && "Unable to find on the fly pass");
-    return NULL;
-  }
+  virtual Pass *getOnTheFlyPass(Pass *P, const PassInfo *PI, Function &F);
 
   /// Initialize available analysis information.
   void initializeAnalysisInfo() { 

Modified: llvm/branches/wendling/eh/include/llvm/PassSupport.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/PassSupport.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/PassSupport.h (original)
+++ llvm/branches/wendling/eh/include/llvm/PassSupport.h Fri Jul  2 04:57:13 2010
@@ -109,13 +109,7 @@
   }
 
   /// createPass() - Use this method to create an instance of this pass.
-  Pass *createPass() const {
-    assert((!isAnalysisGroup() || NormalCtor) &&
-           "No default implementation found for analysis group!");
-    assert(NormalCtor &&
-           "Cannot call createPass on PassInfo without default ctor!");
-    return NormalCtor();
-  }
+  Pass *createPass() const;
 
   /// addInterfaceImplemented - This method is called when this pass is
   /// registered as a member of an analysis group with the RegisterAnalysisGroup

Modified: llvm/branches/wendling/eh/include/llvm/Support/CallSite.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/CallSite.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/CallSite.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/CallSite.h Fri Jul  2 04:57:13 2010
@@ -273,7 +273,9 @@
     // FIXME: this is slow, since we do not have the fast versions of the op_*()
     // functions here. See CallSite::getCallee.
     if (isCall())
-      return getInstruction()->op_begin();   // Unchanged (ATM)
+      return CallInst::ArgOffset
+             ? getInstruction()->op_begin() // Unchanged
+             : getInstruction()->op_end() - 1; // Skip Function
     else
       // An invoke.
       return getInstruction()->op_end() - 4; // Skip PersFn, BB, BB, Function

Modified: llvm/branches/wendling/eh/include/llvm/Support/IRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/IRBuilder.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/IRBuilder.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/IRBuilder.h Fri Jul  2 04:57:13 2010
@@ -635,8 +635,8 @@
     return Insert(GetElementPtrInst::Create(Ptr, IdxBegin, IdxEnd), Name);
   }
   template<typename InputIterator>
-  Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin, InputIterator IdxEnd,
-                           const Twine &Name = "") {
+  Value *CreateInBoundsGEP(Value *Ptr, InputIterator IdxBegin,
+                           InputIterator IdxEnd, const Twine &Name = "") {
     if (Constant *PC = dyn_cast<Constant>(Ptr)) {
       // Every index must be constant.
       InputIterator i;

Modified: llvm/branches/wendling/eh/include/llvm/Support/MemoryBuffer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/MemoryBuffer.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/MemoryBuffer.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/MemoryBuffer.h Fri Jul  2 04:57:13 2010
@@ -26,17 +26,20 @@
 /// into a memory buffer.  In addition to basic access to the characters in the
 /// file, this interface guarantees you can read one character past the end of
 /// the file, and that this character will read as '\0'.
+///
+/// The '\0' guarantee is needed to support an optimization -- it's intended to
+/// be more efficient for clients which are reading all the data to stop
+/// reading when they encounter a '\0' than to continually check the file
+/// position to see if it has reached the end of the file.
 class MemoryBuffer {
   const char *BufferStart; // Start of the buffer.
   const char *BufferEnd;   // End of the buffer.
 
-  /// MustDeleteBuffer - True if we allocated this buffer.  If so, the
-  /// destructor must know the delete[] it.
-  bool MustDeleteBuffer;
+  MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT
+  MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT
 protected:
-  MemoryBuffer() : MustDeleteBuffer(false) {}
+  MemoryBuffer() {}
   void init(const char *BufStart, const char *BufEnd);
-  void initCopyOf(const char *BufStart, const char *BufEnd);
 public:
   virtual ~MemoryBuffer();
 
@@ -62,24 +65,27 @@
                                std::string *ErrStr = 0,
                                int64_t FileSize = -1,
                                struct stat *FileInfo = 0);
+  static MemoryBuffer *getFile(const char *Filename,
+                               std::string *ErrStr = 0,
+                               int64_t FileSize = -1,
+                               struct stat *FileInfo = 0);
 
   /// getMemBuffer - Open the specified memory range as a MemoryBuffer.  Note
   /// that EndPtr[0] must be a null byte and be accessible!
   static MemoryBuffer *getMemBuffer(StringRef InputData,
-                                    const char *BufferName = "");
+                                    StringRef BufferName = "");
 
   /// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
   /// copying the contents and taking ownership of it.  This has no requirements
   /// on EndPtr[0].
   static MemoryBuffer *getMemBufferCopy(StringRef InputData,
-                                        const char *BufferName = "");
+                                        StringRef BufferName = "");
 
   /// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
   /// is completely initialized to zeros.  Note that the caller should
   /// initialize the memory allocated by this method.  The memory is owned by
   /// the MemoryBuffer object.
-  static MemoryBuffer *getNewMemBuffer(size_t Size,
-                                       const char *BufferName = "");
+  static MemoryBuffer *getNewMemBuffer(size_t Size, StringRef BufferName = "");
 
   /// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
   /// that is not initialized.  Note that the caller should initialize the
@@ -100,6 +106,10 @@
                                       std::string *ErrStr = 0,
                                       int64_t FileSize = -1,
                                       struct stat *FileInfo = 0);
+  static MemoryBuffer *getFileOrSTDIN(const char *Filename,
+                                      std::string *ErrStr = 0,
+                                      int64_t FileSize = -1,
+                                      struct stat *FileInfo = 0);
 };
 
 } // end namespace llvm

Modified: llvm/branches/wendling/eh/include/llvm/System/DataTypes.h.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/System/DataTypes.h.cmake?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/System/DataTypes.h.cmake (original)
+++ llvm/branches/wendling/eh/include/llvm/System/DataTypes.h.cmake Fri Jul  2 04:57:13 2010
@@ -109,41 +109,59 @@
 typedef signed char int8_t;
 typedef unsigned char uint8_t;
 typedef signed int ssize_t;
-#define INT8_MAX 127
-#define INT8_MIN -128
-#define UINT8_MAX 255
-#define INT16_MAX 32767
-#define INT16_MIN -32768
-#define UINT16_MAX 65535
-#define INT32_MAX 2147483647
-#define INT32_MIN -2147483648
-#define UINT32_MAX 4294967295U
+#ifndef INT8_MAX
+# define INT8_MAX 127
+#endif
+#ifndef INT8_MIN
+# define INT8_MIN -128
+#endif
+#ifndef UINT8_MAX
+# define UINT8_MAX 255
+#endif
+#ifndef INT16_MAX
+# define INT16_MAX 32767
+#endif
+#ifndef INT16_MIN
+# define INT16_MIN -32768
+#endif
+#ifndef UINT16_MAX
+# define UINT16_MAX 65535
+#endif
+#ifndef INT32_MAX
+# define INT32_MAX 2147483647
+#endif
+#ifndef INT32_MIN
+# define INT32_MIN -2147483648
+#endif
+#ifndef UINT32_MAX
+# define UINT32_MAX 4294967295U
+#endif
 /* Certain compatibility updates to VC++ introduce the `cstdint'
  * header, which defines the INT*_C macros. On default installs they
  * are absent. */
 #ifndef INT8_C
-# define INT8_C(C)   C
+# define INT8_C(C)   C##i8
 #endif
 #ifndef UINT8_C
-# define UINT8_C(C)  C
+# define UINT8_C(C)  C##ui8
 #endif
 #ifndef INT16_C
-# define INT16_C(C)  C
+# define INT16_C(C)  C##i16
 #endif
 #ifndef UINT16_C
-# define UINT16_C(C) C
+# define UINT16_C(C) C##ui16
 #endif
 #ifndef INT32_C
-# define INT32_C(C)  C
+# define INT32_C(C)  C##i32
 #endif
 #ifndef UINT32_C
-# define UINT32_C(C) C ## U
+# define UINT32_C(C) C##ui32
 #endif
 #ifndef INT64_C
-# define INT64_C(C)  ((int64_t) C ## LL)
+# define INT64_C(C)  C##i64
 #endif
 #ifndef UINT64_C
-# define UINT64_C(C) ((uint64_t) C ## ULL)
+# define UINT64_C(C) C##ui64
 #endif
 #endif /* _MSC_VER */
 

Modified: llvm/branches/wendling/eh/include/llvm/Target/Target.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/Target.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/Target.td (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/Target.td Fri Jul  2 04:57:13 2010
@@ -203,7 +203,6 @@
   bit canFoldAsLoad = 0;    // Can this be folded as a simple memory operand?
   bit mayLoad      = 0;     // Is it possible for this inst to read memory?
   bit mayStore     = 0;     // Is it possible for this inst to write memory?
-  bit isTwoAddress = 0;     // Is this a two address instruction?
   bit isConvertibleToThreeAddress = 0;  // Can this 2-addr instruction promote?
   bit isCommutable = 0;     // Is this 3 operand instruction commutable?
   bit isTerminator = 0;     // Is this part of the terminator for a basic block?

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h Fri Jul  2 04:57:13 2010
@@ -237,23 +237,19 @@
     return 0;
   }
 
-  /// commuteInstruction - If a target has any instructions that are commutable,
-  /// but require converting to a different instruction or making non-trivial
-  /// changes to commute them, this method can overloaded to do this.  The
-  /// default implementation of this method simply swaps the first two operands
-  /// of MI and returns it.
-  ///
-  /// If a target wants to make more aggressive changes, they can construct and
-  /// return a new machine instruction.  If an instruction cannot commute, it
-  /// can also return null.
-  ///
-  /// If NewMI is true, then a new machine instruction must be created.
-  ///
+  /// commuteInstruction - If a target has any instructions that are
+  /// commutable but require converting to different instructions or making
+  /// non-trivial changes to commute them, this method can be overloaded to do
+  /// that.  The default implementation simply swaps the commutable operands.
+  /// If NewMI is false, MI is modified in place and returned; otherwise, a
+  /// new machine instruction is created and returned.  Do not call this
+  /// method for a non-commutable instruction, but there may be some cases
+  /// where this method fails and returns null.
   virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                            bool NewMI = false) const = 0;
 
   /// findCommutedOpIndices - If specified MI is commutable, return the two
-  /// operand indices that would swap value. Return true if the instruction
+  /// operand indices that would swap value. Return false if the instruction
   /// is not in a form which this routine understands.
   virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                      unsigned &SrcOpIdx2) const = 0;
@@ -321,6 +317,45 @@
     assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!"); 
     return 0;
   }
+
+  /// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
+  /// after it, replacing it with an unconditional branch to NewDest. This is
+  /// used by the tail merging pass.
+  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+                                       MachineBasicBlock *NewDest) const = 0;
+
+  /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
+  /// block at the specified instruction (i.e. instruction would be the start
+  /// of a new basic block).
+  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+                                   MachineBasicBlock::iterator MBBI) const {
+    return true;
+  }
+
+  /// isProfitableToIfCvt - Return true if it's profitable to if-convert the
+  /// first "NumInstrs" of the specified basic block.
+  virtual
+  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+    return false;
+  }
+  
+  /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
+  /// checks for the case where two basic blocks from true and false path
+  /// of an if-then-else (diamond) are predicated on mutually exclusive
+  /// predicates.
+  virtual bool
+  isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
+                      MachineBasicBlock &FMBB, unsigned NumFInstrs) const {
+    return false;
+  }
+
+  /// isProfitableToDupForIfCvt - Return true if it's profitable for
+  /// if-converter to duplicate a specific number of instructions in the
+  /// specified MBB to enable if-conversion.
+  virtual bool
+  isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs) const {
+    return false;
+  }
   
   /// copyRegToReg - Emit instructions to copy between a pair of registers. It
   /// returns false if the target does not how to copy between the specified
@@ -562,6 +597,13 @@
     return true;
   }
 
+  /// isSchedulingBoundary - Test if the given instruction should be
+  /// considered a scheduling boundary. This primarily includes labels and
+  /// terminators.
+  virtual bool isSchedulingBoundary(const MachineInstr *MI,
+                                    const MachineBasicBlock *MBB,
+                                    const MachineFunction &MF) const = 0;
+
   /// GetInstSize - Returns the size of the specified Instruction.
   /// 
   virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const {
@@ -595,6 +637,8 @@
   TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
   : TargetInstrInfo(desc, NumOpcodes) {}
 public:
+  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
+                                       MachineBasicBlock *NewDest) const;
   virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                            bool NewMI = false) const;
   virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
@@ -610,6 +654,9 @@
                                   MachineFunction &MF) const;
   virtual bool produceSameValue(const MachineInstr *MI0,
                                 const MachineInstr *MI1) const;
+  virtual bool isSchedulingBoundary(const MachineInstr *MI,
+                                    const MachineBasicBlock *MBB,
+                                    const MachineFunction &MF) const;
   virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
 
   virtual ScheduleHazardRecognizer *

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h Fri Jul  2 04:57:13 2010
@@ -114,7 +114,7 @@
   /// isSelectExpensive - Return true if the select operation is expensive for
   /// this target.
   bool isSelectExpensive() const { return SelectIsExpensive; }
-  
+
   /// isIntDivCheap() - Return true if integer divide is usually cheaper than
   /// a sequence of several shifts, adds, and multiplies for this target.
   bool isIntDivCheap() const { return IntDivIsCheap; }
@@ -131,10 +131,10 @@
   virtual
   MVT::SimpleValueType getSetCCResultType(EVT VT) const;
 
-  /// getCmpLibcallReturnType - Return the ValueType for comparison 
+  /// getCmpLibcallReturnType - Return the ValueType for comparison
   /// libcalls. Comparions libcalls include floating point comparion calls,
   /// and Ordered/Unordered check calls on floating point numbers.
-  virtual 
+  virtual
   MVT::SimpleValueType getCmpLibcallReturnType() const;
 
   /// getBooleanContents - For targets without i1 registers, this gives the
@@ -208,7 +208,7 @@
       ValueTypeActions[I] = Action;
     }
   };
-  
+
   const ValueTypeActionImpl &getValueTypeActions() const {
     return ValueTypeActions;
   }
@@ -229,7 +229,7 @@
   /// returns the integer type to transform to.
   EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
     if (VT.isSimple()) {
-      assert((unsigned)VT.getSimpleVT().SimpleTy < 
+      assert((unsigned)VT.getSimpleVT().SimpleTy <
              array_lengthof(TransformToType));
       EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
       assert(getTypeAction(Context, NVT) != Promote &&
@@ -256,7 +256,7 @@
         return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
       else
         // Promote to a power of two size, avoiding multi-step promotion.
-        return getTypeAction(Context, NVT) == Promote ? 
+        return getTypeAction(Context, NVT) == Promote ?
           getTypeToTransformTo(Context, NVT) : NVT;
     }
     assert(0 && "Unsupported extended type!");
@@ -302,11 +302,11 @@
   /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
   /// this is the case, it returns true and store the intrinsic
   /// information into the IntrinsicInfo that was passed to the function.
-  struct IntrinsicInfo { 
+  struct IntrinsicInfo {
     unsigned     opc;         // target opcode
     EVT          memVT;       // memory VT
     const Value* ptrVal;      // value representing memory location
-    int          offset;      // offset off of ptrVal 
+    int          offset;      // offset off of ptrVal
     unsigned     align;       // alignment
     bool         vol;         // is volatile?
     bool         readMem;     // reads memory?
@@ -324,7 +324,7 @@
   virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const {
     return false;
   }
-  
+
   /// isShuffleMaskLegal - Targets can use this to indicate that they only
   /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
   /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
@@ -446,7 +446,7 @@
            "Table isn't big enough!");
     unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
     return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
-  }  
+  }
 
   /// isIndexedStoreLegal - Return true if the specified indexed load is legal
   /// on this target.
@@ -492,7 +492,7 @@
 
     assert((VT.isInteger() || VT.isFloatingPoint()) &&
            "Cannot autopromote this type, add it with AddPromotedToType.");
-    
+
     EVT NVT = VT;
     do {
       NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
@@ -516,14 +516,14 @@
   /// function arguments in the caller parameter area.  This is the actual
   /// alignment, not its logarithm.
   virtual unsigned getByValTypeAlignment(const Type *Ty) const;
-  
+
   /// getRegisterType - Return the type of registers that this ValueType will
   /// eventually require.
   EVT getRegisterType(MVT VT) const {
     assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
     return RegisterTypeForVT[VT.SimpleTy];
   }
-  
+
   /// getRegisterType - Return the type of registers that this ValueType will
   /// eventually require.
   EVT getRegisterType(LLVMContext &Context, EVT VT) const {
@@ -606,7 +606,7 @@
   /// of the specified type. This is used, for example, in situations where an
   /// array copy/move/set is  converted to a sequence of store operations. It's
   /// use helps to ensure that such replacements don't generate code that causes
-  /// an alignment error  (trap) on the target machine. 
+  /// an alignment error  (trap) on the target machine.
   /// @brief Determine if the target supports unaligned memory accesses.
   virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
     return false;
@@ -637,7 +637,7 @@
                                   MachineFunction &MF) const {
     return MVT::Other;
   }
-  
+
   /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
   /// to implement llvm.setjmp.
   bool usesUnderscoreSetJmp() const {
@@ -683,25 +683,19 @@
     return JumpBufAlignment;
   }
 
-  /// getIfCvtBlockLimit - returns the target specific if-conversion block size
-  /// limit. Any block whose size is greater should not be predicated.
-  unsigned getIfCvtBlockSizeLimit() const {
-    return IfCvtBlockSizeLimit;
-  }
-
-  /// getIfCvtDupBlockLimit - returns the target specific size limit for a
-  /// block to be considered for duplication. Any block whose size is greater
-  /// should not be duplicated to facilitate its predication.
-  unsigned getIfCvtDupBlockSizeLimit() const {
-    return IfCvtDupBlockSizeLimit;
-  }
-
   /// getPrefLoopAlignment - return the preferred loop alignment.
   ///
   unsigned getPrefLoopAlignment() const {
     return PrefLoopAlignment;
   }
-  
+
+  /// getShouldFoldAtomicFences - return whether the combiner should fold
+  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
+  ///
+  bool getShouldFoldAtomicFences() const {
+    return ShouldFoldAtomicFences;
+  }
+
   /// getPreIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if the node's address
   /// can be legally represented as pre-indexed load / store address.
@@ -711,7 +705,7 @@
                                          SelectionDAG &DAG) const {
     return false;
   }
-  
+
   /// getPostIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if this node can be
   /// combined with a load / store to form a post-indexed load / store.
@@ -721,12 +715,12 @@
                                           SelectionDAG &DAG) const {
     return false;
   }
-  
+
   /// getJumpTableEncoding - Return the entry encoding for a jump table in the
   /// current function.  The returned value is a member of the
   /// MachineJumpTableInfo::JTEntryKind enum.
   virtual unsigned getJumpTableEncoding() const;
-  
+
   virtual const MCExpr *
   LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                             const MachineBasicBlock *MBB, unsigned uid,
@@ -734,7 +728,7 @@
     assert(0 && "Need to implement this hook if target has custom JTIs");
     return 0;
   }
-  
+
   /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
   /// jumptable.
   virtual SDValue getPICJumpTableRelocBase(SDValue Table,
@@ -746,7 +740,7 @@
   virtual const MCExpr *
   getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                unsigned JTI, MCContext &Ctx) const;
-  
+
   /// isOffsetFoldingLegal - Return true if folding a constant offset
   /// with the given GlobalAddress is legal.  It is frequently not legal in
   /// PIC relocation models.
@@ -758,10 +752,10 @@
   //===--------------------------------------------------------------------===//
   // TargetLowering Optimization Methods
   //
-  
+
   /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
   /// SDValues for returning information from TargetLowering to its clients
-  /// that want to combine 
+  /// that want to combine
   struct TargetLoweringOpt {
     SelectionDAG &DAG;
     bool LegalTys;
@@ -775,14 +769,14 @@
 
     bool LegalTypes() const { return LegalTys; }
     bool LegalOperations() const { return LegalOps; }
-    
-    bool CombineTo(SDValue O, SDValue N) { 
-      Old = O; 
-      New = N; 
+
+    bool CombineTo(SDValue O, SDValue N) {
+      Old = O;
+      New = N;
       return true;
     }
-    
-    /// ShrinkDemandedConstant - Check to see if the specified operand of the 
+
+    /// ShrinkDemandedConstant - Check to see if the specified operand of the
     /// specified instruction is a constant integer.  If so, check to see if
     /// there are any bits set in the constant that are not demanded.  If so,
     /// shrink the constant and return true.
@@ -795,25 +789,25 @@
     bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                           DebugLoc dl);
   };
-                                                
+
   /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
   /// DemandedMask bits of the result of Op are ever used downstream.  If we can
   /// use this information to simplify Op, create a new simplified DAG node and
-  /// return true, returning the original and new nodes in Old and New. 
-  /// Otherwise, analyze the expression and return a mask of KnownOne and 
-  /// KnownZero bits for the expression (used to simplify the caller).  
-  /// The KnownZero/One bits may only be accurate for those bits in the 
+  /// return true, returning the original and new nodes in Old and New.
+  /// Otherwise, analyze the expression and return a mask of KnownOne and
+  /// KnownZero bits for the expression (used to simplify the caller).
+  /// The KnownZero/One bits may only be accurate for those bits in the
   /// DemandedMask.
-  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, 
+  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetLoweringOpt &TLO, unsigned Depth = 0) const;
-  
+
   /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
-  /// Mask are known to be either zero or one and return them in the 
+  /// Mask are known to be either zero or one and return them in the
   /// KnownZero/KnownOne bitsets.
   virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                               const APInt &Mask,
-                                              APInt &KnownZero, 
+                                              APInt &KnownZero,
                                               APInt &KnownOne,
                                               const SelectionDAG &DAG,
                                               unsigned Depth = 0) const;
@@ -823,7 +817,7 @@
   /// DAG Combiner.
   virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                    unsigned Depth = 0) const;
-  
+
   struct DAGCombinerInfo {
     void *DC;  // The DAG Combiner object.
     bool BeforeLegalize;
@@ -831,15 +825,15 @@
     bool CalledByLegalizer;
   public:
     SelectionDAG &DAG;
-    
+
     DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
       : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
         CalledByLegalizer(cl), DAG(dag) {}
-    
+
     bool isBeforeLegalize() const { return BeforeLegalize; }
     bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
     bool isCalledByLegalizer() const { return CalledByLegalizer; }
-    
+
     void AddToWorklist(SDNode *N);
     SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                       bool AddTo = true);
@@ -849,7 +843,7 @@
     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
   };
 
-  /// SimplifySetCC - Try to simplify a setcc built with the specified operands 
+  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
   /// and cc. If it is unable to simplify it, return a null SDValue.
   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                           ISD::CondCode Cond, bool foldBooleans,
@@ -890,7 +884,7 @@
   virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
     return false;
   }
-  
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Configuration Methods - These methods should be invoked by
   // the derived class constructor to configure this object for the target.
@@ -930,7 +924,7 @@
   void setStackPointerRegisterToSaveRestore(unsigned R) {
     StackPointerRegisterToSaveRestore = R;
   }
-  
+
   /// setExceptionPointerRegister - If set to a physical register, this sets
   /// the register that receives the exception address on entry to a landing
   /// pad.
@@ -953,12 +947,12 @@
   /// expensive, and if possible, should be replaced by an alternate sequence
   /// of instructions not containing an integer divide.
   void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
-  
+
   /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
   /// srl/add/sra for a signed divide by power of two, and let the target handle
   /// it.
   void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
-  
+
   /// addRegisterClass - Add the specified register class as an available
   /// regclass for the specified value type.  This indicates the selector can
   /// handle values of that class natively.
@@ -981,7 +975,7 @@
     assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
     OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
   }
-  
+
   /// setLoadExtAction - Indicate that the specified load with extension does
   /// not work with the specified type and indicate what to do about it.
   void setLoadExtAction(unsigned ExtType, MVT VT,
@@ -991,7 +985,7 @@
            "Table isn't big enough!");
     LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
   }
-  
+
   /// setTruncStoreAction - Indicate that the specified truncating store does
   /// not work with the specified type and indicate what to do about it.
   void setTruncStoreAction(MVT ValVT, MVT MemVT,
@@ -1016,7 +1010,7 @@
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
   }
-  
+
   /// setIndexedStoreAction - Indicate that the specified indexed store does or
   /// does not work with the specified type and indicate what to do about
   /// it. NOTE: All indexed mode stores are initialized to Expand in
@@ -1031,7 +1025,7 @@
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
     IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
   }
-  
+
   /// setCondCodeAction - Indicate that the specified condition code is or isn't
   /// supported on the target and indicate what to do about it.
   void setCondCodeAction(ISD::CondCode CC, MVT VT,
@@ -1058,7 +1052,7 @@
     assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
     TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
   }
-  
+
   /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
   /// bytes); default is 200
   void setJumpBufSize(unsigned Size) {
@@ -1071,25 +1065,18 @@
     JumpBufAlignment = Align;
   }
 
-  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
-  /// limit (in number of instructions); default is 2.
-  void setIfCvtBlockSizeLimit(unsigned Limit) {
-    IfCvtBlockSizeLimit = Limit;
-  }
-  
-  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
-  /// of instructions) to be considered for code duplication during
-  /// if-conversion; default is 2.
-  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
-    IfCvtDupBlockSizeLimit = Limit;
-  }
-
   /// setPrefLoopAlignment - Set the target's preferred loop alignment. Default
   /// alignment is zero, it means the target does not care about loop alignment.
   void setPrefLoopAlignment(unsigned Align) {
     PrefLoopAlignment = Align;
   }
-  
+
+  /// setShouldFoldAtomicFences - Set if the target's implementation of the
+  /// atomic operation intrinsics includes locking. Default is false.
+  void setShouldFoldAtomicFences(bool fold) {
+    ShouldFoldAtomicFences = fold;
+  }
+
 public:
   //===--------------------------------------------------------------------===//
   // Lowering methods - These methods must be implemented by targets so that
@@ -1198,7 +1185,7 @@
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) const;
 
-  /// LowerOperation - This callback is invoked for operations that are 
+  /// LowerOperation - This callback is invoked for operations that are
   /// unsupported by the target, which are registered to use 'custom' lowering,
   /// and whose defined values are all legal.
   /// If the target has no operations that require custom lowering, it need not
@@ -1241,7 +1228,7 @@
   //===--------------------------------------------------------------------===//
   // Inline Asm Support hooks
   //
-  
+
   /// ExpandInlineAsm - This hook allows the target to expand an inline asm
   /// call to be explicit llvm code if it wants to.  This is useful for
   /// turning simple inline asms into LLVM intrinsics, which gives the
@@ -1249,7 +1236,7 @@
   virtual bool ExpandInlineAsm(CallInst *CI) const {
     return false;
   }
-  
+
   enum ConstraintType {
     C_Register,            // Constraint represents specific register(s).
     C_RegisterClass,       // Constraint represents any of register(s) in class.
@@ -1257,7 +1244,7 @@
     C_Other,               // Something else.
     C_Unknown              // Unsupported constraint.
   };
-  
+
   /// AsmOperandInfo - This contains information for each constraint that we are
   /// lowering.
   struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
@@ -1269,25 +1256,25 @@
     /// ConstraintType - Information about the constraint code, e.g. Register,
     /// RegisterClass, Memory, Other, Unknown.
     TargetLowering::ConstraintType ConstraintType;
-  
+
     /// CallOperandval - If this is the result output operand or a
     /// clobber, this is null, otherwise it is the incoming operand to the
     /// CallInst.  This gets modified as the asm is processed.
     Value *CallOperandVal;
-  
+
     /// ConstraintVT - The ValueType for the operand value.
     EVT ConstraintVT;
-    
+
     /// isMatchingInputConstraint - Return true of this is an input operand that
     /// is a matching constraint like "4".
     bool isMatchingInputConstraint() const;
-    
+
     /// getMatchedOperand - If this is an input matching constraint, this method
     /// returns the output operand it matches.
     unsigned getMatchedOperand() const;
-  
+
     AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
-      : InlineAsm::ConstraintInfo(info), 
+      : InlineAsm::ConstraintInfo(info),
         ConstraintType(TargetLowering::C_Unknown),
         CallOperandVal(0), ConstraintVT(MVT::Other) {
     }
@@ -1297,21 +1284,19 @@
   /// type to use for the specific AsmOperandInfo, setting
   /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
   /// being passed in is available, it can be passed in as Op, otherwise an
-  /// empty SDValue can be passed. If hasMemory is true it means one of the asm
-  /// constraint of the inline asm instruction being processed is 'm'.
+  /// empty SDValue can be passed. 
   virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                       SDValue Op,
-                                      bool hasMemory,
                                       SelectionDAG *DAG = 0) const;
-  
+
   /// getConstraintType - Given a constraint, return the type of constraint it
   /// is for this target.
   virtual ConstraintType getConstraintType(const std::string &Constraint) const;
-  
+
   /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
   /// return a list of registers that can be used to satisfy the constraint.
   /// This should only be used for C_RegisterClass constraints.
-  virtual std::vector<unsigned> 
+  virtual std::vector<unsigned>
   getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                     EVT VT) const;
 
@@ -1325,29 +1310,26 @@
   ///
   /// This should only be used for C_Register constraints.  On error,
   /// this returns a register number of 0 and a null register class pointer..
-  virtual std::pair<unsigned, const TargetRegisterClass*> 
+  virtual std::pair<unsigned, const TargetRegisterClass*>
     getRegForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const;
-  
+
   /// LowerXConstraint - try to replace an X constraint, which matches anything,
   /// with another that has more specific requirements based on the type of the
   /// corresponding operand.  This returns null if there is no replacement to
   /// make.
   virtual const char *LowerXConstraint(EVT ConstraintVT) const;
-  
+
   /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-  /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is true
-  /// it means one of the asm constraint of the inline asm instruction being
-  /// processed is 'm'.
+  /// vector.  If it is invalid, don't add anything to Ops.
   virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
-                                            bool hasMemory,
                                             std::vector<SDValue> &Ops,
                                             SelectionDAG &DAG) const;
-  
+
   //===--------------------------------------------------------------------===//
   // Instruction Emitting Hooks
   //
-  
+
   // EmitInstrWithCustomInserter - This method should be implemented by targets
   // that mark instructions with the 'usesCustomInserter' flag.  These
   // instructions are special in various ways, which require special support to
@@ -1376,7 +1358,7 @@
     int64_t      Scale;
     AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
   };
-  
+
   /// isLegalAddressingMode - Return true if the addressing mode represented by
   /// AM is legal for this target, for a load/store of the specified type.
   /// The type may be VoidTy, in which case only return true if the addressing
@@ -1429,9 +1411,9 @@
   //===--------------------------------------------------------------------===//
   // Div utility functions
   //
-  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, 
+  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG,
                       std::vector<SDNode*>* Created) const;
-  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, 
+  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG,
                       std::vector<SDNode*>* Created) const;
 
 
@@ -1468,7 +1450,7 @@
   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
     LibcallCallingConvs[Call] = CC;
   }
-  
+
   /// getLibcallCallingConv - Get the CallingConv that should be used for the
   /// specified libcall.
   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
@@ -1497,12 +1479,12 @@
   /// a real cost model is in place.  If we ever optimize for size, this will be
   /// set to true unconditionally.
   bool IntDivIsCheap;
-  
+
   /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
   /// srl/add/sra for a signed divide by power of two, and let the target handle
   /// it.
   bool Pow2DivIsCheap;
-  
+
   /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
   /// llvm.setjmp.  Defaults to false.
   bool UseUnderscoreSetJmp;
@@ -1522,26 +1504,23 @@
   /// SchedPreferenceInfo - The target scheduling preference: shortest possible
   /// total cycles or lowest register usage.
   Sched::Preference SchedPreferenceInfo;
-  
+
   /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
   unsigned JumpBufSize;
-  
+
   /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
   /// buffers
   unsigned JumpBufAlignment;
 
-  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
-  /// if-converted.
-  unsigned IfCvtBlockSizeLimit;
-  
-  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
-  /// duplicated during if-conversion.
-  unsigned IfCvtDupBlockSizeLimit;
-
   /// PrefLoopAlignment - The perferred loop alignment.
   ///
   unsigned PrefLoopAlignment;
 
+  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
+  /// be folded into the enclosed atomic intrinsic instruction by the
+  /// combiner.
+  bool ShouldFoldAtomicFences;
+
   /// StackPointerRegisterToSaveRestore - If set to a physical register, this
   /// specifies the register that llvm.savestack/llvm.restorestack should save
   /// and restore.
@@ -1581,12 +1560,12 @@
   /// operations that are not should be described.  Note that operations on
   /// non-legal value types are not described here.
   uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
-  
+
   /// LoadExtActions - For each load extension type and each value type,
   /// keep a LegalizeAction that indicates how instruction selection should deal
   /// with a load of a specific value type and extension type.
   uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
-  
+
   /// TruncStoreActions - For each value type pair keep a LegalizeAction that
   /// indicates whether a truncating store of a specific value type and
   /// truncating type is legal.
@@ -1598,7 +1577,7 @@
   /// value_type for the reference. The second dimension represents the various
   /// modes for load store.
   uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
-  
+
   /// CondCodeActions - For each condition code (ISD::CondCode) keep a
   /// LegalizeAction that indicates how instruction selection should
   /// deal with the condition code.
@@ -1613,7 +1592,7 @@
   /// which sets a bit in this array.
   unsigned char
   TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
-  
+
   /// PromoteToType - For operations that must be promoted to a specific type,
   /// this holds the destination type.  This map should be sparse, so don't hold
   /// it as an array.

Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h Fri Jul  2 04:57:13 2010
@@ -321,7 +321,8 @@
 
   /// getMinimalPhysRegClass - Returns the Register Class of a physical
   /// register of the given type.
-  const TargetRegisterClass * getMinimalPhysRegClass(unsigned Reg) const;
+  const TargetRegisterClass *
+    getMinimalPhysRegClass(unsigned Reg, EVT VT = MVT::Other) const;
 
   /// getAllocatableSet - Returns a bitset indexed by register number
   /// indicating if a register is allocatable or not. If a register class is

Modified: llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/IPO.h Fri Jul  2 04:57:13 2010
@@ -45,6 +45,11 @@
 ModulePass *createStripDebugDeclarePass();
 
 //===----------------------------------------------------------------------===//
+//
+// This pass removes unused symbols' debug info.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
 /// createLowerSetJmpPass - This function lowers the setjmp/longjmp intrinsics
 /// to invoke/unwind instructions.  This should really be part of the C/C++
 /// front-end, but it's so much easier to write transformations in LLVM proper.

Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/Cloning.h Fri Jul  2 04:57:13 2010
@@ -18,7 +18,7 @@
 #ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
 #define LLVM_TRANSFORMS_UTILS_CLONING_H
 
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/Support/ValueHandle.h"
@@ -46,7 +46,7 @@
 /// CloneModule - Return an exact copy of the specified module
 ///
 Module *CloneModule(const Module *M);
-Module *CloneModule(const Module *M, DenseMap<const Value*, Value*> &ValueMap);
+Module *CloneModule(const Module *M, ValueMap<const Value*, Value*> &VMap);
 
 /// ClonedCodeInfo - This struct can be used to capture information about code
 /// being cloned, while it is being cloned.
@@ -89,7 +89,7 @@
 /// incoming edges.
 ///
 /// The correlation between instructions in the source and result basic blocks
-/// is recorded in the ValueMap map.
+/// is recorded in the VMap map.
 ///
 /// If you have a particular suffix you'd like to use to add to any cloned
 /// names, specify it as the optional third parameter.
@@ -102,34 +102,34 @@
 /// parameter.
 ///
 BasicBlock *CloneBasicBlock(const BasicBlock *BB,
-                            DenseMap<const Value*, Value*> &ValueMap,
+                            ValueMap<const Value*, Value*> &VMap,
                             const Twine &NameSuffix = "", Function *F = 0,
                             ClonedCodeInfo *CodeInfo = 0);
 
 
 /// CloneLoop - Clone Loop. Clone dominator info for loop insiders. Populate
-/// ValueMap using old blocks to new blocks mapping.
+/// VMap using old blocks to new blocks mapping.
 Loop *CloneLoop(Loop *L, LPPassManager *LPM, LoopInfo *LI, 
-                DenseMap<const Value *, Value *> &ValueMap, Pass *P);
+                ValueMap<const Value *, Value *> &VMap, Pass *P);
 
 /// CloneFunction - Return a copy of the specified function, but without
 /// embedding the function into another module.  Also, any references specified
-/// in the ValueMap are changed to refer to their mapped value instead of the
-/// original one.  If any of the arguments to the function are in the ValueMap,
-/// the arguments are deleted from the resultant function.  The ValueMap is
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one.  If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function.  The VMap is
 /// updated to include mappings from all of the instructions and basicblocks in
 /// the function from their old to new values.  The final argument captures
 /// information about the cloned code if non-null.
 ///
 Function *CloneFunction(const Function *F,
-                        DenseMap<const Value*, Value*> &ValueMap,
+                        ValueMap<const Value*, Value*> &VMap,
                         ClonedCodeInfo *CodeInfo = 0);
 
-/// CloneFunction - Version of the function that doesn't need the ValueMap.
+/// CloneFunction - Version of the function that doesn't need the VMap.
 ///
 inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){
-  DenseMap<const Value*, Value*> ValueMap;
-  return CloneFunction(F, ValueMap, CodeInfo);
+  ValueMap<const Value*, Value*> VMap;
+  return CloneFunction(F, VMap, CodeInfo);
 }
 
 /// Clone OldFunc into NewFunc, transforming the old arguments into references
@@ -139,7 +139,7 @@
 /// specified suffix to all values cloned.
 ///
 void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
-                       DenseMap<const Value*, Value*> &ValueMap,
+                       ValueMap<const Value*, Value*> &VMap,
                        SmallVectorImpl<ReturnInst*> &Returns,
                        const char *NameSuffix = "", 
                        ClonedCodeInfo *CodeInfo = 0);
@@ -152,7 +152,7 @@
 /// dead.  Since this doesn't produce an exactly copy of the input, it can't be
 /// used for things like CloneFunction or CloneModule.
 void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
-                               DenseMap<const Value*, Value*> &ValueMap,
+                               ValueMap<const Value*, Value*> &VMap,
                                SmallVectorImpl<ReturnInst*> &Returns,
                                const char *NameSuffix = "", 
                                ClonedCodeInfo *CodeInfo = 0,

Modified: llvm/branches/wendling/eh/include/llvm/Type.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Type.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Type.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Type.h Fri Jul  2 04:57:13 2010
@@ -504,19 +504,19 @@
 /// reference to the type.
 ///
 inline Type* PATypeHolder::get() const {
+  if (Ty == 0) return 0;
   const Type *NewTy = Ty->getForwardedType();
   if (!NewTy) return const_cast<Type*>(Ty);
   return *const_cast<PATypeHolder*>(this) = NewTy;
 }
 
 inline void PATypeHolder::addRef() {
-  assert(Ty && "Type Holder has a null type!");
-  if (Ty->isAbstract())
+  if (Ty && Ty->isAbstract())
     Ty->addRef();
 }
 
 inline void PATypeHolder::dropRef() {
-  if (Ty->isAbstract())
+  if (Ty && Ty->isAbstract())
     Ty->dropRef();
 }
 

Modified: llvm/branches/wendling/eh/include/llvm/Value.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Value.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Value.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Value.h Fri Jul  2 04:57:13 2010
@@ -93,8 +93,8 @@
   /// printing behavior.
   virtual void printCustom(raw_ostream &O) const;
 
-public:
   Value(const Type *Ty, unsigned scid);
+public:
   virtual ~Value();
 
   /// dump - Support for debugging, callable in GDB: V->dump()

Modified: llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasAnalysis.cpp Fri Jul  2 04:57:13 2010
@@ -229,16 +229,20 @@
 /// identifiable object.  This returns true for:
 ///    Global Variables and Functions (but not Global Aliases)
 ///    Allocas and Mallocs
-///    ByVal and NoAlias Arguments
-///    NoAlias returns
+///    ByVal and NoAlias Arguments, if Interprocedural is false
+///    NoAlias returns, if Interprocedural is false
 ///
-bool llvm::isIdentifiedObject(const Value *V) {
-  if (isa<AllocaInst>(V) || isNoAliasCall(V))
+bool llvm::isIdentifiedObject(const Value *V, bool Interprocedural) {
+  if (isa<AllocaInst>(V))
     return true;
   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
     return true;
-  if (const Argument *A = dyn_cast<Argument>(V))
-    return A->hasNoAliasAttr() || A->hasByValAttr();
+  if (!Interprocedural) {
+    if (isNoAliasCall(V))
+      return true;
+    if (const Argument *A = dyn_cast<Argument>(V))
+      return A->hasNoAliasAttr() || A->hasByValAttr();
+  }
   return false;
 }
 

Modified: llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/AliasAnalysisEvaluator.cpp Fri Jul  2 04:57:13 2010
@@ -21,11 +21,11 @@
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
 #include "llvm/Instructions.h"
+#include "llvm/Module.h"
 #include "llvm/Pass.h"
 #include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/InstIterator.h"
 #include "llvm/Support/CommandLine.h"
@@ -45,20 +45,21 @@
 static cl::opt<bool> PrintModRef("print-modref", cl::ReallyHidden);
 
 namespace {
-  class AAEval : public FunctionPass {
+  /// AAEval - Base class for exhaustive alias analysis evaluators.
+  class AAEval {
+  protected:
     unsigned NoAlias, MayAlias, MustAlias;
     unsigned NoModRef, Mod, Ref, ModRef;
 
-  public:
-    static char ID; // Pass identification, replacement for typeid
-    AAEval() : FunctionPass(&ID) {}
+    SetVector<Value *> Pointers;
+    SetVector<CallSite> CallSites;
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.addRequired<AliasAnalysis>();
       AU.setPreservesAll();
     }
 
-    bool doInitialization(Module &M) {
+    void doInitialization(Module &M) {
       NoAlias = MayAlias = MustAlias = 0;
       NoModRef = Mod = Ref = ModRef = 0;
 
@@ -66,19 +67,85 @@
         PrintNoAlias = PrintMayAlias = PrintMustAlias = true;
         PrintNoModRef = PrintMod = PrintRef = PrintModRef = true;
       }
+    }
+
+    void runOnFunction(Function &F);
+    void evaluate(AliasAnalysis *AA, Module *M);
+    void doFinalization(Module &M);
+  };
+
+  class FunctionAAEval : public FunctionPass, AAEval {
+  public:
+    static char ID; // Pass identification, replacement for typeid
+    FunctionAAEval() : FunctionPass(&ID) {}
+
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      return AAEval::getAnalysisUsage(AU);
+    }
+
+    virtual bool doInitialization(Module &M) {
+      AAEval::doInitialization(M);
+      return false;
+    }
+
+    virtual bool runOnFunction(Function &F) {
+      AAEval::runOnFunction(F);
+
+      if (PrintNoAlias || PrintMayAlias || PrintMustAlias ||
+          PrintNoModRef || PrintMod || PrintRef || PrintModRef)
+        errs() << "Function: " << F.getName() << ": " << Pointers.size()
+               << " pointers, " << CallSites.size() << " call sites\n";
+
+      AAEval::evaluate(&getAnalysis<AliasAnalysis>(), F.getParent());
+      return false;
+    }
+
+    virtual bool doFinalization(Module &M) {
+      AAEval::doFinalization(M);
       return false;
     }
+  };
+
+  class InterproceduralAAEval : public ModulePass, AAEval {
+  public:
+    static char ID; // Pass identification, replacement for typeid
+    InterproceduralAAEval() : ModulePass(&ID) {}
+
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      return AAEval::getAnalysisUsage(AU);
+    }
+
+    virtual bool runOnModule(Module &M) {
+      AAEval::doInitialization(M);
+      for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
+        AAEval::runOnFunction(*I);
+
+      if (PrintNoAlias || PrintMayAlias || PrintMustAlias ||
+          PrintNoModRef || PrintMod || PrintRef || PrintModRef)
+        errs() << "Module: " << Pointers.size()
+               << " pointers, " << CallSites.size() << " call sites\n";
 
-    bool runOnFunction(Function &F);
-    bool doFinalization(Module &M);
+      AAEval::evaluate(&getAnalysis<AliasAnalysis>(), &M);
+      AAEval::doFinalization(M);
+      return false;
+    }
   };
 }
 
-char AAEval::ID = 0;
-static RegisterPass<AAEval>
+char FunctionAAEval::ID = 0;
+static RegisterPass<FunctionAAEval>
 X("aa-eval", "Exhaustive Alias Analysis Precision Evaluator", false, true);
 
-FunctionPass *llvm::createAAEvalPass() { return new AAEval(); }
+FunctionPass *llvm::createAAEvalPass() { return new FunctionAAEval(); }
+
+char InterproceduralAAEval::ID = 0;
+static RegisterPass<InterproceduralAAEval>
+Y("interprocedural-aa-eval",
+  "Exhaustive Interprocedural Alias Analysis Precision Evaluator", false, true);
+
+Pass *llvm::createInterproceduralAAEvalPass() {
+  return new InterproceduralAAEval();
+}
 
 static void PrintResults(const char *Msg, bool P, const Value *V1,
                          const Value *V2, const Module *M) {
@@ -113,12 +180,7 @@
       && !isa<ConstantPointerNull>(V);
 }
 
-bool AAEval::runOnFunction(Function &F) {
-  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-
-  SetVector<Value *> Pointers;
-  SetVector<CallSite> CallSites;
-
+void AAEval::runOnFunction(Function &F) {
   for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
     if (I->getType()->isPointerTy())    // Add all pointer arguments.
       Pointers.insert(I);
@@ -148,33 +210,31 @@
 
     if (CS.getInstruction()) CallSites.insert(CS);
   }
+}
 
-  if (PrintNoAlias || PrintMayAlias || PrintMustAlias ||
-      PrintNoModRef || PrintMod || PrintRef || PrintModRef)
-    errs() << "Function: " << F.getName() << ": " << Pointers.size()
-           << " pointers, " << CallSites.size() << " call sites\n";
+void AAEval::evaluate(AliasAnalysis *AA, Module *M) {
 
   // iterate over the worklist, and run the full (n^2)/2 disambiguations
   for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
        I1 != E; ++I1) {
     unsigned I1Size = ~0u;
     const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
-    if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
+    if (I1ElTy->isSized()) I1Size = AA->getTypeStoreSize(I1ElTy);
 
     for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
       unsigned I2Size = ~0u;
       const Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
-      if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
+      if (I2ElTy->isSized()) I2Size = AA->getTypeStoreSize(I2ElTy);
 
-      switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
+      switch (AA->alias(*I1, I1Size, *I2, I2Size)) {
       case AliasAnalysis::NoAlias:
-        PrintResults("NoAlias", PrintNoAlias, *I1, *I2, F.getParent());
+        PrintResults("NoAlias", PrintNoAlias, *I1, *I2, M);
         ++NoAlias; break;
       case AliasAnalysis::MayAlias:
-        PrintResults("MayAlias", PrintMayAlias, *I1, *I2, F.getParent());
+        PrintResults("MayAlias", PrintMayAlias, *I1, *I2, M);
         ++MayAlias; break;
       case AliasAnalysis::MustAlias:
-        PrintResults("MustAlias", PrintMustAlias, *I1, *I2, F.getParent());
+        PrintResults("MustAlias", PrintMustAlias, *I1, *I2, M);
         ++MustAlias; break;
       default:
         errs() << "Unknown alias query result!\n";
@@ -191,20 +251,20 @@
          V != Ve; ++V) {
       unsigned Size = ~0u;
       const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
-      if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
+      if (ElTy->isSized()) Size = AA->getTypeStoreSize(ElTy);
 
-      switch (AA.getModRefInfo(*C, *V, Size)) {
+      switch (AA->getModRefInfo(*C, *V, Size)) {
       case AliasAnalysis::NoModRef:
-        PrintModRefResults("NoModRef", PrintNoModRef, I, *V, F.getParent());
+        PrintModRefResults("NoModRef", PrintNoModRef, I, *V, M);
         ++NoModRef; break;
       case AliasAnalysis::Mod:
-        PrintModRefResults("     Mod", PrintMod, I, *V, F.getParent());
+        PrintModRefResults("     Mod", PrintMod, I, *V, M);
         ++Mod; break;
       case AliasAnalysis::Ref:
-        PrintModRefResults("     Ref", PrintRef, I, *V, F.getParent());
+        PrintModRefResults("     Ref", PrintRef, I, *V, M);
         ++Ref; break;
       case AliasAnalysis::ModRef:
-        PrintModRefResults("  ModRef", PrintModRef, I, *V, F.getParent());
+        PrintModRefResults("  ModRef", PrintModRef, I, *V, M);
         ++ModRef; break;
       default:
         errs() << "Unknown alias query result!\n";
@@ -212,7 +272,8 @@
     }
   }
 
-  return false;
+  Pointers.clear();
+  CallSites.clear();
 }
 
 static void PrintPercent(unsigned Num, unsigned Sum) {
@@ -220,7 +281,7 @@
          << ((Num*1000ULL/Sum) % 10) << "%)\n";
 }
 
-bool AAEval::doFinalization(Module &M) {
+void AAEval::doFinalization(Module &M) {
   unsigned AliasSum = NoAlias + MayAlias + MustAlias;
   errs() << "===== Alias Analysis Evaluator Report =====\n";
   if (AliasSum == 0) {
@@ -256,6 +317,4 @@
            << NoModRef*100/ModRefSum  << "%/" << Mod*100/ModRefSum << "%/"
            << Ref*100/ModRefSum << "%/" << ModRef*100/ModRefSum << "%\n";
   }
-
-  return false;
 }

Modified: llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/BasicAliasAnalysis.cpp Fri Jul  2 04:57:13 2010
@@ -78,6 +78,20 @@
   return false;
 }
 
+/// isEscapeSource - Return true if the pointer is one which would have
+/// been considered an escape by isNonEscapingLocalObject.
+static bool isEscapeSource(const Value *V) {
+  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
+    return true;
+
+  // The load case works because isNonEscapingLocalObject considers all
+  // stores to be escapes (it passes true for the StoreCaptures argument
+  // to PointerMayBeCaptured).
+  if (isa<LoadInst>(V))
+    return true;
+
+  return false;
+}
 
 /// isObjectSmallerThan - Return true if we can prove that the object specified
 /// by V is smaller than Size.
@@ -94,7 +108,7 @@
   } else if (const CallInst* CI = extractMallocCall(V)) {
     if (!isArrayMalloc(V, &TD))
       // The size is the argument to the malloc call.
-      if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getOperand(1)))
+      if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
         return (C->getZExtValue() < Size);
     return false;
   } else if (const Argument *A = dyn_cast<Argument>(V)) {
@@ -177,21 +191,63 @@
 ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
 
 //===----------------------------------------------------------------------===//
-// BasicAA Pass
+// BasicAliasAnalysis Pass
 //===----------------------------------------------------------------------===//
 
+static const Function *getParent(const Value *V) {
+  if (const Instruction *inst = dyn_cast<Instruction>(V))
+    return inst->getParent()->getParent();
+
+  if (const Argument *arg = dyn_cast<Argument>(V))
+    return arg->getParent();
+
+  return NULL;
+}
+
+static bool sameParent(const Value *O1, const Value *O2) {
+
+  const Function *F1 = getParent(O1);
+  const Function *F2 = getParent(O2);
+
+  return F1 && F1 == F2;
+}
+
+#ifdef XDEBUG
+static bool notDifferentParent(const Value *O1, const Value *O2) {
+
+  const Function *F1 = getParent(O1);
+  const Function *F2 = getParent(O2);
+
+  return !F1 || !F2 || F1 == F2;
+}
+#endif
+
 namespace {
   /// BasicAliasAnalysis - This is the default alias analysis implementation.
   /// Because it doesn't chain to a previous alias analysis (like -no-aa), it
   /// derives from the NoAA class.
   struct BasicAliasAnalysis : public NoAA {
+    /// Interprocedural - Flag for "interprocedural" mode, where we must
+    /// support queries of values which live in different functions.
+    bool Interprocedural;
+
     static char ID; // Class identification, replacement for typeinfo
-    BasicAliasAnalysis() : NoAA(&ID) {}
+    BasicAliasAnalysis()
+      : NoAA(&ID), Interprocedural(false) {}
+    BasicAliasAnalysis(void *PID, bool interprocedural)
+      : NoAA(PID), Interprocedural(interprocedural) {}
+
     AliasResult alias(const Value *V1, unsigned V1Size,
                       const Value *V2, unsigned V2Size) {
-      assert(VisitedPHIs.empty() && "VisitedPHIs must be cleared after use!");
+      assert(Visited.empty() && "Visited must be cleared after use!");
+#ifdef XDEBUG
+      assert((Interprocedural || notDifferentParent(V1, V2)) &&
+             "BasicAliasAnalysis (-basicaa) doesn't support interprocedural "
+             "queries; use InterproceduralAliasAnalysis "
+             "(-interprocedural-basic-aa) instead.");
+#endif
       AliasResult Alias = aliasCheck(V1, V1Size, V2, V2Size);
-      VisitedPHIs.clear();
+      Visited.clear();
       return Alias;
     }
 
@@ -213,8 +269,8 @@
     }
     
   private:
-    // VisitedPHIs - Track PHI nodes visited by a aliasCheck() call.
-    SmallPtrSet<const Value*, 16> VisitedPHIs;
+    // Visited - Track instructions visited by aliasPHI(), aliasSelect(), and aliasGEP().
+    SmallPtrSet<const Value*, 16> Visited;
 
     // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
     // instruction against another.
@@ -280,10 +336,16 @@
       if (CI->isTailCall())
         return NoModRef;
   
+  // If we can identify an object and it's known to be within the
+  // same function as the call, we can ignore interprocedural concerns.
+  bool EffectivelyInterprocedural =
+    Interprocedural && !sameParent(Object, CS.getInstruction());
+  
   // If the pointer is to a locally allocated object that does not escape,
   // then the call can not mod/ref the pointer unless the call takes the pointer
   // as an argument, and itself doesn't capture it.
   if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
+      !EffectivelyInterprocedural &&
       isNonEscapingLocalObject(Object)) {
     bool PassedAsArg = false;
     unsigned ArgNo = 0;
@@ -318,10 +380,10 @@
   case Intrinsic::memcpy:
   case Intrinsic::memmove: {
     unsigned Len = ~0U;
-    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3)))
+    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
       Len = LenCI->getZExtValue();
-    Value *Dest = II->getOperand(1);
-    Value *Src = II->getOperand(2);
+    Value *Dest = II->getArgOperand(0);
+    Value *Src = II->getArgOperand(1);
     if (isNoAlias(Dest, Len, P, Size)) {
       if (isNoAlias(Src, Len, P, Size))
         return NoModRef;
@@ -332,9 +394,9 @@
   case Intrinsic::memset:
     // Since memset is 'accesses arguments' only, the AliasAnalysis base class
     // will handle it for the variable length case.
-    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3))) {
+    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
       unsigned Len = LenCI->getZExtValue();
-      Value *Dest = II->getOperand(1);
+      Value *Dest = II->getArgOperand(0);
       if (isNoAlias(Dest, Len, P, Size))
         return NoModRef;
     }
@@ -352,7 +414,7 @@
   case Intrinsic::atomic_load_umax:
   case Intrinsic::atomic_load_umin:
     if (TD) {
-      Value *Op1 = II->getOperand(1);
+      Value *Op1 = II->getArgOperand(0);
       unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
       if (isNoAlias(Op1, Op1Size, P, Size))
         return NoModRef;
@@ -361,14 +423,14 @@
   case Intrinsic::lifetime_start:
   case Intrinsic::lifetime_end:
   case Intrinsic::invariant_start: {
-    unsigned PtrSize = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
-    if (isNoAlias(II->getOperand(2), PtrSize, P, Size))
+    unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
+    if (isNoAlias(II->getArgOperand(1), PtrSize, P, Size))
       return NoModRef;
     break;
   }
   case Intrinsic::invariant_end: {
-    unsigned PtrSize = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
-    if (isNoAlias(II->getOperand(3), PtrSize, P, Size))
+    unsigned PtrSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
+    if (isNoAlias(II->getArgOperand(2), PtrSize, P, Size))
       return NoModRef;
     break;
   }
@@ -440,6 +502,13 @@
                              const Value *V2, unsigned V2Size,
                              const Value *UnderlyingV1,
                              const Value *UnderlyingV2) {
+  // If this GEP has been visited before, we're on a use-def cycle.
+  // Such cycles are only valid when PHI nodes are involved or in unreachable
+  // code. The visitPHI function catches cycles containing PHIs, but there
+  // could still be a cycle without PHIs in unreachable code.
+  if (!Visited.insert(GEP1))
+    return MayAlias;
+
   int64_t GEP1BaseOffset;
   SmallVector<std::pair<const Value*, int64_t>, 4> GEP1VariableIndices;
 
@@ -550,6 +619,13 @@
 AliasAnalysis::AliasResult
 BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
                                 const Value *V2, unsigned V2Size) {
+  // If this select has been visited before, we're on a use-def cycle.
+  // Such cycles are only valid when PHI nodes are involved or in unreachable
+  // code. The visitPHI function catches cycles containing PHIs, but there
+  // could still be a cycle without PHIs in unreachable code.
+  if (!Visited.insert(SI))
+    return MayAlias;
+
   // If the values are Selects with the same condition, we can do a more precise
   // check: just check for aliases between the values on corresponding arms.
   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
@@ -570,11 +646,17 @@
   // If both arms of the Select node NoAlias or MustAlias V2, then returns
   // NoAlias / MustAlias. Otherwise, returns MayAlias.
   AliasResult Alias =
-    aliasCheck(SI->getTrueValue(), SISize, V2, V2Size);
+    aliasCheck(V2, V2Size, SI->getTrueValue(), SISize);
   if (Alias == MayAlias)
     return MayAlias;
+
+  // If V2 is visited, the recursive case will have been caught in the
+  // above aliasCheck call, so these subsequent calls to aliasCheck
+  // don't need to assume that V2 is being visited recursively.
+  Visited.erase(V2);
+
   AliasResult ThisAlias =
-    aliasCheck(SI->getFalseValue(), SISize, V2, V2Size);
+    aliasCheck(V2, V2Size, SI->getFalseValue(), SISize);
   if (ThisAlias != Alias)
     return MayAlias;
   return Alias;
@@ -586,7 +668,7 @@
 BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
                              const Value *V2, unsigned V2Size) {
   // The PHI node has already been visited, avoid recursion any further.
-  if (!VisitedPHIs.insert(PN))
+  if (!Visited.insert(PN))
     return MayAlias;
 
   // If the values are PHIs in the same block, we can do a more precise
@@ -636,10 +718,10 @@
   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
     Value *V = V1Srcs[i];
 
-    // If V2 is a PHI, the recursive case will have been caught in the
+    // If V2 is visited, the recursive case will have been caught in the
     // above aliasCheck call, so these subsequent calls to aliasCheck
     // don't need to assume that V2 is being visited recursively.
-    VisitedPHIs.erase(V2);
+    Visited.erase(V2);
 
     AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);
     if (ThisAlias != Alias || ThisAlias == MayAlias)
@@ -683,19 +765,31 @@
     if (CPN->getType()->getAddressSpace() == 0)
       return NoAlias;
 
+  // If we can identify two objects and they're known to be within the
+  // same function, we can ignore interprocedural concerns.
+  bool EffectivelyInterprocedural =
+    Interprocedural && !sameParent(O1, O2);
+
   if (O1 != O2) {
     // If V1/V2 point to two different objects we know that we have no alias.
-    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
+    if (isIdentifiedObject(O1, EffectivelyInterprocedural) &&
+        isIdentifiedObject(O2, EffectivelyInterprocedural))
       return NoAlias;
 
     // Constant pointers can't alias with non-const isIdentifiedObject objects.
-    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
-        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
+    if ((isa<Constant>(O1) &&
+         isIdentifiedObject(O2, EffectivelyInterprocedural) &&
+         !isa<Constant>(O2)) ||
+        (isa<Constant>(O2) &&
+         isIdentifiedObject(O1, EffectivelyInterprocedural) &&
+         !isa<Constant>(O1)))
       return NoAlias;
 
-    // Arguments can't alias with local allocations or noalias calls.
-    if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
-        (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))
+    // Arguments can't alias with local allocations or noalias calls
+    // in the same function.
+    if (!EffectivelyInterprocedural &&
+        ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
+         (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
       return NoAlias;
 
     // Most objects can't alias null.
@@ -712,18 +806,18 @@
       return NoAlias;
   
   // If one pointer is the result of a call/invoke or load and the other is a
-  // non-escaping local object, then we know the object couldn't escape to a
-  // point where the call could return it. The load case works because
-  // isNonEscapingLocalObject considers all stores to be escapes (it
-  // passes true for the StoreCaptures argument to PointerMayBeCaptured).
-  if (O1 != O2) {
-    if ((isa<CallInst>(O1) || isa<InvokeInst>(O1) || isa<LoadInst>(O1) ||
-         isa<Argument>(O1)) &&
-        isNonEscapingLocalObject(O2))
+  // non-escaping local object within the same function, then we know the
+  // object couldn't escape to a point where the call could return it.
+  //
+  // Note that if the pointers are in different functions, there are a
+  // variety of complications. A call with a nocapture argument may still
+  // temporarily store the nocapture argument's value in a temporary memory
+  // location if that memory location doesn't escape. Or it may pass a
+  // nocapture value to other functions as long as they don't capture it.
+  if (O1 != O2 && !EffectivelyInterprocedural) {
+    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
       return NoAlias;
-    if ((isa<CallInst>(O2) || isa<InvokeInst>(O2) || isa<LoadInst>(O2) ||
-         isa<Argument>(O2)) &&
-        isNonEscapingLocalObject(O1))
+    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
       return NoAlias;
   }
 
@@ -756,3 +850,33 @@
 
 // Make sure that anything that uses AliasAnalysis pulls in this file.
 DEFINING_FILE_FOR(BasicAliasAnalysis)
+
+//===----------------------------------------------------------------------===//
+// InterproceduralBasicAliasAnalysis Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+  /// InterproceduralBasicAliasAnalysis - This is similar to basicaa, except
+  /// that it properly supports queries to values which live in different
+  /// functions.
+  ///
+  /// Note that we don't currently take this to the extreme, analyzing all
+  /// call sites of a function to answer a query about an Argument.
+  ///
+  struct InterproceduralBasicAliasAnalysis : public BasicAliasAnalysis {
+    static char ID; // Class identification, replacement for typeinfo
+    InterproceduralBasicAliasAnalysis() : BasicAliasAnalysis(&ID, true) {}
+  };
+}
+
+// Register this pass...
+char InterproceduralBasicAliasAnalysis::ID = 0;
+static RegisterPass<InterproceduralBasicAliasAnalysis>
+W("interprocedural-basic-aa", "Interprocedural Basic Alias Analysis", false, true);
+
+// Declare that we implement the AliasAnalysis interface
+static RegisterAnalysisGroup<AliasAnalysis> Z(W);
+
+ImmutablePass *llvm::createInterproceduralBasicAliasAnalysisPass() {
+  return new InterproceduralBasicAliasAnalysis();
+}

Modified: llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp Fri Jul  2 04:57:13 2010
@@ -971,7 +971,12 @@
     ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
     Fn
   };
-  return DISubprogram(MDNode::get(VMContext, &Elts[0], 17));
+  MDNode *Node = MDNode::get(VMContext, &Elts[0], 17);
+
+  // Create a named metadata so that we do not lose this mdnode.
+  NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+  NMD->addOperand(Node);
+  return DISubprogram(Node);
 }
 
 /// CreateSubprogramDefinition - Create new subprogram descriptor for the
@@ -997,9 +1002,15 @@
     DeclNode->getOperand(12), // VIndex
+    DeclNode->getOperand(13), // Containing Type
     DeclNode->getOperand(14), // isArtificial
-    DeclNode->getOperand(15)  // isOptimized
+    DeclNode->getOperand(15), // isOptimized
+    SPDeclaration.getFunction()
   };
-  return DISubprogram(MDNode::get(VMContext, &Elts[0], 16));
+  MDNode *Node =MDNode::get(VMContext, &Elts[0], 16);
+
+  // Create a named metadata so that we do not lose this mdnode.
+  NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.sp");
+  NMD->addOperand(Node);
+  return DISubprogram(Node);
 }
 
 /// CreateGlobalVariable - Create a new descriptor for the specified global.
@@ -1056,10 +1067,16 @@
     // to preserve variable info in such situation then stash it in a
     // named mdnode.
     DISubprogram Fn(getDISubprogram(Context));
-    const Twine FnLVName = Twine("llvm.dbg.lv.", Fn.getName());
-    NamedMDNode *FnLocals = M.getNamedMetadataUsingTwine(FnLVName);
+    StringRef FName = "fn";
+    if (Fn.getFunction())
+      FName = Fn.getFunction()->getName();
+    char One = '\1';
+    if (FName.startswith(StringRef(&One, 1)))
+      FName = FName.substr(1);
+    NamedMDNode *FnLocals = M.getNamedMetadata(Twine("llvm.dbg.lv.", FName));
     if (!FnLocals)
-      FnLocals = NamedMDNode::Create(VMContext, FnLVName, NULL, 0, &M);
+      FnLocals = NamedMDNode::Create(VMContext, Twine("llvm.dbg.lv.", FName),
+                                     NULL, 0, &M);
     FnLocals->addOperand(Node);
   }
   return DIVariable(Node);
@@ -1223,17 +1240,19 @@
           processLocation(DILocation(IA));
       }
 
-  NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv");
-  if (!NMD)
-    return;
-
-  for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
-    DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i)));
-    if (addGlobalVariable(DIG)) {
-      addCompileUnit(DIG.getCompileUnit());
-      processType(DIG.getType());
+  if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv")) {
+    for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+      DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i)));
+      if (addGlobalVariable(DIG)) {
+        addCompileUnit(DIG.getCompileUnit());
+        processType(DIG.getType());
+      }
     }
   }
+
+  if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.sp"))
+    for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i)
+      processSubprogram(DISubprogram(NMD->getOperand(i)));
 }
 
 /// processLocation - Process DILocation.

Modified: llvm/branches/wendling/eh/lib/Analysis/MemoryBuiltins.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/MemoryBuiltins.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/MemoryBuiltins.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/MemoryBuiltins.cpp Fri Jul  2 04:57:13 2010
@@ -101,9 +101,9 @@
   if (const StructType *ST = dyn_cast<StructType>(T))
     ElementSize = TD->getStructLayout(ST)->getSizeInBytes();
 
-  // If malloc calls' arg can be determined to be a multiple of ElementSize,
+  // If malloc call's arg can be determined to be a multiple of ElementSize,
   // return the multiple.  Otherwise, return NULL.
-  Value *MallocArg = CI->getOperand(1);
+  Value *MallocArg = CI->getArgOperand(0);
   Value *Multiple = NULL;
   if (ComputeMultiple(MallocArg, ElementSize, Multiple,
                       LookThroughSExt))
@@ -120,7 +120,7 @@
   Value *ArraySize = computeArraySize(CI, TD);
 
   if (ArraySize &&
-      ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1))
+      ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
     return CI;
 
   // CI is a non-array malloc or we can't figure out that it is an array malloc.
@@ -183,25 +183,25 @@
 //  free Call Utility Functions.
 //
 
-/// isFreeCall - Returns true if the value is a call to the builtin free()
-bool llvm::isFreeCall(const Value *I) {
+/// isFreeCall - Returns non-null if the value is a call to the builtin free()
+const CallInst *llvm::isFreeCall(const Value *I) {
   const CallInst *CI = dyn_cast<CallInst>(I);
   if (!CI)
-    return false;
+    return 0;
   Function *Callee = CI->getCalledFunction();
   if (Callee == 0 || !Callee->isDeclaration() || Callee->getName() != "free")
-    return false;
+    return 0;
 
   // Check free prototype.
   // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin 
   // attribute will exist.
   const FunctionType *FTy = Callee->getFunctionType();
   if (!FTy->getReturnType()->isVoidTy())
-    return false;
+    return 0;
   if (FTy->getNumParams() != 1)
-    return false;
+    return 0;
   if (FTy->param_begin()->get() != Type::getInt8PtrTy(Callee->getContext()))
-    return false;
+    return 0;
 
-  return true;
+  return CI;
 }

Modified: llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/MemoryDependenceAnalysis.cpp Fri Jul  2 04:57:13 2010
@@ -116,8 +116,8 @@
     } else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
       Pointer = V->getOperand(0);
       PointerSize = AA->getTypeStoreSize(V->getType());
-    } else if (isFreeCall(Inst)) {
-      Pointer = Inst->getOperand(1);
+    } else if (const CallInst *CI = isFreeCall(Inst)) {
+      Pointer = CI->getArgOperand(0);
       // calls to free() erase the entire structure
       PointerSize = ~0ULL;
     } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
@@ -197,9 +197,9 @@
         // pointer, not on query pointers that are indexed off of them.  It'd
         // be nice to handle that at some point.
         AliasAnalysis::AliasResult R = 
-          AA->alias(II->getOperand(3), ~0U, MemPtr, ~0U);
+          AA->alias(II->getArgOperand(2), ~0U, MemPtr, ~0U);
         if (R == AliasAnalysis::MustAlias) {
-          InvariantTag = II->getOperand(1);
+          InvariantTag = II->getArgOperand(0);
           continue;
         }
       
@@ -210,7 +210,7 @@
         // pointer, not on query pointers that are indexed off of them.  It'd
         // be nice to handle that at some point.
         AliasAnalysis::AliasResult R =
-          AA->alias(II->getOperand(2), ~0U, MemPtr, ~0U);
+          AA->alias(II->getArgOperand(1), ~0U, MemPtr, ~0U);
         if (R == AliasAnalysis::MustAlias)
           return MemDepResult::getDef(II);
       }
@@ -365,25 +365,26 @@
       MemPtr = LI->getPointerOperand();
       MemSize = AA->getTypeStoreSize(LI->getType());
     }
-  } else if (isFreeCall(QueryInst)) {
-    MemPtr = QueryInst->getOperand(1);
+  } else if (const CallInst *CI = isFreeCall(QueryInst)) {
+    MemPtr = CI->getArgOperand(0);
     // calls to free() erase the entire structure, not just a field.
     MemSize = ~0UL;
   } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
     int IntrinsicID = 0;  // Intrinsic IDs start at 1.
-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
+    IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst);
+    if (II)
       IntrinsicID = II->getIntrinsicID();
 
     switch (IntrinsicID) {
     case Intrinsic::lifetime_start:
     case Intrinsic::lifetime_end:
     case Intrinsic::invariant_start:
-      MemPtr = QueryInst->getOperand(2);
-      MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue();
+      MemPtr = II->getArgOperand(1);
+      MemSize = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
       break;
     case Intrinsic::invariant_end:
-      MemPtr = QueryInst->getOperand(3);
-      MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue();
+      MemPtr = II->getArgOperand(2);
+      MemSize = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
       break;
     default:
       CallSite QueryCS = CallSite::get(QueryInst);
@@ -456,7 +457,7 @@
     // Okay, we have a cache entry.  If we know it is not dirty, just return it
     // with no computation.
     if (!CacheP.second) {
-      NumCacheNonLocal++;
+      ++NumCacheNonLocal;
       return Cache;
     }
     
@@ -478,7 +479,7 @@
     BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
     for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
       DirtyBlocks.push_back(*PI);
-    NumUncacheNonLocal++;
+    ++NumUncacheNonLocal;
   }
   
   // isReadonlyCall - If this is a read-only call, we can be more aggressive.

Modified: llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ProfileInfo.cpp Fri Jul  2 04:57:13 2010
@@ -577,8 +577,6 @@
 
 template<>
 bool ProfileInfoT<Function,BasicBlock>::EstimateMissingEdges(const BasicBlock *BB) {
-  bool hasNoSuccessors = false;
-
   double inWeight = 0;
   std::set<Edge> inMissing;
   std::set<const BasicBlock*> ProcessedPreds;
@@ -596,10 +594,8 @@
   std::set<Edge> outMissing;
   std::set<const BasicBlock*> ProcessedSuccs;
   succ_const_iterator sbbi = succ_begin(BB), sbbe = succ_end(BB);
-  if (sbbi == sbbe) {
+  if (sbbi == sbbe)
     readEdge(this,getEdge(BB,0),outWeight,outMissing);
-    hasNoSuccessors = true;
-  }
   for ( ; sbbi != sbbe; ++sbbi ) {
     if (ProcessedSuccs.insert(*sbbi).second) {
       readEdge(this,getEdge(BB,*sbbi),outWeight,outMissing);

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp Fri Jul  2 04:57:13 2010
@@ -822,7 +822,8 @@
   // Fold if the operand is constant.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
     return getConstant(
-      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));
+      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
+                                               getEffectiveSCEVType(Ty))));
 
   // trunc(trunc(x)) --> trunc(x)
   if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
@@ -844,9 +845,9 @@
     return getAddRecExpr(Operands, AddRec->getLoop());
   }
 
-  // The cast wasn't folded; create an explicit cast node.
-  // Recompute the insert position, as it may have been invalidated.
-  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+  // The cast wasn't folded; create an explicit cast node. We can reuse
+  // the existing insert position since if we get here, we won't have
+  // made any changes which would invalidate it.
   SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                  Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
@@ -862,12 +863,10 @@
   Ty = getEffectiveSCEVType(Ty);
 
   // Fold if the operand is constant.
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
-    const Type *IntTy = getEffectiveSCEVType(Ty);
-    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
-    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
-    return getConstant(cast<ConstantInt>(C));
-  }
+  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+    return getConstant(
+      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
+                                              getEffectiveSCEVType(Ty))));
 
   // zext(zext(x)) --> zext(x)
   if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
@@ -997,12 +996,10 @@
   Ty = getEffectiveSCEVType(Ty);
 
   // Fold if the operand is constant.
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
-    const Type *IntTy = getEffectiveSCEVType(Ty);
-    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
-    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
-    return getConstant(cast<ConstantInt>(C));
-  }
+  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
+    return getConstant(
+      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
+                                              getEffectiveSCEVType(Ty))));
 
   // sext(sext(x)) --> sext(x)
   if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
@@ -1406,8 +1403,8 @@
     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
       // If we have an add, expand the add operands onto the end of the operands
       // list.
-      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(Add->op_begin(), Add->op_end());
       DeletedAdd = true;
     }
 
@@ -1555,9 +1552,11 @@
                                              AddRec->op_end());
       AddRecOps[0] = getAddExpr(LIOps);
 
-      // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
-      // is not associative so this isn't necessarily safe.
-      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop);
+      // Build the new addrec. Propagate the NUW and NSW flags if both the
+      // outer add and the inner addrec are guaranteed to have no overflow.
+      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
+                                         HasNUW && AddRec->hasNoUnsignedWrap(),
+                                         HasNSW && AddRec->hasNoSignedWrap());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1584,7 +1583,7 @@
                                               AddRec->op_end());
           for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
             if (i >= NewOps.size()) {
-              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
+              NewOps.append(OtherAddRec->op_begin()+i,
                             OtherAddRec->op_end());
               break;
             }
@@ -1717,8 +1716,8 @@
     while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
       // If we have an mul, expand the mul operands onto the end of the operands
       // list.
-      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(Mul->op_begin(), Mul->op_end());
       DeletedMul = true;
     }
 
@@ -1757,11 +1756,11 @@
       for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
         NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
 
-      // It's tempting to propagate the NSW flag here, but nsw multiplication
-      // is not associative so this isn't necessarily safe.
+      // Build the new addrec. Propagate the NUW and NSW flags if both the
+      // outer mul and the inner addrec are guaranteed to have no overflow.
       const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
                                          HasNUW && AddRec->hasNoUnsignedWrap(),
-                                         /*HasNSW=*/false);
+                                         HasNSW && AddRec->hasNoSignedWrap());
 
       // If all of the other operands were loop invariant, we are done.
       if (Ops.size() == 1) return NewRec;
@@ -1940,8 +1939,7 @@
   Operands.push_back(Start);
   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
     if (StepChrec->getLoop() == L) {
-      Operands.insert(Operands.end(), StepChrec->op_begin(),
-                      StepChrec->op_end());
+      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
       return getAddRecExpr(Operands, L);
     }
 
@@ -2104,8 +2102,8 @@
   if (Idx < Ops.size()) {
     bool DeletedSMax = false;
     while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
-      Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(SMax->op_begin(), SMax->op_end());
       DeletedSMax = true;
     }
 
@@ -2209,8 +2207,8 @@
   if (Idx < Ops.size()) {
     bool DeletedUMax = false;
     while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
-      Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
       Ops.erase(Ops.begin()+Idx);
+      Ops.append(UMax->op_begin(), UMax->op_end());
       DeletedUMax = true;
     }
 
@@ -2767,7 +2765,11 @@
 ///
 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
 
-  bool InBounds = GEP->isInBounds();
+  // Don't blindly transfer the inbounds flag from the GEP instruction to the
+  // Add expression, because the Instruction may be guarded by control flow
+  // and the no-overflow bits may not be valid for the expression in any
+  // context.
+
   const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
   Value *Base = GEP->getOperand(0);
   // Don't attempt to analyze GEPs over unsized objects.
@@ -2783,23 +2785,30 @@
     if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
       // For a struct, add the member offset.
       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
-      TotalOffset = getAddExpr(TotalOffset,
-                               getOffsetOfExpr(STy, FieldNo),
-                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
+      const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+
+      // Add the field offset to the running total offset.
+      TotalOffset = getAddExpr(TotalOffset, FieldOffset);
     } else {
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *LocalOffset = getSCEV(Index);
+      const SCEV *ElementSize = getSizeOfExpr(*GTI);
+      const SCEV *IndexS = getSCEV(Index);
       // Getelementptr indices are signed.
-      LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
-      // Lower "inbounds" GEPs to NSW arithmetic.
-      LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
-                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
-      TotalOffset = getAddExpr(TotalOffset, LocalOffset,
-                               /*HasNUW=*/false, /*HasNSW=*/InBounds);
+      IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
+
+      // Multiply the index by the element size to compute the element offset.
+      const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
+
+      // Add the element offset to the running total offset.
+      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
     }
   }
-  return getAddExpr(getSCEV(Base), TotalOffset,
-                    /*HasNUW=*/false, /*HasNSW=*/InBounds);
+
+  // Get the SCEV for the GEP base.
+  const SCEV *BaseS = getSCEV(Base);
+
+  // Add the total offset from all the GEP indices to the base.
+  return getAddExpr(BaseS, TotalOffset);
 }
 
 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
@@ -2958,7 +2967,8 @@
       if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
         if (!C->getValue()->isZero())
           ConservativeResult =
-            ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));
+            ConservativeResult.intersectWith(
+              ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
 
     // TODO: non-affine addrec
     if (AddRec->isAffine()) {
@@ -3191,15 +3201,9 @@
   Operator *U = cast<Operator>(V);
   switch (Opcode) {
   case Instruction::Add:
-    // Don't transfer the NSW and NUW bits from the Add instruction to the
-    // Add expression, because the Instruction may be guarded by control
-    // flow and the no-overflow bits may not be valid for the expression in
-    // any context.
     return getAddExpr(getSCEV(U->getOperand(0)),
                       getSCEV(U->getOperand(1)));
   case Instruction::Mul:
-    // Don't transfer the NSW and NUW bits from the Mul instruction to the
-    // Mul expression, as with Add.
     return getMulExpr(getSCEV(U->getOperand(0)),
                       getSCEV(U->getOperand(1)));
   case Instruction::UDiv:
@@ -3653,6 +3657,26 @@
         ConstantEvolutionLoopExitValue.erase(PN);
     }
 
+    // If there's a SCEVUnknown tying this value into the SCEV
+    // space, remove it from the folding set map. The SCEVUnknown
+    // object and any other SCEV objects which reference it
+    // (transitively) remain allocated, effectively leaked until
+    // the underlying BumpPtrAllocator is freed.
+    //
+    // This permits SCEV pointers to be used as keys in maps
+    // such as the ValuesAtScopes map.
+    FoldingSetNodeID ID;
+    ID.AddInteger(scUnknown);
+    ID.AddPointer(I);
+    void *IP;
+    if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
+      UniqueSCEVs.RemoveNode(S);
+
+      // This isn't necessary, but we might as well remove the
+      // value from the ValuesAtScopes map too.
+      ValuesAtScopes.erase(S);
+    }
+
     PushDefUseChildren(I, Worklist);
   }
 }
@@ -4134,8 +4158,7 @@
   // constant or derived from a PHI node themselves.
   PHINode *PHI = 0;
   for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
-    if (!(isa<Constant>(I->getOperand(Op)) ||
-          isa<GlobalValue>(I->getOperand(Op)))) {
+    if (!isa<Constant>(I->getOperand(Op))) {
       PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
       if (P == 0) return 0;  // Not evolving from PHI
       if (PHI == 0)
@@ -4156,11 +4179,9 @@
                                     const TargetData *TD) {
   if (isa<PHINode>(V)) return PHIVal;
   if (Constant *C = dyn_cast<Constant>(V)) return C;
-  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
   Instruction *I = cast<Instruction>(V);
 
-  std::vector<Constant*> Operands;
-  Operands.resize(I->getNumOperands());
+  std::vector<Constant*> Operands(I->getNumOperands());
 
   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
     Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
@@ -4202,8 +4223,8 @@
     return RetVal = 0;  // Must be a constant.
 
   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
-  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
-  if (PN2 != PN)
+  if (getConstantEvolvingPHI(BEValue, L) != PN &&
+      !isa<Constant>(BEValue))
     return RetVal = 0;  // Not derived from same PHI.
 
   // Execute the loop symbolically to determine the exit value.
@@ -4238,8 +4259,11 @@
   PHINode *PN = getConstantEvolvingPHI(Cond, L);
   if (PN == 0) return getCouldNotCompute();
 
-  // Since the loop is canonicalized, the PHI node must have two entries.  One
-  // entry must be a constant (coming in from outside of the loop), and the
+  // If the loop is canonicalized, the PHI will have exactly two entries.
+  // That's the only form we support here.
+  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
+
+  // One entry must be a constant (coming in from outside of the loop), and the
   // second must be derived from the same PHI.
   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
   Constant *StartCST =
@@ -4247,8 +4271,9 @@
   if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.
 
   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
-  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
-  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.
+  if (getConstantEvolvingPHI(BEValue, L) != PN &&
+      !isa<Constant>(BEValue))
+    return getCouldNotCompute();  // Not derived from same PHI.
 
   // Okay, we find a PHI node that defines the trip count of this loop.  Execute
   // the loop symbolically to determine when the condition gets a value of
@@ -4336,54 +4361,51 @@
       // the arguments into constants, and if so, try to constant propagate the
       // result.  This is particularly useful for computing loop exit values.
       if (CanConstantFold(I)) {
-        std::vector<Constant*> Operands;
-        Operands.reserve(I->getNumOperands());
+        SmallVector<Constant *, 4> Operands;
+        bool MadeImprovement = false;
         for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
           Value *Op = I->getOperand(i);
           if (Constant *C = dyn_cast<Constant>(Op)) {
             Operands.push_back(C);
-          } else {
-            // If any of the operands is non-constant and if they are
-            // non-integer and non-pointer, don't even try to analyze them
-            // with scev techniques.
-            if (!isSCEVable(Op->getType()))
-              return V;
-
-            const SCEV *OpV = getSCEVAtScope(Op, L);
-            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
-              Constant *C = SC->getValue();
-              if (C->getType() != Op->getType())
-                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
-                                                                  Op->getType(),
-                                                                  false),
-                                          C, Op->getType());
-              Operands.push_back(C);
-            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
-              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
-                if (C->getType() != Op->getType())
-                  C =
-                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
-                                                                  Op->getType(),
-                                                                  false),
-                                          C, Op->getType());
-                Operands.push_back(C);
-              } else
-                return V;
-            } else {
-              return V;
-            }
+            continue;
           }
+
+          // If any of the operands is non-constant and if they are
+          // non-integer and non-pointer, don't even try to analyze them
+          // with scev techniques.
+          if (!isSCEVable(Op->getType()))
+            return V;
+
+          const SCEV *OrigV = getSCEV(Op);
+          const SCEV *OpV = getSCEVAtScope(OrigV, L);
+          MadeImprovement |= OrigV != OpV;
+
+          Constant *C = 0;
+          if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
+            C = SC->getValue();
+          if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
+            C = dyn_cast<Constant>(SU->getValue());
+          if (!C) return V;
+          if (C->getType() != Op->getType())
+            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+                                                              Op->getType(),
+                                                              false),
+                                      C, Op->getType());
+          Operands.push_back(C);
         }
 
-        Constant *C = 0;
-        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
-          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
-                                              Operands[0], Operands[1], TD);
-        else
-          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
-                                       &Operands[0], Operands.size(), TD);
-        if (C)
+        // Check to see if getSCEVAtScope actually made an improvement.
+        if (MadeImprovement) {
+          Constant *C = 0;
+          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
+                                                Operands[0], Operands[1], TD);
+          else
+            C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
+                                         &Operands[0], Operands.size(), TD);
+          if (!C) return V;
           return getSCEV(C);
+        }
       }
     }
 
@@ -4433,7 +4455,29 @@
   // If this is a loop recurrence for a loop that does not contain L, then we
   // are dealing with the final value computed by the loop.
   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
-    if (!L || !AddRec->getLoop()->contains(L)) {
+    // First, attempt to evaluate each operand.
+    // Avoid performing the look-up in the common case where the specified
+    // expression has no loop-variant portions.
+    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
+      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
+      if (OpAtScope == AddRec->getOperand(i))
+        continue;
+
+      // Okay, at least one of these operands is loop variant but might be
+      // foldable.  Build a new instance of the folded commutative expression.
+      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
+                                          AddRec->op_begin()+i);
+      NewOps.push_back(OpAtScope);
+      for (++i; i != e; ++i)
+        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
+
+      AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
+      break;
+    }
+
+    // If the scope is outside the addrec's loop, evaluate it by using the
+    // loop exit value of the addrec.
+    if (!AddRec->getLoop()->contains(L)) {
       // To evaluate this recurrence, we need to know how many times the AddRec
       // loop iterates.  Compute this now.
       const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
@@ -4442,6 +4486,7 @@
       // Then, evaluate the AddRec.
       return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
     }
+
     return AddRec;
   }
 
@@ -4691,23 +4736,6 @@
   return getCouldNotCompute();
 }
 
-/// getLoopPredecessor - If the given loop's header has exactly one unique
-/// predecessor outside the loop, return it. Otherwise return null.
-/// This is less strict that the loop "preheader" concept, which requires
-/// the predecessor to have only one single successor.
-///
-BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
-  BasicBlock *Header = L->getHeader();
-  BasicBlock *Pred = 0;
-  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
-       PI != E; ++PI)
-    if (!L->contains(*PI)) {
-      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
-      Pred = *PI;
-    }
-  return Pred;
-}
-
 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
 /// (which may not be an immediate predecessor) which has exactly one
 /// successor from which BB is reachable, or null if no such block is
@@ -4725,7 +4753,7 @@
   // If the header has a unique predecessor outside the loop, it must be
   // a block that has exactly one successor that can reach the loop.
   if (Loop *L = LI->getLoopFor(BB))
-    return std::make_pair(getLoopPredecessor(L), L->getHeader());
+    return std::make_pair(L->getLoopPredecessor(), L->getHeader());
 
   return std::pair<BasicBlock *, BasicBlock *>();
 }
@@ -5176,7 +5204,7 @@
   // as there are predecessors that can be found that have unique successors
   // leading to the original header.
   for (std::pair<BasicBlock *, BasicBlock *>
-         Pair(getLoopPredecessor(L), L->getHeader());
+         Pair(L->getLoopPredecessor(), L->getHeader());
        Pair.first;
        Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
 

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp Fri Jul  2 04:57:13 2010
@@ -106,6 +106,12 @@
 AliasAnalysis::AliasResult
 ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
                                     const Value *B, unsigned BSize) {
+  // If either of the memory references is empty, it doesn't matter what the
+  // pointer values are. This allows the code below to ignore this special
+  // case.
+  if (ASize == 0 || BSize == 0)
+    return NoAlias;
+
   // This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
   const SCEV *AS = SE->getSCEV(const_cast<Value *>(A));
   const SCEV *BS = SE->getSCEV(const_cast<Value *>(B));
@@ -118,14 +124,32 @@
   if (SE->getEffectiveSCEVType(AS->getType()) ==
       SE->getEffectiveSCEVType(BS->getType())) {
     unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
-    APInt AI(BitWidth, ASize);
+    APInt ASizeInt(BitWidth, ASize);
+    APInt BSizeInt(BitWidth, BSize);
+
+    // Compute the difference between the two pointers.
     const SCEV *BA = SE->getMinusSCEV(BS, AS);
-    if (AI.ule(SE->getUnsignedRange(BA).getUnsignedMin())) {
-      APInt BI(BitWidth, BSize);
-      const SCEV *AB = SE->getMinusSCEV(AS, BS);
-      if (BI.ule(SE->getUnsignedRange(AB).getUnsignedMin()))
-        return NoAlias;
-    }
+
+    // Test whether the difference is known to be great enough that memory of
+    // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt
+    // are non-zero, which is special-cased above.
+    if (ASizeInt.ule(SE->getUnsignedRange(BA).getUnsignedMin()) &&
+        (-BSizeInt).uge(SE->getUnsignedRange(BA).getUnsignedMax()))
+      return NoAlias;
+
+    // Folding the subtraction while preserving range information can be tricky
+    // (because of INT_MIN, etc.); if the prior test failed, swap AS and BS
+    // and try again to see if things fold better that way.
+
+    // Compute the difference between the two pointers.
+    const SCEV *AB = SE->getMinusSCEV(AS, BS);
+
+    // Test whether the difference is known to be great enough that memory of
+    // the given sizes don't overlap. This assumes that ASizeInt and BSizeInt
+    // are non-zero, which is special-cased above.
+    if (BSizeInt.ule(SE->getUnsignedRange(AB).getUnsignedMin()) &&
+        (-ASizeInt).uge(SE->getUnsignedRange(AB).getUnsignedMax()))
+      return NoAlias;
   }
 
   // If ScalarEvolution can find an underlying object, form a new query.

Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp Fri Jul  2 04:57:13 2010
@@ -21,6 +21,41 @@
 #include "llvm/ADT/STLExtras.h"
 using namespace llvm;
 
+/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
+/// reusing an existing cast if a suitable one exists, moving an existing
+/// cast if a suitable one exists but isn't in the right place, or
+/// creating a new one.
+Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
+                                       Instruction::CastOps Op,
+                                       BasicBlock::iterator IP) {
+  // Check to see if there is already a cast!
+  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+       UI != E; ++UI)
+    if ((*UI)->getType() == Ty)
+      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
+        if (CI->getOpcode() == Op) {
+          // If the cast isn't where we want it, fix it.
+          if (BasicBlock::iterator(CI) != IP) {
+            // Create a new cast, and leave the old cast in place in case
+            // it is being used as an insert point. Clear its operand
+            // so that it doesn't hold anything live.
+            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
+            NewCI->takeName(CI);
+            CI->replaceAllUsesWith(NewCI);
+            CI->setOperand(0, UndefValue::get(V->getType()));
+            rememberInstruction(NewCI);
+            return NewCI;
+          }
+          rememberInstruction(CI);
+          return CI;
+        }
+
+  // Create a new cast.
+  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
+  rememberInstruction(I);
+  return I;
+}
+
 /// InsertNoopCastOfTo - Insert a cast of V to the specified type,
 /// which must be possible with a noop cast, doing what we can to share
 /// the casts.
@@ -54,71 +89,29 @@
         return CE->getOperand(0);
   }
 
+  // Fold a cast of a constant.
   if (Constant *C = dyn_cast<Constant>(V))
     return ConstantExpr::getCast(Op, C, Ty);
 
+  // Cast the argument at the beginning of the entry block, after
+  // any bitcasts of other arguments.
   if (Argument *A = dyn_cast<Argument>(V)) {
-    // Check to see if there is already a cast!
-    for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
-         UI != E; ++UI)
-      if ((*UI)->getType() == Ty)
-        if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
-          if (CI->getOpcode() == Op) {
-            // If the cast isn't the first instruction of the function, move it.
-            if (BasicBlock::iterator(CI) !=
-                A->getParent()->getEntryBlock().begin()) {
-              // Recreate the cast at the beginning of the entry block.
-              // The old cast is left in place in case it is being used
-              // as an insert point.
-              Instruction *NewCI =
-                CastInst::Create(Op, V, Ty, "",
-                                 A->getParent()->getEntryBlock().begin());
-              NewCI->takeName(CI);
-              CI->replaceAllUsesWith(NewCI);
-              return NewCI;
-            }
-            return CI;
-          }
-
-    Instruction *I = CastInst::Create(Op, V, Ty, V->getName(),
-                                      A->getParent()->getEntryBlock().begin());
-    rememberInstruction(I);
-    return I;
+    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
+    while ((isa<BitCastInst>(IP) &&
+            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
+            cast<BitCastInst>(IP)->getOperand(0) != A) ||
+           isa<DbgInfoIntrinsic>(IP))
+      ++IP;
+    return ReuseOrCreateCast(A, Ty, Op, IP);
   }
 
+  // Cast the instruction immediately after the instruction.
   Instruction *I = cast<Instruction>(V);
-
-  // Check to see if there is already a cast.  If there is, use it.
-  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
-       UI != E; ++UI) {
-    if ((*UI)->getType() == Ty)
-      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
-        if (CI->getOpcode() == Op) {
-          BasicBlock::iterator It = I; ++It;
-          if (isa<InvokeInst>(I))
-            It = cast<InvokeInst>(I)->getNormalDest()->begin();
-          while (isa<PHINode>(It) || isa<DbgInfoIntrinsic>(It)) ++It;
-          if (It != BasicBlock::iterator(CI)) {
-            // Recreate the cast after the user.
-            // The old cast is left in place in case it is being used
-            // as an insert point.
-            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", It);
-            NewCI->takeName(CI);
-            CI->replaceAllUsesWith(NewCI);
-            rememberInstruction(NewCI);
-            return NewCI;
-          }
-          rememberInstruction(CI);
-          return CI;
-        }
-  }
   BasicBlock::iterator IP = I; ++IP;
   if (InvokeInst *II = dyn_cast<InvokeInst>(I))
     IP = II->getNormalDest()->begin();
   while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
-  Instruction *CI = CastInst::Create(Op, V, Ty, V->getName(), IP);
-  rememberInstruction(CI);
-  return CI;
+  return ReuseOrCreateCast(I, Ty, Op, IP);
 }
 
 /// InsertBinop - Insert the specified binary operator, doing a small amount
@@ -295,11 +288,11 @@
   // the sum into a single value, so just use that.
   Ops.clear();
   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
-    Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+    Ops.append(Add->op_begin(), Add->op_end());
   else if (!Sum->isZero())
     Ops.push_back(Sum);
   // Then append the addrecs.
-  Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+  Ops.append(AddRecs.begin(), AddRecs.end());
 }
 
 /// SplitAddRecs - Flatten a list of add operands, moving addrec start values
@@ -322,7 +315,7 @@
                                          A->getLoop()));
       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
         Ops[i] = Zero;
-        Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+        Ops.append(Add->op_begin(), Add->op_end());
         e += Add->getNumOperands();
       } else {
         Ops[i] = Start;
@@ -330,7 +323,7 @@
     }
   if (!AddRecs.empty()) {
     // Add the addrecs onto the end of the list.
-    Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
+    Ops.append(AddRecs.begin(), AddRecs.end());
     // Resort the operand list, moving any constants to the front.
     SimplifyAddOperands(Ops, Ty, SE);
   }
@@ -1108,8 +1101,7 @@
   }
 
   // {0,+,1} --> Insert a canonical induction variable into the loop!
-  if (S->isAffine() &&
-      S->getOperand(1) == SE.getConstant(Ty, 1)) {
+  if (S->isAffine() && S->getOperand(1)->isOne()) {
     // If there's a canonical IV, just use it.
     if (CanonicalIV) {
       assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&

Modified: llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ValueTracking.cpp Fri Jul  2 04:57:13 2010
@@ -953,7 +953,7 @@
   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
     // sqrt(-0.0) = -0.0, no other negative results are possible.
     if (II->getIntrinsicID() == Intrinsic::sqrt)
-      return CannotBeNegativeZero(II->getOperand(1), Depth+1);
+      return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
   
   if (const CallInst *CI = dyn_cast<CallInst>(I))
     if (const Function *F = CI->getCalledFunction()) {
@@ -966,7 +966,7 @@
         if (F->getName() == "fabsl") return true;
         if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
             F->getName() == "sqrtl")
-          return CannotBeNegativeZero(CI->getOperand(1), Depth+1);
+          return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
       }
     }
   

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLLexer.cpp Fri Jul  2 04:57:13 2010
@@ -492,6 +492,7 @@
 
   KEYWORD(private);
   KEYWORD(linker_private);
+  KEYWORD(linker_private_weak);
   KEYWORD(internal);
   KEYWORD(available_externally);
   KEYWORD(linkonce);

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLParser.cpp Fri Jul  2 04:57:13 2010
@@ -196,19 +196,20 @@
     // optional leading prefixes, the production is:
     // GlobalVar ::= OptionalLinkage OptionalVisibility OptionalThreadLocal
     //               OptionalAddrSpace ('constant'|'global') ...
-    case lltok::kw_private :       // OptionalLinkage
-    case lltok::kw_linker_private: // OptionalLinkage
-    case lltok::kw_internal:       // OptionalLinkage
-    case lltok::kw_weak:           // OptionalLinkage
-    case lltok::kw_weak_odr:       // OptionalLinkage
-    case lltok::kw_linkonce:       // OptionalLinkage
-    case lltok::kw_linkonce_odr:   // OptionalLinkage
-    case lltok::kw_appending:      // OptionalLinkage
-    case lltok::kw_dllexport:      // OptionalLinkage
-    case lltok::kw_common:         // OptionalLinkage
-    case lltok::kw_dllimport:      // OptionalLinkage
-    case lltok::kw_extern_weak:    // OptionalLinkage
-    case lltok::kw_external: {     // OptionalLinkage
+    case lltok::kw_private:             // OptionalLinkage
+    case lltok::kw_linker_private:      // OptionalLinkage
+    case lltok::kw_linker_private_weak: // OptionalLinkage
+    case lltok::kw_internal:            // OptionalLinkage
+    case lltok::kw_weak:                // OptionalLinkage
+    case lltok::kw_weak_odr:            // OptionalLinkage
+    case lltok::kw_linkonce:            // OptionalLinkage
+    case lltok::kw_linkonce_odr:        // OptionalLinkage
+    case lltok::kw_appending:           // OptionalLinkage
+    case lltok::kw_dllexport:           // OptionalLinkage
+    case lltok::kw_common:              // OptionalLinkage
+    case lltok::kw_dllimport:           // OptionalLinkage
+    case lltok::kw_extern_weak:         // OptionalLinkage
+    case lltok::kw_external: {          // OptionalLinkage
       unsigned Linkage, Visibility;
       if (ParseOptionalLinkage(Linkage) ||
           ParseOptionalVisibility(Visibility) ||
@@ -629,7 +630,8 @@
       Linkage != GlobalValue::WeakODRLinkage &&
       Linkage != GlobalValue::InternalLinkage &&
       Linkage != GlobalValue::PrivateLinkage &&
-      Linkage != GlobalValue::LinkerPrivateLinkage)
+      Linkage != GlobalValue::LinkerPrivateLinkage &&
+      Linkage != GlobalValue::LinkerPrivateWeakLinkage)
     return Error(LinkageLoc, "invalid linkage type for alias");
 
   Constant *Aliasee;
@@ -1013,11 +1015,13 @@
 ///   ::= /*empty*/
 ///   ::= 'private'
 ///   ::= 'linker_private'
+///   ::= 'linker_private_weak'
 ///   ::= 'internal'
 ///   ::= 'weak'
 ///   ::= 'weak_odr'
 ///   ::= 'linkonce'
 ///   ::= 'linkonce_odr'
+///   ::= 'available_externally'
 ///   ::= 'appending'
 ///   ::= 'dllexport'
 ///   ::= 'common'
@@ -1030,6 +1034,9 @@
   default:                       Res=GlobalValue::ExternalLinkage; return false;
   case lltok::kw_private:        Res = GlobalValue::PrivateLinkage;       break;
   case lltok::kw_linker_private: Res = GlobalValue::LinkerPrivateLinkage; break;
+  case lltok::kw_linker_private_weak:
+    Res = GlobalValue::LinkerPrivateWeakLinkage;
+    break;
   case lltok::kw_internal:       Res = GlobalValue::InternalLinkage;      break;
   case lltok::kw_weak:           Res = GlobalValue::WeakAnyLinkage;       break;
   case lltok::kw_weak_odr:       Res = GlobalValue::WeakODRLinkage;       break;
@@ -2704,6 +2711,7 @@
     break;
   case GlobalValue::PrivateLinkage:
   case GlobalValue::LinkerPrivateLinkage:
+  case GlobalValue::LinkerPrivateWeakLinkage:
   case GlobalValue::InternalLinkage:
   case GlobalValue::AvailableExternallyLinkage:
   case GlobalValue::LinkOnceAnyLinkage:

Modified: llvm/branches/wendling/eh/lib/AsmParser/LLToken.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/AsmParser/LLToken.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/AsmParser/LLToken.h (original)
+++ llvm/branches/wendling/eh/lib/AsmParser/LLToken.h Fri Jul  2 04:57:13 2010
@@ -47,6 +47,7 @@
     kw_external,
     kw_internal,
     kw_linker_private,
+    kw_linker_private_weak,
     kw_linkonce,
     kw_linkonce_odr,
     kw_private,

Modified: llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Reader/BitcodeReader.cpp Fri Jul  2 04:57:13 2010
@@ -75,6 +75,7 @@
   case 11: return GlobalValue::LinkOnceODRLinkage;
   case 12: return GlobalValue::AvailableExternallyLinkage;
   case 13: return GlobalValue::LinkerPrivateLinkage;
+  case 14: return GlobalValue::LinkerPrivateWeakLinkage;
   }
 }
 

Modified: llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Writer/BitcodeWriter.cpp Fri Jul  2 04:57:13 2010
@@ -313,6 +313,7 @@
   case GlobalValue::LinkOnceODRLinkage:         return 11;
   case GlobalValue::AvailableExternallyLinkage: return 12;
   case GlobalValue::LinkerPrivateLinkage:       return 13;
+  case GlobalValue::LinkerPrivateWeakLinkage:   return 14;
   }
 }
 
@@ -1151,26 +1152,25 @@
     Vals.push_back(cast<StoreInst>(I).isVolatile());
     break;
   case Instruction::Call: {
-    const PointerType *PTy = cast<PointerType>(I.getOperand(0)->getType());
+    const CallInst &CI = cast<CallInst>(I);
+    const PointerType *PTy = cast<PointerType>(CI.getCalledValue()->getType());
     const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
 
     Code = bitc::FUNC_CODE_INST_CALL;
 
-    const CallInst *CI = cast<CallInst>(&I);
-    Vals.push_back(VE.getAttributeID(CI->getAttributes()));
-    Vals.push_back((CI->getCallingConv() << 1) | unsigned(CI->isTailCall()));
-    PushValueAndType(CI->getOperand(0), InstID, Vals, VE);  // Callee
+    Vals.push_back(VE.getAttributeID(CI.getAttributes()));
+    Vals.push_back((CI.getCallingConv() << 1) | unsigned(CI.isTailCall()));
+    PushValueAndType(CI.getCalledValue(), InstID, Vals, VE);  // Callee
 
     // Emit value #'s for the fixed parameters.
     for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
-      Vals.push_back(VE.getValueID(I.getOperand(i+1)));  // fixed param.
+      Vals.push_back(VE.getValueID(CI.getArgOperand(i)));  // fixed param.
 
     // Emit type/value pairs for varargs params.
     if (FTy->isVarArg()) {
-      unsigned NumVarargs = I.getNumOperands()-1-FTy->getNumParams();
-      for (unsigned i = I.getNumOperands()-NumVarargs, e = I.getNumOperands();
+      for (unsigned i = FTy->getNumParams(), e = CI.getNumArgOperands();
            i != e; ++i)
-        PushValueAndType(I.getOperand(i), InstID, Vals, VE);  // varargs
+        PushValueAndType(CI.getArgOperand(i), InstID, Vals, VE);  // varargs
     }
     break;
   }

Modified: llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp (original)
+++ llvm/branches/wendling/eh/lib/Bitcode/Writer/ValueEnumerator.cpp Fri Jul  2 04:57:13 2010
@@ -72,7 +72,7 @@
   // Enumerate types used by the type symbol table.
   EnumerateTypeSymbolTable(M->getTypeSymbolTable());
 
-  // Insert constants and metadata  that are named at module level into the slot 
+  // Insert constants and metadata that are named at module level into the slot 
   // pool so that the module symbol table can refer to them...
   EnumerateValueSymbolTable(M->getValueSymbolTable());
   EnumerateMDSymbolTable(M->getMDSymbolTable());

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Fri Jul  2 04:57:13 2010
@@ -199,7 +199,7 @@
   case GlobalValue::LinkOnceODRLinkage:
   case GlobalValue::WeakAnyLinkage:
   case GlobalValue::WeakODRLinkage:
-  case GlobalValue::LinkerPrivateLinkage:
+  case GlobalValue::LinkerPrivateWeakLinkage:
     if (MAI->getWeakDefDirective() != 0) {
       // .globl _foo
       OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global);
@@ -225,6 +225,7 @@
     break;
   case GlobalValue::PrivateLinkage:
   case GlobalValue::InternalLinkage:
+  case GlobalValue::LinkerPrivateLinkage:
     break;
   default:
     llvm_unreachable("Unknown linkage type!");
@@ -449,14 +450,11 @@
 }
 
 
-/// EmitComments - Pretty-print comments for instructions.
-static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
-  const MachineFunction *MF = MI.getParent()->getParent();
-  const TargetMachine &TM = MF->getTarget();
-  
-  DebugLoc DL = MI.getDebugLoc();
+static void EmitDebugLoc(DebugLoc DL, const MachineFunction *MF, 
+                         raw_ostream &CommentOS) {
+  const LLVMContext &Ctx = MF->getFunction()->getContext();
   if (!DL.isUnknown()) {          // Print source line info.
-    DIScope Scope(DL.getScope(MF->getFunction()->getContext()));
+    DIScope Scope(DL.getScope(Ctx));
     // Omit the directory, because it's likely to be long and uninteresting.
     if (Scope.Verify())
       CommentOS << Scope.getFilename();
@@ -465,6 +463,23 @@
     CommentOS << ':' << DL.getLine();
     if (DL.getCol() != 0)
       CommentOS << ':' << DL.getCol();
+    DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
+    if (!InlinedAtDL.isUnknown()) {
+      CommentOS << "[ ";
+      EmitDebugLoc(InlinedAtDL, MF, CommentOS);
+      CommentOS << " ]";
+    }
+  }
+}
+
+/// EmitComments - Pretty-print comments for instructions.
+static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
+  const MachineFunction *MF = MI.getParent()->getParent();
+  const TargetMachine &TM = MF->getTarget();
+  
+  DebugLoc DL = MI.getDebugLoc();
+  if (!DL.isUnknown()) {          // Print source line info.
+    EmitDebugLoc(DL, MF, CommentOS);
     CommentOS << '\n';
   }
   

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp Fri Jul  2 04:57:13 2010
@@ -83,7 +83,7 @@
   // Tell SrcMgr about this buffer, it takes ownership of the buffer.
   SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
   
-  AsmParser Parser(SrcMgr, OutContext, OutStreamer, *MAI);
+  AsmParser Parser(TM.getTarget(), SrcMgr, OutContext, OutStreamer, *MAI);
   OwningPtr<TargetAsmParser> TAP(TM.getTarget().createAsmParser(Parser));
   if (!TAP)
     report_fatal_error("Inline asm not supported by this streamer because"

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DIE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DIE.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DIE.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DIE.cpp Fri Jul  2 04:57:13 2010
@@ -201,6 +201,7 @@
   case dwarf::DW_FORM_data8: Size = 8; break;
   case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
   case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
+  case dwarf::DW_FORM_addr:  Size = Asm->getTargetData().getPointerSize(); break;
   default: llvm_unreachable("DIE Value form not supported yet");
   }
   Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -221,6 +222,7 @@
   case dwarf::DW_FORM_data8: return sizeof(int64_t);
   case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
   case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
+  case dwarf::DW_FORM_addr:  return AP->getTargetData().getPointerSize();
   default: llvm_unreachable("DIE Value form not supported yet"); break;
   }
   return 0;

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Fri Jul  2 04:57:13 2010
@@ -321,6 +321,7 @@
   DwarfFrameSectionSym = DwarfInfoSectionSym = DwarfAbbrevSectionSym = 0;
   DwarfStrSectionSym = TextSectionSym = 0;
   DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0; 
+  DwarfDebugLineSectionSym = CurrentLineSectionSym = 0;
   FunctionBeginSym = FunctionEndSym = 0;
   {
     NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
@@ -1691,6 +1692,7 @@
   if (Scope->getInlinedAt())
     ScopeDIE = constructInlinedScopeDIE(Scope);
   else if (DS.isSubprogram()) {
+    ProcessedSPNodes.insert(DS);
     if (Scope->isAbstractScope())
       ScopeDIE = getCompileUnit(DS)->getDIE(DS);
     else
@@ -1791,11 +1793,11 @@
   addString(Die, dwarf::DW_AT_name, dwarf::DW_FORM_string, FN);
   // Use DW_AT_entry_pc instead of DW_AT_low_pc/DW_AT_high_pc pair. This
   // simplifies debug range entries.
-  addUInt(Die, dwarf::DW_AT_entry_pc, dwarf::DW_FORM_data4, 0);
+  addUInt(Die, dwarf::DW_AT_entry_pc, dwarf::DW_FORM_addr, 0);
   // DW_AT_stmt_list is a offset of line number information for this
-  // compile unit in debug_line section. It is always zero when only one
-  // compile unit is emitted in one object file.
-  addUInt(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
+  // compile unit in debug_line section. This offset is calculated 
+  // during endModule().
+  addLabel(Die, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4, 0);
 
   if (!Dir.empty())
     addString(Die, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string, Dir);
@@ -2005,6 +2007,40 @@
 ///
 void DwarfDebug::endModule() {
   if (!FirstCU) return;
+  const Module *M = MMI->getModule();
+  if (NamedMDNode *AllSPs = M->getNamedMetadata("llvm.dbg.sp")) {
+    for (unsigned SI = 0, SE = AllSPs->getNumOperands(); SI != SE; ++SI) {
+      if (ProcessedSPNodes.count(AllSPs->getOperand(SI)) != 0) continue;
+      DISubprogram SP(AllSPs->getOperand(SI));
+      if (!SP.Verify()) continue;
+
+      // Collect info for variables that were optimized out.
+      StringRef FName = SP.getLinkageName();
+      if (FName.empty())
+        FName = SP.getName();
+      NamedMDNode *NMD = 
+        M->getNamedMetadata(Twine("llvm.dbg.lv.", getRealLinkageName(FName)));
+      if (!NMD) continue;
+      unsigned E = NMD->getNumOperands();
+      if (!E) continue;
+      DbgScope *Scope = new DbgScope(NULL, DIDescriptor(SP), NULL);
+      for (unsigned I = 0; I != E; ++I) {
+        DIVariable DV(NMD->getOperand(I));
+        if (!DV.Verify()) continue;
+        Scope->addVariable(new DbgVariable(DV));
+      }
+      
+      // Construct subprogram DIE and add variables DIEs.
+      constructSubprogramDIE(SP);
+      DIE *ScopeDIE = getCompileUnit(SP)->getDIE(SP);
+      const SmallVector<DbgVariable *, 8> &Variables = Scope->getVariables();
+      for (unsigned i = 0, N = Variables.size(); i < N; ++i) {
+        DIE *VariableDIE = constructVariableDIE(Variables[i], Scope);
+        if (VariableDIE)
+          ScopeDIE->addChild(VariableDIE);
+      }
+    }
+  }
 
   // Attach DW_AT_inline attribute with inlined subprogram DIEs.
   for (SmallPtrSet<DIE *, 4>::iterator AI = InlinedSubprogramDIEs.begin(),
@@ -2046,15 +2082,15 @@
   // Compute DIE offsets and sizes.
   computeSizeAndOffsets();
 
+  // Emit source line correspondence into a debug line section.
+  emitDebugLines();
+
   // Emit all the DIEs into a debug info section
   emitDebugInfo();
 
   // Corresponding abbreviations into a abbrev section.
   emitAbbreviations();
 
-  // Emit source line correspondence into a debug line section.
-  emitDebugLines();
-
   // Emit info into a debug pubnames section.
   emitDebugPubNames();
 
@@ -2159,8 +2195,9 @@
 }
 
 /// collectVariableInfo - Populate DbgScope entries with variables' info.
-void DwarfDebug::collectVariableInfo(const MachineFunction *MF) {
-  SmallPtrSet<const MDNode *, 16> Processed;
+void 
+DwarfDebug::collectVariableInfo(const MachineFunction *MF,
+                                SmallPtrSet<const MDNode *, 16> &Processed) {
   
   /// collection info from MMI table.
   collectVariableInfoFromMMITable(MF, Processed);
@@ -2257,9 +2294,11 @@
   }
 
   // Collect info for variables that were optimized out.
-  const Twine FnLVName = Twine("llvm.dbg.lv.", MF->getFunction()->getName());
-  if (NamedMDNode *NMD =
-      MF->getFunction()->getParent()->getNamedMetadataUsingTwine(FnLVName)) {
+  const Function *F = MF->getFunction();
+  const Module *M = F->getParent();
+  if (NamedMDNode *NMD = 
+      M->getNamedMetadata(Twine("llvm.dbg.lv.", 
+                                getRealLinkageName(F->getName())))) {
     for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
       DIVariable DV(cast_or_null<MDNode>(NMD->getOperand(i)));
       if (!DV || !Processed.insert(DV))
@@ -2365,6 +2404,7 @@
     return WScope;
   }
 
+  getOrCreateAbstractScope(Scope);
   DbgScope *WScope = DbgScopeMap.lookup(InlinedAt);
   if (WScope)
     return WScope;
@@ -2378,7 +2418,6 @@
   Parent->addScope(WScope);
 
   ConcreteScopes[InlinedAt] = WScope;
-  getOrCreateAbstractScope(Scope);
 
   return WScope;
 }
@@ -2388,8 +2427,6 @@
 static bool hasValidLocation(LLVMContext &Ctx,
                              const MachineInstr *MInsn,
                              const MDNode *&Scope, const MDNode *&InlinedAt) {
-  if (MInsn->isDebugValue())
-    return false;
   DebugLoc DL = MInsn->getDebugLoc();
   if (DL.isUnknown()) return false;
       
@@ -2588,7 +2625,6 @@
            RE = Ranges.end(); RI != RE; ++RI) {
       assert(RI->first && "DbgRange does not have first instruction!");      
       assert(RI->second && "DbgRange does not have second instruction!");      
-      InsnsBeginScopeSet.insert(RI->first);
       InsnsEndScopeSet.insert(RI->second);
     }
   }
@@ -2653,15 +2689,16 @@
         assert (MI->getNumOperands() > 1 && "Invalid machine instruction!");
         DIVariable DV(MI->getOperand(MI->getNumOperands() - 1).getMetadata());
         if (!DV.Verify()) continue;
-        if (isDbgValueInUndefinedReg(MI)) continue;
         // If DBG_VALUE is for a local variable then it needs a label.
-        if (DV.getTag() != dwarf::DW_TAG_arg_variable)
+        if (DV.getTag() != dwarf::DW_TAG_arg_variable 
+            && isDbgValueInUndefinedReg(MI) == false)
           InsnNeedsLabel.insert(MI);
         // DBG_VALUE for inlined functions argument needs a label.
-        else if (!DISubprogram(DV.getContext()).describes(MF->getFunction()))
+        else if (!DISubprogram(getDISubprogram(DV.getContext())).
+                 describes(MF->getFunction()))
           InsnNeedsLabel.insert(MI);
         // DBG_VALUE indicating argument location change needs a label.
-        else if (!ProcessedArgs.insert(DV))
+        else if (isDbgValueInUndefinedReg(MI) == false && !ProcessedArgs.insert(DV))
           InsnNeedsLabel.insert(MI);
       } else {
         // If location is unknown then instruction needs a location only if 
@@ -2694,7 +2731,8 @@
     // Assumes in correct section after the entry point.
     Asm->OutStreamer.EmitLabel(FunctionEndSym);
     
-    collectVariableInfo(MF);
+    SmallPtrSet<const MDNode *, 16> ProcessedVars;
+    collectVariableInfo(MF, ProcessedVars);
 
     // Get function line info.
     if (!Lines.empty()) {
@@ -2709,8 +2747,30 @@
     
     // Construct abstract scopes.
     for (SmallVector<DbgScope *, 4>::iterator AI = AbstractScopesList.begin(),
-           AE = AbstractScopesList.end(); AI != AE; ++AI)
-      constructScopeDIE(*AI);
+           AE = AbstractScopesList.end(); AI != AE; ++AI) {
+      DISubprogram SP((*AI)->getScopeNode());
+      if (SP.Verify()) {
+        // Collect info for variables that were optimized out.
+        StringRef FName = SP.getLinkageName();
+        if (FName.empty())
+          FName = SP.getName();
+        const Module *M = MF->getFunction()->getParent();
+        if (NamedMDNode *NMD = 
+            M->getNamedMetadata(Twine("llvm.dbg.lv.", 
+                                      getRealLinkageName(FName)))) {
+          for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+          DIVariable DV(cast_or_null<MDNode>(NMD->getOperand(i)));
+          if (!DV || !ProcessedVars.insert(DV))
+            continue;
+          DbgScope *Scope = AbstractScopes.lookup(DV.getContext());
+          if (Scope)
+            Scope->addVariable(new DbgVariable(DV));
+          }
+        }
+      }
+      if (ProcessedSPNodes.count((*AI)->getScopeNode()) == 0)
+        constructScopeDIE(*AI);
+    }
     
     DIE *CurFnDIE = constructScopeDIE(CurrentFnDbgScope);
     
@@ -2731,7 +2791,6 @@
   DbgVariableToDbgInstMap.clear();
   DbgVariableLabelsMap.clear();
   DeleteContainerSeconds(DbgScopeMap);
-  InsnsBeginScopeSet.clear();
   InsnsEndScopeSet.clear();
   ConcreteScopes.clear();
   DeleteContainerSeconds(AbstractScopes);
@@ -2937,7 +2996,8 @@
   if (const MCSection *MacroInfo = TLOF.getDwarfMacroInfoSection())
     EmitSectionSym(Asm, MacroInfo);
 
-  EmitSectionSym(Asm, TLOF.getDwarfLineSection());
+  DwarfDebugLineSectionSym = 
+    EmitSectionSym(Asm, TLOF.getDwarfLineSection(), "section_line");
   EmitSectionSym(Asm, TLOF.getDwarfLocSection());
   EmitSectionSym(Asm, TLOF.getDwarfPubNamesSection());
   EmitSectionSym(Asm, TLOF.getDwarfPubTypesSection());
@@ -3000,6 +3060,11 @@
                                      4);
       break;
     }
+    case dwarf::DW_AT_stmt_list: {
+      Asm->EmitLabelDifference(CurrentLineSectionSym, 
+                               DwarfDebugLineSectionSym, 4);
+      break;
+    }
     case dwarf::DW_AT_location: {
       if (UseDotDebugLocEntry.count(Die) != 0) {
         DIELabel *L = cast<DIELabel>(Values[i]);
@@ -3145,6 +3210,8 @@
                             Asm->getObjFileLowering().getDwarfLineSection());
 
   // Construct the section header.
+  CurrentLineSectionSym = Asm->GetTempSymbol("section_line_begin");
+  Asm->OutStreamer.EmitLabel(CurrentLineSectionSym);
   Asm->OutStreamer.AddComment("Length of Source Line Info");
   Asm->EmitLabelDifference(Asm->GetTempSymbol("line_end"),
                            Asm->GetTempSymbol("line_begin"), 4);

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.h Fri Jul  2 04:57:13 2010
@@ -210,7 +210,7 @@
   DenseMap<DIE *, const MDNode *> ContainingTypeMap;
 
   typedef SmallVector<DbgScope *, 2> ScopeVector;
-  SmallPtrSet<const MachineInstr *, 8> InsnsBeginScopeSet;
+
   SmallPtrSet<const MachineInstr *, 8> InsnsEndScopeSet;
 
   /// InlineInfo - Keep track of inlined functions and their location.  This
@@ -219,6 +219,10 @@
   DenseMap<const MDNode *, SmallVector<InlineInfoLabels, 4> > InlineInfo;
   SmallVector<const MDNode *, 4> InlinedSPNodes;
 
+  // ProcessedSPNodes - This is a collection of subprogram MDNodes that
+  // are processed to create DIEs.
+  SmallPtrSet<const MDNode *, 16> ProcessedSPNodes;
+
   /// LabelsBeforeInsn - Maps instruction with label emitted before 
   /// instruction.
   DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;
@@ -254,6 +258,7 @@
   MCSymbol *DwarfFrameSectionSym, *DwarfInfoSectionSym, *DwarfAbbrevSectionSym;
   MCSymbol *DwarfStrSectionSym, *TextSectionSym, *DwarfDebugRangeSectionSym;
   MCSymbol *DwarfDebugLocSectionSym;
+  MCSymbol *DwarfDebugLineSectionSym, *CurrentLineSectionSym;
   MCSymbol *FunctionBeginSym, *FunctionEndSym;
 private:
   
@@ -590,7 +595,8 @@
   bool extractScopeInformation();
   
   /// collectVariableInfo - Populate DbgScope entries with variables' info.
-  void collectVariableInfo(const MachineFunction *);
+  void collectVariableInfo(const MachineFunction *,
+                           SmallPtrSet<const MDNode *, 16> &ProcessedVars);
   
   /// collectVariableInfoFromMMITable - Collect variable information from
   /// side table maintained by MMI.

Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp Fri Jul  2 04:57:13 2010
@@ -52,13 +52,13 @@
   SymName.append(MId.begin(), std::find(MId.begin(), MId.end(), '.'));
   SymName += "__";
   SymName += Id;
-  
+
   // Capitalize the first letter of the module name.
   SymName[Letter] = toupper(SymName[Letter]);
-  
+
   SmallString<128> TmpStr;
   AP.Mang->getNameWithPrefix(TmpStr, SymName);
-  
+
   MCSymbol *Sym = AP.OutContext.GetOrCreateSymbol(TmpStr);
 
   AP.OutStreamer.EmitSymbolAttribute(Sym, MCSA_Global);

Modified: llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp Fri Jul  2 04:57:13 2010
@@ -358,24 +358,10 @@
 }
 
 /// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
-/// after it, replacing it with an unconditional branch to NewDest.  This
-/// returns true if OldInst's block is modified, false if NewDest is modified.
+/// after it, replacing it with an unconditional branch to NewDest.
 void BranchFolder::ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
                                            MachineBasicBlock *NewDest) {
-  MachineBasicBlock *OldBB = OldInst->getParent();
-
-  // Remove all the old successors of OldBB from the CFG.
-  while (!OldBB->succ_empty())
-    OldBB->removeSuccessor(OldBB->succ_begin());
-
-  // Remove all the dead instructions from the end of OldBB.
-  OldBB->erase(OldInst, OldBB->end());
-
-  // If OldBB isn't immediately before OldBB, insert a branch to it.
-  if (++MachineFunction::iterator(OldBB) != MachineFunction::iterator(NewDest))
-    TII->InsertBranch(*OldBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
-                      OldInst->getDebugLoc());
-  OldBB->addSuccessor(NewDest);
+  TII->ReplaceTailWithBranchTo(OldInst, NewDest);
   ++NumTailMerge;
 }
 
@@ -384,6 +370,9 @@
 /// iterator.  This returns the new MBB.
 MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
                                             MachineBasicBlock::iterator BBI1) {
+  if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
+    return 0;
+
   MachineFunction &MF = *CurMBB.getParent();
 
   // Create the fall-through block.
@@ -628,9 +617,10 @@
 
 /// CreateCommonTailOnlyBlock - None of the blocks to be tail-merged consist
 /// only of the common tail.  Create a block that does by splitting one.
-unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
-                                             unsigned maxCommonTailLength) {
-  unsigned commonTailIndex = 0;
+bool BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
+                                             unsigned maxCommonTailLength,
+                                             unsigned &commonTailIndex) {
+  commonTailIndex = 0;
   unsigned TimeEstimate = ~0U;
   for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
     // Use PredBB if possible; that doesn't require a new branch.
@@ -658,6 +648,11 @@
                << maxCommonTailLength);
 
   MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI);
+  if (!newMBB) {
+    DEBUG(dbgs() << "... failed!");
+    return false;
+  }
+
   SameTails[commonTailIndex].setBlock(newMBB);
   SameTails[commonTailIndex].setTailStartPos(newMBB->begin());
 
@@ -665,7 +660,7 @@
   if (PredBB == MBB)
     PredBB = newMBB;
 
-  return commonTailIndex;
+  return true;
 }
 
 // See if any of the blocks in MergePotentials (which all have a common single
@@ -760,7 +755,11 @@
          !SameTails[commonTailIndex].tailIsWholeBlock())) {
       // None of the blocks consist entirely of the common tail.
       // Split a block so that one does.
-      commonTailIndex = CreateCommonTailOnlyBlock(PredBB, maxCommonTailLength);
+      if (!CreateCommonTailOnlyBlock(PredBB,
+                                     maxCommonTailLength, commonTailIndex)) {
+        RemoveBlocksWithHash(CurHash, SuccBB, PredBB);
+        continue;
+      }
     }
 
     MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock();

Modified: llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.h Fri Jul  2 04:57:13 2010
@@ -102,8 +102,9 @@
                               MachineBasicBlock *PredBB);
     void RemoveBlocksWithHash(unsigned CurHash, MachineBasicBlock* SuccBB,
                                                 MachineBasicBlock* PredBB);
-    unsigned CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
-                                       unsigned maxCommonTailLength);
+    bool CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB,
+                                   unsigned maxCommonTailLength,
+                                   unsigned &commonTailIndex);
 
     bool OptimizeBranches(MachineFunction &MF);
     bool OptimizeBlock(MachineBasicBlock *MBB);

Modified: llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt Fri Jul  2 04:57:13 2010
@@ -13,6 +13,7 @@
   GCMetadataPrinter.cpp
   GCStrategy.cpp
   IfConversion.cpp
+  InlineSpiller.cpp
   IntrinsicLowering.cpp
   LLVMTargetMachine.cpp
   LatencyPriorityQueue.cpp

Modified: llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp Fri Jul  2 04:57:13 2010
@@ -22,6 +22,7 @@
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Support/CallSite.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
@@ -87,12 +88,13 @@
     /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
     /// use the ".llvm.eh.catch.all.value" call need to convert to using its
     /// initializer instead.
-    bool CleanupSelectors();
+    bool CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
 
-    bool IsACleanupSelector(IntrinsicInst *);
+    bool HasCatchAllInSelector(IntrinsicInst *);
 
     /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
-    void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
+    void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
+                                 SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels);
 
     /// FindAllURoRInvokes - Find all URoR invokes in the function.
     void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);
@@ -152,7 +154,7 @@
         Changed = true;
       }
 
-      return false;
+      return Changed;
     }
 
   public:
@@ -188,39 +190,20 @@
   return new DwarfEHPrepare(tm, fast);
 }
 
-/// IsACleanupSelector - Return true if the intrinsic instruction is a clean-up
-/// selector instruction.
-bool DwarfEHPrepare::IsACleanupSelector(IntrinsicInst *II) {
-  unsigned NumOps = II->getNumOperands();
-  bool IsCleanUp = (NumOps == 3);
-
-  if (IsCleanUp)
-    return true;
-
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(3))) {
-    unsigned Val = CI->getZExtValue();
-
-    if (Val == 0 || Val + 3 == NumOps) {
-      // If the value is 0 or the selector has only filters in it, then it's
-      // a cleanup.
-      return true;
-    } else {
-      assert(Val + 3 < NumOps && "Ill-formed eh.selector!");
-
-      if (Val + 4 == NumOps) {
-        if (ConstantInt *FinalVal =
-            dyn_cast<ConstantInt>(II->getOperand(NumOps - 1)))
-          return FinalVal->isZero();
-      }
-    }
-  }
+/// HasCatchAllInSelector - Return true if the intrinsic instruction has a
+/// catch-all.
+bool DwarfEHPrepare::HasCatchAllInSelector(IntrinsicInst *II) {
+  if (!EHCatchAllValue) return false;
 
-  return false;
+  unsigned ArgIdx = II->getNumArgOperands() - 1;
+  GlobalVariable *GV = dyn_cast<GlobalVariable>(II->getArgOperand(ArgIdx));
+  return GV == EHCatchAllValue;
 }
 
 /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
 void DwarfEHPrepare::
-FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
+FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
+                        SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels) {
   for (Value::use_iterator
          I = SelectorIntrinsic->use_begin(),
          E = SelectorIntrinsic->use_end(); I != E; ++I) {
@@ -229,8 +212,10 @@
     if (II->getParent()->getParent() != F)
       continue;
 
-    if (IsACleanupSelector(II))
+    if (!HasCatchAllInSelector(II))
       Sels.insert(II);
+    else
+      CatchAllSels.insert(II);
   }
 }
 
@@ -248,7 +233,7 @@
 /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
 /// the ".llvm.eh.catch.all.value" call need to convert to using its
 /// initializer instead.
-bool DwarfEHPrepare::CleanupSelectors() {
+bool DwarfEHPrepare::CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
   if (!EHCatchAllValue) return false;
 
   if (!SelectorIntrinsic) {
@@ -258,17 +243,15 @@
   }
 
   bool Changed = false;
-  for (Value::use_iterator
-         I = SelectorIntrinsic->use_begin(),
-         E = SelectorIntrinsic->use_end(); I != E; ++I) {
-    IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(I);
-    if (!Sel || Sel->getParent()->getParent() != F) continue;
+  for (SmallPtrSet<IntrinsicInst*, 32>::iterator
+         I = Sels.begin(), E = Sels.end(); I != E; ++I) {
+    IntrinsicInst *Sel = *I;
 
     // Index of the ".llvm.eh.catch.all.value" variable.
-    unsigned OpIdx = Sel->getNumOperands() - 1;
-    GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getOperand(OpIdx));
+    unsigned OpIdx = Sel->getNumArgOperands() - 1;
+    GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getArgOperand(OpIdx));
     if (GV != EHCatchAllValue) continue;
-    Sel->setOperand(OpIdx, EHCatchAllValue->getInitializer());
+    Sel->setArgOperand(OpIdx, EHCatchAllValue->getInitializer());
     Changed = true;
   }
 
@@ -319,8 +302,6 @@
 /// function. This is a candidate to merge the selector associated with the URoR
 /// invoke with the one from the URoR's landing pad.
 bool DwarfEHPrepare::HandleURoRInvokes() {
-  if (!DT) return CleanupSelectors(); // We require DominatorTree information.
-
   if (!EHCatchAllValue) {
     EHCatchAllValue =
       F->getParent()->getNamedGlobal(".llvm.eh.catch.all.value");
@@ -333,14 +314,20 @@
     if (!SelectorIntrinsic) return false;
   }
 
+  SmallPtrSet<IntrinsicInst*, 32> Sels;
+  SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
+  FindAllCleanupSelectors(Sels, CatchAllSels);
+
+  if (!DT)
+    // We require DominatorTree information.
+    return CleanupSelectors(CatchAllSels);
+
   if (!URoR) {
     URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
-    if (!URoR) return CleanupSelectors();
+    if (!URoR) return CleanupSelectors(CatchAllSels);
   }
 
-  SmallPtrSet<IntrinsicInst*, 32> Sels;
   SmallPtrSet<InvokeInst*, 32> URoRInvokes;
-  FindAllCleanupSelectors(Sels);
   FindAllURoRInvokes(URoRInvokes);
 
   SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;
@@ -366,7 +353,8 @@
     if (!ExceptionValueIntrinsic) {
       ExceptionValueIntrinsic =
         Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
-      if (!ExceptionValueIntrinsic) return CleanupSelectors();
+      if (!ExceptionValueIntrinsic)
+        return CleanupSelectors(CatchAllSels);
     }
 
     for (Value::use_iterator
@@ -387,7 +375,7 @@
         // need to convert it to a 'catch-all'.
         for (SmallPtrSet<IntrinsicInst*, 8>::iterator
                SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
-          if (IsACleanupSelector(*SI))
+          if (!HasCatchAllInSelector(*SI))
               SelsToConvert.insert(*SI);
       }
     }
@@ -402,20 +390,21 @@
            SI = SelsToConvert.begin(), SE = SelsToConvert.end();
          SI != SE; ++SI) {
       IntrinsicInst *II = *SI;
-      SmallVector<Value*, 8> Args;
 
       // Use the exception object pointer and the personality function
       // from the original selector.
-      Args.push_back(II->getOperand(1)); // Exception object pointer.
-      Args.push_back(II->getOperand(2)); // Personality function.
-
-      unsigned I = 3;
-      unsigned E = II->getNumOperands() -
-        (isa<ConstantInt>(II->getOperand(II->getNumOperands() - 1)) ? 1 : 0);
-
-      // Add in any filter IDs.
-      for (; I < E; ++I)
-        Args.push_back(II->getOperand(I));
+      CallSite CS(II);
+      IntrinsicInst::op_iterator I = CS.arg_begin();
+      IntrinsicInst::op_iterator E = CS.arg_end();
+      IntrinsicInst::op_iterator B = prior(E);
+
+      // Exclude last argument if it is an integer.
+      if (isa<ConstantInt>(B)) E = B;
+
+      // Add exception object pointer (front).
+      // Add personality function (next).
+      // Add in any filter IDs (rest).
+      SmallVector<Value*, 8> Args(I, E);
 
       Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
 
@@ -432,7 +421,7 @@
     }
   }
 
-  Changed |= CleanupSelectors();
+  Changed |= CleanupSelectors(CatchAllSels);
   return Changed;
 }
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/ELFCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ELFCodeEmitter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ELFCodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ELFCodeEmitter.cpp Fri Jul  2 04:57:13 2010
@@ -90,7 +90,7 @@
     for (std::vector<MachineRelocation>::iterator MRI = JTRelocations.begin(),
          MRE = JTRelocations.end(); MRI != MRE; ++MRI) {
       MachineRelocation &MR = *MRI;
-      unsigned MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
+      uintptr_t MBBOffset = getMachineBasicBlockAddress(MR.getBasicBlock());
       MR.setResultPointer((void*)MBBOffset);
       MR.setConstantVal(ES->SectionIdx);
       JTSection.addRelocation(MR);

Modified: llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/GCStrategy.cpp Fri Jul  2 04:57:13 2010
@@ -271,7 +271,7 @@
         case Intrinsic::gcwrite:
           if (LowerWr) {
             // Replace a write barrier with a simple store.
-            Value *St = new StoreInst(CI->getOperand(1), CI->getOperand(3), CI);
+            Value *St = new StoreInst(CI->getArgOperand(0), CI->getArgOperand(2), CI);
             CI->replaceAllUsesWith(St);
             CI->eraseFromParent();
           }
@@ -279,7 +279,7 @@
         case Intrinsic::gcread:
           if (LowerRd) {
             // Replace a read barrier with a simple load.
-            Value *Ld = new LoadInst(CI->getOperand(2), "", CI);
+            Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
             Ld->takeName(CI);
             CI->replaceAllUsesWith(Ld);
             CI->eraseFromParent();
@@ -290,7 +290,7 @@
             // Initialize the GC root, but do not delete the intrinsic. The
             // backend needs the intrinsic to flag the stack slot.
             Roots.push_back(cast<AllocaInst>(
-                              CI->getOperand(1)->stripPointerCasts()));
+                              CI->getArgOperand(0)->stripPointerCasts()));
           }
           break;
         default:

Modified: llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp Fri Jul  2 04:57:13 2010
@@ -186,10 +186,16 @@
                                SmallVectorImpl<MachineOperand> &Cond,
                                SmallSet<unsigned, 4> &Redefs,
                                bool IgnoreBr = false);
-    void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI);
+    void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
 
-    bool MeetIfcvtSizeLimit(unsigned Size) const {
-      return Size > 0 && Size <= TLI->getIfCvtBlockSizeLimit();
+    bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size) const {
+      return Size > 0 && TII->isProfitableToIfCvt(BB, Size);
+    }
+
+    bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB, unsigned TSize,
+                            MachineBasicBlock &FBB, unsigned FSize) const {
+      return TSize > 0 && FSize > 0 &&
+        TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize);
     }
 
     // blockAlwaysFallThrough - Block ends without a terminator.
@@ -235,6 +241,12 @@
   TRI = MF.getTarget().getRegisterInfo();
   if (!TII) return false;
 
+  // Tail merge tend to expose more if-conversion opportunities.
+  BranchFolder BF(true);
+  bool BFChange = BF.OptimizeFunction(MF, TII,
+                                   MF.getTarget().getRegisterInfo(),
+                                   getAnalysisIfAvailable<MachineModuleInfo>());
+
   DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum <<  ") \'"
                << MF.getFunction()->getName() << "\'");
 
@@ -297,8 +309,8 @@
         RetVal = IfConvertSimple(BBI, Kind);
         DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
         if (RetVal) {
-          if (isFalse) NumSimpleFalse++;
-          else         NumSimple++;
+          if (isFalse) ++NumSimpleFalse;
+          else         ++NumSimple;
         }
        break;
       }
@@ -324,11 +336,11 @@
         DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
         if (RetVal) {
           if (isFalse) {
-            if (isRev) NumTriangleFRev++;
-            else       NumTriangleFalse++;
+            if (isRev) ++NumTriangleFRev;
+            else       ++NumTriangleFalse;
           } else {
-            if (isRev) NumTriangleRev++;
-            else       NumTriangle++;
+            if (isRev) ++NumTriangleRev;
+            else       ++NumTriangle;
           }
         }
         break;
@@ -340,7 +352,7 @@
                      << BBI.FalseBB->getNumber() << ") ");
         RetVal = IfConvertDiamond(BBI, Kind, NumDups, NumDups2);
         DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
-        if (RetVal) NumDiamonds++;
+        if (RetVal) ++NumDiamonds;
         break;
       }
       }
@@ -369,13 +381,14 @@
   Roots.clear();
   BBAnalysis.clear();
 
-  if (MadeChange && !IfCvtBranchFold) {
+  if (MadeChange && IfCvtBranchFold) {
     BranchFolder BF(false);
     BF.OptimizeFunction(MF, TII,
                         MF.getTarget().getRegisterInfo(),
                         getAnalysisIfAvailable<MachineModuleInfo>());
   }
 
+  MadeChange |= BFChange;
   return MadeChange;
 }
 
@@ -429,7 +442,7 @@
 
   if (TrueBBI.BB->pred_size() > 1) {
     if (TrueBBI.CannotBeCopied ||
-        TrueBBI.NonPredSize > TLI->getIfCvtDupBlockSizeLimit())
+        !TII->isProfitableToDupForIfCvt(*TrueBBI.BB, TrueBBI.NonPredSize))
       return false;
     Dups = TrueBBI.NonPredSize;
   }
@@ -466,7 +479,7 @@
           ++Size;
       }
     }
-    if (Size > TLI->getIfCvtDupBlockSizeLimit())
+    if (!TII->isProfitableToDupForIfCvt(*TrueBBI.BB, Size))
       return false;
     Dups = Size;
   }
@@ -531,6 +544,19 @@
   while (FI != FIE && FI->isDebugValue())
     ++FI;
   while (TI != TIE && FI != FIE) {
+    // Skip dbg_value instructions. These do not count.
+    if (TI->isDebugValue()) {
+      while (TI != TIE && TI->isDebugValue())
+        ++TI;
+      if (TI == TIE)
+        break;
+    }
+    if (FI->isDebugValue()) {
+      while (FI != FIE && FI->isDebugValue())
+        ++FI;
+      if (FI == FIE)
+        break;
+    }
     if (!TI->isIdenticalTo(FI))
       break;
     ++Dups1;
@@ -542,12 +568,25 @@
   FI = firstNonBranchInst(FalseBBI.BB, TII);
   MachineBasicBlock::iterator TIB = TrueBBI.BB->begin();
   MachineBasicBlock::iterator FIB = FalseBBI.BB->begin();
-  // Skip dbg_value instructions
+  // Skip dbg_value instructions at end of the bb's.
   while (TI != TIB && TI->isDebugValue())
     --TI;
   while (FI != FIB && FI->isDebugValue())
     --FI;
   while (TI != TIB && FI != FIB) {
+    // Skip dbg_value instructions. These do not count.
+    if (TI->isDebugValue()) {
+      while (TI != TIB && TI->isDebugValue())
+        --TI;
+      if (TI == TIB)
+        break;
+    }
+    if (FI->isDebugValue()) {
+      while (FI != FIB && FI->isDebugValue())
+        --FI;
+      if (FI == FIB)
+        break;
+    }
     if (!TI->isIdenticalTo(FI))
       break;
     ++Dups2;
@@ -728,8 +767,8 @@
   bool FNeedSub = FalseBBI.Predicate.size() > 0;
   bool Enqueued = false;
   if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
-      MeetIfcvtSizeLimit(TrueBBI.NonPredSize - (Dups + Dups2)) &&
-      MeetIfcvtSizeLimit(FalseBBI.NonPredSize - (Dups + Dups2)) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize - (Dups + Dups2),
+                         *FalseBBI.BB, FalseBBI.NonPredSize - (Dups + Dups2)) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
       FeasibilityAnalysis(FalseBBI, RevCond)) {
     // Diamond:
@@ -746,7 +785,7 @@
   }
 
   if (ValidTriangle(TrueBBI, FalseBBI, false, Dups) &&
-      MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
     // Triangle:
     //   EBB
@@ -760,14 +799,14 @@
   }
 
   if (ValidTriangle(TrueBBI, FalseBBI, true, Dups) &&
-      MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
     Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
     Enqueued = true;
   }
 
   if (ValidSimple(TrueBBI, Dups) &&
-      MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
+      MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize) &&
       FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
     // Simple (split, no rejoin):
     //   EBB
@@ -783,21 +822,21 @@
   if (CanRevCond) {
     // Try the other path...
     if (ValidTriangle(FalseBBI, TrueBBI, false, Dups) &&
-        MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+        MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
         FeasibilityAnalysis(FalseBBI, RevCond, true)) {
       Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
       Enqueued = true;
     }
 
     if (ValidTriangle(FalseBBI, TrueBBI, true, Dups) &&
-        MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+        MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
         FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
       Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
       Enqueued = true;
     }
 
     if (ValidSimple(FalseBBI, Dups) &&
-        MeetIfcvtSizeLimit(FalseBBI.NonPredSize) &&
+        MeetIfcvtSizeLimit(*FalseBBI.BB, FalseBBI.NonPredSize) &&
         FeasibilityAnalysis(FalseBBI, RevCond)) {
       Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
       Enqueued = true;
@@ -918,7 +957,7 @@
       if (AddImpUse)
         // Treat predicated update as read + write.
         MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
-                                                 true/*IsImp*/,false/*IsKill*/));
+                                                true/*IsImp*/,false/*IsKill*/));
     } else {
       Redefs.insert(Reg);
       for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
@@ -961,7 +1000,7 @@
     if (TII->ReverseBranchCondition(Cond))
       assert(false && "Unable to reverse branch condition!");
 
-  // Initialize liveins to the first BB. These are potentiall re-defined by
+  // Initialize liveins to the first BB. These are potentiall redefined by
   // predicated instructions.
   SmallSet<unsigned, 4> Redefs;
   InitPredRedefs(CvtBBI->BB, Redefs, TRI);
@@ -1052,15 +1091,14 @@
     }
   }
 
-  // Initialize liveins to the first BB. These are potentiall re-defined by
+  // Initialize liveins to the first BB. These are potentially redefined by
   // predicated instructions.
   SmallSet<unsigned, 4> Redefs;
   InitPredRedefs(CvtBBI->BB, Redefs, TRI);
   InitPredRedefs(NextBBI->BB, Redefs, TRI);
 
   bool HasEarlyExit = CvtBBI->FalseBB != NULL;
-  bool DupBB = CvtBBI->BB->pred_size() > 1;
-  if (DupBB) {
+  if (CvtBBI->BB->pred_size() > 1) {
     BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
     // Copy instructions in the true block, predicate them, and add them to
     // the entry block.
@@ -1072,7 +1110,7 @@
 
     // Now merge the entry of the triangle with the true block.
     BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
-    MergeBlocks(BBI, *CvtBBI);
+    MergeBlocks(BBI, *CvtBBI, false);
   }
 
   // If 'true' block has a 'false' successor, add an exit branch to it.
@@ -1145,9 +1183,9 @@
     return false;
   }
 
-  // Merge the 'true' and 'false' blocks by copying the instructions
-  // from the 'false' block to the 'true' block. That is, unless the true
-  // block would clobber the predicate, in that case, do the opposite.
+  // Put the predicated instructions from the 'true' block before the
+  // instructions from the 'false' block, unless the true block would clobber
+  // the predicate, in which case, do the opposite.
   BBInfo *BBI1 = &TrueBBI;
   BBInfo *BBI2 = &FalseBBI;
   SmallVector<MachineOperand, 4> RevCond(BBI.BrCond.begin(), BBI.BrCond.end());
@@ -1172,7 +1210,7 @@
   // Remove the conditional branch from entry to the blocks.
   BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
 
-  // Initialize liveins to the first BB. These are potentiall re-defined by
+  // Initialize liveins to the first BB. These are potentially redefined by
   // predicated instructions.
   SmallSet<unsigned, 4> Redefs;
   InitPredRedefs(BBI1->BB, Redefs, TRI);
@@ -1189,10 +1227,17 @@
     ++DI2;
   BBI1->NonPredSize -= NumDups1;
   BBI2->NonPredSize -= NumDups1;
+
+  // Skip past the dups on each side separately since there may be
+  // differing dbg_value entries.
+  for (unsigned i = 0; i < NumDups1; ++DI1) {
+    if (!DI1->isDebugValue())
+      ++i;
+  }
   while (NumDups1 != 0) {
-    ++DI1;
     ++DI2;
-    --NumDups1;
+    if (!DI2->isDebugValue())
+      --NumDups1;
   }
 
   UpdatePredRedefs(BBI1->BB->begin(), DI1, Redefs, TRI);
@@ -1229,8 +1274,8 @@
   PredicateBlock(*BBI2, DI2, *Cond2, Redefs);
 
   // Merge the true block into the entry of the diamond.
-  MergeBlocks(BBI, *BBI1);
-  MergeBlocks(BBI, *BBI2);
+  MergeBlocks(BBI, *BBI1, TailBB == 0);
+  MergeBlocks(BBI, *BBI2, TailBB == 0);
 
   // If the if-converted block falls through or unconditionally branches into
   // the tail block, and the tail block does not have other predecessors, then
@@ -1238,16 +1283,32 @@
   // tail, add a unconditional branch to it.
   if (TailBB) {
     BBInfo TailBBI = BBAnalysis[TailBB->getNumber()];
-    if (TailBB->pred_size() == 1 && !TailBBI.HasFallThrough) {
-      BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
+    bool CanMergeTail = !TailBBI.HasFallThrough;
+    // There may still be a fall-through edge from BBI1 or BBI2 to TailBB;
+    // check if there are any other predecessors besides those.
+    unsigned NumPreds = TailBB->pred_size();
+    if (NumPreds > 1)
+      CanMergeTail = false;
+    else if (NumPreds == 1 && CanMergeTail) {
+      MachineBasicBlock::pred_iterator PI = TailBB->pred_begin();
+      if (*PI != BBI1->BB && *PI != BBI2->BB)
+        CanMergeTail = false;
+    }
+    if (CanMergeTail) {
       MergeBlocks(BBI, TailBBI);
       TailBBI.IsDone = true;
     } else {
+      BBI.BB->addSuccessor(TailBB);
       InsertUncondBranch(BBI.BB, TailBB, TII);
       BBI.HasFallThrough = false;
     }
   }
 
+  // RemoveExtraEdges won't work if the block has an unanalyzable branch,
+  // which can happen here if TailBB is unanalyzable and is merged, so
+  // explicitly remove BBI1 and BBI2 as successors.
+  BBI.BB->removeSuccessor(BBI1->BB);
+  BBI.BB->removeSuccessor(BBI2->BB);
   RemoveExtraEdges(BBI);
 
   // Update block info.
@@ -1274,7 +1335,7 @@
       llvm_unreachable(0);
     }
 
-    // If the predicated instruction now re-defines a register as the result of
+    // If the predicated instruction now redefines a register as the result of
     // if-conversion, add an implicit kill.
     UpdatePredRedefs(I, Redefs, TRI, true);
   }
@@ -1284,7 +1345,7 @@
   BBI.IsAnalyzed = false;
   BBI.NonPredSize = 0;
 
-  NumIfConvBBs++;
+  ++NumIfConvBBs;
 }
 
 /// CopyAndPredicateBlock - Copy and predicate instructions from source BB to
@@ -1315,22 +1376,24 @@
       }
     }
 
-    // If the predicated instruction now re-defines a register as the result of
+    // If the predicated instruction now redefines a register as the result of
     // if-conversion, add an implicit kill.
     UpdatePredRedefs(MI, Redefs, TRI, true);
   }
 
-  std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
-                                         FromBBI.BB->succ_end());
-  MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
-  MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
-
-  for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
-    MachineBasicBlock *Succ = Succs[i];
-    // Fallthrough edge can't be transferred.
-    if (Succ == FallThrough)
-      continue;
-    ToBBI.BB->addSuccessor(Succ);
+  if (!IgnoreBr) {
+    std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
+                                           FromBBI.BB->succ_end());
+    MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
+    MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
+
+    for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
+      MachineBasicBlock *Succ = Succs[i];
+      // Fallthrough edge can't be transferred.
+      if (Succ == FallThrough)
+        continue;
+      ToBBI.BB->addSuccessor(Succ);
+    }
   }
 
   std::copy(FromBBI.Predicate.begin(), FromBBI.Predicate.end(),
@@ -1340,25 +1403,18 @@
   ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
   ToBBI.IsAnalyzed = false;
 
-  NumDupBBs++;
+  ++NumDupBBs;
 }
 
 /// MergeBlocks - Move all instructions from FromBB to the end of ToBB.
-///
-void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI) {
+/// This will leave FromBB as an empty block, so remove all of its
+/// successor edges except for the fall-through edge.  If AddEdges is true,
+/// i.e., when FromBBI's branch is being moved, add those successor edges to
+/// ToBBI.
+void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
   ToBBI.BB->splice(ToBBI.BB->end(),
                    FromBBI.BB, FromBBI.BB->begin(), FromBBI.BB->end());
 
-  // Redirect all branches to FromBB to ToBB.
-  std::vector<MachineBasicBlock *> Preds(FromBBI.BB->pred_begin(),
-                                         FromBBI.BB->pred_end());
-  for (unsigned i = 0, e = Preds.size(); i != e; ++i) {
-    MachineBasicBlock *Pred = Preds[i];
-    if (Pred == ToBBI.BB)
-      continue;
-    Pred->ReplaceUsesOfBlockWith(FromBBI.BB, ToBBI.BB);
-  }
-
   std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
                                          FromBBI.BB->succ_end());
   MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
@@ -1370,7 +1426,8 @@
     if (Succ == FallThrough)
       continue;
     FromBBI.BB->removeSuccessor(Succ);
-    ToBBI.BB->addSuccessor(Succ);
+    if (AddEdges)
+      ToBBI.BB->addSuccessor(Succ);
   }
 
   // Now FromBBI always falls through to the next block!

Modified: llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/IntrinsicLowering.cpp Fri Jul  2 04:57:13 2010
@@ -16,6 +16,7 @@
 #include "llvm/Module.h"
 #include "llvm/Type.h"
 #include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/Support/CallSite.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/IRBuilder.h"
 #include "llvm/Support/raw_ostream.h"
@@ -314,21 +315,22 @@
 static void ReplaceFPIntrinsicWithCall(CallInst *CI, const char *Fname,
                                        const char *Dname,
                                        const char *LDname) {
-  switch (CI->getOperand(1)->getType()->getTypeID()) {
+  CallSite CS(CI);
+  switch (CI->getArgOperand(0)->getType()->getTypeID()) {
   default: llvm_unreachable("Invalid type in intrinsic");
   case Type::FloatTyID:
-    ReplaceCallWith(Fname, CI, CI->op_begin() + 1, CI->op_end(),
+    ReplaceCallWith(Fname, CI, CS.arg_begin(), CS.arg_end(),
                   Type::getFloatTy(CI->getContext()));
     break;
   case Type::DoubleTyID:
-    ReplaceCallWith(Dname, CI, CI->op_begin() + 1, CI->op_end(),
+    ReplaceCallWith(Dname, CI, CS.arg_begin(), CS.arg_end(),
                   Type::getDoubleTy(CI->getContext()));
     break;
   case Type::X86_FP80TyID:
   case Type::FP128TyID:
   case Type::PPC_FP128TyID:
-    ReplaceCallWith(LDname, CI, CI->op_begin() + 1, CI->op_end(),
-                  CI->getOperand(1)->getType());
+    ReplaceCallWith(LDname, CI, CS.arg_begin(), CS.arg_end(),
+                  CI->getArgOperand(0)->getType());
     break;
   }
 }
@@ -340,6 +342,7 @@
   const Function *Callee = CI->getCalledFunction();
   assert(Callee && "Cannot lower an indirect call!");
 
+  CallSite CS(CI);
   switch (Callee->getIntrinsicID()) {
   case Intrinsic::not_intrinsic:
     report_fatal_error("Cannot lower a call to a non-intrinsic function '"+
@@ -353,7 +356,7 @@
     // by the lowerinvoke pass.  In both cases, the right thing to do is to
     // convert the call to an explicit setjmp or longjmp call.
   case Intrinsic::setjmp: {
-    Value *V = ReplaceCallWith("setjmp", CI, CI->op_begin() + 1, CI->op_end(),
+    Value *V = ReplaceCallWith("setjmp", CI, CS.arg_begin(), CS.arg_end(),
                                Type::getInt32Ty(Context));
     if (!CI->getType()->isVoidTy())
       CI->replaceAllUsesWith(V);
@@ -365,32 +368,32 @@
      break;
 
   case Intrinsic::longjmp: {
-    ReplaceCallWith("longjmp", CI, CI->op_begin() + 1, CI->op_end(),
+    ReplaceCallWith("longjmp", CI, CS.arg_begin(), CS.arg_end(),
                     Type::getVoidTy(Context));
     break;
   }
 
   case Intrinsic::siglongjmp: {
     // Insert the call to abort
-    ReplaceCallWith("abort", CI, CI->op_end(), CI->op_end(), 
+    ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(), 
                     Type::getVoidTy(Context));
     break;
   }
   case Intrinsic::ctpop:
-    CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getOperand(1), CI));
+    CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getArgOperand(0), CI));
     break;
 
   case Intrinsic::bswap:
-    CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getOperand(1), CI));
+    CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getArgOperand(0), CI));
     break;
     
   case Intrinsic::ctlz:
-    CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getOperand(1), CI));
+    CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getArgOperand(0), CI));
     break;
 
   case Intrinsic::cttz: {
     // cttz(x) -> ctpop(~X & (X-1))
-    Value *Src = CI->getOperand(1);
+    Value *Src = CI->getArgOperand(0);
     Value *NotSrc = Builder.CreateNot(Src);
     NotSrc->setName(Src->getName() + ".not");
     Value *SrcM1 = ConstantInt::get(Src->getType(), 1);
@@ -451,37 +454,37 @@
     
   case Intrinsic::memcpy: {
     const IntegerType *IntPtr = TD.getIntPtrType(Context);
-    Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+    Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
-    Ops[0] = CI->getOperand(1);
-    Ops[1] = CI->getOperand(2);
+    Ops[0] = CI->getArgOperand(0);
+    Ops[1] = CI->getArgOperand(1);
     Ops[2] = Size;
-    ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+    ReplaceCallWith("memcpy", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
     break;
   }
   case Intrinsic::memmove: {
     const IntegerType *IntPtr = TD.getIntPtrType(Context);
-    Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+    Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
-    Ops[0] = CI->getOperand(1);
-    Ops[1] = CI->getOperand(2);
+    Ops[0] = CI->getArgOperand(0);
+    Ops[1] = CI->getArgOperand(1);
     Ops[2] = Size;
-    ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+    ReplaceCallWith("memmove", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
     break;
   }
   case Intrinsic::memset: {
     const IntegerType *IntPtr = TD.getIntPtrType(Context);
-    Value *Size = Builder.CreateIntCast(CI->getOperand(3), IntPtr,
+    Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
                                         /* isSigned */ false);
     Value *Ops[3];
-    Ops[0] = CI->getOperand(1);
+    Ops[0] = CI->getArgOperand(0);
     // Extend the amount to i32.
-    Ops[1] = Builder.CreateIntCast(CI->getOperand(2), Type::getInt32Ty(Context),
+    Ops[1] = Builder.CreateIntCast(CI->getArgOperand(1), Type::getInt32Ty(Context),
                                    /* isSigned */ false);
     Ops[2] = Size;
-    ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getOperand(1)->getType());
+    ReplaceCallWith("memset", CI, Ops, Ops+3, CI->getArgOperand(0)->getType());
     break;
   }
   case Intrinsic::sqrt: {

Modified: llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LLVMTargetMachine.cpp Fri Jul  2 04:57:13 2010
@@ -330,6 +330,13 @@
     PM.add(createOptimizePHIsPass());
 
   // Delete dead machine instructions regardless of optimization level.
+  //
+  // At -O0, fast-isel frequently creates dead instructions.
+  //
+  // With optimization, dead code should already be eliminated. However
+  // there is one known exception: lowered code for arguments that are only
+  // used by tail calls, where the tail calls reuse the incoming stack
+  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
   PM.add(createDeadMachineInstructionElimPass());
   printAndVerify(PM, "After codegen DCE pass",
                  /* allowDoubleDefs= */ true);

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveInterval.cpp Fri Jul  2 04:57:13 2010
@@ -68,6 +68,37 @@
   return r->end == I;
 }
 
+/// killedAt - Return true if a live range ends at index. Note that the kill
+/// point is not contained in the half-open live range. It is usually the
+/// getDefIndex() slot following its last use.
+bool LiveInterval::killedAt(SlotIndex I) const {
+  Ranges::const_iterator r = std::lower_bound(ranges.begin(), ranges.end(), I);
+
+  // Now r points to the first interval with start >= I, or ranges.end().
+  if (r == ranges.begin())
+    return false;
+
+  --r;
+  // Now r points to the last interval with end <= I.
+  // r->end is the kill point.
+  return r->end == I;
+}
+
+/// killedInRange - Return true if the interval has kills in [Start,End).
+bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const {
+  Ranges::const_iterator r =
+    std::lower_bound(ranges.begin(), ranges.end(), End);
+
+  // Now r points to the first interval with start >= End, or ranges.end().
+  if (r == ranges.begin())
+    return false;
+
+  --r;
+  // Now r points to the last interval with end <= End.
+  // r->end is the kill point.
+  return r->end >= Start && r->end < End;
+}
+
 // overlaps - Return true if the intersection of the two live intervals is
 // not empty.
 //
@@ -149,7 +180,6 @@
 void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
   assert(I != ranges.end() && "Not a valid interval!");
   VNInfo *ValNo = I->valno;
-  SlotIndex OldEnd = I->end;
 
   // Search for the first interval that we can't merge with.
   Ranges::iterator MergeTo = next(I);
@@ -163,9 +193,6 @@
   // Erase any dead ranges.
   ranges.erase(next(I), MergeTo);
 
-  // Update kill info.
-  ValNo->removeKills(OldEnd, I->end.getPrevSlot());
-
   // If the newly formed range now touches the range after it and if they have
   // the same value number, merge the two ranges into one range.
   Ranges::iterator Next = next(I);
@@ -245,9 +272,6 @@
         // endpoint as well.
         if (End > it->end)
           extendIntervalEndTo(it, End);
-        else if (End < it->end)
-          // Overlapping intervals, there might have been a kill here.
-          it->valno->removeKill(End);
         return it;
       }
     } else {
@@ -288,7 +312,6 @@
   VNInfo *ValNo = I->valno;
   if (I->start == Start) {
     if (I->end == End) {
-      ValNo->removeKills(Start, End);
       if (RemoveDeadValNo) {
         // Check if val# is dead.
         bool isDead = true;
@@ -296,7 +319,7 @@
           if (II != I && II->valno == ValNo) {
             isDead = false;
             break;
-          }          
+          }
         if (isDead) {
           // Now that ValNo is dead, remove it.  If it is the largest value
           // number, just nuke it (and any other deleted values neighboring it),
@@ -320,7 +343,6 @@
   // Otherwise if the span we are removing is at the end of the LiveRange,
   // adjust the other way.
   if (I->end == End) {
-    ValNo->removeKills(Start, End);
     I->end = Start;
     return;
   }
@@ -529,6 +551,7 @@
   SmallVector<VNInfo*, 4> ReplacedValNos;
   iterator IP = begin();
   for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
+    assert(I->valno == RHS.getValNumInfo(I->valno->id) && "Bad VNInfo");
     if (I->valno != RHSValNo)
       continue;
     SlotIndex Start = I->start, End = I->end;
@@ -823,10 +846,12 @@
   else {
     OS << " = ";
     for (LiveInterval::Ranges::const_iterator I = ranges.begin(),
-           E = ranges.end(); I != E; ++I)
-    OS << *I;
+           E = ranges.end(); I != E; ++I) {
+      OS << *I;
+      assert(I->valno == getValNumInfo(I->valno->id) && "Bad VNInfo");
+    }
   }
-  
+
   // Print value number info.
   if (getNumValNums()) {
     OS << "  ";
@@ -843,21 +868,6 @@
           OS << "?";
         else
           OS << vni->def;
-        unsigned ee = vni->kills.size();
-        if (ee || vni->hasPHIKill()) {
-          OS << "-(";
-          for (unsigned j = 0; j != ee; ++j) {
-            OS << vni->kills[j];
-            if (j != ee-1)
-              OS << " ";
-          }
-          if (vni->hasPHIKill()) {
-            if (ee)
-              OS << " ";
-            OS << "phi";
-          }
-          OS << ")";
-        }
       }
     }
   }

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp Fri Jul  2 04:57:13 2010
@@ -50,9 +50,6 @@
 static cl::opt<bool> DisableReMat("disable-rematerialization", 
                                   cl::init(false), cl::Hidden);
 
-static cl::opt<bool> EnableFastSpilling("fast-spill",
-                                        cl::init(false), cl::Hidden);
-
 STATISTIC(numIntervals , "Number of original intervals");
 STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
 STATISTIC(numSplits    , "Number of intervals split");
@@ -90,8 +87,8 @@
   
   r2iMap_.clear();
 
-  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
-  VNInfoAllocator.DestroyAll();
+  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
+  VNInfoAllocator.Reset();
   while (!CloneMIs.empty()) {
     MachineInstr *MI = CloneMIs.back();
     CloneMIs.pop_back();
@@ -218,10 +215,7 @@
   return false;
 }
 
-/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
-/// it checks for sub-register reference and it can check use as well.
-bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
-                                            unsigned Reg, bool CheckUse,
+bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                   SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
   for (LiveInterval::Ranges::const_iterator
          I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
@@ -239,12 +233,11 @@
         MachineOperand& MO = MI->getOperand(i);
         if (!MO.isReg())
           continue;
-        if (MO.isUse() && !CheckUse)
-          continue;
         unsigned PhysReg = MO.getReg();
-        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
+        if (PhysReg == 0 || PhysReg == Reg ||
+            TargetRegisterInfo::isVirtualRegister(PhysReg))
           continue;
-        if (tri_->isSubRegister(Reg, PhysReg))
+        if (tri_->regsOverlap(Reg, PhysReg))
           return true;
       }
     }
@@ -329,9 +322,16 @@
     MachineInstr *CopyMI = NULL;
     unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
     if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
-        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
+        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
       CopyMI = mi;
 
+      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
+      // implicit defs without really knowing. It shows up as INSERT_SUBREG
+      // using an undefined register.
+      if (mi->isInsertSubreg())
+        mi->getOperand(1).setIsUndef();
+    }
+
     VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                           VNInfoAllocator);
     assert(ValNo->id == 0 && "First value in interval is not 0?");
@@ -356,7 +356,6 @@
         LiveRange LR(defIndex, killIdx, ValNo);
         interval.addRange(LR);
         DEBUG(dbgs() << " +" << LR << "\n");
-        ValNo->addKill(killIdx);
         return;
       }
     }
@@ -376,7 +375,6 @@
       // valno in the killing blocks.
       assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
       DEBUG(dbgs() << " phi-join");
-      ValNo->addKill(indexes_->getTerminatorGap(mbb));
       ValNo->setHasPHIKill(true);
     } else {
       // Iterate over all of the blocks that the variable is completely
@@ -407,7 +405,6 @@
       }
       LiveRange LR(Start, killIdx, ValNo);
       interval.addRange(LR);
-      ValNo->addKill(killIdx);
       DEBUG(dbgs() << " +" << LR);
     }
 
@@ -468,7 +465,6 @@
       LiveRange LR(DefIndex, RedefIndex, ValNo);
       DEBUG(dbgs() << " replace range with " << LR);
       interval.addRange(LR);
-      ValNo->addKill(RedefIndex);
 
       // If this redefinition is dead, we need to add a dummy unit live
       // range covering the def slot.
@@ -500,7 +496,6 @@
       SlotIndex killIndex = getMBBEndIdx(mbb);
       LiveRange LR(defIndex, killIndex, ValNo);
       interval.addRange(LR);
-      ValNo->addKill(indexes_->getTerminatorGap(mbb));
       ValNo->setHasPHIKill(true);
       DEBUG(dbgs() << " phi-join +" << LR);
     } else {
@@ -596,7 +591,6 @@
     ValNo->setHasRedefByEC(true);
   LiveRange LR(start, end, ValNo);
   interval.addRange(LR);
-  LR.valno->addKill(end);
   DEBUG(dbgs() << " +" << LR << '\n');
 }
 
@@ -697,7 +691,6 @@
   LiveRange LR(start, end, vni);
 
   interval.addRange(LR);
-  LR.valno->addKill(end);
   DEBUG(dbgs() << " +" << LR << '\n');
 }
 
@@ -783,37 +776,6 @@
   return NewLI;
 }
 
-/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
-/// copy field and returns the source register that defines it.
-unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
-  if (!VNI->getCopy())
-    return 0;
-
-  if (VNI->getCopy()->isExtractSubreg()) {
-    // If it's extracting out of a physical register, return the sub-register.
-    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
-    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
-      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
-      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
-      if (SrcSubReg == DstSubReg)
-        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
-        // reg1034 can still be coalesced to EDX.
-        return Reg;
-      assert(DstSubReg == 0);
-      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
-    }
-    return Reg;
-  } else if (VNI->getCopy()->isInsertSubreg() ||
-             VNI->getCopy()->isSubregToReg())
-    return VNI->getCopy()->getOperand(2).getReg();
-
-  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
-  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
-    return SrcReg;
-  llvm_unreachable("Unrecognized copy instruction!");
-  return 0;
-}
-
 //===----------------------------------------------------------------------===//
 // Register allocator hooks.
 //
@@ -1271,16 +1233,7 @@
                                    const VNInfo *VNI,
                                    MachineBasicBlock *MBB,
                                    SlotIndex Idx) const {
-  SlotIndex End = getMBBEndIdx(MBB);
-  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
-    if (VNI->kills[j].isPHI())
-      continue;
-
-    SlotIndex KillIdx = VNI->kills[j];
-    if (KillIdx > Idx && KillIdx <= End)
-      return true;
-  }
-  return false;
+  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
 }
 
 /// RewriteInfo - Keep track of machine instrs that will be rewritten
@@ -1620,93 +1573,9 @@
 }
 
 std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpillsFast(const LiveInterval &li,
-                          const MachineLoopInfo *loopInfo,
-                          VirtRegMap &vrm) {
-  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
-
-  std::vector<LiveInterval*> added;
-
-  assert(li.isSpillable() && "attempt to spill already spilled interval!");
-
-  DEBUG({
-      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
-      li.dump();
-      dbgs() << '\n';
-    });
-
-  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
-
-  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
-  while (RI != mri_->reg_end()) {
-    MachineInstr* MI = &*RI;
-    
-    SmallVector<unsigned, 2> Indices;
-    bool HasUse, HasDef;
-    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(li.reg, &Indices);
-
-    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
-                              Indices, true, slot, li.reg)) {
-      unsigned NewVReg = mri_->createVirtualRegister(rc);
-      vrm.grow();
-      vrm.assignVirt2StackSlot(NewVReg, slot);
-      
-      // create a new register for this spill
-      LiveInterval &nI = getOrCreateInterval(NewVReg);
-      nI.markNotSpillable();
-      
-      // Rewrite register operands to use the new vreg.
-      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
-           E = Indices.end(); I != E; ++I) {
-        MI->getOperand(*I).setReg(NewVReg);
-        
-        if (MI->getOperand(*I).isUse())
-          MI->getOperand(*I).setIsKill(true);
-      }
-      
-      // Fill in  the new live interval.
-      SlotIndex index = getInstructionIndex(MI);
-      if (HasUse) {
-        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
-                     nI.getNextValue(SlotIndex(), 0, false,
-                                     getVNInfoAllocator()));
-        DEBUG(dbgs() << " +" << LR);
-        nI.addRange(LR);
-        vrm.addRestorePoint(NewVReg, MI);
-      }
-      if (HasDef) {
-        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
-                     nI.getNextValue(SlotIndex(), 0, false,
-                                     getVNInfoAllocator()));
-        DEBUG(dbgs() << " +" << LR);
-        nI.addRange(LR);
-        vrm.addSpillPoint(NewVReg, true, MI);
-      }
-      
-      added.push_back(&nI);
-        
-      DEBUG({
-          dbgs() << "\t\t\t\tadded new interval: ";
-          nI.dump();
-          dbgs() << '\n';
-        });
-    }
-    
-    
-    RI = mri_->reg_begin(li.reg);
-  }
-
-  return added;
-}
-
-std::vector<LiveInterval*> LiveIntervals::
 addIntervalsForSpills(const LiveInterval &li,
                       SmallVectorImpl<LiveInterval*> &SpillIs,
                       const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
-  
-  if (EnableFastSpilling)
-    return addIntervalsForSpillsFast(li, loopInfo, vrm);
-  
   assert(li.isSpillable() && "attempt to spill already spilled interval!");
 
   DEBUG({
@@ -2142,7 +2011,6 @@
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     startInst, true, getVNInfoAllocator());
   VN->setHasPHIKill(true);
-  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
   LiveRange LR(
      SlotIndex(getInstructionIndex(startInst).getDefIndex()),
      getMBBEndIdx(startInst->getParent()), VN);

Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveStackAnalysis.cpp Fri Jul  2 04:57:13 2010
@@ -35,8 +35,8 @@
 }
 
 void LiveStacks::releaseMemory() {
-  // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
-  VNInfoAllocator.DestroyAll();
+  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
+  VNInfoAllocator.Reset();
   S2IMap.clear();
   S2RCMap.clear();
 }

Modified: llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LowerSubregs.cpp Fri Jul  2 04:57:13 2010
@@ -62,6 +62,7 @@
     void TransferKillFlag(MachineInstr *MI, unsigned SrcReg,
                           const TargetRegisterInfo *TRI,
                           bool AddIfNotFound = false);
+    void TransferImplicitDefs(MachineInstr *MI);
   };
 
   char LowerSubregsInstructionPass::ID = 0;
@@ -104,6 +105,22 @@
   }
 }
 
+/// TransferImplicitDefs - MI is a pseudo-instruction, and the lowered
+/// replacement instructions immediately precede it.  Copy any implicit-def
+/// operands from MI to the replacement instruction.
+void
+LowerSubregsInstructionPass::TransferImplicitDefs(MachineInstr *MI) {
+  MachineBasicBlock::iterator CopyMI = MI;
+  --CopyMI;
+
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || !MO.isImplicit() || MO.isUse())
+      continue;
+    CopyMI->addOperand(MachineOperand::CreateReg(MO.getReg(), true, true));
+  }
+}
+
 bool LowerSubregsInstructionPass::LowerExtract(MachineInstr *MI) {
   MachineBasicBlock *MBB = MI->getParent();
 
@@ -149,6 +166,7 @@
       TransferDeadFlag(MI, DstReg, TRI);
     if (MI->getOperand(1).isKill())
       TransferKillFlag(MI, SuperReg, TRI, true);
+    TransferImplicitDefs(MI);
     DEBUG({
         MachineBasicBlock::iterator dMI = MI;
         dbgs() << "subreg: " << *(--dMI);
@@ -166,10 +184,10 @@
          MI->getOperand(1).isImm() &&
          (MI->getOperand(2).isReg() && MI->getOperand(2).isUse()) &&
           MI->getOperand(3).isImm() && "Invalid subreg_to_reg");
-          
+
   unsigned DstReg  = MI->getOperand(0).getReg();
   unsigned InsReg  = MI->getOperand(2).getReg();
-  unsigned InsSIdx = MI->getOperand(2).getSubReg();
+  assert(!MI->getOperand(2).getSubReg() && "SubIdx on physreg?");
   unsigned SubIdx  = MI->getOperand(3).getImm();
 
   assert(SubIdx != 0 && "Invalid index for insert_subreg");
@@ -182,13 +200,18 @@
 
   DEBUG(dbgs() << "subreg: CONVERTING: " << *MI);
 
-  if (DstSubReg == InsReg && InsSIdx == 0) {
+  if (DstSubReg == InsReg) {
     // No need to insert an identify copy instruction.
     // Watch out for case like this:
-    // %RAX<def> = ...
-    // %RAX<def> = SUBREG_TO_REG 0, %EAX:3<kill>, 3
-    // The first def is defining RAX, not EAX so the top bits were not
-    // zero extended.
+    // %RAX<def> = SUBREG_TO_REG 0, %EAX<kill>, 3
+    // We must leave %RAX live.
+    if (DstReg != InsReg) {
+      MI->setDesc(TII->get(TargetOpcode::KILL));
+      MI->RemoveOperand(3);     // SubIdx
+      MI->RemoveOperand(1);     // Imm
+      DEBUG(dbgs() << "subreg: replace by: " << *MI);
+      return true;
+    }
     DEBUG(dbgs() << "subreg: eliminated!");
   } else {
     // Insert sub-register copy

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp Fri Jul  2 04:57:13 2010
@@ -13,7 +13,10 @@
 
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/BasicBlock.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCContext.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -396,6 +399,82 @@
   return FBB == 0;
 }
 
+MachineBasicBlock *
+MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
+  MachineFunction *MF = getParent();
+  DebugLoc dl;  // FIXME: this is nowhere
+
+  // We may need to update this block's terminator, but we can't do that if
+  // AnalyzeBranch fails. If this block uses a jump table, we won't touch it.
+  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
+  MachineBasicBlock *TBB = 0, *FBB = 0;
+  SmallVector<MachineOperand, 4> Cond;
+  if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
+    return NULL;
+
+  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
+  MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
+  DEBUG(dbgs() << "PHIElimination splitting critical edge:"
+        " BB#" << getNumber()
+        << " -- BB#" << NMBB->getNumber()
+        << " -- BB#" << Succ->getNumber() << '\n');
+
+  ReplaceUsesOfBlockWith(Succ, NMBB);
+  updateTerminator();
+
+  // Insert unconditional "jump Succ" instruction in NMBB if necessary.
+  NMBB->addSuccessor(Succ);
+  if (!NMBB->isLayoutSuccessor(Succ)) {
+    Cond.clear();
+    MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
+  }
+
+  // Fix PHI nodes in Succ so they refer to NMBB instead of this
+  for (MachineBasicBlock::iterator i = Succ->begin(), e = Succ->end();
+       i != e && i->isPHI(); ++i)
+    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
+      if (i->getOperand(ni+1).getMBB() == this)
+        i->getOperand(ni+1).setMBB(NMBB);
+
+  if (LiveVariables *LV =
+        P->getAnalysisIfAvailable<LiveVariables>())
+    LV->addNewBlock(NMBB, this, Succ);
+
+  if (MachineDominatorTree *MDT =
+        P->getAnalysisIfAvailable<MachineDominatorTree>())
+    MDT->addNewBlock(NMBB, this);
+
+  if (MachineLoopInfo *MLI =
+        P->getAnalysisIfAvailable<MachineLoopInfo>())
+    if (MachineLoop *TIL = MLI->getLoopFor(this)) {
+      // If one or the other blocks were not in a loop, the new block is not
+      // either, and thus LI doesn't need to be updated.
+      if (MachineLoop *DestLoop = MLI->getLoopFor(Succ)) {
+        if (TIL == DestLoop) {
+          // Both in the same loop, the NMBB joins loop.
+          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
+        } else if (TIL->contains(DestLoop)) {
+          // Edge from an outer loop to an inner loop.  Add to the outer loop.
+          TIL->addBasicBlockToLoop(NMBB, MLI->getBase());
+        } else if (DestLoop->contains(TIL)) {
+          // Edge from an inner loop to an outer loop.  Add to the outer loop.
+          DestLoop->addBasicBlockToLoop(NMBB, MLI->getBase());
+        } else {
+          // Edge from two loops with no containment relation.  Because these
+          // are natural loops, we know that the destination block must be the
+          // header of its loop (adding a branch into a loop elsewhere would
+          // create an irreducible loop).
+          assert(DestLoop->getHeader() == Succ &&
+                 "Should not create irreducible loops!");
+          if (MachineLoop *P = DestLoop->getParentLoop())
+            P->addBasicBlockToLoop(NMBB, MLI->getBase());
+        }
+      }
+    }
+
+  return NMBB;
+}
+
 /// removeFromParent - This method unlinks 'this' from the containing function,
 /// and returns it, but does not delete it.
 MachineBasicBlock *MachineBasicBlock::removeFromParent() {

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineInstr.cpp Fri Jul  2 04:57:13 2010
@@ -1211,6 +1211,28 @@
   dbgs() << "  " << *this;
 }
 
+static void printDebugLoc(DebugLoc DL, const MachineFunction *MF, 
+                         raw_ostream &CommentOS) {
+  const LLVMContext &Ctx = MF->getFunction()->getContext();
+  if (!DL.isUnknown()) {          // Print source line info.
+    DIScope Scope(DL.getScope(Ctx));
+    // Omit the directory, because it's likely to be long and uninteresting.
+    if (Scope.Verify())
+      CommentOS << Scope.getFilename();
+    else
+      CommentOS << "<unknown>";
+    CommentOS << ':' << DL.getLine();
+    if (DL.getCol() != 0)
+      CommentOS << ':' << DL.getCol();
+    DebugLoc InlinedAtDL = DebugLoc::getFromDILocation(DL.getInlinedAt(Ctx));
+    if (!InlinedAtDL.isUnknown()) {
+      CommentOS << " @[ ";
+      printDebugLoc(InlinedAtDL, MF, CommentOS);
+      CommentOS << " ]";
+    }
+  }
+}
+
 void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const {
   // We can be a bit tidier if we know the TargetMachine and/or MachineFunction.
   const MachineFunction *MF = 0;
@@ -1308,19 +1330,8 @@
 
   if (!debugLoc.isUnknown() && MF) {
     if (!HaveSemi) OS << ";";
-
-    // TODO: print InlinedAtLoc information
-
-    DIScope Scope(debugLoc.getScope(MF->getFunction()->getContext()));
     OS << " dbg:";
-    // Omit the directory, since it's usually long and uninteresting.
-    if (Scope.Verify())
-      OS << Scope.getFilename();
-    else
-      OS << "<unknown>";
-    OS << ':' << debugLoc.getLine();
-    if (debugLoc.getCol() != 0)
-      OS << ':' << debugLoc.getCol();
+    printDebugLoc(debugLoc, MF, OS);
   }
 
   OS << "\n";
@@ -1461,6 +1472,25 @@
                                        true  /*IsImp*/));
 }
 
+void MachineInstr::setPhysRegsDeadExcept(const SmallVectorImpl<unsigned> &UsedRegs,
+                                         const TargetRegisterInfo &TRI) {
+  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = getOperand(i);
+    if (!MO.isReg() || !MO.isDef()) continue;
+    unsigned Reg = MO.getReg();
+    if (Reg == 0) continue;
+    bool Dead = true;
+    for (SmallVectorImpl<unsigned>::const_iterator I = UsedRegs.begin(),
+         E = UsedRegs.end(); I != E; ++I)
+      if (TRI.regsOverlap(*I, Reg)) {
+        Dead = false;
+        break;
+      }
+    // If there are no uses, including partial uses, the def is dead.
+    if (Dead) MO.setIsDead();
+  }
+}
+
 unsigned
 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
   unsigned Hash = MI->getOpcode() * 37;

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineLICM.cpp Fri Jul  2 04:57:13 2010
@@ -83,7 +83,6 @@
 
     const char *getPassName() const { return "Machine Instruction LICM"; }
 
-    // FIXME: Loop preheaders?
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesCFG();
       AU.addRequired<MachineLoopInfo>();
@@ -182,6 +181,10 @@
     /// current loop preheader that may become duplicates of instructions that
     /// are hoisted out of the loop.
     void InitCSEMap(MachineBasicBlock *BB);
+
+    /// getCurPreheader - Get the preheader for the current loop, splitting
+    /// a critical edge if needed.
+    MachineBasicBlock *getCurPreheader();
   };
 } // end anonymous namespace
 
@@ -193,11 +196,11 @@
   return new MachineLICM(PreRegAlloc);
 }
 
-/// LoopIsOuterMostWithPreheader - Test if the given loop is the outer-most
-/// loop that has a preheader.
-static bool LoopIsOuterMostWithPreheader(MachineLoop *CurLoop) {
+/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
+/// loop that has a unique predecessor.
+static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
   for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
-    if (L->getLoopPreheader())
+    if (L->getLoopPredecessor())
       return false;
   return true;
 }
@@ -223,20 +226,11 @@
 
   for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I){
     CurLoop = *I;
+    CurPreheader = 0;
 
     // If this is done before regalloc, only visit outer-most preheader-sporting
     // loops.
-    if (PreRegAlloc && !LoopIsOuterMostWithPreheader(CurLoop))
-      continue;
-
-    // Determine the block to which to hoist instructions. If we can't find a
-    // suitable loop preheader, we can't do any hoisting.
-    //
-    // FIXME: We are only hoisting if the basic block coming into this loop
-    // has only one successor. This isn't the case in general because we haven't
-    // broken critical edges or added preheaders.
-    CurPreheader = CurLoop->getLoopPreheader();
-    if (!CurPreheader)
+    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop))
       continue;
 
     if (!PreRegAlloc)
@@ -438,13 +432,16 @@
 /// operands that is safe to hoist, this instruction is called to do the
 /// dirty work.
 void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
+  MachineBasicBlock *Preheader = getCurPreheader();
+  if (!Preheader) return;
+
   // Now move the instructions to the predecessor, inserting it before any
   // terminator instructions.
   DEBUG({
       dbgs() << "Hoisting " << *MI;
-      if (CurPreheader->getBasicBlock())
+      if (Preheader->getBasicBlock())
         dbgs() << " to MachineBasicBlock "
-               << CurPreheader->getName();
+               << Preheader->getName();
       if (MI->getParent()->getBasicBlock())
         dbgs() << " from MachineBasicBlock "
                << MI->getParent()->getName();
@@ -453,7 +450,7 @@
 
   // Splice the instruction to the preheader.
   MachineBasicBlock *MBB = MI->getParent();
-  CurPreheader->splice(CurPreheader->getFirstTerminator(), MBB, MI);
+  Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
 
   // Add register to livein list to all the BBs in the current loop since a 
   // loop invariant must be kept live throughout the whole loop. This is
@@ -756,6 +753,9 @@
 /// that are safe to hoist, this instruction is called to do the dirty work.
 ///
 void MachineLICM::Hoist(MachineInstr *MI) {
+  MachineBasicBlock *Preheader = getCurPreheader();
+  if (!Preheader) return;
+
   // First check whether we should hoist this instruction.
   if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
     // If not, try unfolding a hoistable load.
@@ -767,9 +767,9 @@
   // terminator instructions.
   DEBUG({
       dbgs() << "Hoisting " << *MI;
-      if (CurPreheader->getBasicBlock())
+      if (Preheader->getBasicBlock())
         dbgs() << " to MachineBasicBlock "
-               << CurPreheader->getName();
+               << Preheader->getName();
       if (MI->getParent()->getBasicBlock())
         dbgs() << " from MachineBasicBlock "
                << MI->getParent()->getName();
@@ -779,7 +779,7 @@
   // If this is the first instruction being hoisted to the preheader,
   // initialize the CSE map with potential common expressions.
   if (FirstInLoop) {
-    InitCSEMap(CurPreheader);
+    InitCSEMap(Preheader);
     FirstInLoop = false;
   }
 
@@ -789,7 +789,7 @@
     CI = CSEMap.find(Opcode);
   if (!EliminateCSE(MI, CI)) {
     // Otherwise, splice the instruction to the preheader.
-    CurPreheader->splice(CurPreheader->getFirstTerminator(),MI->getParent(),MI);
+    Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
 
     // Clear the kill flags of any register this instruction defines,
     // since they may need to be live throughout the entire loop
@@ -813,3 +813,30 @@
   ++NumHoisted;
   Changed = true;
 }
+
+MachineBasicBlock *MachineLICM::getCurPreheader() {
+  // Determine the block to which to hoist instructions. If we can't find a
+  // suitable loop predecessor, we can't do any hoisting.
+
+  // If we've tried to get a preheader and failed, don't try again.
+  if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
+    return 0;
+
+  if (!CurPreheader) {
+    CurPreheader = CurLoop->getLoopPreheader();
+    if (!CurPreheader) {
+      MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
+      if (!Pred) {
+        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
+        return 0;
+      }
+
+      CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
+      if (!CurPreheader) {
+        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
+        return 0;
+      }
+    }
+  }
+  return CurPreheader;
+}

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp Fri Jul  2 04:57:13 2010
@@ -182,21 +182,32 @@
                                       const TargetRegisterInfo &TRI,
                                       const TargetInstrInfo &TII) {
   // Emit the copies into the top of the block.
-  for (MachineRegisterInfo::livein_iterator LI = livein_begin(),
-         E = livein_end(); LI != E; ++LI)
-    if (LI->second) {
-      const TargetRegisterClass *RC = getRegClass(LI->second);
-      bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
-                                      LI->second, LI->first, RC, RC,
-                                      DebugLoc());
-      assert(Emitted && "Unable to issue a live-in copy instruction!\n");
-      (void) Emitted;
-    }
+  for (unsigned i = 0, e = LiveIns.size(); i != e; ++i)
+    if (LiveIns[i].second) {
+      if (use_empty(LiveIns[i].second)) {
+        // The livein has no uses. Drop it.
+        //
+        // It would be preferable to have isel avoid creating live-in
+        // records for unused arguments in the first place, but it's
+        // complicated by the debug info code for arguments.
+        LiveIns.erase(LiveIns.begin() + i);
+        --i; --e;
+      } else {
+        // Emit a copy.
+        const TargetRegisterClass *RC = getRegClass(LiveIns[i].second);
+        bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
+                                        LiveIns[i].second, LiveIns[i].first,
+                                        RC, RC, DebugLoc());
+        assert(Emitted && "Unable to issue a live-in copy instruction!\n");
+        (void) Emitted;
 
-  // Add function live-ins to entry block live-in set.
-  for (MachineRegisterInfo::livein_iterator I = livein_begin(),
-         E = livein_end(); I != E; ++I)
-    EntryMBB->addLiveIn(I->first);
+        // Add the register to the entry block live-in set.
+        EntryMBB->addLiveIn(LiveIns[i].first);
+      }
+    } else {
+      // Add the register to the entry block live-in set.
+      EntryMBB->addLiveIn(LiveIns[i].first);
+    }
 }
 
 void MachineRegisterInfo::closePhysRegsUsed(const TargetRegisterInfo &TRI) {

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp Fri Jul  2 04:57:13 2010
@@ -282,10 +282,9 @@
   if (MI->getParent() == SuccToSinkTo)
     return false;
 
-  // If the instruction to move defines or uses a dead physical register which
-  // is live when leaving the basic block, don't move it because it could turn
-  // into a zombie define or misuse of that preg. E.g., EFLAGS.
-  // (<rdar://problem/8030636>)
+  // If the instruction to move defines a dead physical register which is live
+  // when leaving the basic block, don't move it because it could turn into a
+  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
   for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
     const MachineOperand &MO = MI->getOperand(I);
     if (!MO.isReg()) continue;

Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineVerifier.cpp Fri Jul  2 04:57:13 2010
@@ -390,7 +390,8 @@
         report("MBB exits via unconditional fall-through but its successor "
                "differs from its CFG successor!", MBB);
       }
-      if (!MBB->empty() && MBB->back().getDesc().isBarrier()) {
+      if (!MBB->empty() && MBB->back().getDesc().isBarrier() &&
+          !TII->isPredicated(&MBB->back())) {
         report("MBB exits via unconditional fall-through but ends with a "
                "barrier instruction!", MBB);
       }

Modified: llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp Fri Jul  2 04:57:13 2010
@@ -34,7 +34,6 @@
 using namespace llvm;
 
 STATISTIC(NumAtomic, "Number of atomic phis lowered");
-STATISTIC(NumSplits, "Number of critical edges split on demand");
 STATISTIC(NumReused, "Number of reused lowered phis");
 
 char PHIElimination::ID = 0;
@@ -391,58 +390,8 @@
       // (not considering PHI nodes). If the register is live in to this block
       // anyway, we would gain nothing from splitting.
       if (!LV.isLiveIn(Reg, MBB) && LV.isLiveOut(Reg, *PreMBB))
-        SplitCriticalEdge(PreMBB, &MBB);
+        PreMBB->SplitCriticalEdge(&MBB, this);
     }
   }
   return true;
 }
-
-MachineBasicBlock *PHIElimination::SplitCriticalEdge(MachineBasicBlock *A,
-                                                     MachineBasicBlock *B) {
-  assert(A && B && "Missing MBB end point");
-
-  MachineFunction *MF = A->getParent();
-  DebugLoc dl;  // FIXME: this is nowhere
-
-  // We may need to update A's terminator, but we can't do that if AnalyzeBranch
-  // fails. If A uses a jump table, we won't touch it.
-  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-  MachineBasicBlock *TBB = 0, *FBB = 0;
-  SmallVector<MachineOperand, 4> Cond;
-  if (TII->AnalyzeBranch(*A, TBB, FBB, Cond))
-    return NULL;
-
-  ++NumSplits;
-
-  MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
-  MF->insert(llvm::next(MachineFunction::iterator(A)), NMBB);
-  DEBUG(dbgs() << "PHIElimination splitting critical edge:"
-        " BB#" << A->getNumber()
-        << " -- BB#" << NMBB->getNumber()
-        << " -- BB#" << B->getNumber() << '\n');
-
-  A->ReplaceUsesOfBlockWith(B, NMBB);
-  A->updateTerminator();
-
-  // Insert unconditional "jump B" instruction in NMBB if necessary.
-  NMBB->addSuccessor(B);
-  if (!NMBB->isLayoutSuccessor(B)) {
-    Cond.clear();
-    MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, B, NULL, Cond, dl);
-  }
-
-  // Fix PHI nodes in B so they refer to NMBB instead of A
-  for (MachineBasicBlock::iterator i = B->begin(), e = B->end();
-       i != e && i->isPHI(); ++i)
-    for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
-      if (i->getOperand(ni+1).getMBB() == A)
-        i->getOperand(ni+1).setMBB(NMBB);
-
-  if (LiveVariables *LV=getAnalysisIfAvailable<LiveVariables>())
-    LV->addNewBlock(NMBB, A, B);
-
-  if (MachineDominatorTree *MDT=getAnalysisIfAvailable<MachineDominatorTree>())
-    MDT->addNewBlock(NMBB, A);
-
-  return NMBB;
-}

Modified: llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp Fri Jul  2 04:57:13 2010
@@ -79,6 +79,7 @@
 namespace {
   class PostRAScheduler : public MachineFunctionPass {
     AliasAnalysis *AA;
+    const TargetInstrInfo *TII;
     CodeGenOpt::Level OptLevel;
 
   public:
@@ -181,30 +182,9 @@
   };
 }
 
-/// isSchedulingBoundary - Test if the given instruction should be
-/// considered a scheduling boundary. This primarily includes labels
-/// and terminators.
-///
-static bool isSchedulingBoundary(const MachineInstr *MI,
-                                 const MachineFunction &MF) {
-  // Terminators and labels can't be scheduled around.
-  if (MI->getDesc().isTerminator() || MI->isLabel())
-    return true;
-
-  // Don't attempt to schedule around any instruction that defines
-  // a stack-oriented pointer, as it's unlikely to be profitable. This
-  // saves compile time, because it doesn't require every single
-  // stack slot reference to depend on the instruction that does the
-  // modification.
-  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
-  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
-    return true;
-
-  return false;
-}
-
 bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
   AA = &getAnalysis<AliasAnalysis>();
+  TII = Fn.getTarget().getInstrInfo();
 
   // Check for explicit enable/disable of post-ra scheduling.
   TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
@@ -265,8 +245,8 @@
     MachineBasicBlock::iterator Current = MBB->end();
     unsigned Count = MBB->size(), CurrentCount = Count;
     for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
-      MachineInstr *MI = prior(I);
-      if (isSchedulingBoundary(MI, Fn)) {
+      MachineInstr *MI = llvm::prior(I);
+      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
         Scheduler.Run(MBB, I, Current, CurrentCount);
         Scheduler.EmitSchedule();
         Current = MI;

Modified: llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PreAllocSplitting.cpp Fri Jul  2 04:57:13 2010
@@ -512,9 +512,6 @@
     LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));
     
     // FIXME: Need to set kills properly for inter-block stuff.
-    if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
-    if (IsIntraBlock)
-      RetVNI->addKill(EndIndex);
   } else if (ContainsDefs && ContainsUses) {
     SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
     SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
@@ -556,12 +553,6 @@
                                       NewVNs, LiveOut, Phis, false, true);
 
     LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
-    
-    if (foundUse && RetVNI->isKill(StartIndex))
-      RetVNI->removeKill(StartIndex);
-    if (IsIntraBlock) {
-      RetVNI->addKill(EndIndex);
-    }
   }
   
   // Memoize results so we don't have to recompute them.
@@ -636,9 +627,6 @@
     for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
            IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
       I->second->setHasPHIKill(true);
-      SlotIndex KillIndex(LIs->getMBBEndIdx(I->first), true);
-      if (!I->second->isKill(KillIndex))
-        I->second->addKill(KillIndex);
     }
   }
       
@@ -648,8 +636,6 @@
   } else
     EndIndex = LIs->getMBBEndIdx(MBB);
   LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
-  if (IsIntraBlock)
-    RetVNI->addKill(EndIndex);
 
   // Memoize results so we don't have to recompute them.
   if (!IsIntraBlock)
@@ -725,25 +711,6 @@
     
     VNInfo* DeadVN = NewVNs[&*DI];
     LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
-    DeadVN->addKill(DefIdx);
-  }
-
-  // Update kill markers.
-  for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
-       VI != VE; ++VI) {
-    VNInfo* VNI = *VI;
-    for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
-      SlotIndex KillIdx = VNI->kills[i];
-      if (KillIdx.isPHI())
-        continue;
-      MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
-      if (KillMI) {
-        MachineOperand *KillMO = KillMI->findRegisterUseOperand(CurrLI->reg);
-        if (KillMO)
-          // It could be a dead def.
-          KillMO->setIsKill();
-      }
-    }
   }
 }
 
@@ -773,19 +740,14 @@
     VNsToCopy.push_back(OldVN);
     
     // Locate two-address redefinitions
-    for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
-         KE = OldVN->kills.end(); KI != KE; ++KI) {
-      assert(!KI->isPHI() &&
-             "VN previously reported having no PHI kills.");
-      MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
-      unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
-      if (DefIdx == ~0U) continue;
-      if (MI->isRegTiedToUseOperand(DefIdx)) {
-        VNInfo* NextVN =
-          CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
-        if (NextVN == OldVN) continue;
+    for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(CurrLI->reg),
+         DE = MRI->def_end(); DI != DE; ++DI) {
+      if (!DI->isRegTiedToUseOperand(DI.getOperandNo())) continue;
+      SlotIndex DefIdx = LIs->getInstructionIndex(&*DI).getDefIndex();
+      VNInfo* NextVN = CurrLI->findDefinedVNInfoForRegInt(DefIdx);
+      if (std::find(VNsToCopy.begin(), VNsToCopy.end(), NextVN) !=
+          VNsToCopy.end())
         Stack.push_back(NextVN);
-      }
     }
   }
   
@@ -836,7 +798,7 @@
   if (IntervalSSMap.count(CurrLI->reg))
     IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
   
-  NumRenumbers++;
+  ++NumRenumbers;
 }
 
 bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
@@ -1192,7 +1154,7 @@
     int StoreFrameIndex;
     unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
     if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
-      NonSpills++;
+      ++NonSpills;
     
     int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
     if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
@@ -1255,7 +1217,7 @@
         (*LI)->removeValNo(CurrVN);
         DefMI->eraseFromParent();
         VNUseCount.erase(CurrVN);
-        NumDeadSpills++;
+        ++NumDeadSpills;
         changed = true;
         continue;
       }
@@ -1328,7 +1290,7 @@
           if (VI->second.erase(use))
             VI->second.insert(NewMI);
 
-        NumDeadSpills++;
+        ++NumDeadSpills;
         changed = true;
         continue;
       }
@@ -1350,7 +1312,7 @@
       LIs->RemoveMachineInstrFromMaps(DefMI);
       (*LI)->removeValNo(CurrVN);
       DefMI->eraseFromParent();
-      NumDeadSpills++;
+      ++NumDeadSpills;
       changed = true;
     }
   }

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp Fri Jul  2 04:57:13 2010
@@ -140,6 +140,8 @@
   private:
     bool runOnMachineFunction(MachineFunction &Fn);
     void AllocateBasicBlock();
+    void handleThroughOperands(MachineInstr *MI,
+                               SmallVectorImpl<unsigned> &VirtDead);
     int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
     bool isLastUseOfLocalReg(MachineOperand&);
 
@@ -513,7 +515,6 @@
   bool New;
   tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg()));
   LiveReg &LR = LRI->second;
-  bool PartialRedef = MI->getOperand(OpNum).getSubReg();
   if (New) {
     // If there is no hint, peek at the only use of this register.
     if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) &&
@@ -525,15 +526,7 @@
         Hint = DstReg;
     }
     allocVirtReg(MI, *LRI, Hint);
-    // If this is only a partial redefinition, we must reload the other parts.
-    if (PartialRedef && MI->readsVirtualRegister(VirtReg)) {
-      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
-      int FI = getStackSpaceFor(VirtReg, RC);
-      DEBUG(dbgs() << "Reloading for partial redef: %reg" << VirtReg << "\n");
-      TII->loadRegFromStackSlot(*MBB, MI, LR.PhysReg, FI, RC, TRI);
-      ++NumLoads;
-    }
-  } else if (LR.LastUse && !PartialRedef) {
+  } else if (LR.LastUse) {
     // Redefining a live register - kill at the last use, unless it is this
     // instruction defining VirtReg multiple times.
     if (LR.LastUse != MI || LR.LastUse->getOperand(LR.LastOpNum).isUse())
@@ -569,10 +562,16 @@
   } else if (LR.Dirty) {
     if (isLastUseOfLocalReg(MO)) {
       DEBUG(dbgs() << "Killing last use: " << MO << "\n");
-      MO.setIsKill();
+      if (MO.isUse())
+        MO.setIsKill();
+      else
+        MO.setIsDead();
     } else if (MO.isKill()) {
       DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n");
       MO.setIsKill(false);
+    } else if (MO.isDead()) {
+      DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n");
+      MO.setIsDead(false);
     }
   } else if (MO.isKill()) {
     // We must remove kill flags from uses of reloaded registers because the
@@ -581,6 +580,9 @@
     // This would cause a second reload of %x into a different register.
     DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
     MO.setIsKill(false);
+  } else if (MO.isDead()) {
+    DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n");
+    MO.setIsDead(false);
   }
   assert(LR.PhysReg && "Register not assigned");
   LR.LastUse = MI;
@@ -612,6 +614,91 @@
   return MO.isDead();
 }
 
+// Handle special instruction operand like early clobbers and tied ops when
+// there are additional physreg defines.
+void RAFast::handleThroughOperands(MachineInstr *MI,
+                                   SmallVectorImpl<unsigned> &VirtDead) {
+  DEBUG(dbgs() << "Scanning for through registers:");
+  SmallSet<unsigned, 8> ThroughRegs;
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg()) continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+    if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
+        (MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
+      if (ThroughRegs.insert(Reg))
+        DEBUG(dbgs() << " %reg" << Reg);
+    }
+  }
+
+  // If any physreg defines collide with preallocated through registers,
+  // we must spill and reallocate.
+  DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || !MO.isDef()) continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+    UsedInInstr.set(Reg);
+    if (ThroughRegs.count(PhysRegState[Reg]))
+      definePhysReg(MI, Reg, regFree);
+    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+      UsedInInstr.set(*AS);
+      if (ThroughRegs.count(PhysRegState[*AS]))
+        definePhysReg(MI, *AS, regFree);
+    }
+  }
+
+  SmallVector<unsigned, 8> PartialDefs;
+  DEBUG(dbgs() << "Allocating tied uses and early clobbers.\n");
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg()) continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+    if (MO.isUse()) {
+      unsigned DefIdx = 0;
+      if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue;
+      DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand "
+        << DefIdx << ".\n");
+      LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
+      unsigned PhysReg = LRI->second.PhysReg;
+      setPhysReg(MI, i, PhysReg);
+      // Note: we don't update the def operand yet. That would cause the normal
+      // def-scan to attempt spilling.
+    } else if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) {
+      DEBUG(dbgs() << "Partial redefine: " << MO << "\n");
+      // Reload the register, but don't assign to the operand just yet.
+      // That would confuse the later phys-def processing pass.
+      LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0);
+      PartialDefs.push_back(LRI->second.PhysReg);
+    } else if (MO.isEarlyClobber()) {
+      // Note: defineVirtReg may invalidate MO.
+      LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
+      unsigned PhysReg = LRI->second.PhysReg;
+      if (setPhysReg(MI, i, PhysReg))
+        VirtDead.push_back(Reg);
+    }
+  }
+
+  // Restore UsedInInstr to a state usable for allocating normal virtual uses.
+  UsedInInstr.reset();
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+    UsedInInstr.set(Reg);
+    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+      UsedInInstr.set(*AS);
+  }
+
+  // Also mark PartialDefs as used to avoid reallocation.
+  for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i)
+    UsedInInstr.set(PartialDefs[i]);
+}
+
 void RAFast::AllocateBasicBlock() {
   DEBUG(dbgs() << "\nAllocating " << *MBB);
 
@@ -625,7 +712,7 @@
          E = MBB->livein_end(); I != E; ++I)
     definePhysReg(MII, *I, regReserved);
 
-  SmallVector<unsigned, 8> PhysECs, VirtDead;
+  SmallVector<unsigned, 8> VirtDead;
   SmallVector<MachineInstr*, 32> Coalesced;
 
   // Otherwise, sequentially allocate each instruction in the MBB.
@@ -689,12 +776,15 @@
 
     // Track registers used by instruction.
     UsedInInstr.reset();
-    PhysECs.clear();
 
     // First scan.
     // Mark physreg uses and early clobbers as used.
     // Find the end of the virtreg operands
     unsigned VirtOpEnd = 0;
+    bool hasTiedOps = false;
+    bool hasEarlyClobbers = false;
+    bool hasPartialRedefs = false;
+    bool hasPhysDefs = false;
     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MI->getOperand(i);
       if (!MO.isReg()) continue;
@@ -702,6 +792,15 @@
       if (!Reg) continue;
       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
         VirtOpEnd = i+1;
+        if (MO.isUse()) {
+          hasTiedOps = hasTiedOps ||
+                                TID.getOperandConstraint(i, TOI::TIED_TO) != -1;
+        } else {
+          if (MO.isEarlyClobber())
+            hasEarlyClobbers = true;
+          if (MO.getSubReg() && MI->readsVirtualRegister(Reg))
+            hasPartialRedefs = true;
+        }
         continue;
       }
       if (!Allocatable.test(Reg)) continue;
@@ -710,13 +809,27 @@
       } else if (MO.isEarlyClobber()) {
         definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
                                regFree : regReserved);
-        PhysECs.push_back(Reg);
-      }
+        hasEarlyClobbers = true;
+      } else
+        hasPhysDefs = true;
+    }
+
+    // The instruction may have virtual register operands that must be allocated
+    // the same register at use-time and def-time: early clobbers and tied
+    // operands. If there are also physical defs, these registers must avoid
+    // both physical defs and uses, making them more constrained than normal
+    // operands.
+    // We didn't detect inline asm tied operands above, so just make this extra
+    // pass for all inline asm.
+    if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
+        (hasTiedOps && hasPhysDefs)) {
+      handleThroughOperands(MI, VirtDead);
+      // Don't attempt coalescing when we have funny stuff going on.
+      CopyDst = 0;
     }
 
     // Second scan.
-    // Allocate virtreg uses and early clobbers.
-    // Collect VirtKills
+    // Allocate virtreg uses.
     for (unsigned i = 0; i != VirtOpEnd; ++i) {
       MachineOperand &MO = MI->getOperand(i);
       if (!MO.isReg()) continue;
@@ -728,15 +841,6 @@
         CopySrc = (CopySrc == Reg || CopySrc == PhysReg) ? PhysReg : 0;
         if (setPhysReg(MI, i, PhysReg))
           killVirtReg(LRI);
-      } else if (MO.isEarlyClobber()) {
-        // Note: defineVirtReg may invalidate MO.
-        LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
-        unsigned PhysReg = LRI->second.PhysReg;
-        if (setPhysReg(MI, i, PhysReg))
-          VirtDead.push_back(Reg);
-        PhysECs.push_back(PhysReg);
-        // Don't attempt coalescing when earlyclobbers are present.
-        CopyDst = 0;
       }
     }
 
@@ -744,12 +848,16 @@
 
     // Track registers defined by instruction - early clobbers at this point.
     UsedInInstr.reset();
-    for (unsigned i = 0, e = PhysECs.size(); i != e; ++i) {
-      unsigned PhysReg = PhysECs[i];
-      UsedInInstr.set(PhysReg);
-      for (const unsigned *AS = TRI->getAliasSet(PhysReg);
-            unsigned Alias = *AS; ++AS)
-        UsedInInstr.set(Alias);
+    if (hasEarlyClobbers) {
+      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+        MachineOperand &MO = MI->getOperand(i);
+        if (!MO.isReg() || !MO.isDef()) continue;
+        unsigned Reg = MO.getReg();
+        if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+        UsedInInstr.set(Reg);
+        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
+          UsedInInstr.set(*AS);
+      }
     }
 
     unsigned DefOpEnd = MI->getNumOperands();

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocLinearScan.cpp Fri Jul  2 04:57:13 2010
@@ -1206,8 +1206,7 @@
     DEBUG(dbgs() << "\t\t\tspilling(c): " << *cur << '\n');
     SmallVector<LiveInterval*, 8> spillIs;
     std::vector<LiveInterval*> added;
-    
-    added = spiller_->spill(cur, spillIs); 
+    spiller_->spill(cur, added, spillIs);
 
     std::sort(added.begin(), added.end(), LISorter());
     addStackInterval(cur, ls_, li_, mri_, *vrm_);
@@ -1285,10 +1284,8 @@
     if (sli->beginIndex() < earliestStart)
       earliestStart = sli->beginIndex();
        
-    std::vector<LiveInterval*> newIs;
-    newIs = spiller_->spill(sli, spillIs, &earliestStart);
+    spiller_->spill(sli, added, spillIs, &earliestStart);
     addStackInterval(sli, ls_, li_, mri_, *vrm_);
-    std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
     spilled.insert(sli->reg);
   }
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp Fri Jul  2 04:57:13 2010
@@ -63,7 +63,7 @@
 bool CoalescerPair::setRegisters(const MachineInstr *MI) {
   srcReg_ = dstReg_ = subIdx_ = 0;
   newRC_ = 0;
-  flipped_ = false;
+  flipped_ = crossClass_ = false;
 
   unsigned Src, Dst, SrcSub, DstSub;
   if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
@@ -78,6 +78,7 @@
     std::swap(SrcSub, DstSub);
     flipped_ = true;
   }
+  origDstReg_ = Dst;
 
   const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
 
@@ -100,11 +101,19 @@
   } else {
     // Both registers are virtual.
 
-    // Identical sub to sub.
-    if (SrcSub == DstSub)
+    // Both registers have subreg indices.
+    if (SrcSub && DstSub) {
+      // For now we only handle the case of identical indices in commensurate
+      // registers: Dreg:ssub_1 + Dreg:ssub_1 -> Dreg
+      // FIXME: Handle Qreg:ssub_3 + Dreg:ssub_1 as QReg:dsub_1 + Dreg.
+      if (SrcSub != DstSub)
+        return false;
+      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+      if (!getCommonSubClass(DstRC, SrcRC))
+        return false;
       SrcSub = DstSub = 0;
-    else if (SrcSub && DstSub)
-      return false; // FIXME: Qreg:ssub_3 + Dreg:ssub_1 => QReg:dsub_1 + Dreg.
+    }
 
     // There can be no SrcSub.
     if (SrcSub) {
@@ -124,6 +133,7 @@
       newRC_ = getCommonSubClass(DstRC, SrcRC);
     if (!newRC_)
       return false;
+    crossClass_ = newRC_ != DstRC || newRC_ != SrcRC;
   }
   // Check our invariants
   assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");

Modified: llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAG.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAG.cpp Fri Jul  2 04:57:13 2010
@@ -380,26 +380,26 @@
 }
 #endif
 
-/// InitDAGTopologicalSorting - create the initial topological 
+/// InitDAGTopologicalSorting - create the initial topological
 /// ordering from the DAG to be scheduled.
 ///
-/// The idea of the algorithm is taken from 
+/// The idea of the algorithm is taken from
 /// "Online algorithms for managing the topological order of
 /// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
-/// This is the MNR algorithm, which was first introduced by 
-/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in  
+/// This is the MNR algorithm, which was first introduced by
+/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
 /// "Maintaining a topological order under edge insertions".
 ///
-/// Short description of the algorithm: 
+/// Short description of the algorithm:
 ///
 /// Topological ordering, ord, of a DAG maps each node to a topological
 /// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
 ///
-/// This means that if there is a path from the node X to the node Z, 
+/// This means that if there is a path from the node X to the node Z,
 /// then ord(X) < ord(Z).
 ///
 /// This property can be used to check for reachability of nodes:
-/// if Z is reachable from X, then an insertion of the edge Z->X would 
+/// if Z is reachable from X, then an insertion of the edge Z->X would
 /// create a cycle.
 ///
 /// The algorithm first computes a topological ordering for the DAG by
@@ -431,7 +431,7 @@
       // Collect leaf nodes.
       WorkList.push_back(SU);
     }
-  }  
+  }
 
   int Id = DAGSize;
   while (!WorkList.empty()) {
@@ -456,7 +456,7 @@
     SUnit *SU = &SUnits[i];
     for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
          I != E; ++I) {
-      assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] && 
+      assert(Node2Index[SU->NodeNum] > Node2Index[I->getSUnit()->NodeNum] &&
       "Wrong topological sorting");
     }
   }
@@ -494,7 +494,7 @@
 void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                      bool& HasLoop) {
   std::vector<const SUnit*> WorkList;
-  WorkList.reserve(SUnits.size()); 
+  WorkList.reserve(SUnits.size());
 
   WorkList.push_back(SU);
   do {
@@ -504,20 +504,20 @@
     for (int I = SU->Succs.size()-1; I >= 0; --I) {
       int s = SU->Succs[I].getSUnit()->NodeNum;
       if (Node2Index[s] == UpperBound) {
-        HasLoop = true; 
+        HasLoop = true;
         return;
       }
       // Visit successors if not already and in affected region.
       if (!Visited.test(s) && Node2Index[s] < UpperBound) {
         WorkList.push_back(SU->Succs[I].getSUnit());
-      } 
-    } 
+      }
+    }
   } while (!WorkList.empty());
 }
 
-/// Shift - Renumber the nodes so that the topological ordering is 
+/// Shift - Renumber the nodes so that the topological ordering is
 /// preserved.
-void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound, 
+void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                        int UpperBound) {
   std::vector<int> L;
   int shift = 0;
@@ -568,7 +568,7 @@
   // Is Ord(TargetSU) < Ord(SU) ?
   if (LowerBound < UpperBound) {
     Visited.reset();
-    // There may be a path from TargetSU to SU. Check for it. 
+    // There may be a path from TargetSU to SU. Check for it.
     DFS(TargetSU, UpperBound, HasLoop);
   }
   return HasLoop;
@@ -580,8 +580,7 @@
   Index2Node[index] = n;
 }
 
-ScheduleDAGTopologicalSort::ScheduleDAGTopologicalSort(
-                                                     std::vector<SUnit> &sunits)
- : SUnits(sunits) {}
+ScheduleDAGTopologicalSort::
+ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits) : SUnits(sunits) {}
 
 ScheduleHazardRecognizer::~ScheduleHazardRecognizer() {}

Modified: llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ScheduleDAGInstrs.h Fri Jul  2 04:57:13 2010
@@ -69,8 +69,10 @@
                      const SmallSet<unsigned, 8> &LoopLiveIns) {
       unsigned Count = 0;
       for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end();
-           I != E; ++I, ++Count) {
+           I != E; ++I) {
         const MachineInstr *MI = I;
+        if (MI->isDebugValue())
+          continue;
         for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
           const MachineOperand &MO = MI->getOperand(i);
           if (!MO.isReg() || !MO.isUse())
@@ -79,6 +81,7 @@
           if (LoopLiveIns.count(MOReg))
             Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count)));
         }
+        ++Count; // Not every iteration due to dbg_value above.
       }
 
       const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Jul  2 04:57:13 2010
@@ -211,6 +211,7 @@
     SDValue visitBUILD_VECTOR(SDNode *N);
     SDValue visitCONCAT_VECTORS(SDNode *N);
     SDValue visitVECTOR_SHUFFLE(SDNode *N);
+    SDValue visitMEMBARRIER(SDNode *N);
 
     SDValue XformToShuffleWithZero(SDNode *N);
     SDValue ReassociateOps(unsigned Opc, DebugLoc DL, SDValue LHS, SDValue RHS);
@@ -1079,6 +1080,7 @@
   case ISD::BUILD_VECTOR:       return visitBUILD_VECTOR(N);
   case ISD::CONCAT_VECTORS:     return visitCONCAT_VECTORS(N);
   case ISD::VECTOR_SHUFFLE:     return visitVECTOR_SHUFFLE(N);
+  case ISD::MEMBARRIER:         return visitMEMBARRIER(N);
   }
   return SDValue();
 }
@@ -2320,7 +2322,8 @@
   }
 
   // fold (or x, undef) -> -1
-  if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) {
+  if (!LegalOperations &&
+      (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)) {
     EVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT;
     return DAG.getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
   }
@@ -6106,7 +6109,6 @@
    // Check if the result type doesn't match the inserted element type. A
    // SCALAR_TO_VECTOR may truncate the inserted element and the
    // EXTRACT_VECTOR_ELT may widen the extracted vector.
-   EVT EltVT = InVec.getValueType().getVectorElementType();
    SDValue InOp = InVec.getOperand(0);
    EVT NVT = N->getValueType(0);
    if (InOp.getValueType() != NVT) {
@@ -6363,6 +6365,59 @@
   return SDValue();
 }
 
+SDValue DAGCombiner::visitMEMBARRIER(SDNode* N) {
+  if (!TLI.getShouldFoldAtomicFences())
+    return SDValue();
+
+  SDValue atomic = N->getOperand(0);
+  switch (atomic.getOpcode()) {
+    case ISD::ATOMIC_CMP_SWAP:
+    case ISD::ATOMIC_SWAP:
+    case ISD::ATOMIC_LOAD_ADD:
+    case ISD::ATOMIC_LOAD_SUB:
+    case ISD::ATOMIC_LOAD_AND:
+    case ISD::ATOMIC_LOAD_OR:
+    case ISD::ATOMIC_LOAD_XOR:
+    case ISD::ATOMIC_LOAD_NAND:
+    case ISD::ATOMIC_LOAD_MIN:
+    case ISD::ATOMIC_LOAD_MAX:
+    case ISD::ATOMIC_LOAD_UMIN:
+    case ISD::ATOMIC_LOAD_UMAX:
+      break;
+    default:
+      return SDValue();
+  }
+
+  SDValue fence = atomic.getOperand(0);
+  if (fence.getOpcode() != ISD::MEMBARRIER)
+    return SDValue();
+
+  switch (atomic.getOpcode()) {
+    case ISD::ATOMIC_CMP_SWAP:
+      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+                                    fence.getOperand(0),
+                                    atomic.getOperand(1), atomic.getOperand(2),
+                                    atomic.getOperand(3)), atomic.getResNo());
+    case ISD::ATOMIC_SWAP:
+    case ISD::ATOMIC_LOAD_ADD:
+    case ISD::ATOMIC_LOAD_SUB:
+    case ISD::ATOMIC_LOAD_AND:
+    case ISD::ATOMIC_LOAD_OR:
+    case ISD::ATOMIC_LOAD_XOR:
+    case ISD::ATOMIC_LOAD_NAND:
+    case ISD::ATOMIC_LOAD_MIN:
+    case ISD::ATOMIC_LOAD_MAX:
+    case ISD::ATOMIC_LOAD_UMIN:
+    case ISD::ATOMIC_LOAD_UMAX:
+      return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+                                    fence.getOperand(0),
+                                    atomic.getOperand(1), atomic.getOperand(2)),
+                     atomic.getResNo());
+    default:
+      return SDValue();
+  }
+}
+
 /// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform
 /// an AND to a vector_shuffle with the destination vector and a zero vector.
 /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==>

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp Fri Jul  2 04:57:13 2010
@@ -48,6 +48,7 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
@@ -161,8 +162,11 @@
       }
     }
   } else if (const Operator *Op = dyn_cast<Operator>(V)) {
-    if (!SelectOperator(Op, Op->getOpcode())) return 0;
-    Reg = LocalValueMap[Op];
+    if (!SelectOperator(Op, Op->getOpcode()))
+      if (!isa<Instruction>(Op) ||
+          !TargetSelectInstruction(cast<Instruction>(Op)))
+        return 0;
+    Reg = lookUpRegForValue(Op);
   } else if (isa<UndefValue>(V)) {
     Reg = createResultReg(TLI.getRegClassFor(VT));
     BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
@@ -185,8 +189,9 @@
   // cache values defined by Instructions across blocks, and other values
   // only locally. This is because Instructions already have the SSA
   // def-dominates-use requirement enforced.
-  if (ValueMap.count(V))
-    return ValueMap[V];
+  DenseMap<const Value *, unsigned>::iterator I = ValueMap.find(V);
+  if (I != ValueMap.end())
+    return I->second;
   return LocalValueMap[V];
 }
 
@@ -712,8 +717,31 @@
 }
 
 bool
+FastISel::SelectLoad(const User *I) {
+  LoadInst *LI = const_cast<LoadInst *>(cast<LoadInst>(I));
+
+  // For a load from an alloca, make a limited effort to find the value
+  // already available in a register, avoiding redundant loads.
+  if (!LI->isVolatile() && isa<AllocaInst>(LI->getPointerOperand())) {
+    BasicBlock::iterator ScanFrom = LI;
+    if (const Value *V = FindAvailableLoadedValue(LI->getPointerOperand(),
+                                                  LI->getParent(), ScanFrom)) {
+      unsigned ResultReg = getRegForValue(V);
+      if (ResultReg != 0) {
+        UpdateValueMap(I, ResultReg);
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+bool
 FastISel::SelectOperator(const User *I, unsigned Opcode) {
   switch (Opcode) {
+  case Instruction::Load:
+    return SelectLoad(I);
   case Instruction::Add:
     return SelectBinaryOp(I, ISD::ADD);
   case Instruction::FAdd:
@@ -849,6 +877,7 @@
     TD(*TM.getTargetData()),
     TII(*TM.getInstrInfo()),
     TLI(*TM.getTargetLowering()),
+    TRI(*TM.getRegisterInfo()),
     IsBottomUp(false) {
 }
 
@@ -1182,7 +1211,7 @@
       // by bailing out early, we may leave behind some dead instructions,
       // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
       // own moves. Second, this check is necessary becuase FastISel doesn't
-      // use CreateRegForValue to create registers, so it always creates
+      // use CreateRegs to create registers, so it always creates
       // exactly one register for each non-void instruction.
       EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
       if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp Fri Jul  2 04:57:13 2010
@@ -30,7 +30,6 @@
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetFrameInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/Debug.h"
@@ -173,29 +172,30 @@
   ArgDbgValues.clear();
 }
 
-unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
+/// CreateReg - Allocate a single virtual register for the given type.
+unsigned FunctionLoweringInfo::CreateReg(EVT VT) {
   return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
 }
 
-/// CreateRegForValue - Allocate the appropriate number of virtual registers of
+/// CreateRegs - Allocate the appropriate number of virtual registers of
 /// the correctly promoted or expanded types.  Assign these registers
 /// consecutive vreg numbers and return the first assigned number.
 ///
 /// In the case that the given value has struct or array type, this function
 /// will assign registers for each member or element.
 ///
-unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
+unsigned FunctionLoweringInfo::CreateRegs(const Type *Ty) {
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, V->getType(), ValueVTs);
+  ComputeValueVTs(TLI, Ty, ValueVTs);
 
   unsigned FirstReg = 0;
   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
     EVT ValueVT = ValueVTs[Value];
-    EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
+    EVT RegisterVT = TLI.getRegisterType(Ty->getContext(), ValueVT);
 
-    unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
+    unsigned NumRegs = TLI.getNumRegisters(Ty->getContext(), ValueVT);
     for (unsigned i = 0; i != NumRegs; ++i) {
-      unsigned R = MakeReg(RegisterVT);
+      unsigned R = CreateReg(RegisterVT);
       if (!FirstReg) FirstReg = R;
     }
   }
@@ -207,7 +207,7 @@
 void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
                         MachineBasicBlock *MBB) {
   // Inform the MachineModuleInfo of the personality for this landing pad.
-  const ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
+  const ConstantExpr *CE = cast<ConstantExpr>(I.getArgOperand(1));
   assert(CE->getOpcode() == Instruction::BitCast &&
          isa<Function>(CE->getOperand(0)) &&
          "Personality should be a function");
@@ -216,18 +216,18 @@
   // Gather all the type infos for this landing pad and pass them along to
   // MachineModuleInfo.
   std::vector<const GlobalVariable *> TyInfo;
-  unsigned N = I.getNumOperands();
+  unsigned N = I.getNumArgOperands();
 
-  for (unsigned i = N - 1; i > 2; --i) {
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
+  for (unsigned i = N - 1; i > 1; --i) {
+    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(i))) {
       unsigned FilterLength = CI->getZExtValue();
       unsigned FirstCatch = i + FilterLength + !FilterLength;
-      assert (FirstCatch <= N && "Invalid filter length");
+      assert(FirstCatch <= N && "Invalid filter length");
 
       if (FirstCatch < N) {
         TyInfo.reserve(N - FirstCatch);
         for (unsigned j = FirstCatch; j < N; ++j)
-          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+          TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
         MMI->addCatchTypeInfo(MBB, TyInfo);
         TyInfo.clear();
       }
@@ -239,7 +239,7 @@
         // Filter.
         TyInfo.reserve(FilterLength - 1);
         for (unsigned j = i + 1; j < FirstCatch; ++j)
-          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+          TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
         MMI->addFilterTypeInfo(MBB, TyInfo);
         TyInfo.clear();
       }
@@ -248,10 +248,10 @@
     }
   }
 
-  if (N > 3) {
-    TyInfo.reserve(N - 3);
-    for (unsigned j = 3; j < N; ++j)
-      TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
+  if (N > 2) {
+    TyInfo.reserve(N - 2);
+    for (unsigned j = 2; j < N; ++j)
+      TyInfo.push_back(ExtractTypeInfo(I.getArgOperand(j)));
     MMI->addCatchTypeInfo(MBB, TyInfo);
   }
 }

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h Fri Jul  2 04:57:13 2010
@@ -113,20 +113,20 @@
   /// different function.
   void clear();
 
-  unsigned MakeReg(EVT VT);
-  
   /// isExportedInst - Return true if the specified value is an instruction
   /// exported from its block.
   bool isExportedInst(const Value *V) {
     return ValueMap.count(V);
   }
 
-  unsigned CreateRegForValue(const Value *V);
+  unsigned CreateReg(EVT VT);
+  
+  unsigned CreateRegs(const Type *Ty);
   
   unsigned InitializeRegForValue(const Value *V) {
     unsigned &R = ValueMap[V];
     assert(R == 0 && "Already initialized this value register!");
-    return R = CreateRegForValue(V);
+    return R = CreateRegs(V->getType());
   }
 };
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp Fri Jul  2 04:57:13 2010
@@ -123,7 +123,7 @@
 
   EVT VT = Node->getValueType(ResNo);
   const TargetRegisterClass *SrcRC = 0, *DstRC = 0;
-  SrcRC = TRI->getPhysicalRegisterRegClass(SrcReg, VT);
+  SrcRC = TRI->getMinimalPhysRegClass(SrcReg, VT);
   
   // Figure out the register class to create for the destreg.
   if (VRBase) {
@@ -676,6 +676,33 @@
 
   // Create the new machine instruction.
   MachineInstr *MI = BuildMI(*MF, Node->getDebugLoc(), II);
+
+  // The MachineInstr constructor adds implicit-def operands. Scan through
+  // these to determine which are dead.
+  if (MI->getNumOperands() != 0 &&
+      Node->getValueType(Node->getNumValues()-1) == MVT::Flag) {
+    // First, collect all used registers.
+    SmallVector<unsigned, 8> UsedRegs;
+    for (SDNode *F = Node->getFlaggedUser(); F; F = F->getFlaggedUser())
+      if (F->getOpcode() == ISD::CopyFromReg)
+        UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
+      else {
+        // Collect declared implicit uses.
+        const TargetInstrDesc &TID = TII->get(F->getMachineOpcode());
+        UsedRegs.append(TID.getImplicitUses(),
+                        TID.getImplicitUses() + TID.getNumImplicitUses());
+        // In addition to declared implicit uses, we must also check for
+        // direct RegisterSDNode operands.
+        for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
+          if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
+            unsigned Reg = R->getReg();
+            if (Reg != 0 && TargetRegisterInfo::isPhysicalRegister(Reg))
+              UsedRegs.push_back(Reg);
+          }
+      }
+    // Then mark unused registers as dead.
+    MI->setPhysRegsDeadExcept(UsedRegs, *TRI);
+  }
   
   // Add result register values for things that are defined by this
   // instruction.
@@ -767,13 +794,13 @@
     if (TargetRegisterInfo::isVirtualRegister(SrcReg))
       SrcTRC = MRI->getRegClass(SrcReg);
     else
-      SrcTRC = TRI->getPhysicalRegisterRegClass(SrcReg,SrcVal.getValueType());
+      SrcTRC = TRI->getMinimalPhysRegClass(SrcReg,SrcVal.getValueType());
 
     if (TargetRegisterInfo::isVirtualRegister(DestReg))
       DstTRC = MRI->getRegClass(DestReg);
     else
-      DstTRC = TRI->getPhysicalRegisterRegClass(DestReg,
-                                            Node->getOperand(1).getValueType());
+      DstTRC = TRI->getMinimalPhysRegClass(DestReg,
+                                           Node->getOperand(1).getValueType());
 
     bool Emitted = TII->copyRegToReg(*MBB, InsertPos, DestReg, SrcReg,
                                      DstTRC, SrcTRC, Node->getDebugLoc());

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Fri Jul  2 04:57:13 2010
@@ -31,6 +31,7 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/ADT/DenseMap.h"
@@ -143,6 +144,8 @@
                              DebugLoc dl);
 
   SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
+  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
+                                                 SDNode *Node, bool isSigned);
   SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                           RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                           RTLIB::Libcall Call_PPCF128);
@@ -172,6 +175,8 @@
   SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
   SDValue ExpandVectorBuildThroughStack(SDNode* Node);
 
+  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);
+
   void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
   void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
 };
@@ -1941,6 +1946,44 @@
   return CallInfo.first;
 }
 
+// ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
+// ExpandLibCall except that the first operand is the in-chain.
+std::pair<SDValue, SDValue>
+SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
+                                         SDNode *Node,
+                                         bool isSigned) {
+  assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
+  SDValue InChain = Node->getOperand(0);
+
+  TargetLowering::ArgListTy Args;
+  TargetLowering::ArgListEntry Entry;
+  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
+    EVT ArgVT = Node->getOperand(i).getValueType();
+    const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+    Entry.Node = Node->getOperand(i);
+    Entry.Ty = ArgTy;
+    Entry.isSExt = isSigned;
+    Entry.isZExt = !isSigned;
+    Args.push_back(Entry);
+  }
+  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
+                                         TLI.getPointerTy());
+
+  // Splice the libcall in wherever FindInputOutputChains tells us to.
+  const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+  std::pair<SDValue, SDValue> CallInfo =
+    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
+                    0, TLI.getLibcallCallingConv(LC), false,
+                    /*isReturnValueUsed=*/true,
+                    Callee, Args, DAG, Node->getDebugLoc());
+
+  // Legalize the call sequence, starting with the chain.  This will advance
+  // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
+  // was added by LowerCallTo (guaranteeing proper serialization of calls).
+  LegalizeOp(CallInfo.second);
+  return CallInfo;
+}
+
 SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
                                               RTLIB::Libcall Call_F32,
                                               RTLIB::Libcall Call_F64,
@@ -2347,6 +2390,92 @@
   }
 }
 
+std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
+  unsigned Opc = Node->getOpcode();
+  MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
+  RTLIB::Libcall LC;
+
+  switch (Opc) {
+  default:
+    llvm_unreachable("Unhandled atomic intrinsic Expand!");
+    break;
+  case ISD::ATOMIC_SWAP:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break;
+    }
+    break;
+  case ISD::ATOMIC_CMP_SWAP:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break;
+    }
+    break;
+  case ISD::ATOMIC_LOAD_ADD:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_ADD_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break;
+    }
+    break;
+  case ISD::ATOMIC_LOAD_SUB:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_SUB_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break;
+    }
+    break;
+  case ISD::ATOMIC_LOAD_AND:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_AND_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break;
+    }
+    break;
+  case ISD::ATOMIC_LOAD_OR:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_OR_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break;
+    }
+    break;
+  case ISD::ATOMIC_LOAD_XOR:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_XOR_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break;
+    }
+    break;
+  case ISD::ATOMIC_LOAD_NAND:
+    switch (VT.SimpleTy) {
+    default: llvm_unreachable("Unexpected value type for atomic!");
+    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_NAND_1; break;
+    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break;
+    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break;
+    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break;
+    }
+    break;
+  }
+
+  return ExpandChainLibCall(LC, Node, false);
+}
+
 void SelectionDAGLegalize::ExpandNode(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
   DebugLoc dl = Node->getDebugLoc();
@@ -2404,8 +2533,9 @@
   case ISD::ATOMIC_LOAD_UMIN:
   case ISD::ATOMIC_LOAD_UMAX:
   case ISD::ATOMIC_CMP_SWAP: {
-    assert (0 && "atomic intrinsic not lowered!");
-    Results.push_back(Node->getOperand(0));
+    std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
+    Results.push_back(Tmp.first);
+    Results.push_back(Tmp.second);
     break;
   }
   case ISD::DYNAMIC_STACKALLOC:
@@ -2512,15 +2642,29 @@
     EVT VT = Node->getValueType(0);
     Tmp1 = Node->getOperand(0);
     Tmp2 = Node->getOperand(1);
-    SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
-                                 false, false, 0);
+    unsigned Align = Node->getConstantOperandVal(3);
+
+    SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0,
+                                     false, false, 0);
+    SDValue VAList = VAListLoad;
+
+    if (Align != 0 ) {
+      VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
+                           DAG.getConstant(Align - 1,
+                                           TLI.getPointerTy()));
+
+      VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
+                           DAG.getConstant(-Align,
+                                           TLI.getPointerTy()));
+    }
+
     // Increment the pointer, VAList, to the next vaarg
     Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                        DAG.getConstant(TLI.getTargetData()->
                           getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                        TLI.getPointerTy()));
     // Store the incremented VAList to the legalized pointer
-    Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0,
+    Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2, V, 0,
                         false, false, 0);
     // Load the actual argument out of the pointer VAList
     Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0,

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp Fri Jul  2 04:57:13 2010
@@ -485,7 +485,7 @@
       NewOps.push_back(Op);
     } else if (Op != OrigOp) {
       // This is the first operand to change - add all operands so far.
-      NewOps.insert(NewOps.end(), N->op_begin(), N->op_begin() + i);
+      NewOps.append(N->op_begin(), N->op_begin() + i);
       NewOps.push_back(Op);
     }
   }

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp Fri Jul  2 04:57:13 2010
@@ -238,12 +238,17 @@
 }
 
 void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
-  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+  EVT OVT = N->getValueType(0);
+  EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), OVT);
   SDValue Chain = N->getOperand(0);
   SDValue Ptr = N->getOperand(1);
   DebugLoc dl = N->getDebugLoc();
+  const unsigned OldAlign = N->getConstantOperandVal(3);
+  const Type *Type = OVT.getTypeForEVT(*DAG.getContext());
+  const unsigned TypeAlign = TLI.getTargetData()->getABITypeAlignment(Type);
+  const unsigned Align = std::max(OldAlign, TypeAlign);
 
-  Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2));
+  Lo = DAG.getVAArg(NVT, dl, Chain, Ptr, N->getOperand(2), Align);
   Hi = DAG.getVAArg(NVT, dl, Lo.getValue(1), Ptr, N->getOperand(2));
 
   // Handle endianness of the load.

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp Fri Jul  2 04:57:13 2010
@@ -535,7 +535,7 @@
         SUnit *LRDef = LiveRegDefs[Reg];
         EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
         const TargetRegisterClass *RC =
-          TRI->getPhysicalRegisterRegClass(Reg, VT);
+          TRI->getMinimalPhysRegClass(Reg, VT);
         const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
 
         // If cross copy register class is null, then it must be possible copy

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp Fri Jul  2 04:57:13 2010
@@ -795,7 +795,7 @@
         SUnit *LRDef = LiveRegDefs[Reg];
         EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
         const TargetRegisterClass *RC =
-          TRI->getPhysicalRegisterRegClass(Reg, VT);
+          TRI->getMinimalPhysRegClass(Reg, VT);
         const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
 
         // If cross copy register class is null, then it must be possible copy

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp Fri Jul  2 04:57:13 2010
@@ -101,7 +101,7 @@
         II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
       PhysReg = Reg;
       const TargetRegisterClass *RC =
-        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
+        TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
       Cost = RC->getCopyCost();
     }
   }
@@ -110,17 +110,42 @@
 static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                      SelectionDAG *DAG) {
   SmallVector<EVT, 4> VTs;
-  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
-    VTs.push_back(N->getValueType(i));
+  SDNode *FlagDestNode = Flag.getNode();
+
+  // Don't add a flag from a node to itself.
+  if (FlagDestNode == N) return;
+
+  // Don't add a flag to something which already has a flag.
+  if (N->getValueType(N->getNumValues() - 1) == MVT::Flag) return;
+
+  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
+    VTs.push_back(N->getValueType(I));
+
   if (AddFlag)
     VTs.push_back(MVT::Flag);
+
   SmallVector<SDValue, 4> Ops;
-  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
-    Ops.push_back(N->getOperand(i));
-  if (Flag.getNode())
+  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
+    Ops.push_back(N->getOperand(I));
+
+  if (FlagDestNode)
     Ops.push_back(Flag);
+
   SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
+  MachineSDNode::mmo_iterator Begin = 0, End = 0;
+  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
+
+  // Store memory references.
+  if (MN) {
+    Begin = MN->memoperands_begin();
+    End = MN->memoperands_end();
+  }
+
   DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
+
+  // Reset the memory references
+  if (MN)
+    MN->setMemRefs(Begin, End);
 }
 
 /// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
@@ -143,7 +168,6 @@
   DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
   bool Cluster = false;
   SDNode *Base = Node;
-  int64_t BaseOffset;
   for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
        I != E; ++I) {
     SDNode *User = *I;
@@ -159,12 +183,8 @@
       Offsets.push_back(Offset1);
     O2SMap.insert(std::make_pair(Offset2, User));
     Offsets.push_back(Offset2);
-    if (Offset2 < Offset1) {
+    if (Offset2 < Offset1)
       Base = User;
-      BaseOffset = Offset2;
-    } else {
-      BaseOffset = Offset1;
-    }
     Cluster = true;
   }
 
@@ -195,14 +215,18 @@
   // Cluster loads by adding MVT::Flag outputs and inputs. This also
   // ensure they are scheduled in order of increasing addresses.
   SDNode *Lead = Loads[0];
-  AddFlags(Lead, SDValue(0,0), true, DAG);
-  SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
-  for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
-    bool OutFlag = i < e-1;
-    SDNode *Load = Loads[i];
+  AddFlags(Lead, SDValue(0, 0), true, DAG);
+
+  SDValue InFlag = SDValue(Lead, Lead->getNumValues() - 1);
+  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
+    bool OutFlag = I < E - 1;
+    SDNode *Load = Loads[I];
+
     AddFlags(Load, InFlag, OutFlag, DAG);
+
     if (OutFlag)
-      InFlag = SDValue(Load, Load->getNumValues()-1);
+      InFlag = SDValue(Load, Load->getNumValues() - 1);
+
     ++LoadsClustered;
   }
 }
@@ -483,7 +507,7 @@
 }
 
 // ProcessSourceNode - Process nodes with source order numbers. These are added
-// to a vector which EmitSchedule use to determine how to insert dbg_value
+// to a vector which EmitSchedule uses to determine how to insert dbg_value
 // instructions in the right order.
 static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                            InstrEmitter &Emitter,
@@ -540,7 +564,7 @@
     for (; PDI != PDE; ++PDI) {
       MachineInstr *DbgMI= Emitter.EmitDbgValue(*PDI, VRBaseMap);
       if (DbgMI)
-        BB->insert(BB->end(), DbgMI);
+        BB->push_back(DbgMI);
     }
   }
 
@@ -596,7 +620,6 @@
     SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
     // Now emit the rest according to source order.
     unsigned LastOrder = 0;
-    MachineInstr *LastMI = 0;
     for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
       unsigned Order = Orders[i].first;
       MachineInstr *MI = Orders[i].second;
@@ -628,7 +651,6 @@
         }
       }
       LastOrder = Order;
-      LastMI = MI;
     }
     // Add trailing DbgValue's before the terminator. FIXME: May want to add
     // some of them before one or more conditional branches?

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Fri Jul  2 04:57:13 2010
@@ -2286,7 +2286,6 @@
 SDValue SelectionDAG::getShuffleScalarElt(const ShuffleVectorSDNode *N,
                                           unsigned i) {
   EVT VT = N->getValueType(0);
-  DebugLoc dl = N->getDebugLoc();
   if (N->getMaskElt(i) < 0)
     return getUNDEF(VT.getVectorElementType());
   unsigned Index = N->getMaskElt(i);
@@ -2626,7 +2625,7 @@
     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
         N2.getOpcode() == ISD::BUILD_VECTOR) {
       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
-      Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
+      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
       return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
     }
     break;
@@ -3015,7 +3014,6 @@
                               SDValue N1, SDValue N2, SDValue N3) {
   // Perform various simplifications.
   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
-  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
   switch (Opcode) {
   case ISD::CONCAT_VECTORS:
     // A CONCAT_VECTOR with all operands BUILD_VECTOR can be simplified to
@@ -3024,8 +3022,8 @@
         N2.getOpcode() == ISD::BUILD_VECTOR &&
         N3.getOpcode() == ISD::BUILD_VECTOR) {
       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(), N1.getNode()->op_end());
-      Elts.insert(Elts.end(), N2.getNode()->op_begin(), N2.getNode()->op_end());
-      Elts.insert(Elts.end(), N3.getNode()->op_begin(), N3.getNode()->op_end());
+      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
+      Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
       return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
     }
     break;
@@ -3045,14 +3043,6 @@
 
     if (N2 == N3) return N2;   // select C, X, X -> X
     break;
-  case ISD::BRCOND:
-    if (N2C) {
-      if (N2C->getZExtValue()) // Unconditional branch
-        return getNode(ISD::BR, DL, MVT::Other, N1, N3);
-      else
-        return N1;         // Never-taken branch
-    }
-    break;
   case ISD::VECTOR_SHUFFLE:
     llvm_unreachable("should use getVectorShuffle constructor!");
     break;
@@ -4151,9 +4141,10 @@
 
 SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
                                SDValue Chain, SDValue Ptr,
-                               SDValue SV) {
-  SDValue Ops[] = { Chain, Ptr, SV };
-  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
+                               SDValue SV,
+                               unsigned Align) {
+  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
+  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
 }
 
 SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Fri Jul  2 04:57:13 2010
@@ -805,27 +805,63 @@
   }
 }
 
+// getValue - Return an SDValue for the given Value.
 SDValue SelectionDAGBuilder::getValue(const Value *V) {
+  // If we already have an SDValue for this value, use it. It's important
+  // to do this first, so that we don't create a CopyFromReg if we already
+  // have a regular SDValue.
   SDValue &N = NodeMap[V];
   if (N.getNode()) return N;
 
+  // If there's a virtual register allocated and initialized for this
+  // value, use it.
+  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
+  if (It != FuncInfo.ValueMap.end()) {
+    unsigned InReg = It->second;
+    RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
+    SDValue Chain = DAG.getEntryNode();
+    return N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+  }
+
+  // Otherwise create a new SDValue and remember it.
+  SDValue Val = getValueImpl(V);
+  NodeMap[V] = Val;
+  return Val;
+}
+
+/// getNonRegisterValue - Return an SDValue for the given Value, but
+/// don't look in FuncInfo.ValueMap for a virtual register.
+SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
+  // If we already have an SDValue for this value, use it.
+  SDValue &N = NodeMap[V];
+  if (N.getNode()) return N;
+
+  // Otherwise create a new SDValue and remember it.
+  SDValue Val = getValueImpl(V);
+  NodeMap[V] = Val;
+  return Val;
+}
+
+/// getValueImpl - Helper function for getValue and getMaterializedValue.
+/// Create an SDValue for the given value.
+SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   if (const Constant *C = dyn_cast<Constant>(V)) {
     EVT VT = TLI.getValueType(V->getType(), true);
 
     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
-      return N = DAG.getConstant(*CI, VT);
+      return DAG.getConstant(*CI, VT);
 
     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
-      return N = DAG.getGlobalAddress(GV, VT);
+      return DAG.getGlobalAddress(GV, VT);
 
     if (isa<ConstantPointerNull>(C))
-      return N = DAG.getConstant(0, TLI.getPointerTy());
+      return DAG.getConstant(0, TLI.getPointerTy());
 
     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
-      return N = DAG.getConstantFP(*CFP, VT);
+      return DAG.getConstantFP(*CFP, VT);
 
     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
-      return N = DAG.getUNDEF(VT);
+      return DAG.getUNDEF(VT);
 
     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
       visit(CE->getOpcode(), *CE);
@@ -913,12 +949,18 @@
       return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
   }
 
-  unsigned InReg = FuncInfo.ValueMap[V];
-  assert(InReg && "Value not in map!");
+  // If this is an instruction which fast-isel has deferred, select it now.
+  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
+    assert(Inst->isSafeToSpeculativelyExecute() &&
+           "Instruction with side effects deferred!");
+    visit(*Inst);
+    DenseMap<const Value *, SDValue>::iterator NIt = NodeMap.find(Inst);
+    if (NIt != NodeMap.end() && NIt->second.getNode())
+      return NIt->second;
+  }
 
-  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
-  SDValue Chain = DAG.getEntryNode();
-  return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL);
+  llvm_unreachable("Can't get register for value!");
+  return SDValue();
 }
 
 /// Get the EVTs and ArgFlags collections that represent the legalized return 
@@ -1424,18 +1466,10 @@
                                MVT::Other, getControlRoot(), Cond,
                                DAG.getBasicBlock(CB.TrueBB));
 
-  // If the branch was constant folded, fix up the CFG.
-  if (BrCond.getOpcode() == ISD::BR) {
-    SwitchBB->removeSuccessor(CB.FalseBB);
-  } else {
-    // Otherwise, go ahead and insert the false branch.
-    if (BrCond == getControlRoot())
-      SwitchBB->removeSuccessor(CB.TrueBB);
-
-    if (CB.FalseBB != NextBlock)
-      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
-                           DAG.getBasicBlock(CB.FalseBB));
-  }
+  // Insert the false branch.
+  if (CB.FalseBB != NextBlock)
+    BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
+                         DAG.getBasicBlock(CB.FalseBB));
 
   DAG.setRoot(BrCond);
 }
@@ -1474,7 +1508,7 @@
   // therefore require extension or truncating.
   SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
 
-  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
+  unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                     JumpTableReg, SwitchOp);
   JT.Reg = JumpTableReg;
@@ -1525,7 +1559,7 @@
   SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(),
                                        TLI.getPointerTy());
 
-  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
+  B.Reg = FuncInfo.CreateReg(TLI.getPointerTy());
   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                     B.Reg, ShiftOp);
 
@@ -1557,29 +1591,41 @@
                                            unsigned Reg,
                                            BitTestCase &B,
                                            MachineBasicBlock *SwitchBB) {
-  // Make desired shift
   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
                                        TLI.getPointerTy());
-  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
-                                  TLI.getPointerTy(),
-                                  DAG.getConstant(1, TLI.getPointerTy()),
-                                  ShiftOp);
-
-  // Emit bit tests and jumps
-  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
-                              TLI.getPointerTy(), SwitchVal,
-                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
-  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
-                                TLI.getSetCCResultType(AndOp.getValueType()),
-                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
-                                ISD::SETNE);
+  SDValue Cmp;
+  if (CountPopulation_64(B.Mask) == 1) {
+    // Testing for a single bit; just compare the shift count with what it
+    // would need to be to shift a 1 bit in that position.
+    Cmp = DAG.getSetCC(getCurDebugLoc(),
+                       TLI.getSetCCResultType(ShiftOp.getValueType()),
+                       ShiftOp,
+                       DAG.getConstant(CountTrailingZeros_64(B.Mask),
+                                       TLI.getPointerTy()),
+                       ISD::SETEQ);
+  } else {
+    // Make desired shift
+    SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
+                                    TLI.getPointerTy(),
+                                    DAG.getConstant(1, TLI.getPointerTy()),
+                                    ShiftOp);
+
+    // Emit bit tests and jumps
+    SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
+                                TLI.getPointerTy(), SwitchVal,
+                                DAG.getConstant(B.Mask, TLI.getPointerTy()));
+    Cmp = DAG.getSetCC(getCurDebugLoc(),
+                       TLI.getSetCCResultType(AndOp.getValueType()),
+                       AndOp, DAG.getConstant(0, TLI.getPointerTy()),
+                       ISD::SETNE);
+  }
 
   SwitchBB->addSuccessor(B.TargetBB);
   SwitchBB->addSuccessor(NextMBB);
 
   SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                               MVT::Other, getControlRoot(),
-                              AndCmp, DAG.getBasicBlock(B.TargetBB));
+                              Cmp, DAG.getBasicBlock(B.TargetBB));
 
   // Set NextBlock to be the MBB immediately after the current one, if any.
   // This is used to avoid emitting unnecessary branches to the next block.
@@ -2400,7 +2446,6 @@
   // What to do depends on the size of the integer and the size of the pointer.
   // We can either truncate, zero extend, or no-op, accordingly.
   SDValue N = getValue(I.getOperand(0));
-  EVT SrcVT = N.getValueType();
   EVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
 }
@@ -2409,7 +2454,6 @@
   // What to do depends on the size of the integer and the size of the pointer.
   // We can either truncate, zero extend, or no-op, accordingly.
   SDValue N = getValue(I.getOperand(0));
-  EVT SrcVT = N.getValueType();
   EVT DestVT = TLI.getValueType(I.getType());
   setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
 }
@@ -2960,8 +3004,8 @@
     Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
 
   // Add all operands of the call to the operand list.
-  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
-    SDValue Op = getValue(I.getOperand(i));
+  for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
+    SDValue Op = getValue(I.getArgOperand(i));
     assert(TLI.isTypeLegal(Op.getValueType()) &&
            "Intrinsic uses a non-legal type?");
     Ops.push_back(Op);
@@ -3066,11 +3110,11 @@
   SDValue Root = getRoot();
   SDValue L =
     DAG.getAtomic(Op, getCurDebugLoc(),
-                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
+                  getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
                   Root,
-                  getValue(I.getOperand(1)),
-                  getValue(I.getOperand(2)),
-                  I.getOperand(1));
+                  getValue(I.getArgOperand(0)),
+                  getValue(I.getArgOperand(1)),
+                  I.getArgOperand(0));
   setValue(&I, L);
   DAG.setRoot(L.getValue(1));
   return 0;
@@ -3079,8 +3123,8 @@
 // implVisitAluOverflow - Lower arithmetic overflow instrinsics.
 const char *
 SelectionDAGBuilder::implVisitAluOverflow(const CallInst &I, ISD::NodeType Op) {
-  SDValue Op1 = getValue(I.getOperand(1));
-  SDValue Op2 = getValue(I.getOperand(2));
+  SDValue Op1 = getValue(I.getArgOperand(0));
+  SDValue Op2 = getValue(I.getArgOperand(1));
 
   SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
   setValue(&I, DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2));
@@ -3094,9 +3138,9 @@
   SDValue result;
   DebugLoc dl = getCurDebugLoc();
 
-  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+  if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    SDValue Op = getValue(I.getOperand(1));
+    SDValue Op = getValue(I.getArgOperand(0));
 
     // Put the exponent in the right bit position for later addition to the
     // final result:
@@ -3206,8 +3250,8 @@
   } else {
     // No special expansion.
     result = DAG.getNode(ISD::FEXP, dl,
-                         getValue(I.getOperand(1)).getValueType(),
-                         getValue(I.getOperand(1)));
+                         getValue(I.getArgOperand(0)).getValueType(),
+                         getValue(I.getArgOperand(0)));
   }
 
   setValue(&I, result);
@@ -3220,9 +3264,9 @@
   SDValue result;
   DebugLoc dl = getCurDebugLoc();
 
-  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+  if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    SDValue Op = getValue(I.getOperand(1));
+    SDValue Op = getValue(I.getArgOperand(0));
     SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
 
     // Scale the exponent by log(2) [0.69314718f].
@@ -3316,8 +3360,8 @@
   } else {
     // No special expansion.
     result = DAG.getNode(ISD::FLOG, dl,
-                         getValue(I.getOperand(1)).getValueType(),
-                         getValue(I.getOperand(1)));
+                         getValue(I.getArgOperand(0)).getValueType(),
+                         getValue(I.getArgOperand(0)));
   }
 
   setValue(&I, result);
@@ -3330,9 +3374,9 @@
   SDValue result;
   DebugLoc dl = getCurDebugLoc();
 
-  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+  if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    SDValue Op = getValue(I.getOperand(1));
+    SDValue Op = getValue(I.getArgOperand(0));
     SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
 
     // Get the exponent.
@@ -3425,8 +3469,8 @@
   } else {
     // No special expansion.
     result = DAG.getNode(ISD::FLOG2, dl,
-                         getValue(I.getOperand(1)).getValueType(),
-                         getValue(I.getOperand(1)));
+                         getValue(I.getArgOperand(0)).getValueType(),
+                         getValue(I.getArgOperand(0)));
   }
 
   setValue(&I, result);
@@ -3439,9 +3483,9 @@
   SDValue result;
   DebugLoc dl = getCurDebugLoc();
 
-  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+  if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    SDValue Op = getValue(I.getOperand(1));
+    SDValue Op = getValue(I.getArgOperand(0));
     SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
 
     // Scale the exponent by log10(2) [0.30102999f].
@@ -3527,8 +3571,8 @@
   } else {
     // No special expansion.
     result = DAG.getNode(ISD::FLOG10, dl,
-                         getValue(I.getOperand(1)).getValueType(),
-                         getValue(I.getOperand(1)));
+                         getValue(I.getArgOperand(0)).getValueType(),
+                         getValue(I.getArgOperand(0)));
   }
 
   setValue(&I, result);
@@ -3541,9 +3585,9 @@
   SDValue result;
   DebugLoc dl = getCurDebugLoc();
 
-  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
+  if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    SDValue Op = getValue(I.getOperand(1));
+    SDValue Op = getValue(I.getArgOperand(0));
 
     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
 
@@ -3641,8 +3685,8 @@
   } else {
     // No special expansion.
     result = DAG.getNode(ISD::FEXP2, dl,
-                         getValue(I.getOperand(1)).getValueType(),
-                         getValue(I.getOperand(1)));
+                         getValue(I.getArgOperand(0)).getValueType(),
+                         getValue(I.getArgOperand(0)));
   }
 
   setValue(&I, result);
@@ -3653,12 +3697,12 @@
 void
 SelectionDAGBuilder::visitPow(const CallInst &I) {
   SDValue result;
-  const Value *Val = I.getOperand(1);
+  const Value *Val = I.getArgOperand(0);
   DebugLoc dl = getCurDebugLoc();
   bool IsExp10 = false;
 
   if (getValue(Val).getValueType() == MVT::f32 &&
-      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
+      getValue(I.getArgOperand(1)).getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
     if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
       if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
@@ -3669,7 +3713,7 @@
   }
 
   if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    SDValue Op = getValue(I.getOperand(2));
+    SDValue Op = getValue(I.getArgOperand(1));
 
     // Put the exponent in the right bit position for later addition to the
     // final result:
@@ -3774,9 +3818,9 @@
   } else {
     // No special expansion.
     result = DAG.getNode(ISD::FPOW, dl,
-                         getValue(I.getOperand(1)).getValueType(),
-                         getValue(I.getOperand(1)),
-                         getValue(I.getOperand(2)));
+                         getValue(I.getArgOperand(0)).getValueType(),
+                         getValue(I.getArgOperand(0)),
+                         getValue(I.getArgOperand(1)));
   }
 
   setValue(&I, result);
@@ -3906,11 +3950,11 @@
   case Intrinsic::vacopy:   visitVACopy(I); return 0;
   case Intrinsic::returnaddress:
     setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::frameaddress:
     setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::setjmp:
     return "_setjmp"+!TLI.usesUnderscoreSetJmp();
@@ -3919,63 +3963,63 @@
   case Intrinsic::memcpy: {
     // Assert for address < 256 since we support only user defined address
     // spaces.
-    assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+    assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
            < 256 &&
-           cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
+           cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
            < 256 &&
            "Unknown address space");
-    SDValue Op1 = getValue(I.getOperand(1));
-    SDValue Op2 = getValue(I.getOperand(2));
-    SDValue Op3 = getValue(I.getOperand(3));
-    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
-    bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    SDValue Op3 = getValue(I.getArgOperand(2));
+    unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+    bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
     DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
-                              I.getOperand(1), 0, I.getOperand(2), 0));
+                              I.getArgOperand(0), 0, I.getArgOperand(1), 0));
     return 0;
   }
   case Intrinsic::memset: {
     // Assert for address < 256 since we support only user defined address
     // spaces.
-    assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+    assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
            < 256 &&
            "Unknown address space");
-    SDValue Op1 = getValue(I.getOperand(1));
-    SDValue Op2 = getValue(I.getOperand(2));
-    SDValue Op3 = getValue(I.getOperand(3));
-    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
-    bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    SDValue Op3 = getValue(I.getArgOperand(2));
+    unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+    bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
     DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
-                              I.getOperand(1), 0));
+                              I.getArgOperand(0), 0));
     return 0;
   }
   case Intrinsic::memmove: {
     // Assert for address < 256 since we support only user defined address
     // spaces.
-    assert(cast<PointerType>(I.getOperand(1)->getType())->getAddressSpace()
+    assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
            < 256 &&
-           cast<PointerType>(I.getOperand(2)->getType())->getAddressSpace()
+           cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
            < 256 &&
            "Unknown address space");
-    SDValue Op1 = getValue(I.getOperand(1));
-    SDValue Op2 = getValue(I.getOperand(2));
-    SDValue Op3 = getValue(I.getOperand(3));
-    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
-    bool isVol = cast<ConstantInt>(I.getOperand(5))->getZExtValue();
+    SDValue Op1 = getValue(I.getArgOperand(0));
+    SDValue Op2 = getValue(I.getArgOperand(1));
+    SDValue Op3 = getValue(I.getArgOperand(2));
+    unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+    bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
 
     // If the source and destination are known to not be aliases, we can
     // lower memmove as memcpy.
     uint64_t Size = -1ULL;
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
       Size = C->getZExtValue();
-    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
+    if (AA->alias(I.getArgOperand(0), Size, I.getArgOperand(1), Size) ==
         AliasAnalysis::NoAlias) {
       DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, 
-                                false, I.getOperand(1), 0, I.getOperand(2), 0));
+                                false, I.getArgOperand(0), 0, I.getArgOperand(1), 0));
       return 0;
     }
 
     DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
-                               I.getOperand(1), 0, I.getOperand(2), 0));
+                               I.getArgOperand(0), 0, I.getArgOperand(1), 0));
     return 0;
   }
   case Intrinsic::dbg_declare: {
@@ -4142,7 +4186,7 @@
     // Insert the EHSELECTION instruction.
     SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
     SDValue Ops[2];
-    Ops[0] = getValue(I.getOperand(1));
+    Ops[0] = getValue(I.getArgOperand(0));
     Ops[1] = getRoot();
     SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
     DAG.setRoot(Op.getValue(1));
@@ -4152,7 +4196,7 @@
 
   case Intrinsic::eh_typeid_for: {
     // Find the type id for the given typeinfo.
-    GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
+    GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
     unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
     Res = DAG.getConstant(TypeID, MVT::i32);
     setValue(&I, Res);
@@ -4165,15 +4209,14 @@
     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
                             MVT::Other,
                             getControlRoot(),
-                            getValue(I.getOperand(1)),
-                            getValue(I.getOperand(2))));
+                            getValue(I.getArgOperand(0)),
+                            getValue(I.getArgOperand(1))));
     return 0;
   case Intrinsic::eh_unwind_init:
     DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
     return 0;
   case Intrinsic::eh_dwarf_cfa: {
-    EVT VT = getValue(I.getOperand(1)).getValueType();
-    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
+    SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), dl,
                                         TLI.getPointerTy());
     SDValue Offset = DAG.getNode(ISD::ADD, dl,
                                  TLI.getPointerTy(),
@@ -4189,7 +4232,7 @@
   }
   case Intrinsic::eh_sjlj_callsite: {
     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
-    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
+    ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
 
@@ -4198,13 +4241,13 @@
   }
   case Intrinsic::eh_sjlj_setjmp: {
     setValue(&I, DAG.getNode(ISD::EH_SJLJ_SETJMP, dl, MVT::i32, getRoot(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0))));
     return 0;
   }
   case Intrinsic::eh_sjlj_longjmp: {
     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, dl, MVT::Other,
                             getRoot(),
-                            getValue(I.getOperand(1))));
+                            getValue(I.getArgOperand(0))));
     return 0;
   }
 
@@ -4230,34 +4273,34 @@
     case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
     }
     EVT DestVT = TLI.getValueType(I.getType());
-    const Value *Op1 = I.getOperand(1);
+    const Value *Op1 = I.getArgOperand(0);
     Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
                                DAG.getValueType(DestVT),
                                DAG.getValueType(getValue(Op1).getValueType()),
-                               getValue(I.getOperand(2)),
-                               getValue(I.getOperand(3)),
+                               getValue(I.getArgOperand(1)),
+                               getValue(I.getArgOperand(2)),
                                Code);
     setValue(&I, Res);
     return 0;
   }
   case Intrinsic::sqrt:
     setValue(&I, DAG.getNode(ISD::FSQRT, dl,
-                             getValue(I.getOperand(1)).getValueType(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0)).getValueType(),
+                             getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::powi:
-    setValue(&I, ExpandPowI(dl, getValue(I.getOperand(1)),
-                            getValue(I.getOperand(2)), DAG));
+    setValue(&I, ExpandPowI(dl, getValue(I.getArgOperand(0)),
+                            getValue(I.getArgOperand(1)), DAG));
     return 0;
   case Intrinsic::sin:
     setValue(&I, DAG.getNode(ISD::FSIN, dl,
-                             getValue(I.getOperand(1)).getValueType(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0)).getValueType(),
+                             getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::cos:
     setValue(&I, DAG.getNode(ISD::FCOS, dl,
-                             getValue(I.getOperand(1)).getValueType(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0)).getValueType(),
+                             getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::log:
     visitLog(I);
@@ -4279,14 +4322,14 @@
     return 0;
   case Intrinsic::convert_to_fp16:
     setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, dl,
-                             MVT::i16, getValue(I.getOperand(1))));
+                             MVT::i16, getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::convert_from_fp16:
     setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, dl,
-                             MVT::f32, getValue(I.getOperand(1))));
+                             MVT::f32, getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::pcmarker: {
-    SDValue Tmp = getValue(I.getOperand(1));
+    SDValue Tmp = getValue(I.getArgOperand(0));
     DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
     return 0;
   }
@@ -4301,23 +4344,23 @@
   }
   case Intrinsic::bswap:
     setValue(&I, DAG.getNode(ISD::BSWAP, dl,
-                             getValue(I.getOperand(1)).getValueType(),
-                             getValue(I.getOperand(1))));
+                             getValue(I.getArgOperand(0)).getValueType(),
+                             getValue(I.getArgOperand(0))));
     return 0;
   case Intrinsic::cttz: {
-    SDValue Arg = getValue(I.getOperand(1));
+    SDValue Arg = getValue(I.getArgOperand(0));
     EVT Ty = Arg.getValueType();
     setValue(&I, DAG.getNode(ISD::CTTZ, dl, Ty, Arg));
     return 0;
   }
   case Intrinsic::ctlz: {
-    SDValue Arg = getValue(I.getOperand(1));
+    SDValue Arg = getValue(I.getArgOperand(0));
     EVT Ty = Arg.getValueType();
     setValue(&I, DAG.getNode(ISD::CTLZ, dl, Ty, Arg));
     return 0;
   }
   case Intrinsic::ctpop: {
-    SDValue Arg = getValue(I.getOperand(1));
+    SDValue Arg = getValue(I.getArgOperand(0));
     EVT Ty = Arg.getValueType();
     setValue(&I, DAG.getNode(ISD::CTPOP, dl, Ty, Arg));
     return 0;
@@ -4331,7 +4374,7 @@
     return 0;
   }
   case Intrinsic::stackrestore: {
-    Res = getValue(I.getOperand(1));
+    Res = getValue(I.getArgOperand(0));
     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res));
     return 0;
   }
@@ -4341,8 +4384,8 @@
     MachineFrameInfo *MFI = MF.getFrameInfo();
     EVT PtrTy = TLI.getPointerTy();
 
-    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
-    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
+    SDValue Src = getValue(I.getArgOperand(0));   // The guard's value.
+    AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
 
     int FI = FuncInfo.StaticAllocaMap[Slot];
     MFI->setStackProtectorIndex(FI);
@@ -4359,11 +4402,11 @@
   }
   case Intrinsic::objectsize: {
     // If we don't know by now, we're never going to know.
-    ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
+    ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
 
     assert(CI && "Non-constant type in __builtin_object_size?");
 
-    SDValue Arg = getValue(I.getOperand(0));
+    SDValue Arg = getValue(I.getCalledValue());
     EVT Ty = Arg.getValueType();
 
     if (CI->isZero())
@@ -4379,14 +4422,14 @@
     return 0;
 
   case Intrinsic::init_trampoline: {
-    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
+    const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
 
     SDValue Ops[6];
     Ops[0] = getRoot();
-    Ops[1] = getValue(I.getOperand(1));
-    Ops[2] = getValue(I.getOperand(2));
-    Ops[3] = getValue(I.getOperand(3));
-    Ops[4] = DAG.getSrcValue(I.getOperand(1));
+    Ops[1] = getValue(I.getArgOperand(0));
+    Ops[2] = getValue(I.getArgOperand(1));
+    Ops[3] = getValue(I.getArgOperand(2));
+    Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
     Ops[5] = DAG.getSrcValue(F);
 
     Res = DAG.getNode(ISD::TRAMPOLINE, dl,
@@ -4399,8 +4442,8 @@
   }
   case Intrinsic::gcroot:
     if (GFI) {
-      const Value *Alloca = I.getOperand(1);
-      const Constant *TypeMap = cast<Constant>(I.getOperand(2));
+      const Value *Alloca = I.getArgOperand(0);
+      const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
 
       FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
       GFI->addStackRoot(FI->getIndex(), TypeMap);
@@ -4432,9 +4475,9 @@
   case Intrinsic::prefetch: {
     SDValue Ops[4];
     Ops[0] = getRoot();
-    Ops[1] = getValue(I.getOperand(1));
-    Ops[2] = getValue(I.getOperand(2));
-    Ops[3] = getValue(I.getOperand(3));
+    Ops[1] = getValue(I.getArgOperand(0));
+    Ops[2] = getValue(I.getArgOperand(1));
+    Ops[3] = getValue(I.getArgOperand(2));
     DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
     return 0;
   }
@@ -4443,7 +4486,7 @@
     SDValue Ops[6];
     Ops[0] = getRoot();
     for (int x = 1; x < 6; ++x)
-      Ops[x] = getValue(I.getOperand(x));
+      Ops[x] = getValue(I.getArgOperand(x - 1));
 
     DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
     return 0;
@@ -4452,12 +4495,12 @@
     SDValue Root = getRoot();
     SDValue L =
       DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
-                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
+                    getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
                     Root,
-                    getValue(I.getOperand(1)),
-                    getValue(I.getOperand(2)),
-                    getValue(I.getOperand(3)),
-                    I.getOperand(1));
+                    getValue(I.getArgOperand(0)),
+                    getValue(I.getArgOperand(1)),
+                    getValue(I.getArgOperand(2)),
+                    I.getArgOperand(0));
     setValue(&I, L);
     DAG.setRoot(L.getValue(1));
     return 0;
@@ -4738,16 +4781,16 @@
 /// lowered like a normal call.
 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
   // Verify that the prototype makes sense.  int memcmp(void*,void*,size_t)
-  if (I.getNumOperands() != 4)
+  if (I.getNumArgOperands() != 3)
     return false;
 
-  const Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
+  const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
   if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
-      !I.getOperand(3)->getType()->isIntegerTy() ||
+      !I.getArgOperand(2)->getType()->isIntegerTy() ||
       !I.getType()->isIntegerTy())
     return false;
 
-  const ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
+  const ConstantInt *Size = dyn_cast<ConstantInt>(I.getArgOperand(2));
 
   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
@@ -4837,51 +4880,51 @@
     if (!F->hasLocalLinkage() && F->hasName()) {
       StringRef Name = F->getName();
       if (Name == "copysign" || Name == "copysignf" || Name == "copysignl") {
-        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
-            I.getOperand(1)->getType()->isFloatingPointTy() &&
-            I.getType() == I.getOperand(1)->getType() &&
-            I.getType() == I.getOperand(2)->getType()) {
-          SDValue LHS = getValue(I.getOperand(1));
-          SDValue RHS = getValue(I.getOperand(2));
+        if (I.getNumArgOperands() == 2 &&   // Basic sanity checks.
+            I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+            I.getType() == I.getArgOperand(0)->getType() &&
+            I.getType() == I.getArgOperand(1)->getType()) {
+          SDValue LHS = getValue(I.getArgOperand(0));
+          SDValue RHS = getValue(I.getArgOperand(1));
           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
                                    LHS.getValueType(), LHS, RHS));
           return;
         }
       } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
-        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
-            I.getOperand(1)->getType()->isFloatingPointTy() &&
-            I.getType() == I.getOperand(1)->getType()) {
-          SDValue Tmp = getValue(I.getOperand(1));
+        if (I.getNumArgOperands() == 1 &&   // Basic sanity checks.
+            I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+            I.getType() == I.getArgOperand(0)->getType()) {
+          SDValue Tmp = getValue(I.getArgOperand(0));
           setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
                                    Tmp.getValueType(), Tmp));
           return;
         }
       } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
-        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
-            I.getOperand(1)->getType()->isFloatingPointTy() &&
-            I.getType() == I.getOperand(1)->getType() &&
+        if (I.getNumArgOperands() == 1 &&   // Basic sanity checks.
+            I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+            I.getType() == I.getArgOperand(0)->getType() &&
             I.onlyReadsMemory()) {
-          SDValue Tmp = getValue(I.getOperand(1));
+          SDValue Tmp = getValue(I.getArgOperand(0));
           setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
                                    Tmp.getValueType(), Tmp));
           return;
         }
       } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
-        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
-            I.getOperand(1)->getType()->isFloatingPointTy() &&
-            I.getType() == I.getOperand(1)->getType() &&
+        if (I.getNumArgOperands() == 1 &&   // Basic sanity checks.
+            I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+            I.getType() == I.getArgOperand(0)->getType() &&
             I.onlyReadsMemory()) {
-          SDValue Tmp = getValue(I.getOperand(1));
+          SDValue Tmp = getValue(I.getArgOperand(0));
           setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
                                    Tmp.getValueType(), Tmp));
           return;
         }
       } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
-        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
-            I.getOperand(1)->getType()->isFloatingPointTy() &&
-            I.getType() == I.getOperand(1)->getType() &&
+        if (I.getNumArgOperands() == 1 &&   // Basic sanity checks.
+            I.getArgOperand(0)->getType()->isFloatingPointTy() &&
+            I.getType() == I.getArgOperand(0)->getType() &&
             I.onlyReadsMemory()) {
-          SDValue Tmp = getValue(I.getOperand(1));
+          SDValue Tmp = getValue(I.getArgOperand(0));
           setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
                                    Tmp.getValueType(), Tmp));
           return;
@@ -4891,14 +4934,14 @@
           return;
       }
     }
-  } else if (isa<InlineAsm>(I.getOperand(0))) {
+  } else if (isa<InlineAsm>(I.getCalledValue())) {
     visitInlineAsm(&I);
     return;
   }
 
   SDValue Callee;
   if (!RenameFn)
-    Callee = getValue(I.getOperand(0));
+    Callee = getValue(I.getCalledValue());
   else
     Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
 
@@ -5336,7 +5379,7 @@
     }
 
     // Compute the constraint code and ConstraintType to use.
-    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
+    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
 
     // If this is a memory input, and if the operand is not indirect, do what we
     // need to to provide an address for the memory input.
@@ -5538,7 +5581,7 @@
 
         std::vector<SDValue> Ops;
         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
-                                         hasMemory, Ops, DAG);
+                                         Ops, DAG);
         if (Ops.empty())
           report_fatal_error("Invalid operand for inline asm constraint '" +
                              Twine(OpInfo.ConstraintCode) + "'!");
@@ -5675,8 +5718,8 @@
 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
                           MVT::Other, getRoot(),
-                          getValue(I.getOperand(1)),
-                          DAG.getSrcValue(I.getOperand(1))));
+                          getValue(I.getArgOperand(0)),
+                          DAG.getSrcValue(I.getArgOperand(0))));
 }
 
 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
@@ -5690,17 +5733,17 @@
 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
                           MVT::Other, getRoot(),
-                          getValue(I.getOperand(1)),
-                          DAG.getSrcValue(I.getOperand(1))));
+                          getValue(I.getArgOperand(0)),
+                          DAG.getSrcValue(I.getArgOperand(0))));
 }
 
 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
                           MVT::Other, getRoot(),
-                          getValue(I.getOperand(1)),
-                          getValue(I.getOperand(2)),
-                          DAG.getSrcValue(I.getOperand(1)),
-                          DAG.getSrcValue(I.getOperand(2))));
+                          getValue(I.getArgOperand(0)),
+                          getValue(I.getArgOperand(1)),
+                          DAG.getSrcValue(I.getArgOperand(0)),
+                          DAG.getSrcValue(I.getArgOperand(1))));
 }
 
 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
@@ -5879,7 +5922,7 @@
 
 void
 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
-  SDValue Op = getValue(V);
+  SDValue Op = getNonRegisterValue(V);
   assert((Op.getOpcode() != ISD::CopyFromReg ||
           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
          "Copy from a reg to the same reg!");
@@ -5897,7 +5940,6 @@
   // If this is the entry block, emit arguments.
   const Function &F = *LLVMBB->getParent();
   SelectionDAG &DAG = SDB->DAG;
-  SDValue OldRoot = DAG.getRoot();
   DebugLoc dl = SDB->getCurDebugLoc();
   const TargetData *TD = TLI.getTargetData();
   SmallVector<ISD::InputArg, 16> Ins;
@@ -6121,17 +6163,20 @@
       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
         unsigned &RegOut = ConstantsOut[C];
         if (RegOut == 0) {
-          RegOut = FuncInfo.CreateRegForValue(C);
+          RegOut = FuncInfo.CreateRegs(C->getType());
           CopyValueToVirtualRegister(C, RegOut);
         }
         Reg = RegOut;
       } else {
-        Reg = FuncInfo.ValueMap[PHIOp];
-        if (Reg == 0) {
+        DenseMap<const Value *, unsigned>::iterator I =
+          FuncInfo.ValueMap.find(PHIOp);
+        if (I != FuncInfo.ValueMap.end())
+          Reg = I->second;
+        else {
           assert(isa<AllocaInst>(PHIOp) &&
                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                  "Didn't codegen value into a register!??");
-          Reg = FuncInfo.CreateRegForValue(PHIOp);
+          Reg = FuncInfo.CreateRegs(PHIOp->getType());
           CopyValueToVirtualRegister(PHIOp, Reg);
         }
       }

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h Fri Jul  2 04:57:13 2010
@@ -346,6 +346,8 @@
   void visit(unsigned Opcode, const User &I);
 
   SDValue getValue(const Value *V);
+  SDValue getNonRegisterValue(const Value *V);
+  SDValue getValueImpl(const Value *V);
 
   void setValue(const Value *V, SDValue NewN) {
     SDValue &N = NodeMap[V];

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Fri Jul  2 04:57:13 2010
@@ -709,6 +709,14 @@
       FastIS->startNewBlock(BB);
       // Do FastISel on as many instructions as possible.
       for (; BI != End; ++BI) {
+#if 0
+        // Defer instructions with no side effects; they'll be emitted
+        // on-demand later.
+        if (BI->isSafeToSpeculativelyExecute() &&
+            !FuncInfo->isExportedInst(BI))
+          continue;
+#endif
+
         // Try to select the instruction with FastISel.
         if (FastIS->SelectInstruction(BI))
           continue;
@@ -724,7 +732,7 @@
           if (!BI->getType()->isVoidTy() && !BI->use_empty()) {
             unsigned &R = FuncInfo->ValueMap[BI];
             if (!R)
-              R = FuncInfo->CreateRegForValue(BI);
+              R = FuncInfo->CreateRegs(BI->getType());
           }
 
           bool HadTailCall = false;

Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp Fri Jul  2 04:57:13 2010
@@ -261,6 +261,38 @@
   Names[RTLIB::MEMMOVE] = "memmove";
   Names[RTLIB::MEMSET] = "memset";
   Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
+  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
+  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
+  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
+  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
+  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
+  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
+  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
+  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
+  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
+  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
+  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
+  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
+  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
+  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
+  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
+  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
+  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
+  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
+  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
+  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
+  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
+  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
+  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
+  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
+  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
+  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
+  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
+  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
+  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
+  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
+  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
+  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
 }
 
 /// InitLibcallCallingConvs - Set default libcall CallingConvs.
@@ -546,9 +578,8 @@
   SchedPreferenceInfo = Sched::Latency;
   JumpBufSize = 0;
   JumpBufAlignment = 0;
-  IfCvtBlockSizeLimit = 2;
-  IfCvtDupBlockSizeLimit = 0;
   PrefLoopAlignment = 0;
+  ShouldFoldAtomicFences = false;
 
   InitLibcallNames(LibcallRoutineNames);
   InitCmpLibcallCCs(CmpLibcallCCs);
@@ -2350,7 +2381,6 @@
 /// vector.  If it is invalid, don't add anything to Ops.
 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                   char ConstraintLetter,
-                                                  bool hasMemory,
                                                   std::vector<SDValue> &Ops,
                                                   SelectionDAG &DAG) const {
   switch (ConstraintLetter) {
@@ -2511,18 +2541,18 @@
 ///     'm' over 'r', for example.
 ///
 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
-                             bool hasMemory,  const TargetLowering &TLI,
+                             const TargetLowering &TLI,
                              SDValue Op, SelectionDAG *DAG) {
   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
   unsigned BestIdx = 0;
   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
   int BestGenerality = -1;
-  
+
   // Loop over the options, keeping track of the most general one.
   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
     TargetLowering::ConstraintType CType =
       TLI.getConstraintType(OpInfo.Codes[i]);
-    
+
     // If this is an 'other' constraint, see if the operand is valid for it.
     // For example, on X86 we might have an 'rI' constraint.  If the operand
     // is an integer in the range [0..31] we want to use I (saving a load
@@ -2531,7 +2561,7 @@
       assert(OpInfo.Codes[i].size() == 1 &&
              "Unhandled multi-letter 'other' constraint");
       std::vector<SDValue> ResultOps;
-      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory,
+      TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0],
                                        ResultOps, *DAG);
       if (!ResultOps.empty()) {
         BestType = CType;
@@ -2540,6 +2570,11 @@
       }
     }
     
+    // Things with matching constraints can only be registers, per gcc
+    // documentation.  This mainly affects "g" constraints.
+    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
+      continue;
+    
     // This constraint letter is more general than the previous one, use it.
     int Generality = getConstraintGenerality(CType);
     if (Generality > BestGenerality) {
@@ -2558,7 +2593,6 @@
 /// OpInfo.ConstraintCode and OpInfo.ConstraintType.
 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                             SDValue Op, 
-                                            bool hasMemory,
                                             SelectionDAG *DAG) const {
   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
   
@@ -2567,7 +2601,7 @@
     OpInfo.ConstraintCode = OpInfo.Codes[0];
     OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
   } else {
-    ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG);
+    ChooseConstraint(OpInfo, *this, Op, DAG);
   }
   
   // 'X' matches anything.

Modified: llvm/branches/wendling/eh/lib/CodeGen/ShadowStackGC.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ShadowStackGC.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ShadowStackGC.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ShadowStackGC.cpp Fri Jul  2 04:57:13 2010
@@ -31,6 +31,7 @@
 #include "llvm/CodeGen/GCStrategy.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Module.h"
+#include "llvm/Support/CallSite.h"
 #include "llvm/Support/IRBuilder.h"
 
 using namespace llvm;
@@ -158,7 +159,8 @@
 
           // Create a new invoke instruction.
           Args.clear();
-          Args.append(CI->op_begin() + 1, CI->op_end());
+          CallSite CS(CI);
+          Args.append(CS.arg_begin(), CS.arg_end());
 
           InvokeInst *II = InvokeInst::Create(CI->getCalledValue(),
                                               NewBB, CleanupBB,
@@ -195,7 +197,7 @@
   unsigned NumMeta = 0;
   SmallVector<Constant*,16> Metadata;
   for (unsigned I = 0; I != Roots.size(); ++I) {
-    Constant *C = cast<Constant>(Roots[I].first->getOperand(2));
+    Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
     if (!C->isNullValue())
       NumMeta = I + 1;
     Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
@@ -323,16 +325,16 @@
 
   assert(Roots.empty() && "Not cleaned up?");
 
-  SmallVector<std::pair<CallInst*,AllocaInst*>,16> MetaRoots;
+  SmallVector<std::pair<CallInst*, AllocaInst*>, 16> MetaRoots;
 
   for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
     for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
       if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
         if (Function *F = CI->getCalledFunction())
           if (F->getIntrinsicID() == Intrinsic::gcroot) {
-            std::pair<CallInst*,AllocaInst*> Pair = std::make_pair(
-              CI, cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts()));
-            if (IsNullValue(CI->getOperand(2)))
+            std::pair<CallInst*, AllocaInst*> Pair = std::make_pair(
+              CI, cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
+            if (IsNullValue(CI->getArgOperand(1)))
               Roots.push_back(Pair);
             else
               MetaRoots.push_back(Pair);

Modified: llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp Fri Jul  2 04:57:13 2010
@@ -99,9 +99,12 @@
 ///
 /// This returns true if an interval was modified.
 ///
-bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
-                                                    LiveInterval &IntB,
+bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
                                                     MachineInstr *CopyMI) {
+  LiveInterval &IntA =
+    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+  LiveInterval &IntB =
+    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
   SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
 
   // BValNo is a value number in B that is defined by a copy from A.  'B3' in
@@ -119,7 +122,8 @@
   // AValNo is the value number in A that defines the copy, A3 in the example.
   SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
   LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
-  assert(ALR != IntA.end() && "Live range not found!");
+  // The live range might not exist after fun with physreg coalescing.
+  if (ALR == IntA.end()) return false;
   VNInfo *AValNo = ALR->valno;
   // If it's re-defined by an early clobber somewhere in the live range, then
   // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
@@ -145,26 +149,21 @@
 
   // If AValNo is defined as a copy from IntB, we can potentially process this.
   // Get the instruction that defines this value number.
-  unsigned SrcReg = li_->getVNInfoSourceReg(AValNo);
-  if (!SrcReg) return false;  // Not defined by a copy.
-
-  // If the value number is not defined by a copy instruction, ignore it.
-
-  // If the source register comes from an interval other than IntB, we can't
-  // handle this.
-  if (SrcReg != IntB.reg) return false;
+  if (!CP.isCoalescable(AValNo->getCopy()))
+    return false;
 
   // Get the LiveRange in IntB that this value number starts with.
   LiveInterval::iterator ValLR =
     IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
-  assert(ValLR != IntB.end() && "Live range not found!");
+  if (ValLR == IntB.end())
+    return false;
 
   // Make sure that the end of the live range is inside the same block as
   // CopyMI.
   MachineInstr *ValLREndInst =
     li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
-  if (!ValLREndInst ||
-      ValLREndInst->getParent() != CopyMI->getParent()) return false;
+  if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
+    return false;
 
   // Okay, we now know that ValLR ends in the same block that the CopyMI
   // live-range starts.  If there are no intervening live ranges between them in
@@ -216,7 +215,6 @@
 
   // Okay, merge "B1" into the same value number as "B0".
   if (BValNo != ValLR->valno) {
-    IntB.addKills(ValLR->valno, BValNo->kills);
     IntB.MergeValueNumberInto(BValNo, ValLR->valno);
   }
   DEBUG({
@@ -230,13 +228,12 @@
   int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
   if (UIdx != -1) {
     ValLREndInst->getOperand(UIdx).setIsKill(false);
-    ValLR->valno->removeKill(FillerStart);
   }
 
   // If the copy instruction was killing the destination register before the
   // merge, find the last use and trim the live range. That will also add the
   // isKill marker.
-  if (ALR->valno->isKill(CopyIdx))
+  if (ALR->end == CopyIdx)
     TrimLiveIntervalToLastUse(CopyUseIdx, CopyMI->getParent(), IntA, ALR);
 
   ++numExtends;
@@ -413,7 +410,6 @@
 
   bool BHasPHIKill = BValNo->hasPHIKill();
   SmallVector<VNInfo*, 4> BDeadValNos;
-  VNInfo::KillSet BKills;
   std::map<SlotIndex, SlotIndex> BExtend;
 
   // If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
@@ -424,8 +420,6 @@
   // C = A<kill>
   // ...
   //   = B
-  //
-  // then do not add kills of A to the newly created B interval.
   bool Extended = BLR->end > ALR->end && ALR->end != ALR->start;
   if (Extended)
     BExtend[ALR->end] = BLR->end;
@@ -454,8 +448,6 @@
     if (UseMO.isKill()) {
       if (Extended)
         UseMO.setIsKill(false);
-      else
-        BKills.push_back(UseIdx.getDefIndex());
     }
     unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
     if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
@@ -471,10 +463,6 @@
       BDeadValNos.push_back(DLR->valno);
       BExtend[DLR->start] = DLR->end;
       JoinedCopies.insert(UseMI);
-      // If this is a kill but it's going to be removed, the last use
-      // of the same val# is the new kill.
-      if (UseMO.isKill())
-        BKills.pop_back();
     }
   }
 
@@ -499,15 +487,10 @@
   }
 
   // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
-  // is updated. Kills are also updated.
+  // is updated.
   VNInfo *ValNo = BValNo;
   ValNo->def = AValNo->def;
   ValNo->setCopy(0);
-  for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
-    if (ValNo->kills[j] != BLR->end)
-      BKills.push_back(ValNo->kills[j]);
-  }
-  ValNo->kills.clear();
   for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
        AI != AE; ++AI) {
     if (AI->valno != AValNo) continue;
@@ -528,7 +511,6 @@
       }
     }
   }
-  IntB.addKills(ValNo, BKills);
   ValNo->setHasPHIKill(BHasPHIKill);
 
   DEBUG({
@@ -621,7 +603,6 @@
     // of last use.
     LastUse->setIsKill();
     removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
-    LR->valno->addKill(LastUseIdx.getDefIndex());
     unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
     if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
         DstReg == li.reg && DstSubIdx == 0) {
@@ -663,6 +644,7 @@
       ValNo->isUnused() || ValNo->hasPHIKill())
     return false;
   MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
+  assert(DefMI && "Defining instruction disappeared");
   const TargetInstrDesc &TID = DefMI->getDesc();
   if (!TID.isAsCheapAsAMove())
     return false;
@@ -701,26 +683,13 @@
       return false;
   }
 
-  SlotIndex DefIdx = CopyIdx.getDefIndex();
-  const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
-  DLR->valno->setCopy(0);
-  // Don't forget to update sub-register intervals.
-  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
-    for (const unsigned* SR = tri_->getSubRegisters(DstReg); *SR; ++SR) {
-      if (!li_->hasInterval(*SR))
-        continue;
-      const LiveRange *DLR =
-          li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
-      if (DLR && DLR->valno->getCopy() == CopyMI)
-        DLR->valno->setCopy(0);
-    }
-  }
+  RemoveCopyFlag(DstReg, CopyMI);
 
   // If copy kills the source register, find the last use and propagate
   // kill.
   bool checkForDeadDef = false;
   MachineBasicBlock *MBB = CopyMI->getParent();
-  if (SrcLR->valno->isKill(DefIdx))
+  if (SrcLR->end == CopyIdx.getDefIndex())
     if (!TrimLiveIntervalToLastUse(CopyIdx, MBB, SrcInt, SrcLR)) {
       checkForDeadDef = true;
     }
@@ -747,24 +716,8 @@
     MachineOperand &MO = CopyMI->getOperand(i);
     if (MO.isReg() && MO.isImplicit())
       NewMI->addOperand(MO);
-    if (MO.isDef() && li_->hasInterval(MO.getReg())) {
-      unsigned Reg = MO.getReg();
-      const LiveRange *DLR =
-          li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
-      if (DLR && DLR->valno->getCopy() == CopyMI)
-        DLR->valno->setCopy(0);
-      // Handle subregs as well
-      if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
-        for (const unsigned* SR = tri_->getSubRegisters(Reg); *SR; ++SR) {
-          if (!li_->hasInterval(*SR))
-            continue;
-          const LiveRange *DLR =
-              li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
-          if (DLR && DLR->valno->getCopy() == CopyMI)
-            DLR->valno->setCopy(0);
-        }
-      }
-    }
+    if (MO.isDef())
+      RemoveCopyFlag(MO.getReg(), CopyMI);
   }
 
   TransferImplicitOps(CopyMI, NewMI);
@@ -783,25 +736,14 @@
 /// being updated is not zero, make sure to set it to the correct physical
 /// subregister.
 void
-SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
-                                            unsigned SubIdx) {
-  bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
-  if (DstIsPhys && SubIdx) {
-    // Figure out the real physical register we are updating with.
-    DstReg = tri_->getSubReg(DstReg, SubIdx);
-    SubIdx = 0;
-  }
-
-  // Collect all the instructions using SrcReg.
-  SmallPtrSet<MachineInstr*, 32> Instrs;
-  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg),
-         E = mri_->reg_end(); I != E; ++I)
-    Instrs.insert(&*I);
-
-  for (SmallPtrSet<MachineInstr*, 32>::const_iterator I = Instrs.begin(),
-       E = Instrs.end(); I != E; ++I) {
-    MachineInstr *UseMI = *I;
+SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) {
+  bool DstIsPhys = CP.isPhys();
+  unsigned SrcReg = CP.getSrcReg();
+  unsigned DstReg = CP.getDstReg();
+  unsigned SubIdx = CP.getSubIdx();
 
+  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
+       MachineInstr *UseMI = I.skipInstruction();) {
     // A PhysReg copy that won't be coalesced can perhaps be rematerialized
     // instead.
     if (DstIsPhys) {
@@ -933,6 +875,27 @@
   return removeIntervalIfEmpty(li, li_, tri_);
 }
 
+void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg,
+                                              const MachineInstr *CopyMI) {
+  SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
+  if (li_->hasInterval(DstReg)) {
+    LiveInterval &LI = li_->getInterval(DstReg);
+    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+      if (LR->valno->getCopy() == CopyMI)
+        LR->valno->setCopy(0);
+  }
+  if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
+    return;
+  for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
+    if (!li_->hasInterval(*AS))
+      continue;
+    LiveInterval &LI = li_->getInterval(*AS);
+    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+      if (LR->valno->getCopy() == CopyMI)
+        LR->valno->setCopy(0);
+  }
+}
+
 /// PropagateDeadness - Propagate the dead marker to the instruction which
 /// defines the val#.
 static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
@@ -1005,147 +968,12 @@
     // val#, then propagate the dead marker.
     PropagateDeadness(li, CopyMI, RemoveStart, li_, tri_);
     ++numDeadValNo;
-
-    if (LR->valno->isKill(RemoveEnd))
-      LR->valno->removeKill(RemoveEnd);
   }
 
   removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
   return removeIntervalIfEmpty(li, li_, tri_);
 }
 
-/// CanCoalesceWithImpDef - Returns true if the specified copy instruction
-/// from an implicit def to another register can be coalesced away.
-bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI,
-                                                     LiveInterval &li,
-                                                     LiveInterval &ImpLi) const{
-  if (!CopyMI->killsRegister(ImpLi.reg))
-    return false;
-  // Make sure this is the only use.
-  for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(ImpLi.reg),
-         UE = mri_->use_end(); UI != UE;) {
-    MachineInstr *UseMI = &*UI;
-    ++UI;
-    if (CopyMI == UseMI || JoinedCopies.count(UseMI))
-      continue;
-    return false;
-  }
-  return true;
-}
-
-
-/// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
-/// a virtual destination register with physical source register.
-bool
-SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
-                                                     MachineBasicBlock *CopyMBB,
-                                                     LiveInterval &DstInt,
-                                                     LiveInterval &SrcInt) {
-  // If the virtual register live interval is long but it has low use desity,
-  // do not join them, instead mark the physical register as its allocation
-  // preference.
-  const TargetRegisterClass *RC = mri_->getRegClass(DstInt.reg);
-  unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
-  unsigned Length = li_->getApproximateInstructionCount(DstInt);
-  if (Length > Threshold &&
-      std::distance(mri_->use_nodbg_begin(DstInt.reg),
-                    mri_->use_nodbg_end()) * Threshold < Length)
-    return false;
-
-  // If the virtual register live interval extends into a loop, turn down
-  // aggressiveness.
-  SlotIndex CopyIdx =
-    li_->getInstructionIndex(CopyMI).getDefIndex();
-  const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
-  if (!L) {
-    // Let's see if the virtual register live interval extends into the loop.
-    LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
-    assert(DLR != DstInt.end() && "Live range not found!");
-    DLR = DstInt.FindLiveRangeContaining(DLR->end.getNextSlot());
-    if (DLR != DstInt.end()) {
-      CopyMBB = li_->getMBBFromIndex(DLR->start);
-      L = loopInfo->getLoopFor(CopyMBB);
-    }
-  }
-
-  if (!L || Length <= Threshold)
-    return true;
-
-  SlotIndex UseIdx = CopyIdx.getUseIndex();
-  LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
-  MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
-  if (loopInfo->getLoopFor(SMBB) != L) {
-    if (!loopInfo->isLoopHeader(CopyMBB))
-      return false;
-    // If vr's live interval extends pass the loop header, do not join.
-    for (MachineBasicBlock::succ_iterator SI = CopyMBB->succ_begin(),
-           SE = CopyMBB->succ_end(); SI != SE; ++SI) {
-      MachineBasicBlock *SuccMBB = *SI;
-      if (SuccMBB == CopyMBB)
-        continue;
-      if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
-                          li_->getMBBEndIdx(SuccMBB)))
-        return false;
-    }
-  }
-  return true;
-}
-
-/// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
-/// copy from a virtual source register to a physical destination register.
-bool
-SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
-                                                     MachineBasicBlock *CopyMBB,
-                                                     LiveInterval &DstInt,
-                                                     LiveInterval &SrcInt) {
-  // If the virtual register live interval is long but it has low use density,
-  // do not join them, instead mark the physical register as its allocation
-  // preference.
-  const TargetRegisterClass *RC = mri_->getRegClass(SrcInt.reg);
-  unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
-  unsigned Length = li_->getApproximateInstructionCount(SrcInt);
-  if (Length > Threshold &&
-      std::distance(mri_->use_nodbg_begin(SrcInt.reg),
-                    mri_->use_nodbg_end()) * Threshold < Length)
-    return false;
-
-  if (SrcInt.empty())
-    // Must be implicit_def.
-    return false;
-
-  // If the virtual register live interval is defined or cross a loop, turn
-  // down aggressiveness.
-  SlotIndex CopyIdx =
-    li_->getInstructionIndex(CopyMI).getDefIndex();
-  SlotIndex UseIdx = CopyIdx.getUseIndex();
-  LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
-  assert(SLR != SrcInt.end() && "Live range not found!");
-  SLR = SrcInt.FindLiveRangeContaining(SLR->start.getPrevSlot());
-  if (SLR == SrcInt.end())
-    return true;
-  MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
-  const MachineLoop *L = loopInfo->getLoopFor(SMBB);
-
-  if (!L || Length <= Threshold)
-    return true;
-
-  if (loopInfo->getLoopFor(CopyMBB) != L) {
-    if (SMBB != L->getLoopLatch())
-      return false;
-    // If vr's live interval is extended from before the loop latch, do not
-    // join.
-    for (MachineBasicBlock::pred_iterator PI = SMBB->pred_begin(),
-           PE = SMBB->pred_end(); PI != PE; ++PI) {
-      MachineBasicBlock *PredMBB = *PI;
-      if (PredMBB == SMBB)
-        continue;
-      if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
-                          li_->getMBBEndIdx(PredMBB)))
-        return false;
-    }
-  }
-  return true;
-}
 
 /// isWinToJoinCrossClass - Return true if it's profitable to coalesce
 /// two virtual registers from different register classes.
@@ -1191,157 +1019,6 @@
   return true;
 }
 
-/// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
-/// register with a physical register, check if any of the virtual register
-/// operand is a sub-register use or def. If so, make sure it won't result
-/// in an illegal extract_subreg or insert_subreg instruction. e.g.
-/// vr1024 = extract_subreg vr1025, 1
-/// ...
-/// vr1024 = mov8rr AH
-/// If vr1024 is coalesced with AH, the extract_subreg is now illegal since
-/// AH does not have a super-reg whose sub-register 1 is AH.
-bool
-SimpleRegisterCoalescing::HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
-                                                      unsigned VirtReg,
-                                                      unsigned PhysReg) {
-  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(VirtReg),
-         E = mri_->reg_end(); I != E; ++I) {
-    MachineOperand &O = I.getOperand();
-    if (O.isDebug())
-      continue;
-    MachineInstr *MI = &*I;
-    if (MI == CopyMI || JoinedCopies.count(MI))
-      continue;
-    unsigned SubIdx = O.getSubReg();
-    if (SubIdx && !tri_->getSubReg(PhysReg, SubIdx))
-      return true;
-    if (MI->isExtractSubreg()) {
-      SubIdx = MI->getOperand(2).getImm();
-      if (O.isUse() && !tri_->getSubReg(PhysReg, SubIdx))
-        return true;
-      if (O.isDef()) {
-        unsigned SrcReg = MI->getOperand(1).getReg();
-        const TargetRegisterClass *RC =
-          TargetRegisterInfo::isPhysicalRegister(SrcReg)
-          ? tri_->getPhysicalRegisterRegClass(SrcReg)
-          : mri_->getRegClass(SrcReg);
-        if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
-          return true;
-      }
-    }
-    if (MI->isInsertSubreg() || MI->isSubregToReg()) {
-      SubIdx = MI->getOperand(3).getImm();
-      if (VirtReg == MI->getOperand(0).getReg()) {
-        if (!tri_->getSubReg(PhysReg, SubIdx))
-          return true;
-      } else {
-        unsigned DstReg = MI->getOperand(0).getReg();
-        const TargetRegisterClass *RC =
-          TargetRegisterInfo::isPhysicalRegister(DstReg)
-          ? tri_->getPhysicalRegisterRegClass(DstReg)
-          : mri_->getRegClass(DstReg);
-        if (!tri_->getMatchingSuperReg(PhysReg, SubIdx, RC))
-          return true;
-      }
-    }
-  }
-  return false;
-}
-
-
-/// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
-/// an extract_subreg where dst is a physical register, e.g.
-/// cl = EXTRACT_SUBREG reg1024, 1
-bool
-SimpleRegisterCoalescing::CanJoinExtractSubRegToPhysReg(unsigned DstReg,
-                                               unsigned SrcReg, unsigned SubIdx,
-                                               unsigned &RealDstReg) {
-  const TargetRegisterClass *RC = mri_->getRegClass(SrcReg);
-  RealDstReg = tri_->getMatchingSuperReg(DstReg, SubIdx, RC);
-  if (!RealDstReg) {
-    DEBUG(dbgs() << "\tIncompatible source regclass: "
-                 << "none of the super-registers of " << tri_->getName(DstReg)
-                 << " are in " << RC->getName() << ".\n");
-    return false;
-  }
-
-  LiveInterval &RHS = li_->getInterval(SrcReg);
-  // For this type of EXTRACT_SUBREG, conservatively
-  // check if the live interval of the source register interfere with the
-  // actual super physical register we are trying to coalesce with.
-  if (li_->hasInterval(RealDstReg) &&
-      RHS.overlaps(li_->getInterval(RealDstReg))) {
-    DEBUG({
-        dbgs() << "\t\tInterfere with register ";
-        li_->getInterval(RealDstReg).print(dbgs(), tri_);
-      });
-    return false; // Not coalescable
-  }
-  for (const unsigned* SR = tri_->getSubRegisters(RealDstReg); *SR; ++SR)
-    // Do not check DstReg or its sub-register. JoinIntervals() will take care
-    // of that.
-    if (*SR != DstReg &&
-        !tri_->isSubRegister(DstReg, *SR) &&
-        li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
-      DEBUG({
-          dbgs() << "\t\tInterfere with sub-register ";
-          li_->getInterval(*SR).print(dbgs(), tri_);
-        });
-      return false; // Not coalescable
-    }
-  return true;
-}
-
-/// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
-/// an insert_subreg where src is a physical register, e.g.
-/// reg1024 = INSERT_SUBREG reg1024, c1, 0
-bool
-SimpleRegisterCoalescing::CanJoinInsertSubRegToPhysReg(unsigned DstReg,
-                                               unsigned SrcReg, unsigned SubIdx,
-                                               unsigned &RealSrcReg) {
-  const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
-  RealSrcReg = tri_->getMatchingSuperReg(SrcReg, SubIdx, RC);
-  if (!RealSrcReg) {
-    DEBUG(dbgs() << "\tIncompatible destination regclass: "
-                 << "none of the super-registers of " << tri_->getName(SrcReg)
-                 << " are in " << RC->getName() << ".\n");
-    return false;
-  }
-
-  LiveInterval &LHS = li_->getInterval(DstReg);
-  if (li_->hasInterval(RealSrcReg) &&
-      LHS.overlaps(li_->getInterval(RealSrcReg))) {
-    DEBUG({
-        dbgs() << "\t\tInterfere with register ";
-        li_->getInterval(RealSrcReg).print(dbgs(), tri_);
-      });
-    return false; // Not coalescable
-  }
-  for (const unsigned* SR = tri_->getSubRegisters(RealSrcReg); *SR; ++SR)
-    // Do not check SrcReg or its sub-register. JoinIntervals() will take care
-    // of that.
-    if (*SR != SrcReg &&
-        !tri_->isSubRegister(SrcReg, *SR) &&
-        li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
-      DEBUG({
-          dbgs() << "\t\tInterfere with sub-register ";
-          li_->getInterval(*SR).print(dbgs(), tri_);
-        });
-      return false; // Not coalescable
-    }
-  return true;
-}
-
-/// getRegAllocPreference - Return register allocation preference register.
-///
-static unsigned getRegAllocPreference(unsigned Reg, MachineFunction &MF,
-                                      MachineRegisterInfo *MRI,
-                                      const TargetRegisterInfo *TRI) {
-  if (TargetRegisterInfo::isPhysicalRegister(Reg))
-    return 0;
-  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
-  return TRI->ResolveRegAllocHint(Hint.first, Hint.second, MF);
-}
 
 /// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
 /// which are the src/dst of the copy instruction CopyMI.  This returns true
@@ -1357,393 +1034,131 @@
 
   DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
 
-  unsigned SrcReg, DstReg, SrcSubIdx = 0, DstSubIdx = 0;
-  bool isExtSubReg = CopyMI->isExtractSubreg();
-  bool isInsSubReg = CopyMI->isInsertSubreg();
-  bool isSubRegToReg = CopyMI->isSubregToReg();
-  unsigned SubIdx = 0;
-  if (isExtSubReg) {
-    DstReg    = CopyMI->getOperand(0).getReg();
-    DstSubIdx = CopyMI->getOperand(0).getSubReg();
-    SrcReg    = CopyMI->getOperand(1).getReg();
-    SrcSubIdx = CopyMI->getOperand(2).getImm();
-  } else if (isInsSubReg || isSubRegToReg) {
-    DstReg    = CopyMI->getOperand(0).getReg();
-    DstSubIdx = CopyMI->getOperand(3).getImm();
-    SrcReg    = CopyMI->getOperand(2).getReg();
-    SrcSubIdx = CopyMI->getOperand(2).getSubReg();
-    if (SrcSubIdx && SrcSubIdx != DstSubIdx) {
-      // r1025 = INSERT_SUBREG r1025, r1024<2>, 2 Then r1024 has already been
-      // coalesced to a larger register so the subreg indices cancel out.
-      DEBUG(dbgs() << "\tSource of insert_subreg or subreg_to_reg is already "
-                      "coalesced to another register.\n");
-      return false;  // Not coalescable.
-    }
-  } else if (tii_->isMoveInstr(*CopyMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
-    if (SrcSubIdx && DstSubIdx && SrcSubIdx != DstSubIdx) {
-      // e.g. %reg16404:1<def> = MOV8rr %reg16412:2<kill>
-      Again = true;
-      return false;  // Not coalescable.
-    }
-  } else {
-    llvm_unreachable("Unrecognized copy instruction!");
-  }
-
-  // If they are already joined we continue.
-  if (SrcReg == DstReg) {
-    DEBUG(dbgs() << "\tCopy already coalesced.\n");
-    return false;  // Not coalescable.
-  }
-
   CoalescerPair CP(*tii_, *tri_);
   if (!CP.setRegisters(CopyMI)) {
     DEBUG(dbgs() << "\tNot coalescable.\n");
     return false;
   }
 
-  bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
-  bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
-
-  // If they are both physical registers, we cannot join them.
-  if (SrcIsPhys && DstIsPhys) {
-    DEBUG(dbgs() << "\tCan not coalesce physregs.\n");
-    return false;  // Not coalescable.
-  }
-
-  // We only join virtual registers with allocatable physical registers.
-  if (SrcIsPhys && !allocatableRegs_[SrcReg]) {
-    DEBUG(dbgs() << "\tSrc reg is unallocatable physreg.\n");
-    return false;  // Not coalescable.
-  }
-  if (DstIsPhys && !allocatableRegs_[DstReg]) {
-    DEBUG(dbgs() << "\tDst reg is unallocatable physreg.\n");
+  // If they are already joined we continue.
+  if (CP.getSrcReg() == CP.getDstReg()) {
+    DEBUG(dbgs() << "\tCopy already coalesced.\n");
     return false;  // Not coalescable.
   }
 
-  // We cannot handle dual subreg indices and mismatched classes at the same
-  // time.
-  if (SrcSubIdx && DstSubIdx && differingRegisterClasses(SrcReg, DstReg)) {
-    DEBUG(dbgs() << "\tCannot handle subreg indices and mismatched classes.\n");
-    return false;
-  }
-
-  // Check that a physical source register is compatible with dst regclass
-  if (SrcIsPhys) {
-    unsigned SrcSubReg = SrcSubIdx ?
-      tri_->getSubReg(SrcReg, SrcSubIdx) : SrcReg;
-    const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
-    const TargetRegisterClass *DstSubRC = DstRC;
-    if (DstSubIdx)
-      DstSubRC = DstRC->getSubRegisterRegClass(DstSubIdx);
-    assert(DstSubRC && "Illegal subregister index");
-    if (!DstSubRC->contains(SrcSubReg)) {
-      DEBUG(dbgs() << "\tIncompatible destination regclass: "
-                   << "none of the super-registers of "
-                   << tri_->getName(SrcSubReg) << " are in "
-                   << DstSubRC->getName() << ".\n");
-      return false;             // Not coalescable.
-    }
-  }
-
-  // Check that a physical dst register is compatible with source regclass
-  if (DstIsPhys) {
-    unsigned DstSubReg = DstSubIdx ?
-      tri_->getSubReg(DstReg, DstSubIdx) : DstReg;
-    const TargetRegisterClass *SrcRC = mri_->getRegClass(SrcReg);
-    const TargetRegisterClass *SrcSubRC = SrcRC;
-    if (SrcSubIdx)
-      SrcSubRC = SrcRC->getSubRegisterRegClass(SrcSubIdx);
-    assert(SrcSubRC && "Illegal subregister index");
-    if (!SrcSubRC->contains(DstSubReg)) {
-      DEBUG(dbgs() << "\tIncompatible source regclass: "
-                   << "none of the super-registers of "
-                   << tri_->getName(DstSubReg) << " are in "
-                   << SrcSubRC->getName() << ".\n");
-      (void)DstSubReg;
-      return false;             // Not coalescable.
-    }
-  }
-
-  // Should be non-null only when coalescing to a sub-register class.
-  bool CrossRC = false;
-  const TargetRegisterClass *SrcRC= SrcIsPhys ? 0 : mri_->getRegClass(SrcReg);
-  const TargetRegisterClass *DstRC= DstIsPhys ? 0 : mri_->getRegClass(DstReg);
-  const TargetRegisterClass *NewRC = NULL;
-  unsigned RealDstReg = 0;
-  unsigned RealSrcReg = 0;
-  if (isExtSubReg || isInsSubReg || isSubRegToReg) {
-    SubIdx = CopyMI->getOperand(isExtSubReg ? 2 : 3).getImm();
-    if (SrcIsPhys && isExtSubReg) {
-      // r1024 = EXTRACT_SUBREG EAX, 0 then r1024 is really going to be
-      // coalesced with AX.
-      unsigned DstSubIdx = CopyMI->getOperand(0).getSubReg();
-      if (DstSubIdx) {
-        // r1024<2> = EXTRACT_SUBREG EAX, 2. Then r1024 has already been
-        // coalesced to a larger register so the subreg indices cancel out.
-        if (DstSubIdx != SubIdx) {
-          DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
-          return false; // Not coalescable.
-        }
-      } else
-        SrcReg = tri_->getSubReg(SrcReg, SubIdx);
-      SubIdx = 0;
-    } else if (DstIsPhys && (isInsSubReg || isSubRegToReg)) {
-      // EAX = INSERT_SUBREG EAX, r1024, 0
-      unsigned SrcSubIdx = CopyMI->getOperand(2).getSubReg();
-      if (SrcSubIdx) {
-        // EAX = INSERT_SUBREG EAX, r1024<2>, 2 Then r1024 has already been
-        // coalesced to a larger register so the subreg indices cancel out.
-        if (SrcSubIdx != SubIdx) {
-          DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
-          return false; // Not coalescable.
-        }
-      } else
-        DstReg = tri_->getSubReg(DstReg, SubIdx);
-      SubIdx = 0;
-    } else if ((DstIsPhys && isExtSubReg) ||
-               (SrcIsPhys && (isInsSubReg || isSubRegToReg))) {
-      if (!isSubRegToReg && CopyMI->getOperand(1).getSubReg()) {
-        DEBUG(dbgs() << "\tSrc of extract_subreg already coalesced with reg"
-                     << " of a super-class.\n");
-        return false; // Not coalescable.
-      }
+  DEBUG(dbgs() << "\tConsidering merging %reg" << CP.getSrcReg());
 
-      // FIXME: The following checks are somewhat conservative. Perhaps a better
-      // way to implement this is to treat this as coalescing a vr with the
-      // super physical register.
-      if (isExtSubReg) {
-        if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealDstReg))
-          return false; // Not coalescable
-      } else {
-        if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
-          return false; // Not coalescable
-      }
-      SubIdx = 0;
-    } else {
-      unsigned OldSubIdx = isExtSubReg ? CopyMI->getOperand(0).getSubReg()
-        : CopyMI->getOperand(2).getSubReg();
-      if (OldSubIdx) {
-        if (OldSubIdx == SubIdx && !differingRegisterClasses(SrcReg, DstReg))
-          // r1024<2> = EXTRACT_SUBREG r1025, 2. Then r1024 has already been
-          // coalesced to a larger register so the subreg indices cancel out.
-          // Also check if the other larger register is of the same register
-          // class as the would be resulting register.
-          SubIdx = 0;
-        else {
-          DEBUG(dbgs() << "\t Sub-register indices mismatch.\n");
-          return false; // Not coalescable.
-        }
-      }
-      if (SubIdx) {
-        if (!DstIsPhys && !SrcIsPhys) {
-          if (isInsSubReg || isSubRegToReg) {
-            NewRC = tri_->getMatchingSuperRegClass(DstRC, SrcRC, SubIdx);
-          } else // extract_subreg {
-            NewRC = tri_->getMatchingSuperRegClass(SrcRC, DstRC, SubIdx);
-          }
-        if (!NewRC) {
-          DEBUG(dbgs() << "\t Conflicting sub-register indices.\n");
-          return false;  // Not coalescable
-        }
-
-        if (!isWinToJoinCrossClass(SrcReg, DstReg, SrcRC, DstRC, NewRC)) {
-          DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: "
-                       << SrcRC->getName() << "/"
-                       << DstRC->getName() << " -> "
-                       << NewRC->getName() << ".\n");
-          Again = true;  // May be possible to coalesce later.
-          return false;
-        }
-      }
+  // Enforce policies.
+  if (CP.isPhys()) {
+    DEBUG(dbgs() <<" with physreg %" << tri_->getName(CP.getDstReg()) << "\n");
+    // Only coalesce to allocatable physreg.
+    if (!allocatableRegs_[CP.getDstReg()]) {
+      DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
+      return false;  // Not coalescable.
     }
-  } else if (differingRegisterClasses(SrcReg, DstReg)) {
-    if (DisableCrossClassJoin)
-      return false;
-    CrossRC = true;
+  } else {
+    DEBUG({
+      dbgs() << " with reg%" << CP.getDstReg();
+      if (CP.getSubIdx())
+        dbgs() << ":" << tri_->getSubRegIndexName(CP.getSubIdx());
+      dbgs() << " to " << CP.getNewRC()->getName() << "\n";
+    });
 
-    // FIXME: What if the result of a EXTRACT_SUBREG is then coalesced
-    // with another? If it's the resulting destination register, then
-    // the subidx must be propagated to uses (but only those defined
-    // by the EXTRACT_SUBREG). If it's being coalesced into another
-    // register, it should be safe because register is assumed to have
-    // the register class of the super-register.
-
-    // Process moves where one of the registers have a sub-register index.
-    MachineOperand *DstMO = CopyMI->findRegisterDefOperand(DstReg);
-    MachineOperand *SrcMO = CopyMI->findRegisterUseOperand(SrcReg);
-    SubIdx = DstMO->getSubReg();
-    if (SubIdx) {
-      if (SrcMO->getSubReg())
-        // FIXME: can we handle this?
+    // Avoid constraining virtual register regclass too much.
+    if (CP.isCrossClass()) {
+      if (DisableCrossClassJoin) {
+        DEBUG(dbgs() << "\tCross-class joins disabled.\n");
         return false;
-      // This is not an insert_subreg but it looks like one.
-      // e.g. %reg1024:4 = MOV32rr %EAX
-      isInsSubReg = true;
-      if (SrcIsPhys) {
-        if (!CanJoinInsertSubRegToPhysReg(DstReg, SrcReg, SubIdx, RealSrcReg))
-          return false; // Not coalescable
-        SubIdx = 0;
-      }
-    } else {
-      SubIdx = SrcMO->getSubReg();
-      if (SubIdx) {
-        // This is not a extract_subreg but it looks like one.
-        // e.g. %cl = MOV16rr %reg1024:1
-        isExtSubReg = true;
-        if (DstIsPhys) {
-          if (!CanJoinExtractSubRegToPhysReg(DstReg, SrcReg, SubIdx,RealDstReg))
-            return false; // Not coalescable
-          SubIdx = 0;
-        }
       }
-    }
-
-    // Now determine the register class of the joined register.
-    if (!SrcIsPhys && !DstIsPhys) {
-      if (isExtSubReg) {
-        NewRC =
-          SubIdx ? tri_->getMatchingSuperRegClass(SrcRC, DstRC, SubIdx) : SrcRC;
-      } else if (isInsSubReg) {
-        NewRC =
-          SubIdx ? tri_->getMatchingSuperRegClass(DstRC, SrcRC, SubIdx) : DstRC;
-      } else {
-        NewRC = getCommonSubClass(SrcRC, DstRC);
-      }
-
-      if (!NewRC) {
-        DEBUG(dbgs() << "\tDisjoint regclasses: "
-                     << SrcRC->getName() << ", "
-                     << DstRC->getName() << ".\n");
-        return false;           // Not coalescable.
-      }
-
-      // If we are joining two virtual registers and the resulting register
-      // class is more restrictive (fewer register, smaller size). Check if it's
-      // worth doing the merge.
-      if (!isWinToJoinCrossClass(SrcReg, DstReg, SrcRC, DstRC, NewRC)) {
+      if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
+                                 mri_->getRegClass(CP.getSrcReg()),
+                                 mri_->getRegClass(CP.getDstReg()),
+                                 CP.getNewRC())) {
         DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: "
-                     << SrcRC->getName() << "/"
-                     << DstRC->getName() << " -> "
-                     << NewRC->getName() << ".\n");
-        // Allow the coalescer to try again in case either side gets coalesced to
-        // a physical register that's compatible with the other side. e.g.
-        // r1024 = MOV32to32_ r1025
-        // But later r1024 is assigned EAX then r1025 may be coalesced with EAX.
+                     << CP.getNewRC()->getName() << ".\n");
         Again = true;  // May be possible to coalesce later.
         return false;
       }
     }
-  }
 
-  // Will it create illegal extract_subreg / insert_subreg?
-  if (SrcIsPhys && HasIncompatibleSubRegDefUse(CopyMI, DstReg, SrcReg))
-    return false;
-  if (DstIsPhys && HasIncompatibleSubRegDefUse(CopyMI, SrcReg, DstReg))
-    return false;
-
-  LiveInterval &SrcInt = li_->getInterval(SrcReg);
-  LiveInterval &DstInt = li_->getInterval(DstReg);
-  assert(SrcInt.reg == SrcReg && DstInt.reg == DstReg &&
-         "Register mapping is horribly broken!");
-
-  DEBUG({
-      dbgs() << "\t\tInspecting ";
-      if (SrcRC) dbgs() << SrcRC->getName() << ": ";
-      SrcInt.print(dbgs(), tri_);
-      dbgs() << "\n\t\t       and ";
-      if (DstRC) dbgs() << DstRC->getName() << ": ";
-      DstInt.print(dbgs(), tri_);
-      dbgs() << "\n";
-    });
+    // When possible, let DstReg be the larger interval.
+    if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
+                           li_->getInterval(CP.getDstReg()).ranges.size())
+      CP.flip();
+  }
+
+  // We need to be careful about coalescing a source physical register with a
+  // virtual register. Once the coalescing is done, it cannot be broken and
+  // these are not spillable! If the destination interval uses are far away,
+  // think twice about coalescing them!
+  // FIXME: Why are we skipping this test for partial copies?
+  //        CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
+  if (!CP.isPartial() && CP.isPhys()) {
+    LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
+
+    // Don't join with physregs that have a ridiculous number of live
+    // ranges. The data structure performance is really bad when that
+    // happens.
+    if (li_->hasInterval(CP.getDstReg()) &&
+        li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
+      mri_->setRegAllocationHint(CP.getSrcReg(), 0, CP.getDstReg());
+      ++numAborts;
+      DEBUG(dbgs()
+           << "\tPhysical register live interval too complicated, abort!\n");
+      return false;
+    }
 
-  // Save a copy of the virtual register live interval. We'll manually
-  // merge this into the "real" physical register live interval this is
-  // coalesced with.
-  OwningPtr<LiveInterval> SavedLI;
-  if (RealDstReg)
-    SavedLI.reset(li_->dupInterval(&SrcInt));
-  else if (RealSrcReg)
-    SavedLI.reset(li_->dupInterval(&DstInt));
-
-  if (!isExtSubReg && !isInsSubReg && !isSubRegToReg) {
-    // Check if it is necessary to propagate "isDead" property.
-    MachineOperand *mopd = CopyMI->findRegisterDefOperand(DstReg, false);
-    bool isDead = mopd->isDead();
-
-    // We need to be careful about coalescing a source physical register with a
-    // virtual register. Once the coalescing is done, it cannot be broken and
-    // these are not spillable! If the destination interval uses are far away,
-    // think twice about coalescing them!
-    if (!isDead && (SrcIsPhys || DstIsPhys)) {
-      // If the virtual register live interval is long but it has low use
-      // density, do not join them, instead mark the physical register as its
-      // allocation preference.
-      LiveInterval &JoinVInt = SrcIsPhys ? DstInt : SrcInt;
-      LiveInterval &JoinPInt = SrcIsPhys ? SrcInt : DstInt;
-      unsigned JoinVReg = SrcIsPhys ? DstReg : SrcReg;
-      unsigned JoinPReg = SrcIsPhys ? SrcReg : DstReg;
-
-      // Don't join with physregs that have a ridiculous number of live
-      // ranges. The data structure performance is really bad when that
-      // happens.
-      if (JoinPInt.ranges.size() > 1000) {
-        mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
-        ++numAborts;
-        DEBUG(dbgs()
-              << "\tPhysical register live interval too complicated, abort!\n");
-        return false;
-      }
+    const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
+    unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
+    unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
+    if (Length > Threshold &&
+        std::distance(mri_->use_nodbg_begin(CP.getSrcReg()),
+                      mri_->use_nodbg_end()) * Threshold < Length) {
+      // Before giving up coalescing, if definition of source is defined by
+      // trivial computation, try rematerializing it.
+      if (!CP.isFlipped() &&
+          ReMaterializeTrivialDef(JoinVInt, CP.getDstReg(), 0, CopyMI))
+        return true;
 
-      const TargetRegisterClass *RC = mri_->getRegClass(JoinVReg);
-      unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
-      unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
-      if (Length > Threshold &&
-          std::distance(mri_->use_nodbg_begin(JoinVReg),
-                        mri_->use_nodbg_end()) * Threshold < Length) {
-        // Before giving up coalescing, if definition of source is defined by
-        // trivial computation, try rematerializing it.
-        if (ReMaterializeTrivialDef(SrcInt, DstReg, DstSubIdx, CopyMI))
-          return true;
-
-        mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
-        ++numAborts;
-        DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
-        Again = true;  // May be possible to coalesce later.
-        return false;
-      }
+      mri_->setRegAllocationHint(CP.getSrcReg(), 0, CP.getDstReg());
+      ++numAborts;
+      DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
+      Again = true;  // May be possible to coalesce later.
+      return false;
     }
   }
 
+  // We may need the source interval after JoinIntervals has destroyed it.
+  OwningPtr<LiveInterval> SavedLI;
+  if (CP.getOrigDstReg() != CP.getDstReg())
+    SavedLI.reset(li_->dupInterval(&li_->getInterval(CP.getSrcReg())));
+
   // Okay, attempt to join these two intervals.  On failure, this returns false.
   // Otherwise, if one of the intervals being joined is a physreg, this method
   // always canonicalizes DstInt to be it.  The output "SrcInt" will not have
   // been modified, so we can use this information below to update aliases.
-  bool Swapped = false;
-  // If SrcInt is implicitly defined, it's safe to coalesce.
-  if (SrcInt.empty()) {
-    if (!CanCoalesceWithImpDef(CopyMI, DstInt, SrcInt)) {
-      // Only coalesce an empty interval (defined by implicit_def) with
-      // another interval which has a valno defined by the CopyMI and the CopyMI
-      // is a kill of the implicit def.
-      DEBUG(dbgs() << "\tNot profitable!\n");
-      return false;
-    }
-  } else if (!JoinIntervals(DstInt, SrcInt, Swapped, CP)) {
+  if (!JoinIntervals(CP)) {
     // Coalescing failed.
 
     // If definition of source is defined by trivial computation, try
     // rematerializing it.
-    if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
-        ReMaterializeTrivialDef(SrcInt, DstReg, DstSubIdx, CopyMI))
+    if (!CP.isFlipped() &&
+        ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()),
+                                CP.getDstReg(), 0, CopyMI))
       return true;
 
     // If we can eliminate the copy without merging the live ranges, do so now.
-    if (!isExtSubReg && !isInsSubReg && !isSubRegToReg &&
-        (AdjustCopiesBackFrom(SrcInt, DstInt, CopyMI) ||
-         RemoveCopyByCommutingDef(SrcInt, DstInt, CopyMI))) {
-      JoinedCopies.insert(CopyMI);
-      DEBUG(dbgs() << "\tTrivial!\n");
-      return true;
+    if (!CP.isPartial()) {
+      LiveInterval *UseInt = &li_->getInterval(CP.getSrcReg());
+      LiveInterval *DefInt = &li_->getInterval(CP.getDstReg());
+      if (CP.isFlipped())
+        std::swap(UseInt, DefInt);
+      if (AdjustCopiesBackFrom(CP, CopyMI) ||
+          RemoveCopyByCommutingDef(*UseInt, *DefInt, CopyMI)) {
+        JoinedCopies.insert(CopyMI);
+        DEBUG(dbgs() << "\tTrivial!\n");
+        return true;
+      }
     }
 
     // Otherwise, we are unable to join the intervals.
@@ -1752,25 +1167,15 @@
     return false;
   }
 
-  LiveInterval *ResSrcInt = &SrcInt;
-  LiveInterval *ResDstInt = &DstInt;
-  if (Swapped) {
-    std::swap(SrcReg, DstReg);
-    std::swap(ResSrcInt, ResDstInt);
-  }
-  assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
-         "LiveInterval::join didn't work right!");
-
-  // If we're about to merge live ranges into a physical register live interval,
-  // we have to update any aliased register's live ranges to indicate that they
-  // have clobbered values for this range.
-  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
+  if (CP.isPhys()) {
     // If this is a extract_subreg where dst is a physical register, e.g.
     // cl = EXTRACT_SUBREG reg1024, 1
     // then create and update the actual physical register allocated to RHS.
-    if (RealDstReg || RealSrcReg) {
-      LiveInterval &RealInt =
-        li_->getOrCreateInterval(RealDstReg ? RealDstReg : RealSrcReg);
+    unsigned LargerDstReg = CP.getDstReg();
+    if (CP.getOrigDstReg() != CP.getDstReg()) {
+      if (tri_->isSubRegister(CP.getOrigDstReg(), LargerDstReg))
+        LargerDstReg = CP.getOrigDstReg();
+      LiveInterval &RealInt = li_->getOrCreateInterval(CP.getDstReg());
       for (LiveInterval::const_vni_iterator I = SavedLI->vni_begin(),
              E = SavedLI->vni_end(); I != E; ++I) {
         const VNInfo *ValNo = *I;
@@ -1778,60 +1183,48 @@
                                                 false, // updated at *
                                                 li_->getVNInfoAllocator());
         NewValNo->setFlags(ValNo->getFlags()); // * updated here.
-        RealInt.addKills(NewValNo, ValNo->kills);
         RealInt.MergeValueInAsValue(*SavedLI, ValNo, NewValNo);
       }
       RealInt.weight += SavedLI->weight;
-      DstReg = RealDstReg ? RealDstReg : RealSrcReg;
     }
 
     // Update the liveintervals of sub-registers.
-    for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
-      li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, *ResSrcInt,
-                                                 li_->getVNInfoAllocator());
-  }
-
-  // If this is a EXTRACT_SUBREG, make sure the result of coalescing is the
-  // larger super-register.
-  if ((isExtSubReg || isInsSubReg || isSubRegToReg) &&
-      !SrcIsPhys && !DstIsPhys) {
-    if ((isExtSubReg && !Swapped) ||
-        ((isInsSubReg || isSubRegToReg) && Swapped)) {
-      ResSrcInt->Copy(*ResDstInt, mri_, li_->getVNInfoAllocator());
-      std::swap(SrcReg, DstReg);
-      std::swap(ResSrcInt, ResDstInt);
+    LiveInterval &LargerInt = li_->getInterval(LargerDstReg);
+    for (const unsigned *AS = tri_->getSubRegisters(LargerDstReg); *AS; ++AS) {
+      LiveInterval &SRI = li_->getOrCreateInterval(*AS);
+      SRI.MergeInClobberRanges(*li_, LargerInt, li_->getVNInfoAllocator());
+      DEBUG({
+        dbgs() << "\t\tsubreg: "; SRI.print(dbgs(), tri_); dbgs() << "\n";
+      });
     }
   }
 
   // Coalescing to a virtual register that is of a sub-register class of the
   // other. Make sure the resulting register is set to the right register class.
-  if (CrossRC)
+  if (CP.isCrossClass()) {
     ++numCrossRCs;
-
-  // This may happen even if it's cross-rc coalescing. e.g.
-  // %reg1026<def> = SUBREG_TO_REG 0, %reg1037<kill>, 4
-  // reg1026 -> GR64, reg1037 -> GR32_ABCD. The resulting register will have to
-  // be allocate a register from GR64_ABCD.
-  if (NewRC)
-    mri_->setRegClass(DstReg, NewRC);
+    mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
+  }
 
   // Remember to delete the copy instruction.
   JoinedCopies.insert(CopyMI);
 
-  UpdateRegDefsUses(SrcReg, DstReg, SubIdx);
+  UpdateRegDefsUses(CP);
 
   // If we have extended the live range of a physical register, make sure we
   // update live-in lists as well.
-  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
-    const LiveInterval &VRegInterval = li_->getInterval(SrcReg);
+  if (CP.isPhys()) {
     SmallVector<MachineBasicBlock*, 16> BlockSeq;
-    for (LiveInterval::const_iterator I = VRegInterval.begin(),
-           E = VRegInterval.end(); I != E; ++I ) {
+    // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
+    // ranges for this, and they are preserved.
+    LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
+    for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
+         I != E; ++I ) {
       li_->findLiveInMBBs(I->start, I->end, BlockSeq);
       for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
         MachineBasicBlock &block = *BlockSeq[idx];
-        if (!block.isLiveIn(DstReg))
-          block.addLiveIn(DstReg);
+        if (!block.isLiveIn(CP.getDstReg()))
+          block.addLiveIn(CP.getDstReg());
       }
       BlockSeq.clear();
     }
@@ -1839,32 +1232,17 @@
 
  // SrcReg is guaranteed to be the register whose live interval is
  // being merged.
-  li_->removeInterval(SrcReg);
+  li_->removeInterval(CP.getSrcReg());
 
   // Update regalloc hint.
-  tri_->UpdateRegAllocHint(SrcReg, DstReg, *mf_);
-
-  // Manually deleted the live interval copy.
-  if (SavedLI) {
-    SavedLI->clear();
-    SavedLI.reset();
-  }
-
-  // If resulting interval has a preference that no longer fits because of subreg
-  // coalescing, just clear the preference.
-  unsigned Preference = getRegAllocPreference(ResDstInt->reg, *mf_, mri_, tri_);
-  if (Preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
-      TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
-    const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
-    if (!RC->contains(Preference))
-      mri_->setRegAllocationHint(ResDstInt->reg, 0, 0);
-  }
+  tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);
 
   DEBUG({
-      dbgs() << "\t\tJoined. Result = ";
-      ResDstInt->print(dbgs(), tri_);
-      dbgs() << "\n";
-    });
+    LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
+    dbgs() << "\tJoined. Result = ";
+    DstInt.print(dbgs(), tri_);
+    dbgs() << "\n";
+  });
 
   ++numJoins;
   return true;
@@ -1921,257 +1299,25 @@
   return ThisValNoAssignments[VN] = UltimateVN;
 }
 
-static bool InVector(VNInfo *Val, const SmallVector<VNInfo*, 8> &V) {
-  return std::find(V.begin(), V.end(), Val) != V.end();
-}
-
-/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
-/// the specified live interval is defined by a copy from the specified
-/// register.
-bool SimpleRegisterCoalescing::RangeIsDefinedByCopy(LiveInterval &li,
-                                                    LiveRange *LR,
-                                                    CoalescerPair &CP) {
-  if (CP.isCoalescable(LR->valno->getCopy()))
-    return true;
-  // FIXME: Do isPHIDef and isDefAccurate both need to be tested?
-  if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) &&
-      TargetRegisterInfo::isPhysicalRegister(li.reg) &&
-      *tri_->getSuperRegisters(li.reg)) {
-    // It's a sub-register live interval, we may not have precise information.
-    // Re-compute it.
-    MachineInstr *DefMI = li_->getInstructionFromIndex(LR->start);
-    if (CP.isCoalescable(DefMI)) {
-      // Cache computed info.
-      LR->valno->def = LR->start;
-      LR->valno->setCopy(DefMI);
-      return true;
-    }
-  }
-  return false;
-}
-
-
-/// ValueLiveAt - Return true if the LiveRange pointed to by the given
-/// iterator, or any subsequent range with the same value number,
-/// is live at the given point.
-bool SimpleRegisterCoalescing::ValueLiveAt(LiveInterval::iterator LRItr,
-                                           LiveInterval::iterator LREnd,
-                                           SlotIndex defPoint) const {
-  for (const VNInfo *valno = LRItr->valno;
-       (LRItr != LREnd) && (LRItr->valno == valno); ++LRItr) {
-    if (LRItr->contains(defPoint))
-      return true;
-  }
-
-  return false;
-}
-
-
-/// SimpleJoin - Attempt to joint the specified interval into this one. The
-/// caller of this method must guarantee that the RHS only contains a single
-/// value number and that the RHS is not defined by a copy from this
-/// interval.  This returns false if the intervals are not joinable, or it
-/// joins them and returns true.
-bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS,
-                                          CoalescerPair &CP) {
-  assert(RHS.containsOneValue());
-
-  // Some number (potentially more than one) value numbers in the current
-  // interval may be defined as copies from the RHS.  Scan the overlapping
-  // portions of the LHS and RHS, keeping track of this and looking for
-  // overlapping live ranges that are NOT defined as copies.  If these exist, we
-  // cannot coalesce.
-
-  LiveInterval::iterator LHSIt = LHS.begin(), LHSEnd = LHS.end();
-  LiveInterval::iterator RHSIt = RHS.begin(), RHSEnd = RHS.end();
-
-  if (LHSIt->start < RHSIt->start) {
-    LHSIt = std::upper_bound(LHSIt, LHSEnd, RHSIt->start);
-    if (LHSIt != LHS.begin()) --LHSIt;
-  } else if (RHSIt->start < LHSIt->start) {
-    RHSIt = std::upper_bound(RHSIt, RHSEnd, LHSIt->start);
-    if (RHSIt != RHS.begin()) --RHSIt;
-  }
-
-  SmallVector<VNInfo*, 8> EliminatedLHSVals;
-
-  while (1) {
-    // Determine if these live intervals overlap.
-    bool Overlaps = false;
-    if (LHSIt->start <= RHSIt->start)
-      Overlaps = LHSIt->end > RHSIt->start;
-    else
-      Overlaps = RHSIt->end > LHSIt->start;
-
-    // If the live intervals overlap, there are two interesting cases: if the
-    // LHS interval is defined by a copy from the RHS, it's ok and we record
-    // that the LHS value # is the same as the RHS.  If it's not, then we cannot
-    // coalesce these live ranges and we bail out.
-    if (Overlaps) {
-      // If we haven't already recorded that this value # is safe, check it.
-      if (!InVector(LHSIt->valno, EliminatedLHSVals)) {
-        // If it's re-defined by an early clobber somewhere in the live range,
-        // then conservatively abort coalescing.
-        if (LHSIt->valno->hasRedefByEC())
-          return false;
-        // Copy from the RHS?
-        if (!RangeIsDefinedByCopy(LHS, LHSIt, CP))
-          return false;    // Nope, bail out.
-
-        if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
-          // Here is an interesting situation:
-          // BB1:
-          //   vr1025 = copy vr1024
-          //   ..
-          // BB2:
-          //   vr1024 = op
-          //          = vr1025
-          // Even though vr1025 is copied from vr1024, it's not safe to
-          // coalesce them since the live range of vr1025 intersects the
-          // def of vr1024. This happens because vr1025 is assigned the
-          // value of the previous iteration of vr1024.
-          return false;
-        EliminatedLHSVals.push_back(LHSIt->valno);
-      }
-
-      // We know this entire LHS live range is okay, so skip it now.
-      if (++LHSIt == LHSEnd) break;
-      continue;
-    }
-
-    if (LHSIt->end < RHSIt->end) {
-      if (++LHSIt == LHSEnd) break;
-    } else {
-      // One interesting case to check here.  It's possible that we have
-      // something like "X3 = Y" which defines a new value number in the LHS,
-      // and is the last use of this liverange of the RHS.  In this case, we
-      // want to notice this copy (so that it gets coalesced away) even though
-      // the live ranges don't actually overlap.
-      if (LHSIt->start == RHSIt->end) {
-        if (InVector(LHSIt->valno, EliminatedLHSVals)) {
-          // We already know that this value number is going to be merged in
-          // if coalescing succeeds.  Just skip the liverange.
-          if (++LHSIt == LHSEnd) break;
-        } else {
-          // If it's re-defined by an early clobber somewhere in the live range,
-          // then conservatively abort coalescing.
-          if (LHSIt->valno->hasRedefByEC())
-            return false;
-          // Otherwise, if this is a copy from the RHS, mark it as being merged
-          // in.
-          if (RangeIsDefinedByCopy(LHS, LHSIt, CP)) {
-            if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
-              // Here is an interesting situation:
-              // BB1:
-              //   vr1025 = copy vr1024
-              //   ..
-              // BB2:
-              //   vr1024 = op
-              //          = vr1025
-              // Even though vr1025 is copied from vr1024, it's not safe to
-              // coalesced them since live range of vr1025 intersects the
-              // def of vr1024. This happens because vr1025 is assigned the
-              // value of the previous iteration of vr1024.
-              return false;
-            EliminatedLHSVals.push_back(LHSIt->valno);
-
-            // We know this entire LHS live range is okay, so skip it now.
-            if (++LHSIt == LHSEnd) break;
-          }
-        }
-      }
-
-      if (++RHSIt == RHSEnd) break;
-    }
-  }
-
-  // If we got here, we know that the coalescing will be successful and that
-  // the value numbers in EliminatedLHSVals will all be merged together.  Since
-  // the most common case is that EliminatedLHSVals has a single number, we
-  // optimize for it: if there is more than one value, we merge them all into
-  // the lowest numbered one, then handle the interval as if we were merging
-  // with one value number.
-  VNInfo *LHSValNo = NULL;
-  if (EliminatedLHSVals.size() > 1) {
-    // Loop through all the equal value numbers merging them into the smallest
-    // one.
-    VNInfo *Smallest = EliminatedLHSVals[0];
-    for (unsigned i = 1, e = EliminatedLHSVals.size(); i != e; ++i) {
-      if (EliminatedLHSVals[i]->id < Smallest->id) {
-        // Merge the current notion of the smallest into the smaller one.
-        LHS.MergeValueNumberInto(Smallest, EliminatedLHSVals[i]);
-        Smallest = EliminatedLHSVals[i];
-      } else {
-        // Merge into the smallest.
-        LHS.MergeValueNumberInto(EliminatedLHSVals[i], Smallest);
-      }
-    }
-    LHSValNo = Smallest;
-  } else if (EliminatedLHSVals.empty()) {
-    if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
-        *tri_->getSuperRegisters(LHS.reg))
-      // Imprecise sub-register information. Can't handle it.
-      return false;
-    llvm_unreachable("No copies from the RHS?");
-  } else {
-    LHSValNo = EliminatedLHSVals[0];
-  }
-
-  // Okay, now that there is a single LHS value number that we're merging the
-  // RHS into, update the value number info for the LHS to indicate that the
-  // value number is defined where the RHS value number was.
-  const VNInfo *VNI = RHS.getValNumInfo(0);
-  LHSValNo->def  = VNI->def;
-  LHSValNo->setCopy(VNI->getCopy());
-
-  // Okay, the final step is to loop over the RHS live intervals, adding them to
-  // the LHS.
-  if (VNI->hasPHIKill())
-    LHSValNo->setHasPHIKill(true);
-  LHS.addKills(LHSValNo, VNI->kills);
-  LHS.MergeRangesInAsValue(RHS, LHSValNo);
-
-  LHS.ComputeJoinedWeight(RHS);
-
-  // Update regalloc hint if both are virtual registers.
-  if (TargetRegisterInfo::isVirtualRegister(LHS.reg) &&
-      TargetRegisterInfo::isVirtualRegister(RHS.reg)) {
-    std::pair<unsigned, unsigned> RHSPref = mri_->getRegAllocationHint(RHS.reg);
-    std::pair<unsigned, unsigned> LHSPref = mri_->getRegAllocationHint(LHS.reg);
-    if (RHSPref != LHSPref)
-      mri_->setRegAllocationHint(LHS.reg, RHSPref.first, RHSPref.second);
-  }
-
-  // Update the liveintervals of sub-registers.
-  if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
-    for (const unsigned *AS = tri_->getSubRegisters(LHS.reg); *AS; ++AS)
-      li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, LHS,
-                                                    li_->getVNInfoAllocator());
-
-  return true;
-}
-
 /// JoinIntervals - Attempt to join these two intervals.  On failure, this
-/// returns false.  Otherwise, if one of the intervals being joined is a
-/// physreg, this method always canonicalizes LHS to be it.  The output
-/// "RHS" will not have been modified, so we can use this information
-/// below to update aliases.
-bool
-SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
-                                        bool &Swapped, CoalescerPair &CP) {
-  // Compute the final value assignment, assuming that the live ranges can be
-  // coalesced.
-  SmallVector<int, 16> LHSValNoAssignments;
-  SmallVector<int, 16> RHSValNoAssignments;
-  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
-  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
-  SmallVector<VNInfo*, 16> NewVNInfo;
+/// returns false.
+bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) {
+  LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
+  DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });
+
+  // FIXME: Join into CP.getDstReg instead of CP.getOrigDstReg.
+  // When looking at
+  //   %reg2000 = EXTRACT_SUBREG %EAX, sub_16bit
+  // we really want to join %reg2000 with %AX ( = CP.getDstReg). We are actually
+  // joining into %EAX ( = CP.getOrigDstReg) because it is guaranteed to have an
+  // existing live interval, and we are better equipped to handle interference.
+  // JoinCopy cleans up the mess by taking a copy of RHS before calling here,
+  // and merging that copy into CP.getDstReg after.
 
   // If a live interval is a physical register, conservatively check if any
   // of its sub-registers is overlapping the live interval of the virtual
   // register. If so, do not coalesce.
-  if (TargetRegisterInfo::isPhysicalRegister(LHS.reg) &&
-      *tri_->getSubRegisters(LHS.reg)) {
+  if (CP.isPhys() && *tri_->getSubRegisters(CP.getOrigDstReg())) {
     // If it's coalescing a virtual register to a physical register, estimate
     // its live interval length. This is the *cost* of scanning an entire live
     // interval. If the cost is low, we'll do an exhaustive check instead.
@@ -2191,10 +1337,11 @@
         li_->intervalIsInOneMBB(RHS) &&
         li_->getApproximateInstructionCount(RHS) <= 10) {
       // Perform a more exhaustive check for some common cases.
-      if (li_->conflictsWithSubPhysRegRef(RHS, LHS.reg, true, JoinedCopies))
+      if (li_->conflictsWithAliasRef(RHS, CP.getOrigDstReg(), JoinedCopies))
         return false;
     } else {
-      for (const unsigned* SR = tri_->getSubRegisters(LHS.reg); *SR; ++SR)
+      for (const unsigned* SR = tri_->getAliasSet(CP.getOrigDstReg()); *SR;
+           ++SR)
         if (li_->hasInterval(*SR) && RHS.overlaps(li_->getInterval(*SR))) {
           DEBUG({
               dbgs() << "\tInterfere with sub-register ";
@@ -2203,169 +1350,97 @@
           return false;
         }
     }
-  } else if (TargetRegisterInfo::isPhysicalRegister(RHS.reg) &&
-             *tri_->getSubRegisters(RHS.reg)) {
-    if (LHS.containsOneValue() &&
-        li_->getApproximateInstructionCount(LHS) <= 10) {
-      // Perform a more exhaustive check for some common cases.
-      if (li_->conflictsWithSubPhysRegRef(LHS, RHS.reg, false, JoinedCopies))
-        return false;
-    } else {
-      for (const unsigned* SR = tri_->getSubRegisters(RHS.reg); *SR; ++SR)
-        if (li_->hasInterval(*SR) && LHS.overlaps(li_->getInterval(*SR))) {
-          DEBUG({
-              dbgs() << "\tInterfere with sub-register ";
-              li_->getInterval(*SR).print(dbgs(), tri_);
-            });
-          return false;
-        }
-    }
   }
 
-  // Compute ultimate value numbers for the LHS and RHS values.
-  if (RHS.containsOneValue()) {
-    // Copies from a liveinterval with a single value are simple to handle and
-    // very common, handle the special case here.  This is important, because
-    // often RHS is small and LHS is large (e.g. a physreg).
-
-    // Find out if the RHS is defined as a copy from some value in the LHS.
-    int RHSVal0DefinedFromLHS = -1;
-    int RHSValID = -1;
-    VNInfo *RHSValNoInfo = NULL;
-    VNInfo *RHSValNoInfo0 = RHS.getValNumInfo(0);
-    unsigned RHSSrcReg = li_->getVNInfoSourceReg(RHSValNoInfo0);
-    if (RHSSrcReg == 0 || RHSSrcReg != LHS.reg) {
-      // If RHS is not defined as a copy from the LHS, we can use simpler and
-      // faster checks to see if the live ranges are coalescable.  This joiner
-      // can't swap the LHS/RHS intervals though.
-      if (!TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
-        return SimpleJoin(LHS, RHS, CP);
-      } else {
-        RHSValNoInfo = RHSValNoInfo0;
-      }
-    } else {
-      // It was defined as a copy from the LHS, find out what value # it is.
-      RHSValNoInfo =
-        LHS.getLiveRangeContaining(RHSValNoInfo0->def.getPrevSlot())->valno;
-      RHSValID = RHSValNoInfo->id;
-      RHSVal0DefinedFromLHS = RHSValID;
-    }
-
-    LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
-    RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
-    NewVNInfo.resize(LHS.getNumValNums(), NULL);
-
-    // Okay, *all* of the values in LHS that are defined as a copy from RHS
-    // should now get updated.
-    for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
-         i != e; ++i) {
-      VNInfo *VNI = *i;
-      unsigned VN = VNI->id;
-      if (unsigned LHSSrcReg = li_->getVNInfoSourceReg(VNI)) {
-        if (LHSSrcReg != RHS.reg) {
-          // If this is not a copy from the RHS, its value number will be
-          // unmodified by the coalescing.
-          NewVNInfo[VN] = VNI;
-          LHSValNoAssignments[VN] = VN;
-        } else if (RHSValID == -1) {
-          // Otherwise, it is a copy from the RHS, and we don't already have a
-          // value# for it.  Keep the current value number, but remember it.
-          LHSValNoAssignments[VN] = RHSValID = VN;
-          NewVNInfo[VN] = RHSValNoInfo;
-          LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
-        } else {
-          // Otherwise, use the specified value #.
-          LHSValNoAssignments[VN] = RHSValID;
-          if (VN == (unsigned)RHSValID) {  // Else this val# is dead.
-            NewVNInfo[VN] = RHSValNoInfo;
-            LHSValsDefinedFromRHS[VNI] = RHSValNoInfo0;
-          }
-        }
-      } else {
-        NewVNInfo[VN] = VNI;
-        LHSValNoAssignments[VN] = VN;
-      }
-    }
+  // Compute the final value assignment, assuming that the live ranges can be
+  // coalesced.
+  SmallVector<int, 16> LHSValNoAssignments;
+  SmallVector<int, 16> RHSValNoAssignments;
+  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
+  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
+  SmallVector<VNInfo*, 16> NewVNInfo;
 
-    assert(RHSValID != -1 && "Didn't find value #?");
-    RHSValNoAssignments[0] = RHSValID;
-    if (RHSVal0DefinedFromLHS != -1) {
-      // This path doesn't go through ComputeUltimateVN so just set
-      // it to anything.
-      RHSValsDefinedFromLHS[RHSValNoInfo0] = (VNInfo*)1;
-    }
-  } else {
-    // Loop over the value numbers of the LHS, seeing if any are defined from
-    // the RHS.
-    for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
-         i != e; ++i) {
-      VNInfo *VNI = *i;
-      if (VNI->isUnused() || VNI->getCopy() == 0)  // Src not defined by a copy?
-        continue;
+  LiveInterval &LHS = li_->getInterval(CP.getOrigDstReg());
+  DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });
 
-      // DstReg is known to be a register in the LHS interval.  If the src is
-      // from the RHS interval, we can use its value #.
-      if (!CP.isCoalescable(VNI->getCopy()))
-        continue;
+  // Loop over the value numbers of the LHS, seeing if any are defined from
+  // the RHS.
+  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    if (VNI->isUnused() || VNI->getCopy() == 0)  // Src not defined by a copy?
+      continue;
 
-      // Figure out the value # from the RHS.
-      LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
-      // The copy could be to an aliased physreg.
-      if (!lr) continue;
-      LHSValsDefinedFromRHS[VNI] = lr->valno;
-    }
+    // Never join with a register that has EarlyClobber redefs.
+    if (VNI->hasRedefByEC())
+      return false;
 
-    // Loop over the value numbers of the RHS, seeing if any are defined from
-    // the LHS.
-    for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
-         i != e; ++i) {
-      VNInfo *VNI = *i;
-      if (VNI->isUnused() || VNI->getCopy() == 0)  // Src not defined by a copy?
-        continue;
+    // DstReg is known to be a register in the LHS interval.  If the src is
+    // from the RHS interval, we can use its value #.
+    if (!CP.isCoalescable(VNI->getCopy()))
+      continue;
 
-      // DstReg is known to be a register in the RHS interval.  If the src is
-      // from the LHS interval, we can use its value #.
-      if (!CP.isCoalescable(VNI->getCopy()))
-        continue;
+    // Figure out the value # from the RHS.
+    LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+    // The copy could be to an aliased physreg.
+    if (!lr) continue;
+    LHSValsDefinedFromRHS[VNI] = lr->valno;
+  }
 
-      // Figure out the value # from the LHS.
-      LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
-      // The copy could be to an aliased physreg.
-      if (!lr) continue;
-      RHSValsDefinedFromLHS[VNI] = lr->valno;
-    }
-
-    LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
-    RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
-    NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
-
-    for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
-         i != e; ++i) {
-      VNInfo *VNI = *i;
-      unsigned VN = VNI->id;
-      if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
-        continue;
-      ComputeUltimateVN(VNI, NewVNInfo,
-                        LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
-                        LHSValNoAssignments, RHSValNoAssignments);
-    }
-    for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
-         i != e; ++i) {
-      VNInfo *VNI = *i;
-      unsigned VN = VNI->id;
-      if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
-        continue;
-      // If this value number isn't a copy from the LHS, it's a new number.
-      if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
-        NewVNInfo.push_back(VNI);
-        RHSValNoAssignments[VN] = NewVNInfo.size()-1;
-        continue;
-      }
+  // Loop over the value numbers of the RHS, seeing if any are defined from
+  // the LHS.
+  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    if (VNI->isUnused() || VNI->getCopy() == 0)  // Src not defined by a copy?
+      continue;
+
+    // Never join with a register that has EarlyClobber redefs.
+    if (VNI->hasRedefByEC())
+      return false;
 
-      ComputeUltimateVN(VNI, NewVNInfo,
-                        RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
-                        RHSValNoAssignments, LHSValNoAssignments);
+    // DstReg is known to be a register in the RHS interval.  If the src is
+    // from the LHS interval, we can use its value #.
+    if (!CP.isCoalescable(VNI->getCopy()))
+      continue;
+
+    // Figure out the value # from the LHS.
+    LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+    // The copy could be to an aliased physreg.
+    if (!lr) continue;
+    RHSValsDefinedFromLHS[VNI] = lr->valno;
+  }
+
+  LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
+  RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
+  NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
+
+  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    unsigned VN = VNI->id;
+    if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+      continue;
+    ComputeUltimateVN(VNI, NewVNInfo,
+                      LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
+                      LHSValNoAssignments, RHSValNoAssignments);
+  }
+  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    unsigned VN = VNI->id;
+    if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+      continue;
+    // If this value number isn't a copy from the LHS, it's a new number.
+    if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
+      NewVNInfo.push_back(VNI);
+      RHSValNoAssignments[VN] = NewVNInfo.size()-1;
+      continue;
     }
+
+    ComputeUltimateVN(VNI, NewVNInfo,
+                      RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
+                      RHSValNoAssignments, LHSValNoAssignments);
   }
 
   // Armed with the mappings of LHS/RHS values to ultimate values, walk the
@@ -2376,15 +1451,17 @@
   LiveInterval::const_iterator JE = RHS.end();
 
   // Skip ahead until the first place of potential sharing.
-  if (I->start < J->start) {
-    I = std::upper_bound(I, IE, J->start);
-    if (I != LHS.begin()) --I;
-  } else if (J->start < I->start) {
-    J = std::upper_bound(J, JE, I->start);
-    if (J != RHS.begin()) --J;
+  if (I != IE && J != JE) {
+    if (I->start < J->start) {
+      I = std::upper_bound(I, IE, J->start);
+      if (I != LHS.begin()) --I;
+    } else if (J->start < I->start) {
+      J = std::upper_bound(J, JE, I->start);
+      if (J != RHS.begin()) --J;
+    }
   }
 
-  while (1) {
+  while (I != IE && J != JE) {
     // Determine if these two live ranges overlap.
     bool Overlaps;
     if (I->start < J->start) {
@@ -2406,13 +1483,10 @@
         return false;
     }
 
-    if (I->end < J->end) {
+    if (I->end < J->end)
       ++I;
-      if (I == IE) break;
-    } else {
+    else
       ++J;
-      if (J == JE) break;
-    }
   }
 
   // Update kill info. Some live ranges are extended due to copy coalescing.
@@ -2420,10 +1494,8 @@
          E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
     VNInfo *VNI = I->first;
     unsigned LHSValID = LHSValNoAssignments[VNI->id];
-    NewVNInfo[LHSValID]->removeKill(VNI->def);
     if (VNI->hasPHIKill())
       NewVNInfo[LHSValID]->setHasPHIKill(true);
-    RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
   }
 
   // Update kill info. Some live ranges are extended due to copy coalescing.
@@ -2431,25 +1503,19 @@
          E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
     VNInfo *VNI = I->first;
     unsigned RHSValID = RHSValNoAssignments[VNI->id];
-    NewVNInfo[RHSValID]->removeKill(VNI->def);
     if (VNI->hasPHIKill())
       NewVNInfo[RHSValID]->setHasPHIKill(true);
-    LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
   }
 
+  if (LHSValNoAssignments.empty())
+    LHSValNoAssignments.push_back(-1);
+  if (RHSValNoAssignments.empty())
+    RHSValNoAssignments.push_back(-1);
+
   // If we get here, we know that we can coalesce the live ranges.  Ask the
   // intervals to coalesce themselves now.
-  if ((RHS.ranges.size() > LHS.ranges.size() &&
-      TargetRegisterInfo::isVirtualRegister(LHS.reg)) ||
-      TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
-    RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo,
-             mri_);
-    Swapped = true;
-  } else {
-    LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
-             mri_);
-    Swapped = false;
-  }
+  LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
+           mri_);
   return true;
 }
 
@@ -2729,8 +1795,8 @@
         if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
           assert((MI->isExtractSubreg() || MI->isInsertSubreg() ||
                   MI->isSubregToReg()) && "Unrecognized copy instruction");
-          DstReg = MI->getOperand(0).getReg();
-          if (TargetRegisterInfo::isPhysicalRegister(DstReg))
+          SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
+          if (TargetRegisterInfo::isPhysicalRegister(SrcReg))
             // Do not delete extract_subreg, insert_subreg of physical
             // registers unless the definition is dead. e.g.
             // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
@@ -2739,7 +1805,7 @@
             DoDelete = false;
         }
         if (MI->allDefsAreDead()) {
-          LiveInterval &li = li_->getInterval(DstReg);
+          LiveInterval &li = li_->getInterval(SrcReg);
           if (!ShortenDeadCopySrcLiveRange(li, MI))
             ShortenDeadCopyLiveRange(li, MI);
           DoDelete = true;
@@ -2793,8 +1859,8 @@
         if (li_->hasInterval(SrcReg)) {
           LiveInterval &RegInt = li_->getInterval(SrcReg);
           // If def of this move instruction is dead, remove its live range
-          // from the dstination register's live interval.
-          if (MI->registerDefIsDead(DstReg)) {
+          // from the destination register's live interval.
+          if (MI->allDefsAreDead()) {
             if (!ShortenDeadCopySrcLiveRange(RegInt, MI))
               ShortenDeadCopyLiveRange(RegInt, MI);
           }
@@ -2809,17 +1875,13 @@
 
       // Check for now unnecessary kill flags.
       if (li_->isNotInMIMap(MI)) continue;
-      SlotIndex UseIdx = li_->getInstructionIndex(MI).getUseIndex();
+      SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
       for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
         MachineOperand &MO = MI->getOperand(i);
         if (!MO.isReg() || !MO.isKill()) continue;
         unsigned reg = MO.getReg();
         if (!reg || !li_->hasInterval(reg)) continue;
-        LiveInterval &LI = li_->getInterval(reg);
-        const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
-        if (!LR ||
-            (!LR->valno->isKill(UseIdx.getDefIndex()) &&
-             LR->valno->def != UseIdx.getDefIndex()))
+        if (!li_->getInterval(reg).killedAt(DefIdx))
           MO.setIsKill(false);
       }
     }

Modified: llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h Fri Jul  2 04:57:13 2010
@@ -105,22 +105,12 @@
     /// possible to coalesce this interval, but it may be possible if other
     /// things get coalesced, then it returns true by reference in 'Again'.
     bool JoinCopy(CopyRec &TheCopy, bool &Again);
-    
+
     /// JoinIntervals - Attempt to join these two intervals.  On failure, this
-    /// returns false.  Otherwise, if one of the intervals being joined is a
-    /// physreg, this method always canonicalizes DestInt to be it.  The output
-    /// "SrcInt" will not have been modified, so we can use this information
-    /// below to update aliases.
-    bool JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, bool &Swapped,
-                       CoalescerPair &CP);
-    
-    /// SimpleJoin - Attempt to join the specified interval into this one. The
-    /// caller of this method must guarantee that the RHS only contains a single
-    /// value number and that the RHS is not defined by a copy from this
-    /// interval.  This returns false if the intervals are not joinable, or it
-    /// joins them and returns true.
-    bool SimpleJoin(LiveInterval &LHS, LiveInterval &RHS, CoalescerPair &CP);
-    
+    /// returns false.  The output "SrcInt" will not have been modified, so we can
+    /// use this information below to update aliases.
+    bool JoinIntervals(CoalescerPair &CP);
+
     /// Return true if the two specified registers belong to different register
     /// classes.  The registers may be either phys or virt regs.
     bool differingRegisterClasses(unsigned RegA, unsigned RegB) const;
@@ -129,8 +119,7 @@
     /// the source value number is defined by a copy from the destination reg
     /// see if we can merge these two destination reg valno# into a single
     /// value number, eliminating a copy.
-    bool AdjustCopiesBackFrom(LiveInterval &IntA, LiveInterval &IntB,
-                              MachineInstr *CopyMI);
+    bool AdjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
 
     /// HasOtherReachingDefs - Return true if there are definitions of IntB
     /// other than BValNo val# that can reach uses of AValno val# of IntA.
@@ -156,28 +145,6 @@
     bool ReMaterializeTrivialDef(LiveInterval &SrcInt, unsigned DstReg,
                                  unsigned DstSubIdx, MachineInstr *CopyMI);
 
-    /// CanCoalesceWithImpDef - Returns true if the specified copy instruction
-    /// from an implicit def to another register can be coalesced away.
-    bool CanCoalesceWithImpDef(MachineInstr *CopyMI,
-                               LiveInterval &li, LiveInterval &ImpLi) const;
-
-    /// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an
-    /// implicit_def and it is being removed. Turn all copies from this value#
-    /// into implicit_defs.
-    void TurnCopiesFromValNoToImpDefs(LiveInterval &li, VNInfo *VNI);
-
-    /// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
-    /// a virtual destination register with physical source register.
-    bool isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
-                                    MachineBasicBlock *CopyMBB,
-                                    LiveInterval &DstInt, LiveInterval &SrcInt);
-
-    /// isWinToJoinVRWithDstPhysReg - Return true if it's worth while to join a
-    /// copy from a virtual source register to a physical destination register.
-    bool isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
-                                    MachineBasicBlock *CopyMBB,
-                                    LiveInterval &DstInt, LiveInterval &SrcInt);
-
     /// isWinToJoinCrossClass - Return true if it's profitable to coalesce
     /// two virtual registers from different register classes.
     bool isWinToJoinCrossClass(unsigned SrcReg,
@@ -186,42 +153,12 @@
                                const TargetRegisterClass *DstRC,
                                const TargetRegisterClass *NewRC);
 
-    /// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
-    /// register with a physical register, check if any of the virtual register
-    /// operand is a sub-register use or def. If so, make sure it won't result
-    /// in an illegal extract_subreg or insert_subreg instruction.
-    bool HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
-                                     unsigned VirtReg, unsigned PhysReg);
-
-    /// CanJoinExtractSubRegToPhysReg - Return true if it's possible to coalesce
-    /// an extract_subreg where dst is a physical register, e.g.
-    /// cl = EXTRACT_SUBREG reg1024, 1
-    bool CanJoinExtractSubRegToPhysReg(unsigned DstReg, unsigned SrcReg,
-                                       unsigned SubIdx, unsigned &RealDstReg);
-
-    /// CanJoinInsertSubRegToPhysReg - Return true if it's possible to coalesce
-    /// an insert_subreg where src is a physical register, e.g.
-    /// reg1024 = INSERT_SUBREG reg1024, c1, 0
-    bool CanJoinInsertSubRegToPhysReg(unsigned DstReg, unsigned SrcReg,
-                                      unsigned SubIdx, unsigned &RealDstReg);
-
-    /// ValueLiveAt - Return true if the LiveRange pointed to by the given
-    /// iterator, or any subsequent range with the same value number,
-    /// is live at the given point.
-    bool ValueLiveAt(LiveInterval::iterator LRItr, LiveInterval::iterator LREnd, 
-                     SlotIndex defPoint) const;                                  
-
-    /// RangeIsDefinedByCopy - Return true if the specified live range of the
-    /// specified live interval is defined by a coalescable copy.
-    bool RangeIsDefinedByCopy(LiveInterval &li, LiveRange *LR,
-                              CoalescerPair &CP);
-
     /// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
     /// update the subregister number if it is not zero. If DstReg is a
     /// physical register and the existing subregister number of the def / use
     /// being updated is not zero, make sure to set it to the correct physical
     /// subregister.
-    void UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg, unsigned SubIdx);
+    void UpdateRegDefsUses(const CoalescerPair &CP);
 
     /// ShortenDeadCopyLiveRange - Shorten a live range defined by a dead copy.
     /// Return true if live interval is removed.
@@ -238,6 +175,10 @@
     /// it as well.
     bool RemoveDeadDef(LiveInterval &li, MachineInstr *DefMI);
 
+    /// RemoveCopyFlag - If DstReg is no longer defined by CopyMI, clear the
+    /// VNInfo copy flag for DstReg and all aliases.
+    void RemoveCopyFlag(unsigned DstReg, const MachineInstr *CopyMI);
+
     /// lastRegisterUse - Returns the last use of the specific register between
     /// cycles Start and End or NULL if there are no uses.
     MachineOperand *lastRegisterUse(SlotIndex Start, SlotIndex End,

Modified: llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp Fri Jul  2 04:57:13 2010
@@ -205,15 +205,15 @@
   for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
        AI != E; ++AI) {
     const Type *Ty = AI->getType();
-    // StructType can't be cast, but is a legal argument type, so we have
+    // Aggregate types can't be cast, but are legal argument types, so we have
     // to handle them differently. We use an extract/insert pair as a
     // lightweight method to achieve the same goal.
-    if (isa<StructType>(Ty)) {
-      Instruction *EI = ExtractValueInst::Create(AI, 0, "", AfterAllocaInsertPt);
+    if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+      Instruction *EI = ExtractValueInst::Create(AI, 0, "",AfterAllocaInsertPt);
       Instruction *NI = InsertValueInst::Create(AI, EI, 0);
       NI->insertAfter(EI);
       AI->replaceAllUsesWith(NI);
-      // Set the struct operand of the instructions back to the AllocaInst.
+      // Set the operand of the instructions back to the AllocaInst.
       EI->setOperand(0, AI);
       NI->setOperand(0, AI);
     } else {
@@ -339,7 +339,7 @@
     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
       if (CallInst *CI = dyn_cast<CallInst>(I)) {
         if (CI->getCalledFunction() == SelectorFn) {
-          if (!PersonalityFn) PersonalityFn = CI->getOperand(2);
+          if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
           EH_Selectors.push_back(CI);
         } else if (CI->getCalledFunction() == ExceptionFn) {
           EH_Exceptions.push_back(CI);

Modified: llvm/branches/wendling/eh/lib/CodeGen/SlotIndexes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SlotIndexes.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SlotIndexes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SlotIndexes.cpp Fri Jul  2 04:57:13 2010
@@ -213,9 +213,11 @@
 
 // Print a SlotIndex to a raw_ostream.
 void SlotIndex::print(raw_ostream &os) const {
-  os << getIndex();
+  os << entry().getIndex();
   if (isPHI())
     os << "*";
+  else
+    os << "LudS"[getSlot()];
 }
 
 // Dump a SlotIndex to stderr.

Modified: llvm/branches/wendling/eh/lib/CodeGen/Spiller.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/Spiller.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/Spiller.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/Spiller.cpp Fri Jul  2 04:57:13 2010
@@ -19,13 +19,14 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 #include <set>
 
 using namespace llvm;
 
 namespace {
-  enum SpillerName { trivial, standard, splitting };
+  enum SpillerName { trivial, standard, splitting, inline_ };
 }
 
 static cl::opt<SpillerName>
@@ -35,6 +36,7 @@
            cl::values(clEnumVal(trivial,   "trivial spiller"),
                       clEnumVal(standard,  "default spiller"),
                       clEnumVal(splitting, "splitting spiller"),
+                      clEnumValN(inline_,  "inline", "inline spiller"),
                       clEnumValEnd),
            cl::init(standard));
 
@@ -67,7 +69,8 @@
   /// Add spill ranges for every use/def of the live interval, inserting loads
   /// immediately before each use, and stores after each def. No folding or
   /// remat is attempted.
-  std::vector<LiveInterval*> trivialSpillEverywhere(LiveInterval *li) {
+  void trivialSpillEverywhere(LiveInterval *li,
+                              std::vector<LiveInterval*> &newIntervals) {
     DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");
 
     assert(li->weight != HUGE_VALF &&
@@ -78,8 +81,6 @@
 
     DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");
 
-    std::vector<LiveInterval*> added;
-    
     const TargetRegisterClass *trc = mri->getRegClass(li->reg);
     unsigned ss = vrm->assignVirt2StackSlot(li->reg);
 
@@ -136,10 +137,10 @@
         MachineInstr *loadInstr(prior(miItr));
         SlotIndex loadIndex =
           lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
+        vrm->addSpillSlotUse(ss, loadInstr);
         SlotIndex endIndex = loadIndex.getNextIndex();
         VNInfo *loadVNI =
           newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
-        loadVNI->addKill(endIndex);
         newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
       }
 
@@ -150,17 +151,15 @@
         MachineInstr *storeInstr(llvm::next(miItr));
         SlotIndex storeIndex =
           lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
+        vrm->addSpillSlotUse(ss, storeInstr);
         SlotIndex beginIndex = storeIndex.getPrevIndex();
         VNInfo *storeVNI =
           newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
-        storeVNI->addKill(storeIndex);
         newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
       }
 
-      added.push_back(newLI);
+      newIntervals.push_back(newLI);
     }
-
-    return added;
   }
 };
 
@@ -176,11 +175,12 @@
   TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
     : SpillerBase(mf, lis, vrm) {}
 
-  std::vector<LiveInterval*> spill(LiveInterval *li,
-                                   SmallVectorImpl<LiveInterval*> &spillIs,
-                                   SlotIndex*) {
+  void spill(LiveInterval *li,
+             std::vector<LiveInterval*> &newIntervals,
+             SmallVectorImpl<LiveInterval*> &,
+             SlotIndex*) {
     // Ignore spillIs - we don't use it.
-    return trivialSpillEverywhere(li);
+    trivialSpillEverywhere(li, newIntervals);
   }
 };
 
@@ -200,10 +200,13 @@
     : lis(lis), loopInfo(loopInfo), vrm(vrm) {}
 
   /// Falls back on LiveIntervals::addIntervalsForSpills.
-  std::vector<LiveInterval*> spill(LiveInterval *li,
-                                   SmallVectorImpl<LiveInterval*> &spillIs,
-                                   SlotIndex*) {
-    return lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
+  void spill(LiveInterval *li,
+             std::vector<LiveInterval*> &newIntervals,
+             SmallVectorImpl<LiveInterval*> &spillIs,
+             SlotIndex*) {
+    std::vector<LiveInterval*> added =
+      lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
+    newIntervals.insert(newIntervals.end(), added.begin(), added.end());
   }
 };
 
@@ -226,15 +229,14 @@
     tri = mf->getTarget().getRegisterInfo();
   }
 
-  std::vector<LiveInterval*> spill(LiveInterval *li,
-                                   SmallVectorImpl<LiveInterval*> &spillIs,
-                                   SlotIndex *earliestStart) {
-    
-    if (worthTryingToSplit(li)) {
-      return tryVNISplit(li, earliestStart);
-    }
-    // else
-    return StandardSpiller::spill(li, spillIs, earliestStart);
+  void spill(LiveInterval *li,
+             std::vector<LiveInterval*> &newIntervals,
+             SmallVectorImpl<LiveInterval*> &spillIs,
+             SlotIndex *earliestStart) {
+    if (worthTryingToSplit(li))
+      tryVNISplit(li, earliestStart);
+    else
+      StandardSpiller::spill(li, newIntervals, spillIs, earliestStart);
   }
 
 private:
@@ -263,8 +265,8 @@
          vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
       VNInfo *vni = *vniItr;
       
-      // Skip unused VNIs, or VNIs with no kills.
-      if (vni->isUnused() || vni->kills.empty())
+      // Skip unused VNIs.
+      if (vni->isUnused())
         continue;
 
       DEBUG(dbgs() << "  Extracted Val #" << vni->id << " as ");
@@ -302,9 +304,8 @@
   /// Extract the given value number from the interval.
   LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
     assert(vni->isDefAccurate() || vni->isPHIDef());
-    assert(!vni->kills.empty());
 
-    // Create a new vreg and live interval, copy VNI kills & ranges over.                                                                                                                                                     
+    // Create a new vreg and live interval, copy VNI ranges over.
     const TargetRegisterClass *trc = mri->getRegClass(li->reg);
     unsigned newVReg = mri->createVirtualRegister(trc);
     vrm->grow();
@@ -344,7 +345,6 @@
       VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
                                            0, false, lis->getVNInfoAllocator());
       phiDefVNI->setIsPHIDef(true);
-      phiDefVNI->addKill(copyIdx.getDefIndex());
       li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
       LiveRange *oldPHIDefRange =
         newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));
@@ -397,20 +397,9 @@
         copyMI->addRegisterKilled(li->reg, tri);
         LiveRange *origUseRange =
           li->getLiveRangeContaining(newVNI->def.getUseIndex());
-        VNInfo *origUseVNI = origUseRange->valno;
         origUseRange->end = copyIdx.getDefIndex();
-        bool updatedKills = false;
-        for (unsigned k = 0; k < origUseVNI->kills.size(); ++k) {
-          if (origUseVNI->kills[k] == defIdx.getDefIndex()) {
-            origUseVNI->kills[k] = copyIdx.getDefIndex();
-            updatedKills = true;
-            break;
-          }
-        }
-        assert(updatedKills && "Failed to update VNI kill list.");
         VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                               true, lis->getVNInfoAllocator());
-        copyVNI->addKill(defIdx.getDefIndex());
         LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
         newLI->addRange(copyRange);
       }    
@@ -470,56 +459,46 @@
         VNInfo *copyVNI =
           newLI->getNextValue(useIdx.getDefIndex(), 0, true,
                               lis->getVNInfoAllocator());
-        copyVNI->addKill(copyIdx.getDefIndex());
         LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
         newLI->addRange(copyRange);
       }
     }
-    
-    // Iterate over any PHI kills - we'll need to insert new copies for them.
-    for (VNInfo::KillSet::iterator
-         killItr = newVNI->kills.begin(), killEnd = newVNI->kills.end();
-         killItr != killEnd; ++killItr) {
-      SlotIndex killIdx(*killItr);
-      if (killItr->isPHI()) {
-        MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
-        LiveRange *oldKillRange =
-          newLI->getLiveRangeContaining(killIdx);
 
-        assert(oldKillRange != 0 && "No kill range?");
-
-        tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
-                          li->reg, newVReg, trc, trc,
-                          DebugLoc());
-        MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
-        copyMI->addRegisterKilled(newVReg, tri);
-        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
-
-        // Save the current end. We may need it to add a new range if the
-        // current range runs of the end of the MBB.
-        SlotIndex newKillRangeEnd = oldKillRange->end;
-        oldKillRange->end = copyIdx.getDefIndex();
-
-        if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
-          assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
-                 "PHI kill range doesn't reach kill-block end. Not sane.");
-          newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
-                                    newKillRangeEnd, newVNI));
-        }
+    // Iterate over any PHI kills - we'll need to insert new copies for them.
+    for (LiveInterval::iterator LRI = newLI->begin(), LRE = newLI->end();
+         LRI != LRE; ++LRI) {
+      if (LRI->valno != newVNI || LRI->end.isPHI())
+        continue;
+      SlotIndex killIdx = LRI->end;
+      MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
 
-        *killItr = oldKillRange->end;
-        VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
-                                              copyMI, true,
-                                              lis->getVNInfoAllocator());
-        newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB));
-        newKillVNI->setHasPHIKill(true);
-        li->addRange(LiveRange(copyIdx.getDefIndex(),
-                               lis->getMBBEndIdx(killMBB),
-                               newKillVNI));
-      }
+      tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
+                        li->reg, newVReg, trc, trc,
+                        DebugLoc());
+      MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
+      copyMI->addRegisterKilled(newVReg, tri);
+      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
 
+      // Save the current end. We may need it to add a new range if the
+      // current range runs of the end of the MBB.
+      SlotIndex newKillRangeEnd = LRI->end;
+      LRI->end = copyIdx.getDefIndex();
+
+      if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
+        assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
+               "PHI kill range doesn't reach kill-block end. Not sane.");
+        newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
+                                  newKillRangeEnd, newVNI));
+      }
+
+      VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
+                                            copyMI, true,
+                                            lis->getVNInfoAllocator());
+      newKillVNI->setHasPHIKill(true);
+      li->addRange(LiveRange(copyIdx.getDefIndex(),
+                             lis->getMBBEndIdx(killMBB),
+                             newKillVNI));
     }
-
     newVNI->setHasPHIKill(false);
 
     return newLI;
@@ -530,6 +509,13 @@
 } // end anonymous namespace
 
 
+namespace llvm {
+Spiller *createInlineSpiller(MachineFunction*,
+                             LiveIntervals*,
+                             const MachineLoopInfo*,
+                             VirtRegMap*);
+}
+
 llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
                                    const MachineLoopInfo *loopInfo,
                                    VirtRegMap *vrm) {
@@ -538,5 +524,6 @@
   case trivial: return new TrivialSpiller(mf, lis, vrm);
   case standard: return new StandardSpiller(lis, loopInfo, vrm);
   case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm);
+  case inline_: return createInlineSpiller(mf, lis, loopInfo, vrm);
   }
 }

Modified: llvm/branches/wendling/eh/lib/CodeGen/Spiller.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/Spiller.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/Spiller.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/Spiller.h Fri Jul  2 04:57:13 2010
@@ -33,11 +33,19 @@
   public:
     virtual ~Spiller() = 0;
 
-    /// Spill the given live range. The method used will depend on the Spiller
-    /// implementation selected.
-    virtual std::vector<LiveInterval*> spill(LiveInterval *li,
-					     SmallVectorImpl<LiveInterval*> &spillIs,
-                                             SlotIndex *earliestIndex = 0) = 0;
+    /// spill - Spill the given live interval. The method used will depend on
+    /// the Spiller implementation selected.
+    ///
+    /// @param li            The live interval to be spilled.
+    /// @param spillIs       A list of intervals that are about to be spilled,
+    ///                      and so cannot be used for remat etc.
+    /// @param newIntervals  The newly created intervals will be appended here.
+    /// @param earliestIndex The earliest point for splitting. (OK, it's another
+    ///                      pointer to the allocator guts).
+    virtual void spill(LiveInterval *li,
+                       std::vector<LiveInterval*> &newIntervals,
+                       SmallVectorImpl<LiveInterval*> &spillIs,
+                       SlotIndex *earliestIndex = 0) = 0;
 
   };
 

Modified: llvm/branches/wendling/eh/lib/CodeGen/StrongPHIElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/StrongPHIElimination.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/StrongPHIElimination.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/StrongPHIElimination.cpp Fri Jul  2 04:57:13 2010
@@ -830,9 +830,6 @@
         LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
         VNInfo* FirstVN = *Int.vni_begin();
         FirstVN->setHasPHIKill(false);
-        if (I->getOperand(i).isKill())
-          FirstVN->addKill(LI.getInstructionIndex(I).getUseIndex());
-        
         LiveRange LR (LI.getMBBStartIdx(I->getParent()),
                       LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
                       FirstVN);

Modified: llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp Fri Jul  2 04:57:13 2010
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/ADT/SmallVector.h"
@@ -27,6 +28,27 @@
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
+/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
+/// after it, replacing it with an unconditional branch to NewDest.
+void
+TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+                                             MachineBasicBlock *NewDest) const {
+  MachineBasicBlock *MBB = Tail->getParent();
+
+  // Remove all the old successors of MBB from the CFG.
+  while (!MBB->succ_empty())
+    MBB->removeSuccessor(MBB->succ_begin());
+
+  // Remove all the dead instructions from the end of MBB.
+  MBB->erase(Tail, MBB->end());
+
+  // If MBB isn't immediately before MBB, insert a branch to it.
+  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
+    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
+                 Tail->getDebugLoc());
+  MBB->addSuccessor(NewDest);
+}
+
 // commuteInstruction - The default implementation of this method just exchanges
 // the two operands returned by findCommutedOpIndices.
 MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
@@ -316,6 +338,28 @@
   return true;
 }
 
+/// isSchedulingBoundary - Test if the given instruction should be
+/// considered a scheduling boundary. This primarily includes labels
+/// and terminators.
+bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
+                                               const MachineBasicBlock *MBB,
+                                               const MachineFunction &MF) const{
+  // Terminators and labels can't be scheduled around.
+  if (MI->getDesc().isTerminator() || MI->isLabel())
+    return true;
+
+  // Don't attempt to schedule around any instruction that defines
+  // a stack-oriented pointer, as it's unlikely to be profitable. This
+  // saves compile time, because it doesn't require every single
+  // stack slot reference to depend on the instruction that does the
+  // modification.
+  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
+  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
+    return true;
+
+  return false;
+}
+
 // Default implementation of CreateTargetPostRAHazardRecognizer.
 ScheduleHazardRecognizer *TargetInstrInfoImpl::
 CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {

Modified: llvm/branches/wendling/eh/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TargetLoweringObjectFileImpl.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TargetLoweringObjectFileImpl.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TargetLoweringObjectFileImpl.cpp Fri Jul  2 04:57:13 2010
@@ -825,32 +825,32 @@
   TargetLoweringObjectFile::Initialize(Ctx, TM);
   TextSection =
     getContext().getCOFFSection(".text",
-                                MCSectionCOFF::IMAGE_SCN_CNT_CODE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_EXECUTE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_CNT_CODE |
+                                COFF::IMAGE_SCN_MEM_EXECUTE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getText());
   DataSection =
     getContext().getCOFFSection(".data",
-                                MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ |
-                                MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+                                COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                                COFF::IMAGE_SCN_MEM_READ |
+                                COFF::IMAGE_SCN_MEM_WRITE,
                                 SectionKind::getDataRel());
   ReadOnlySection =
     getContext().getCOFFSection(".rdata",
-                                MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getReadOnly());
   StaticCtorSection =
     getContext().getCOFFSection(".ctors",
-                                MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ |
-                                MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+                                COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                                COFF::IMAGE_SCN_MEM_READ |
+                                COFF::IMAGE_SCN_MEM_WRITE,
                                 SectionKind::getDataRel());
   StaticDtorSection =
     getContext().getCOFFSection(".dtors",
-                                MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ |
-                                MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+                                COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                                COFF::IMAGE_SCN_MEM_READ |
+                                COFF::IMAGE_SCN_MEM_WRITE,
                                 SectionKind::getDataRel());
 
   // FIXME: We're emitting LSDA info into a readonly section on COFF, even
@@ -859,76 +859,76 @@
   // adjusted or this should be a data section.
   LSDASection =
     getContext().getCOFFSection(".gcc_except_table",
-                                MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getReadOnly());
   EHFrameSection =
     getContext().getCOFFSection(".eh_frame",
-                                MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ |
-                                MCSectionCOFF::IMAGE_SCN_MEM_WRITE,
+                                COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+                                COFF::IMAGE_SCN_MEM_READ |
+                                COFF::IMAGE_SCN_MEM_WRITE,
                                 SectionKind::getDataRel());
 
   // Debug info.
   DwarfAbbrevSection =
     getContext().getCOFFSection(".debug_abbrev",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfInfoSection =
     getContext().getCOFFSection(".debug_info",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfLineSection =
     getContext().getCOFFSection(".debug_line",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfFrameSection =
     getContext().getCOFFSection(".debug_frame",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfPubNamesSection =
     getContext().getCOFFSection(".debug_pubnames",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfPubTypesSection =
     getContext().getCOFFSection(".debug_pubtypes",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfStrSection =
     getContext().getCOFFSection(".debug_str",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfLocSection =
     getContext().getCOFFSection(".debug_loc",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfARangesSection =
     getContext().getCOFFSection(".debug_aranges",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfRangesSection =
     getContext().getCOFFSection(".debug_ranges",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
   DwarfMacroInfoSection =
     getContext().getCOFFSection(".debug_macinfo",
-                                MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE |
-                                MCSectionCOFF::IMAGE_SCN_MEM_READ,
+                                COFF::IMAGE_SCN_MEM_DISCARDABLE |
+                                COFF::IMAGE_SCN_MEM_READ,
                                 SectionKind::getMetadata());
 
   DrectveSection =
     getContext().getCOFFSection(".drectve",
-                                MCSectionCOFF::IMAGE_SCN_LNK_INFO,
+                                COFF::IMAGE_SCN_LNK_INFO,
                                 SectionKind::getMetadata());
 }
 
@@ -938,25 +938,25 @@
 
   if (!K.isMetadata())
     Flags |=
-      MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE;
+      COFF::IMAGE_SCN_MEM_DISCARDABLE;
   else if (K.isText())
     Flags |=
-      MCSectionCOFF::IMAGE_SCN_MEM_EXECUTE |
-      MCSectionCOFF::IMAGE_SCN_CNT_CODE;
+      COFF::IMAGE_SCN_MEM_EXECUTE |
+      COFF::IMAGE_SCN_CNT_CODE;
   else if (K.isBSS ())
     Flags |=
-      MCSectionCOFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
-      MCSectionCOFF::IMAGE_SCN_MEM_READ |
-      MCSectionCOFF::IMAGE_SCN_MEM_WRITE;
+      COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA |
+      COFF::IMAGE_SCN_MEM_READ |
+      COFF::IMAGE_SCN_MEM_WRITE;
   else if (K.isReadOnly())
     Flags |=
-      MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-      MCSectionCOFF::IMAGE_SCN_MEM_READ;
+      COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+      COFF::IMAGE_SCN_MEM_READ;
   else if (K.isWriteable())
     Flags |=
-      MCSectionCOFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
-      MCSectionCOFF::IMAGE_SCN_MEM_READ |
-      MCSectionCOFF::IMAGE_SCN_MEM_WRITE;
+      COFF::IMAGE_SCN_CNT_INITIALIZED_DATA |
+      COFF::IMAGE_SCN_MEM_READ |
+      COFF::IMAGE_SCN_MEM_WRITE;
 
   return Flags;
 }
@@ -995,10 +995,10 @@
 
     unsigned Characteristics = getCOFFSectionFlags(Kind);
 
-    Characteristics |= MCSectionCOFF::IMAGE_SCN_LNK_COMDAT;
+    Characteristics |= COFF::IMAGE_SCN_LNK_COMDAT;
 
     return getContext().getCOFFSection(Name.str(), Characteristics,
-                          MCSectionCOFF::IMAGE_COMDAT_SELECT_EXACT_MATCH, Kind);
+                          COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH, Kind);
   }
 
   if (Kind.isText())

Modified: llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp Fri Jul  2 04:57:13 2010
@@ -898,6 +898,110 @@
       }
     }
   }
+
+  // If this is an instruction with a load folded into it, try unfolding
+  // the load, e.g. avoid this:
+  //   movq %rdx, %rcx
+  //   addq (%rax), %rcx
+  // in favor of this:
+  //   movq (%rax), %rcx
+  //   addq %rdx, %rcx
+  // because it's preferable to schedule a load than a register copy.
+  if (TID.mayLoad() && !regBKilled) {
+    // Determine if a load can be unfolded.
+    unsigned LoadRegIndex;
+    unsigned NewOpc =
+      TII->getOpcodeAfterMemoryUnfold(mi->getOpcode(),
+                                      /*UnfoldLoad=*/true,
+                                      /*UnfoldStore=*/false,
+                                      &LoadRegIndex);
+    if (NewOpc != 0) {
+      const TargetInstrDesc &UnfoldTID = TII->get(NewOpc);
+      if (UnfoldTID.getNumDefs() == 1) {
+        MachineFunction &MF = *mbbi->getParent();
+
+        // Unfold the load.
+        DEBUG(dbgs() << "2addr:   UNFOLDING: " << *mi);
+        const TargetRegisterClass *RC =
+          UnfoldTID.OpInfo[LoadRegIndex].getRegClass(TRI);
+        unsigned Reg = MRI->createVirtualRegister(RC);
+        SmallVector<MachineInstr *, 2> NewMIs;
+        bool Success =
+          TII->unfoldMemoryOperand(MF, mi, Reg,
+                                   /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
+                                   NewMIs);
+        (void)Success;
+        assert(Success &&
+               "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
+               "succeeded!");
+        assert(NewMIs.size() == 2 &&
+               "Unfolded a load into multiple instructions!");
+        // The load was previously folded, so this is the only use.
+        NewMIs[1]->addRegisterKilled(Reg, TRI);
+
+        // Tentatively insert the instructions into the block so that they
+        // look "normal" to the transformation logic.
+        mbbi->insert(mi, NewMIs[0]);
+        mbbi->insert(mi, NewMIs[1]);
+
+        DEBUG(dbgs() << "2addr:    NEW LOAD: " << *NewMIs[0]
+                     << "2addr:    NEW INST: " << *NewMIs[1]);
+
+        // Transform the instruction, now that it no longer has a load.
+        unsigned NewDstIdx = NewMIs[1]->findRegisterDefOperandIdx(regA);
+        unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB);
+        MachineBasicBlock::iterator NewMI = NewMIs[1];
+        bool TransformSuccess =
+          TryInstructionTransform(NewMI, mi, mbbi,
+                                  NewSrcIdx, NewDstIdx, Dist);
+        if (TransformSuccess ||
+            NewMIs[1]->getOperand(NewSrcIdx).isKill()) {
+          // Success, or at least we made an improvement. Keep the unfolded
+          // instructions and discard the original.
+          if (LV) {
+            for (unsigned i = 0, e = mi->getNumOperands(); i != e; ++i) {
+              MachineOperand &MO = mi->getOperand(i);
+              if (MO.isReg() && MO.getReg() != 0 &&
+                  TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
+                if (MO.isUse()) {
+                  if (MO.isKill()) {
+                    if (NewMIs[0]->killsRegister(MO.getReg()))
+                      LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[0]);
+                    else {
+                      assert(NewMIs[1]->killsRegister(MO.getReg()) &&
+                             "Kill missing after load unfold!");
+                      LV->replaceKillInstruction(MO.getReg(), mi, NewMIs[1]);
+                    }
+                  }
+                } else if (LV->removeVirtualRegisterDead(MO.getReg(), mi)) {
+                  if (NewMIs[1]->registerDefIsDead(MO.getReg()))
+                    LV->addVirtualRegisterDead(MO.getReg(), NewMIs[1]);
+                  else {
+                    assert(NewMIs[0]->registerDefIsDead(MO.getReg()) &&
+                           "Dead flag missing after load unfold!");
+                    LV->addVirtualRegisterDead(MO.getReg(), NewMIs[0]);
+                  }
+                }
+              }
+            }
+            LV->addVirtualRegisterKilled(Reg, NewMIs[1]);
+          }
+          mi->eraseFromParent();
+          mi = NewMIs[1];
+          if (TransformSuccess)
+            return true;
+        } else {
+          // Transforming didn't eliminate the tie and didn't lead to an
+          // improvement. Clean up the unfolded instructions and keep the
+          // original.
+          DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
+          NewMIs[0]->eraseFromParent();
+          NewMIs[1]->eraseFromParent();
+        }
+      }
+    }
+  }
+
   return false;
 }
 
@@ -1397,8 +1501,11 @@
       MI->eraseFromParent();
     }
 
-    // Try coalescing some EXTRACT_SUBREG instructions.
-    CoalesceExtSubRegs(RealSrcs, DstReg);
+    // Try coalescing some EXTRACT_SUBREG instructions. This can create
+    // INSERT_SUBREG instructions that must have <undef> flags added by
+    // LiveIntervalAnalysis, so only run it when LiveVariables is available.
+    if (LV)
+      CoalesceExtSubRegs(RealSrcs, DstReg);
   }
 
   RegSequences.clear();

Modified: llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/VirtRegRewriter.cpp Fri Jul  2 04:57:13 2010
@@ -2033,6 +2033,18 @@
           CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
             Spills.canClobberPhysReg(PhysReg);
         }
+        // If this is an asm, and PhysReg is used elsewhere as an earlyclobber
+        // operand, we can't also use it as an input.  (Outputs always come
+        // before inputs, so we can stop looking at i.)
+        if (MI.isInlineAsm()) {
+          for (unsigned k=0; k<i; ++k) {
+            MachineOperand &MOk = MI.getOperand(k);
+            if (MOk.isReg() && MOk.getReg()==PhysReg && MOk.isEarlyClobber()) {
+              CanReuse = false;
+              break;
+            }
+          }
+        }
 
         if (CanReuse) {
           // If this stack slot value is already available, reuse it!
@@ -2103,6 +2115,8 @@
         // To avoid this problem, and to avoid doing a load right after a store,
         // we emit a copy from PhysReg into the designated register for this
         // operand.
+        //
+        // This case also applies to an earlyclobber'd PhysReg.
         unsigned DesignatedReg = VRM->getPhys(VirtReg);
         assert(DesignatedReg && "Must map virtreg to physreg!");
 

Modified: llvm/branches/wendling/eh/lib/CompilerDriver/Tool.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CompilerDriver/Tool.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CompilerDriver/Tool.cpp (original)
+++ llvm/branches/wendling/eh/lib/CompilerDriver/Tool.cpp Fri Jul  2 04:57:13 2010
@@ -85,7 +85,8 @@
   StrVector Out;
 
   // HACK: this won't be needed when we'll migrate away from CommandLine.
-  std::stable_sort(Args.begin(), Args.end(), &CompareFirst<unsigned, std::string>);
+  std::stable_sort(Args.begin(), Args.end(),
+                   &CompareFirst<unsigned, std::string>);
   for (ArgsVector::iterator B = Args.begin(), E = Args.end(); B != E; ++B) {
     Out.push_back(B->second);
   }

Modified: llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCAsmStreamer.cpp Fri Jul  2 04:57:13 2010
@@ -275,17 +275,17 @@
   case MCSA_Global: // .globl/.global
     OS << MAI.getGlobalDirective();
     break;
-  case MCSA_Hidden:         OS << ".hidden ";          break;
-  case MCSA_IndirectSymbol: OS << ".indirect_symbol "; break;
-  case MCSA_Internal:       OS << ".internal ";        break;
-  case MCSA_LazyReference:  OS << ".lazy_reference ";  break;
-  case MCSA_Local:          OS << ".local ";           break;
-  case MCSA_NoDeadStrip:    OS << ".no_dead_strip ";   break;
-  case MCSA_PrivateExtern:  OS << ".private_extern ";  break;
-  case MCSA_Protected:      OS << ".protected ";       break;
-  case MCSA_Reference:      OS << ".reference ";       break;
-  case MCSA_Weak:           OS << ".weak ";            break;
-  case MCSA_WeakDefinition: OS << ".weak_definition "; break;
+  case MCSA_Hidden:         OS << "\t.hidden\t";          break;
+  case MCSA_IndirectSymbol: OS << "\t.indirect_symbol\t"; break;
+  case MCSA_Internal:       OS << "\t.internal\t";        break;
+  case MCSA_LazyReference:  OS << "\t.lazy_reference\t";  break;
+  case MCSA_Local:          OS << "\t.local\t";           break;
+  case MCSA_NoDeadStrip:    OS << "\t.no_dead_strip\t";   break;
+  case MCSA_PrivateExtern:  OS << "\t.private_extern\t";  break;
+  case MCSA_Protected:      OS << "\t.protected\t";       break;
+  case MCSA_Reference:      OS << "\t.reference\t";       break;
+  case MCSA_Weak:           OS << "\t.weak\t";            break;
+  case MCSA_WeakDefinition: OS << "\t.weak_definition\t"; break;
       // .weak_reference
   case MCSA_WeakReference:  OS << MAI.getWeakRefDirective(); break;
   }

Modified: llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp Fri Jul  2 04:57:13 2010
@@ -684,12 +684,8 @@
   for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
     // Create dummy fragments to eliminate any empty sections, this simplifies
     // layout.
-    if (it->getFragmentList().empty()) {
-      unsigned ValueSize = 1;
-      if (getBackend().isVirtualSection(it->getSection()))
-        ValueSize = 1;
+    if (it->getFragmentList().empty())
       new MCFillFragment(0, 1, 0, it);
-    }
 
     it->setOrdinal(SectionIndex++);
   }

Modified: llvm/branches/wendling/eh/lib/MC/MCContext.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCContext.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCContext.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCContext.cpp Fri Jul  2 04:57:13 2010
@@ -27,6 +27,10 @@
   MachOUniquingMap = 0;
   ELFUniquingMap = 0;
   COFFUniquingMap = 0;
+
+  SecureLogFile = getenv("AS_SECURE_LOG_FILE");
+  SecureLog = 0;
+  SecureLogUsed = false;
 }
 
 MCContext::~MCContext() {
@@ -37,6 +41,9 @@
   delete (MachOUniqueMapTy*)MachOUniquingMap;
   delete (ELFUniqueMapTy*)ELFUniquingMap;
   delete (COFFUniqueMapTy*)COFFUniquingMap;
+
+  // If the stream for the .secure_log_unique directive was created free it.
+  delete (raw_ostream*)SecureLog;
 }
 
 //===----------------------------------------------------------------------===//

Modified: llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCParser/AsmParser.cpp Fri Jul  2 04:57:13 2010
@@ -24,6 +24,7 @@
 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetAsmParser.h"
 using namespace llvm;
@@ -31,8 +32,8 @@
 
 enum { DEFAULT_ADDRSPACE = 0 };
 
-AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx, MCStreamer &_Out,
-                     const MCAsmInfo &_MAI) 
+AsmParser::AsmParser(const Target &T, SourceMgr &_SM, MCContext &_Ctx,
+                     MCStreamer &_Out, const MCAsmInfo &_MAI)
   : Lexer(_MAI), Ctx(_Ctx), Out(_Out), SrcMgr(_SM), TargetParser(0),
     CurBuffer(0) {
   Lexer.setBuffer(SrcMgr.getMemoryBuffer(CurBuffer));
@@ -43,8 +44,6 @@
   AddDirectiveHandler(".loc", &AsmParser::ParseDirectiveLoc);
 }
 
-
-
 AsmParser::~AsmParser() {
 }
 
@@ -780,6 +779,10 @@
       return ParseDirectiveDarwinDumpOrLoad(IDLoc, /*IsDump=*/true);
     if (IDVal == ".load")
       return ParseDirectiveDarwinDumpOrLoad(IDLoc, /*IsLoad=*/false);
+    if (IDVal == ".secure_log_unique")
+      return ParseDirectiveDarwinSecureLogUnique(IDLoc);
+    if (IDVal == ".secure_log_reset")
+      return ParseDirectiveDarwinSecureLogReset(IDLoc);
 
     // Look up the handler in the handler table, 
     bool(AsmParser::*Handler)(StringRef, SMLoc) = DirectiveMap[IDVal];
@@ -839,7 +842,6 @@
   SMLoc EqualLoc = Lexer.getLoc();
 
   const MCExpr *Value;
-  SMLoc StartLoc = Lexer.getLoc();
   if (ParseExpression(Value))
     return true;
   
@@ -1111,7 +1113,6 @@
     return true;
 
   int64_t FillExpr = 0;
-  bool HasFillExpr = false;
   if (Lexer.isNot(AsmToken::EndOfStatement)) {
     if (Lexer.isNot(AsmToken::Comma))
       return TokError("unexpected token in '.space' directive");
@@ -1120,8 +1121,6 @@
     if (ParseAbsoluteExpression(FillExpr))
       return true;
 
-    HasFillExpr = true;
-
     if (Lexer.isNot(AsmToken::EndOfStatement))
       return TokError("unexpected token in '.space' directive");
   }
@@ -1178,7 +1177,6 @@
 ///  ::= .org expression [ , expression ]
 bool AsmParser::ParseDirectiveOrg() {
   const MCExpr *Offset;
-  SMLoc StartLoc = Lexer.getLoc();
   if (ParseExpression(Offset))
     return true;
 
@@ -1382,7 +1380,6 @@
     return TokError("unexpected token in '.desc' directive");
   Lex();
 
-  SMLoc DescLoc = Lexer.getLoc();
   int64_t DescValue;
   if (ParseAbsoluteExpression(DescValue))
     return true;
@@ -1668,7 +1665,6 @@
   Lex();
 
   const MCExpr *Value;
-  SMLoc StartLoc = Lexer.getLoc();
   if (ParseExpression(Value))
     return true;
 
@@ -1735,6 +1731,64 @@
   return false;
 }
 
+/// ParseDirectiveDarwinSecureLogUnique
+///  ::= .secure_log_unique "log message"
+bool AsmParser::ParseDirectiveDarwinSecureLogUnique(SMLoc IDLoc) {
+  std::string LogMessage;
+
+  if (Lexer.isNot(AsmToken::String))
+    LogMessage = "";
+  else{
+    LogMessage = getTok().getString();
+    Lex();
+  }
+
+  if (Lexer.isNot(AsmToken::EndOfStatement))
+    return TokError("unexpected token in '.secure_log_unique' directive");
+  
+  if (getContext().getSecureLogUsed() != false)
+    return Error(IDLoc, ".secure_log_unique specified multiple times");
+
+  char *SecureLogFile = getContext().getSecureLogFile();
+  if (SecureLogFile == NULL)
+    return Error(IDLoc, ".secure_log_unique used but AS_SECURE_LOG_FILE "
+                 "environment variable unset.");
+
+  raw_ostream *OS = getContext().getSecureLog();
+  if (OS == NULL) {
+    std::string Err;
+    OS = new raw_fd_ostream(SecureLogFile, Err, raw_fd_ostream::F_Append);
+    if (!Err.empty()) {
+       delete OS;
+       return Error(IDLoc, Twine("can't open secure log file: ") +
+                    SecureLogFile + " (" + Err + ")");
+    }
+    getContext().setSecureLog(OS);
+  }
+
+  int CurBuf = SrcMgr.FindBufferContainingLoc(IDLoc);
+  *OS << SrcMgr.getBufferInfo(CurBuf).Buffer->getBufferIdentifier() << ":"
+      << SrcMgr.FindLineNumber(IDLoc, CurBuf) << ":"
+      << LogMessage + "\n";
+
+  getContext().setSecureLogUsed(true);
+
+  return false;
+}
+
+/// ParseDirectiveDarwinSecureLogReset
+///  ::= .secure_log_reset
+bool AsmParser::ParseDirectiveDarwinSecureLogReset(SMLoc IDLoc) {
+  if (Lexer.isNot(AsmToken::EndOfStatement))
+    return TokError("unexpected token in '.secure_log_reset' directive");
+  
+  Lex();
+
+  getContext().setSecureLogUsed(false);
+
+  return false;
+}
+
 /// ParseDirectiveIf
 /// ::= .if expression
 bool AsmParser::ParseDirectiveIf(SMLoc DirectiveLoc) {
@@ -1882,7 +1936,7 @@
   }
 
   if (Lexer.isNot(AsmToken::EndOfStatement))
-    return TokError("unexpected token in '.file' directive");
+    return TokError("unexpected token in '.line' directive");
 
   return false;
 }

Modified: llvm/branches/wendling/eh/lib/MC/MCSectionCOFF.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCSectionCOFF.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCSectionCOFF.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCSectionCOFF.cpp Fri Jul  2 04:57:13 2010
@@ -44,28 +44,28 @@
     OS << 'w';
   else
     OS << 'r';
-  if (getCharacteristics() & MCSectionCOFF::IMAGE_SCN_MEM_DISCARDABLE)
+  if (getCharacteristics() & COFF::IMAGE_SCN_MEM_DISCARDABLE)
     OS << 'n';
   OS << "\"\n";
   
-  if (getCharacteristics() & MCSectionCOFF::IMAGE_SCN_LNK_COMDAT) {
+  if (getCharacteristics() & COFF::IMAGE_SCN_LNK_COMDAT) {
     switch (Selection) {
-      case IMAGE_COMDAT_SELECT_NODUPLICATES:
+      case COFF::IMAGE_COMDAT_SELECT_NODUPLICATES:
         OS << "\t.linkonce one_only\n";
         break;
-      case IMAGE_COMDAT_SELECT_ANY:
+      case COFF::IMAGE_COMDAT_SELECT_ANY:
         OS << "\t.linkonce discard\n";
         break;
-      case IMAGE_COMDAT_SELECT_SAME_SIZE:
+      case COFF::IMAGE_COMDAT_SELECT_SAME_SIZE:
         OS << "\t.linkonce same_size\n";
         break;
-      case IMAGE_COMDAT_SELECT_EXACT_MATCH:
+      case COFF::IMAGE_COMDAT_SELECT_EXACT_MATCH:
         OS << "\t.linkonce same_contents\n";
         break;
     //NOTE: as of binutils 2.20, there is no way to specifiy select largest
     //      with the .linkonce directive. For now, we treat it as an invalid
     //      comdat selection value.
-      case IMAGE_COMDAT_SELECT_LARGEST:
+      case COFF::IMAGE_COMDAT_SELECT_LARGEST:
     //  OS << "\t.linkonce largest\n";
     //  break;
       default:

Modified: llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp Fri Jul  2 04:57:13 2010
@@ -747,7 +747,6 @@
            !Is64Bit &&
            "Should only be called with a 32-bit TLVP relocation!");
 
-    // If this is a subtraction then we're pcrel.
     unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
     uint32_t Value = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
     unsigned IsPCRel = 0;
@@ -761,7 +760,14 @@
     // between the picbase and the next address.  For 32-bit static the addend
     // is zero.
     if (Target.getSymB()) {
+      // If this is a subtraction then we're pcrel.
+      uint32_t FixupAddress =
+      Layout.getFragmentAddress(Fragment) + Fixup.getOffset();
+      MCSymbolData *SD_B = &Asm.getSymbolData(Target.getSymB()->getSymbol());
       IsPCRel = 1;
+      FixedValue = (FixupAddress - Layout.getSymbolAddress(SD_B) +
+                    Target.getConstant());
+      FixedValue += 1 << Log2Size;
     } else {
       FixedValue = 0;
     }
@@ -817,7 +823,6 @@
 
     // See <reloc.h>.
     uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    uint32_t Value = 0;
     unsigned Index = 0;
     unsigned IsExtern = 0;
     unsigned Type = 0;
@@ -828,7 +833,6 @@
       // FIXME: Currently, these are never generated (see code below). I cannot
       // find a case where they are actually emitted.
       Type = RIT_Vanilla;
-      Value = 0;
     } else {
       // Check whether we need an external or internal relocation.
       if (doesSymbolRequireExternRelocation(SD)) {
@@ -839,11 +843,9 @@
         // undefined. This occurs with weak definitions, for example.
         if (!SD->Symbol->isUndefined())
           FixedValue -= Layout.getSymbolAddress(SD);
-        Value = 0;
       } else {
         // The index is the section ordinal (1-based).
         Index = SD->getFragment()->getParent()->getOrdinal() + 1;
-        Value = Layout.getSymbolAddress(SD);
       }
 
       Type = RIT_Vanilla;

Modified: llvm/branches/wendling/eh/lib/Support/FoldingSet.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/FoldingSet.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/FoldingSet.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/FoldingSet.cpp Fri Jul  2 04:57:13 2010
@@ -175,6 +175,14 @@
   return Buckets + BucketNum;
 }
 
+/// AllocateBuckets - Allocate initialized bucket memory.
+static void **AllocateBuckets(unsigned NumBuckets) {
+  void **Buckets = static_cast<void**>(calloc(NumBuckets+1, sizeof(void*)));
+  // Set the very last bucket to be a non-null "pointer".
+  Buckets[NumBuckets] = reinterpret_cast<void*>(-1);
+  return Buckets;
+}
+
 //===----------------------------------------------------------------------===//
 // FoldingSetImpl Implementation
 
@@ -182,11 +190,11 @@
   assert(5 < Log2InitSize && Log2InitSize < 32 &&
          "Initial hash table size out of range");
   NumBuckets = 1 << Log2InitSize;
-  Buckets = new void*[NumBuckets+1];
-  clear();
+  Buckets = AllocateBuckets(NumBuckets);
+  NumNodes = 0;
 }
 FoldingSetImpl::~FoldingSetImpl() {
-  delete [] Buckets;
+  free(Buckets);
 }
 void FoldingSetImpl::clear() {
   // Set all but the last bucket to null pointers.
@@ -207,8 +215,8 @@
   NumBuckets <<= 1;
   
   // Clear out new buckets.
-  Buckets = new void*[NumBuckets+1];
-  clear();
+  Buckets = AllocateBuckets(NumBuckets);
+  NumNodes = 0;
 
   // Walk the old buckets, rehashing nodes into their new place.
   FoldingSetNodeID ID;
@@ -227,7 +235,7 @@
     }
   }
   
-  delete[] OldBuckets;
+  free(OldBuckets);
 }
 
 /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,

Modified: llvm/branches/wendling/eh/lib/Support/MemoryBuffer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/MemoryBuffer.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/MemoryBuffer.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/MemoryBuffer.cpp Fri Jul  2 04:57:13 2010
@@ -14,6 +14,7 @@
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MathExtras.h"
 #include "llvm/System/Errno.h"
 #include "llvm/System/Path.h"
 #include "llvm/System/Process.h"
@@ -37,22 +38,7 @@
 // MemoryBuffer implementation itself.
 //===----------------------------------------------------------------------===//
 
-MemoryBuffer::~MemoryBuffer() {
-  if (MustDeleteBuffer)
-    free((void*)BufferStart);
-}
-
-/// initCopyOf - Initialize this source buffer with a copy of the specified
-/// memory range.  We make the copy so that we can null terminate it
-/// successfully.
-void MemoryBuffer::initCopyOf(const char *BufStart, const char *BufEnd) {
-  size_t Size = BufEnd-BufStart;
-  BufferStart = (char *)malloc(Size+1);
-  BufferEnd = BufferStart+Size;
-  memcpy(const_cast<char*>(BufferStart), BufStart, Size);
-  *const_cast<char*>(BufferEnd) = 0;   // Null terminate buffer.
-  MustDeleteBuffer = true;
-}
+MemoryBuffer::~MemoryBuffer() { }
 
 /// init - Initialize this MemoryBuffer as a reference to externally allocated
 /// memory, memory that we know is already null terminated.
@@ -60,27 +46,38 @@
   assert(BufEnd[0] == 0 && "Buffer is not null terminated!");
   BufferStart = BufStart;
   BufferEnd = BufEnd;
-  MustDeleteBuffer = false;
 }
 
 //===----------------------------------------------------------------------===//
 // MemoryBufferMem implementation.
 //===----------------------------------------------------------------------===//
 
+/// CopyStringRef - Copies contents of a StringRef into a block of memory and
+/// null-terminates it.
+static void CopyStringRef(char *Memory, StringRef Data) {
+  memcpy(Memory, Data.data(), Data.size());
+  Memory[Data.size()] = 0; // Null terminate string.
+}
+
+/// GetNamedBuffer - Allocates a new MemoryBuffer with Name copied after it.
+template <typename T>
+static T* GetNamedBuffer(StringRef Buffer, StringRef Name) {
+  char *Mem = static_cast<char*>(operator new(sizeof(T) + Name.size() + 1));
+  CopyStringRef(Mem + sizeof(T), Name);
+  return new (Mem) T(Buffer);
+}
+
 namespace {
+/// MemoryBufferMem - Named MemoryBuffer pointing to a block of memory.
 class MemoryBufferMem : public MemoryBuffer {
-  std::string FileID;
 public:
-  MemoryBufferMem(StringRef InputData, StringRef FID, bool Copy = false)
-  : FileID(FID) {
-    if (!Copy)
-      init(InputData.data(), InputData.data()+InputData.size());
-    else
-      initCopyOf(InputData.data(), InputData.data()+InputData.size());
+  MemoryBufferMem(StringRef InputData) {
+    init(InputData.begin(), InputData.end());
   }
-  
+
   virtual const char *getBufferIdentifier() const {
-    return FileID.c_str();
+     // The name is stored after the class itself.
+    return reinterpret_cast<const char*>(this + 1);
   }
 };
 }
@@ -88,42 +85,55 @@
 /// getMemBuffer - Open the specified memory range as a MemoryBuffer.  Note
 /// that EndPtr[0] must be a null byte and be accessible!
 MemoryBuffer *MemoryBuffer::getMemBuffer(StringRef InputData,
-                                         const char *BufferName) {
-  return new MemoryBufferMem(InputData, BufferName);
+                                         StringRef BufferName) {
+  return GetNamedBuffer<MemoryBufferMem>(InputData, BufferName);
 }
 
 /// getMemBufferCopy - Open the specified memory range as a MemoryBuffer,
 /// copying the contents and taking ownership of it.  This has no requirements
 /// on EndPtr[0].
 MemoryBuffer *MemoryBuffer::getMemBufferCopy(StringRef InputData,
-                                             const char *BufferName) {
-  return new MemoryBufferMem(InputData, BufferName, true);
+                                             StringRef BufferName) {
+  MemoryBuffer *Buf = getNewUninitMemBuffer(InputData.size(), BufferName);
+  if (!Buf) return 0;
+  memcpy(const_cast<char*>(Buf->getBufferStart()), InputData.data(),
+         InputData.size());
+  return Buf;
 }
 
 /// getNewUninitMemBuffer - Allocate a new MemoryBuffer of the specified size
-/// that is completely initialized to zeros.  Note that the caller should
-/// initialize the memory allocated by this method.  The memory is owned by
-/// the MemoryBuffer object.
+/// that is not initialized.  Note that the caller should initialize the
+/// memory allocated by this method.  The memory is owned by the MemoryBuffer
+/// object.
 MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size,
                                                   StringRef BufferName) {
-  char *Buf = (char *)malloc(Size+1);
-  if (!Buf) return 0;
-  Buf[Size] = 0;
-  MemoryBufferMem *SB = new MemoryBufferMem(StringRef(Buf, Size), BufferName);
-  // The memory for this buffer is owned by the MemoryBuffer.
-  SB->MustDeleteBuffer = true;
-  return SB;
+  // Allocate space for the MemoryBuffer, the data and the name. It is important
+  // that MemoryBuffer and data are aligned so PointerIntPair works with them.
+  size_t AlignedStringLen =
+    RoundUpToAlignment(sizeof(MemoryBufferMem) + BufferName.size() + 1,
+                       sizeof(void*)); // TODO: Is sizeof(void*) enough?
+  size_t RealLen = AlignedStringLen + Size + 1;
+  char *Mem = static_cast<char*>(operator new(RealLen, std::nothrow));
+  if (!Mem) return 0;
+
+  // The name is stored after the class itself.
+  CopyStringRef(Mem + sizeof(MemoryBufferMem), BufferName);
+
+  // The buffer begins after the name and must be aligned.
+  char *Buf = Mem + AlignedStringLen;
+  Buf[Size] = 0; // Null terminate buffer.
+
+  return new (Mem) MemoryBufferMem(StringRef(Buf, Size));
 }
 
 /// getNewMemBuffer - Allocate a new MemoryBuffer of the specified size that
 /// is completely initialized to zeros.  Note that the caller should
 /// initialize the memory allocated by this method.  The memory is owned by
 /// the MemoryBuffer object.
-MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size,
-                                            const char *BufferName) {
+MemoryBuffer *MemoryBuffer::getNewMemBuffer(size_t Size, StringRef BufferName) {
   MemoryBuffer *SB = getNewUninitMemBuffer(Size, BufferName);
   if (!SB) return 0;
-  memset(const_cast<char*>(SB->getBufferStart()), 0, Size+1);
+  memset(const_cast<char*>(SB->getBufferStart()), 0, Size);
   return SB;
 }
 
@@ -141,6 +151,15 @@
   return getFile(Filename, ErrStr, FileSize, FileInfo);
 }
 
+MemoryBuffer *MemoryBuffer::getFileOrSTDIN(const char *Filename,
+                                           std::string *ErrStr,
+                                           int64_t FileSize,
+                                           struct stat *FileInfo) {
+  if (strcmp(Filename, "-") == 0)
+    return getSTDIN(ErrStr);
+  return getFile(Filename, ErrStr, FileSize, FileInfo);
+}
+
 //===----------------------------------------------------------------------===//
 // MemoryBuffer::getFile implementation.
 //===----------------------------------------------------------------------===//
@@ -149,18 +168,11 @@
 /// MemoryBufferMMapFile - This represents a file that was mapped in with the
 /// sys::Path::MapInFilePages method.  When destroyed, it calls the
 /// sys::Path::UnMapFilePages method.
-class MemoryBufferMMapFile : public MemoryBuffer {
-  std::string Filename;
+class MemoryBufferMMapFile : public MemoryBufferMem {
 public:
-  MemoryBufferMMapFile(StringRef filename, const char *Pages, uint64_t Size)
-    : Filename(filename) {
-    init(Pages, Pages+Size);
-  }
-  
-  virtual const char *getBufferIdentifier() const {
-    return Filename.c_str();
-  }
-    
+  MemoryBufferMMapFile(StringRef Buffer)
+    : MemoryBufferMem(Buffer) { }
+
   ~MemoryBufferMMapFile() {
     sys::Path::UnMapFilePages(getBufferStart(), getBufferSize());
   }
@@ -170,19 +182,24 @@
 class FileCloser {
   int FD;
 public:
-  FileCloser(int FD) : FD(FD) {}
+  explicit FileCloser(int FD) : FD(FD) {}
   ~FileCloser() { ::close(FD); }
 };
 }
 
 MemoryBuffer *MemoryBuffer::getFile(StringRef Filename, std::string *ErrStr,
                                     int64_t FileSize, struct stat *FileInfo) {
-  int OpenFlags = 0;
+  SmallString<256> PathBuf(Filename.begin(), Filename.end());
+  return MemoryBuffer::getFile(PathBuf.c_str(), ErrStr, FileSize, FileInfo);
+}
+
+MemoryBuffer *MemoryBuffer::getFile(const char *Filename, std::string *ErrStr,
+                                    int64_t FileSize, struct stat *FileInfo) {
+  int OpenFlags = O_RDONLY;
 #ifdef O_BINARY
   OpenFlags |= O_BINARY;  // Open input file in binary mode on win32.
 #endif
-  SmallString<256> PathBuf(Filename.begin(), Filename.end());
-  int FD = ::open(PathBuf.c_str(), O_RDONLY|OpenFlags);
+  int FD = ::open(Filename, OpenFlags);
   if (FD == -1) {
     if (ErrStr) *ErrStr = sys::StrError();
     return 0;
@@ -213,8 +230,8 @@
   if (FileSize >= 4096*4 &&
       (FileSize & (sys::Process::GetPageSize()-1)) != 0) {
     if (const char *Pages = sys::Path::MapInFilePages(FD, FileSize)) {
-      // Close the file descriptor, now that the whole file is in memory.
-      return new MemoryBufferMMapFile(Filename, Pages, FileSize);
+      return GetNamedBuffer<MemoryBufferMMapFile>(StringRef(Pages, FileSize),
+                                                  Filename);
     }
   }
 
@@ -254,39 +271,27 @@
 // MemoryBuffer::getSTDIN implementation.
 //===----------------------------------------------------------------------===//
 
-namespace {
-class STDINBufferFile : public MemoryBuffer {
-public:
-  virtual const char *getBufferIdentifier() const {
-    return "<stdin>";
-  }
-};
-}
-
 MemoryBuffer *MemoryBuffer::getSTDIN(std::string *ErrStr) {
-  char Buffer[4096*4];
-
-  std::vector<char> FileData;
-
   // Read in all of the data from stdin, we cannot mmap stdin.
   //
   // FIXME: That isn't necessarily true, we should try to mmap stdin and
   // fallback if it fails.
   sys::Program::ChangeStdinToBinary();
-  size_t ReadBytes;
-  do {
-    ReadBytes = fread(Buffer, sizeof(char), sizeof(Buffer), stdin);
-    FileData.insert(FileData.end(), Buffer, Buffer+ReadBytes);
-  } while (ReadBytes == sizeof(Buffer));
 
-  if (!feof(stdin)) {
-    if (ErrStr) *ErrStr = "error reading from stdin";
-    return 0;
-  }
+  const ssize_t ChunkSize = 4096*4;
+  SmallString<ChunkSize> Buffer;
+  ssize_t ReadBytes;
+  // Read into Buffer until we hit EOF.
+  do {
+    Buffer.reserve(Buffer.size() + ChunkSize);
+    ReadBytes = read(0, Buffer.end(), ChunkSize);
+    if (ReadBytes == -1) {
+      if (errno == EINTR) continue;
+      if (ErrStr) *ErrStr = sys::StrError();
+      return 0;
+    }
+    Buffer.set_size(Buffer.size() + ReadBytes);
+  } while (ReadBytes != 0);
 
-  FileData.push_back(0); // &FileData[Size] is invalid. So is &*FileData.end().
-  size_t Size = FileData.size();
-  MemoryBuffer *B = new STDINBufferFile();
-  B->initCopyOf(&FileData[0], &FileData[Size-1]);
-  return B;
+  return getMemBufferCopy(Buffer, "<stdin>");
 }

Modified: llvm/branches/wendling/eh/lib/Support/PrettyStackTrace.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/PrettyStackTrace.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/PrettyStackTrace.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/PrettyStackTrace.cpp Fri Jul  2 04:57:13 2010
@@ -12,11 +12,17 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Config/config.h"     // Get autoconf configuration settings
 #include "llvm/Support/PrettyStackTrace.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/System/Signals.h"
 #include "llvm/System/ThreadLocal.h"
 #include "llvm/ADT/SmallString.h"
+
+#ifdef HAVE_CRASHREPORTERCLIENT_H
+#include <CrashReporterClient.h>
+#endif
+
 using namespace llvm;
 
 namespace llvm {
@@ -48,8 +54,17 @@
   OS.flush();
 }
 
-// Integrate with crash reporter.
-#ifdef __APPLE__
+// Integrate with crash reporter libraries.
+#if defined (__APPLE__) && defined (HAVE_CRASHREPORTERCLIENT_H)
+//  If any clients of llvm try to link to libCrashReporterClient.a themselves,
+//  only one crash info struct will be used.
+extern "C" {
+CRASH_REPORTER_CLIENT_HIDDEN 
+struct crashreporter_annotations_t gCRAnnotations 
+        __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) 
+        = { CRASHREPORTER_ANNOTATIONS_VERSION, 0, 0, 0, 0 };
+}
+#elif defined (__APPLE__)
 static const char *__crashreporter_info__ = 0;
 asm(".desc ___crashreporter_info__, 0x10");
 #endif
@@ -71,7 +86,11 @@
   }
   
   if (!TmpStr.empty()) {
+#ifndef HAVE_CRASHREPORTERCLIENT_H
     __crashreporter_info__ = strdup(std::string(TmpStr.str()).c_str());
+#else
+    CRSetCrashLogMessage(std::string(TmpStr.str()).c_str());
+#endif
     errs() << TmpStr.str();
   }
   

Modified: llvm/branches/wendling/eh/lib/Support/SmallPtrSet.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/SmallPtrSet.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/SmallPtrSet.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/SmallPtrSet.cpp Fri Jul  2 04:57:13 2010
@@ -166,10 +166,13 @@
   }
 }
 
-SmallPtrSetImpl::SmallPtrSetImpl(const SmallPtrSetImpl& that) {
+SmallPtrSetImpl::SmallPtrSetImpl(const void **SmallStorage,
+                                 const SmallPtrSetImpl& that) {
+  SmallArray = SmallStorage;
+
   // If we're becoming small, prepare to insert into our stack space
   if (that.isSmall()) {
-    CurArray = &SmallArray[0];
+    CurArray = SmallArray;
   // Otherwise, allocate new heap space (unless we were the same size)
   } else {
     CurArray = (const void**)malloc(sizeof(void*) * (that.CurArraySize+1));
@@ -197,7 +200,7 @@
   if (RHS.isSmall()) {
     if (!isSmall())
       free(CurArray);
-    CurArray = &SmallArray[0];
+    CurArray = SmallArray;
   // Otherwise, allocate new heap space (unless we were the same size)
   } else if (CurArraySize != RHS.CurArraySize) {
     if (isSmall())

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp Fri Jul  2 04:57:13 2010
@@ -707,6 +707,11 @@
   if (SrcRC == ARM::tGPRRegisterClass || SrcRC == ARM::tcGPRRegisterClass)
     SrcRC = ARM::GPRRegisterClass;
 
+  if (DestRC == ARM::SPR_8RegisterClass)
+    DestRC = ARM::SPRRegisterClass;
+  if (SrcRC == ARM::SPR_8RegisterClass)
+    SrcRC = ARM::SPRRegisterClass;
+
   // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
   if (DestRC == ARM::DPR_8RegisterClass)
     DestRC = ARM::DPR_VFP2RegisterClass;
@@ -801,23 +806,27 @@
   if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass)
     RC = ARM::GPRRegisterClass;
 
-  if (RC == ARM::GPRRegisterClass) {
+  switch (RC->getID()) {
+  case ARM::GPRRegClassID:
     AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
                    .addReg(SrcReg, getKillRegState(isKill))
                    .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
-  } else if (RC == ARM::SPRRegisterClass) {
+    break;
+  case ARM::SPRRegClassID:
     AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                    .addReg(SrcReg, getKillRegState(isKill))
                    .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
-  } else if (RC == ARM::DPRRegisterClass ||
-             RC == ARM::DPR_VFP2RegisterClass ||
-             RC == ARM::DPR_8RegisterClass) {
+    break;
+  case ARM::DPRRegClassID:
+  case ARM::DPR_VFP2RegClassID:
+  case ARM::DPR_8RegClassID:
     AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                    .addReg(SrcReg, getKillRegState(isKill))
                    .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
-  } else if (RC == ARM::QPRRegisterClass ||
-             RC == ARM::QPR_VFP2RegisterClass ||
-             RC == ARM::QPR_8RegisterClass) {
+    break;
+  case ARM::QPRRegClassID:
+  case ARM::QPR_VFP2RegClassID:
+  case ARM::QPR_8RegClassID:
     // FIXME: Neon instructions should support predicates
     if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
       AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q))
@@ -831,7 +840,9 @@
                      .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
                      .addMemOperand(MMO));
     }
-  } else if (RC == ARM::QQPRRegisterClass || RC == ARM::QQPR_VFP2RegisterClass){
+    break;
+  case ARM::QQPRRegClassID:
+  case ARM::QQPR_VFP2RegClassID:
     if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
       // FIXME: It's possible to only store part of the QQ register if the
       // spilled def has a sub-register index.
@@ -853,8 +864,8 @@
       MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
             AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
     }
-  } else {
-    assert(RC == ARM::QQQQPRRegisterClass && "Unknown regclass!");
+    break;
+  case ARM::QQQQPRRegClassID: {
     MachineInstrBuilder MIB =
       AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD))
                      .addFrameIndex(FI)
@@ -868,6 +879,10 @@
     MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
     MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
           AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
+    break;
+  }
+  default:
+    llvm_unreachable("Unknown regclass!");
   }
 }
 
@@ -892,20 +907,24 @@
   if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass)
     RC = ARM::GPRRegisterClass;
 
-  if (RC == ARM::GPRRegisterClass) {
+  switch (RC->getID()) {
+  case ARM::GPRRegClassID:
     AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
                    .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
-  } else if (RC == ARM::SPRRegisterClass) {
+    break;
+  case ARM::SPRRegClassID:
     AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                    .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
-  } else if (RC == ARM::DPRRegisterClass ||
-             RC == ARM::DPR_VFP2RegisterClass ||
-             RC == ARM::DPR_8RegisterClass) {
+    break;
+  case ARM::DPRRegClassID:
+  case ARM::DPR_VFP2RegClassID:
+  case ARM::DPR_8RegClassID:
     AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                    .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
-  } else if (RC == ARM::QPRRegisterClass ||
-             RC == ARM::QPR_VFP2RegisterClass ||
-             RC == ARM::QPR_8RegisterClass) {
+    break;
+  case ARM::QPRRegClassID:
+  case ARM::QPR_VFP2RegClassID:
+  case ARM::QPR_8RegClassID:
     if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
       AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q), DestReg)
                      .addFrameIndex(FI).addImm(128)
@@ -916,7 +935,9 @@
                      .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
                      .addMemOperand(MMO));
     }
-  } else if (RC == ARM::QQPRRegisterClass || RC == ARM::QQPR_VFP2RegisterClass){
+    break;
+  case ARM::QQPRRegClassID:
+  case ARM::QQPR_VFP2RegClassID:
     if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
       MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::VLD2q32));
       MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
@@ -935,21 +956,25 @@
       MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
             AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
     }
-  } else {
-    assert(RC == ARM::QQQQPRRegisterClass && "Unknown regclass!");
-      MachineInstrBuilder MIB =
-        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
-                       .addFrameIndex(FI)
-                       .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4)))
-        .addMemOperand(MMO);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
-      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
-            AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
+    break;
+  case ARM::QQQQPRRegClassID: {
+    MachineInstrBuilder MIB =
+      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD))
+                     .addFrameIndex(FI)
+                     .addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4)))
+      .addMemOperand(MMO);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
+    MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
+    AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
+    break;
+  }
+  default:
+    llvm_unreachable("Unknown regclass!");
   }
 }
 
@@ -1286,6 +1311,165 @@
   return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
 }
 
+/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
+/// determine if two loads are loading from the same base address. It should
+/// only return true if the base pointers are the same and the only differences
+/// between the two addresses is the offset. It also returns the offsets by
+/// reference.
+bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+                                               int64_t &Offset1,
+                                               int64_t &Offset2) const {
+  // Don't worry about Thumb: just ARM and Thumb2.
+  if (Subtarget.isThumb1Only()) return false;
+
+  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
+    return false;
+
+  switch (Load1->getMachineOpcode()) {
+  default:
+    return false;
+  case ARM::LDR:
+  case ARM::LDRB:
+  case ARM::LDRD:
+  case ARM::LDRH:
+  case ARM::LDRSB:
+  case ARM::LDRSH:
+  case ARM::VLDRD:
+  case ARM::VLDRS:
+  case ARM::t2LDRi8:
+  case ARM::t2LDRDi8:
+  case ARM::t2LDRSHi8:
+  case ARM::t2LDRi12:
+  case ARM::t2LDRSHi12:
+    break;
+  }
+
+  switch (Load2->getMachineOpcode()) {
+  default:
+    return false;
+  case ARM::LDR:
+  case ARM::LDRB:
+  case ARM::LDRD:
+  case ARM::LDRH:
+  case ARM::LDRSB:
+  case ARM::LDRSH:
+  case ARM::VLDRD:
+  case ARM::VLDRS:
+  case ARM::t2LDRi8:
+  case ARM::t2LDRDi8:
+  case ARM::t2LDRSHi8:
+  case ARM::t2LDRi12:
+  case ARM::t2LDRSHi12:
+    break;
+  }
+
+  // Check if base addresses and chain operands match.
+  if (Load1->getOperand(0) != Load2->getOperand(0) ||
+      Load1->getOperand(4) != Load2->getOperand(4))
+    return false;
+
+  // Index should be Reg0.
+  if (Load1->getOperand(3) != Load2->getOperand(3))
+    return false;
+
+  // Determine the offsets.
+  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
+      isa<ConstantSDNode>(Load2->getOperand(1))) {
+    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
+    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
+    return true;
+  }
+
+  return false;
+}
+
+/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
+/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
+/// be scheduled together. On some targets if two loads are loading from
+/// addresses in the same cache line, it's better if they are scheduled
+/// together. This function takes two integers that represent the load offsets
+/// from the common base address. It returns true if it decides it's desirable
+/// to schedule the two loads together. "NumLoads" is the number of loads that
+/// have already been scheduled after Load1.
+bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                                               int64_t Offset1, int64_t Offset2,
+                                               unsigned NumLoads) const {
+  // Don't worry about Thumb: just ARM and Thumb2.
+  if (Subtarget.isThumb1Only()) return false;
+
+  assert(Offset2 > Offset1);
+
+  if ((Offset2 - Offset1) / 8 > 64)
+    return false;
+
+  if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
+    return false;  // FIXME: overly conservative?
+
+  // Four loads in a row should be sufficient.
+  if (NumLoads >= 3)
+    return false;
+
+  return true;
+}
+
+bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+                                            const MachineBasicBlock *MBB,
+                                            const MachineFunction &MF) const {
+  // Debug info is never a scheduling boundary. It's necessary to be explicit
+  // due to the special treatment of IT instructions below, otherwise a
+  // dbg_value followed by an IT will result in the IT instruction being
+  // considered a scheduling hazard, which is wrong. It should be the actual
+  // instruction preceding the dbg_value instruction(s), just like it is
+  // when debug info is not present.
+  if (MI->isDebugValue())
+    return false;
+
+  // Terminators and labels can't be scheduled around.
+  if (MI->getDesc().isTerminator() || MI->isLabel())
+    return true;
+
+  // Treat the start of the IT block as a scheduling boundary, but schedule
+  // t2IT along with all instructions following it.
+  // FIXME: This is a big hammer. But the alternative is to add all potential
+  // true and anti dependencies to IT block instructions as implicit operands
+  // to the t2IT instruction. The added compile time and complexity does not
+  // seem worth it.
+  MachineBasicBlock::const_iterator I = MI;
+  // Make sure to skip any dbg_value instructions
+  while (++I != MBB->end() && I->isDebugValue())
+    ;
+  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
+    return true;
+
+  // Don't attempt to schedule around any instruction that defines
+  // a stack-oriented pointer, as it's unlikely to be profitable. This
+  // saves compile time, because it doesn't require every single
+  // stack slot reference to depend on the instruction that does the
+  // modification.
+  if (MI->definesRegister(ARM::SP))
+    return true;
+
+  return false;
+}
+
+bool ARMBaseInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const {
+  if (!NumInstrs)
+    return false;
+  if (Subtarget.getCPUString() == "generic")
+    // Generic (and overly aggressive) if-conversion limits for testing.
+    return NumInstrs <= 10;
+  else if (Subtarget.hasV7Ops())
+    return NumInstrs <= 3;
+  return NumInstrs <= 2;
+}
+  
+bool ARMBaseInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
+                    MachineBasicBlock &FMBB, unsigned NumF) const {
+  return NumT && NumF && NumT <= 2 && NumF <= 2;
+}
+
 /// getInstrPredicate - If instruction is predicated, returns its predicate
 /// condition, otherwise returns AL. It also returns the condition code
 /// register by reference.

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h Fri Jul  2 04:57:13 2010
@@ -116,23 +116,25 @@
     // Thumb format
     ThumbFrm      = 24 << FormShift,
 
+    // Miscellaneous format
+    MiscFrm       = 25 << FormShift,
+
     // NEON formats
-    NEONFrm       = 25 << FormShift,
-    NEONGetLnFrm  = 26 << FormShift,
-    NEONSetLnFrm  = 27 << FormShift,
-    NEONDupFrm    = 28 << FormShift,
-    NLdStFrm      = 31 << FormShift,
-    N1RegModImmFrm= 32 << FormShift,
-    N2RegFrm      = 33 << FormShift,
-    NVCVTFrm      = 34 << FormShift,
-    NVDupLnFrm    = 35 << FormShift,
-    N2RegVShLFrm  = 36 << FormShift,
-    N2RegVShRFrm  = 37 << FormShift,
-    N3RegFrm      = 38 << FormShift,
-    N3RegVShFrm   = 39 << FormShift,
-    NVExtFrm      = 40 << FormShift,
-    NVMulSLFrm    = 41 << FormShift,
-    NVTBLFrm      = 42 << FormShift,
+    NGetLnFrm     = 26 << FormShift,
+    NSetLnFrm     = 27 << FormShift,
+    NDupFrm       = 28 << FormShift,
+    NLdStFrm      = 29 << FormShift,
+    N1RegModImmFrm= 30 << FormShift,
+    N2RegFrm      = 31 << FormShift,
+    NVCVTFrm      = 32 << FormShift,
+    NVDupLnFrm    = 33 << FormShift,
+    N2RegVShLFrm  = 34 << FormShift,
+    N2RegVShRFrm  = 35 << FormShift,
+    N3RegFrm      = 36 << FormShift,
+    N3RegVShFrm   = 37 << FormShift,
+    NVExtFrm      = 38 << FormShift,
+    NVMulSLFrm    = 39 << FormShift,
+    NVTBLFrm      = 40 << FormShift,
 
     //===------------------------------------------------------------------===//
     // Misc flags.
@@ -319,6 +321,41 @@
 
   virtual bool produceSameValue(const MachineInstr *MI0,
                                 const MachineInstr *MI1) const;
+
+  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
+  /// determine if two loads are loading from the same base address. It should
+  /// only return true if the base pointers are the same and the only
+  /// differences between the two addresses is the offset. It also returns the
+  /// offsets by reference.
+  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+                                       int64_t &Offset1, int64_t &Offset2)const;
+
+  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
+  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
+  /// be scheduled together. On some targets if two loads are loading from
+  /// addresses in the same cache line, it's better if they are scheduled
+  /// together. This function takes two integers that represent the load offsets
+  /// from the common base address. It returns true if it decides it's desirable
+  /// to schedule the two loads together. "NumLoads" is the number of loads that
+  /// have already been scheduled after Load1.
+  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                                       int64_t Offset1, int64_t Offset2,
+                                       unsigned NumLoads) const;
+
+  virtual bool isSchedulingBoundary(const MachineInstr *MI,
+                                    const MachineBasicBlock *MBB,
+                                    const MachineFunction &MF) const;
+
+  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
+                                   unsigned NumInstrs) const;
+
+  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,unsigned NumT,
+                                   MachineBasicBlock &FMBB,unsigned NumF) const;
+
+  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
+                                         unsigned NumInstrs) const {
+    return NumInstrs && NumInstrs == 1;
+  }
 };
 
 static inline

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp Fri Jul  2 04:57:13 2010
@@ -674,6 +674,15 @@
          I != E; ++I) {
       for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
         if (!I->getOperand(i).isFI()) continue;
+
+        // When using ADDri to get the address of a stack object, 255 is the
+        // largest offset guaranteed to fit in the immediate offset.
+        if (I->getOpcode() == ARM::ADDri) {
+          Limit = std::min(Limit, (1U << 8) - 1);
+          break;
+        }
+
+        // Otherwise check the addressing mode.
         switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
         case ARMII::AddrMode3:
         case ARMII::AddrModeT2_i8:
@@ -1658,7 +1667,8 @@
         addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                          JumpTarget.getTargetFlags());
     } else if (RetOpcode == ARM::TCRETURNdiND) {
-      BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPdND)).
+      BuildMI(MBB, MBBI, dl,
+            TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
         addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                          JumpTarget.getTargetFlags());
     } else if (RetOpcode == ARM::TCRETURNri) {

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp Fri Jul  2 04:57:13 2010
@@ -55,6 +55,7 @@
     const std::vector<MachineConstantPoolEntry> *MCPEs;
     const std::vector<MachineJumpTableEntry> *MJTEs;
     bool IsPIC;
+    bool IsThumb;
 
     void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.addRequired<MachineModuleInfo>();
@@ -67,8 +68,8 @@
       : MachineFunctionPass(&ID), JTI(0),
         II((const ARMInstrInfo *)tm.getInstrInfo()),
         TD(tm.getTargetData()), TM(tm),
-    MCE(mce), MCPEs(0), MJTEs(0),
-    IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
+        MCE(mce), MCPEs(0), MJTEs(0),
+        IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
 
     /// getBinaryCodeForInstr - This function, generated by the
     /// CodeEmitterGenerator using TableGen, produces the binary encoding for
@@ -139,7 +140,11 @@
 
     void emitMiscInstruction(const MachineInstr &MI);
 
-    void emitNEON1RegModImm(const MachineInstr &MI);
+    void emitNEONLaneInstruction(const MachineInstr &MI);
+    void emitNEONDupInstruction(const MachineInstr &MI);
+    void emitNEON1RegModImmInstruction(const MachineInstr &MI);
+    void emitNEON2RegInstruction(const MachineInstr &MI);
+    void emitNEON3RegInstruction(const MachineInstr &MI);
 
     /// getMachineOpValue - Return binary encoding of operand. If the machine
     /// operand requires relocation, record the relocation and return zero.
@@ -196,6 +201,7 @@
   MJTEs = 0;
   if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables();
   IsPIC = TM.getRelocationModel() == Reloc::PIC_;
+  IsThumb = MF.getInfo<ARMFunctionInfo>()->isThumbFunction();
   JTI->Initialize(MF, IsPIC);
   MMI = &getAnalysis<MachineModuleInfo>();
   MCE.setModuleInfo(MMI);
@@ -350,7 +356,7 @@
 
   MCE.processDebugLoc(MI.getDebugLoc(), true);
 
-  NumEmitted++;  // Keep track of the # of mi's emitted
+  ++NumEmitted;  // Keep track of the # of mi's emitted
   switch (MI.getDesc().TSFlags & ARMII::FormMask) {
   default: {
     llvm_unreachable("Unhandled instruction encoding format!");
@@ -411,8 +417,21 @@
     emitMiscInstruction(MI);
     break;
   // NEON instructions.
+  case ARMII::NGetLnFrm:
+  case ARMII::NSetLnFrm:
+    emitNEONLaneInstruction(MI);
+    break;
+  case ARMII::NDupFrm:
+    emitNEONDupInstruction(MI);
+    break;
   case ARMII::N1RegModImmFrm:
-    emitNEON1RegModImm(MI);
+    emitNEON1RegModImmInstruction(MI);
+    break;
+  case ARMII::N2RegFrm:
+    emitNEON2RegInstruction(MI);
+    break;
+  case ARMII::N3RegFrm:
+    emitNEON3RegInstruction(MI);
     break;
   }
   MCE.processDebugLoc(MI.getDebugLoc(), false);
@@ -1555,22 +1574,134 @@
   return Binary;
 }
 
-void ARMCodeEmitter::emitNEON1RegModImm(const MachineInstr &MI) {
+static unsigned encodeNEONRn(const MachineInstr &MI, unsigned OpIdx) {
+  unsigned RegN = MI.getOperand(OpIdx).getReg();
+  unsigned Binary = 0;
+  RegN = ARMRegisterInfo::getRegisterNumbering(RegN);
+  Binary |= (RegN & 0xf) << ARMII::RegRnShift;
+  Binary |= ((RegN >> 4) & 1) << ARMII::N_BitShift;
+  return Binary;
+}
+
+static unsigned encodeNEONRm(const MachineInstr &MI, unsigned OpIdx) {
+  unsigned RegM = MI.getOperand(OpIdx).getReg();
+  unsigned Binary = 0;
+  RegM = ARMRegisterInfo::getRegisterNumbering(RegM);
+  Binary |= (RegM & 0xf);
+  Binary |= ((RegM >> 4) & 1) << ARMII::M_BitShift;
+  return Binary;
+}
+
+/// convertNEONDataProcToThumb - Convert the ARM mode encoding for a NEON
+/// data-processing instruction to the corresponding Thumb encoding.
+static unsigned convertNEONDataProcToThumb(unsigned Binary) {
+  assert((Binary & 0xfe000000) == 0xf2000000 &&
+         "not an ARM NEON data-processing instruction");
+  unsigned UBit = (Binary >> 24) & 1;
+  return 0xef000000 | (UBit << 28) | (Binary & 0xffffff);
+}
+
+void ARMCodeEmitter::emitNEONLaneInstruction(const MachineInstr &MI) {
+  unsigned Binary = getBinaryCodeForInstr(MI);
+
+  unsigned RegTOpIdx, RegNOpIdx, LnOpIdx;
+  const TargetInstrDesc &TID = MI.getDesc();
+  if ((TID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) {
+    RegTOpIdx = 0;
+    RegNOpIdx = 1;
+    LnOpIdx = 2;
+  } else { // ARMII::NSetLnFrm
+    RegTOpIdx = 2;
+    RegNOpIdx = 0;
+    LnOpIdx = 3;
+  }
+
+  // Set the conditional execution predicate
+  Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
+
+  unsigned RegT = MI.getOperand(RegTOpIdx).getReg();
+  RegT = ARMRegisterInfo::getRegisterNumbering(RegT);
+  Binary |= (RegT << ARMII::RegRdShift);
+  Binary |= encodeNEONRn(MI, RegNOpIdx);
+
+  unsigned LaneShift;
+  if ((Binary & (1 << 22)) != 0)
+    LaneShift = 0; // 8-bit elements
+  else if ((Binary & (1 << 5)) != 0)
+    LaneShift = 1; // 16-bit elements
+  else
+    LaneShift = 2; // 32-bit elements
+
+  unsigned Lane = MI.getOperand(LnOpIdx).getImm() << LaneShift;
+  unsigned Opc1 = Lane >> 2;
+  unsigned Opc2 = Lane & 3;
+  assert((Opc1 & 3) == 0 && "out-of-range lane number operand");
+  Binary |= (Opc1 << 21);
+  Binary |= (Opc2 << 5);
+
+  emitWordLE(Binary);
+}
+
+void ARMCodeEmitter::emitNEONDupInstruction(const MachineInstr &MI) {
+  unsigned Binary = getBinaryCodeForInstr(MI);
+
+  // Set the conditional execution predicate
+  Binary |= (IsThumb ? ARMCC::AL : II->getPredicate(&MI)) << ARMII::CondShift;
+
+  unsigned RegT = MI.getOperand(1).getReg();
+  RegT = ARMRegisterInfo::getRegisterNumbering(RegT);
+  Binary |= (RegT << ARMII::RegRdShift);
+  Binary |= encodeNEONRn(MI, 0);
+  emitWordLE(Binary);
+}
+
+void ARMCodeEmitter::emitNEON1RegModImmInstruction(const MachineInstr &MI) {
   unsigned Binary = getBinaryCodeForInstr(MI);
   // Destination register is encoded in Dd.
   Binary |= encodeNEONRd(MI, 0);
   // Immediate fields: Op, Cmode, I, Imm3, Imm4
   unsigned Imm = MI.getOperand(1).getImm();
   unsigned Op = (Imm >> 12) & 1;
-  Binary |= (Op << 5);
   unsigned Cmode = (Imm >> 8) & 0xf;
-  Binary |= (Cmode << 8);
   unsigned I = (Imm >> 7) & 1;
-  Binary |= (I << 24);
   unsigned Imm3 = (Imm >> 4) & 0x7;
-  Binary |= (Imm3 << 16);
   unsigned Imm4 = Imm & 0xf;
-  Binary |= Imm4;
+  Binary |= (I << 24) | (Imm3 << 16) | (Cmode << 8) | (Op << 5) | Imm4;
+  if (IsThumb)
+    Binary = convertNEONDataProcToThumb(Binary);
+  emitWordLE(Binary);
+}
+
+void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) {
+  const TargetInstrDesc &TID = MI.getDesc();
+  unsigned Binary = getBinaryCodeForInstr(MI);
+  // Destination register is encoded in Dd; source register in Dm.
+  unsigned OpIdx = 0;
+  Binary |= encodeNEONRd(MI, OpIdx++);
+  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+    ++OpIdx;
+  Binary |= encodeNEONRm(MI, OpIdx);
+  if (IsThumb)
+    Binary = convertNEONDataProcToThumb(Binary);
+  // FIXME: This does not handle VDUPfdf or VDUPfqf.
+  emitWordLE(Binary);
+}
+
+void ARMCodeEmitter::emitNEON3RegInstruction(const MachineInstr &MI) {
+  const TargetInstrDesc &TID = MI.getDesc();
+  unsigned Binary = getBinaryCodeForInstr(MI);
+  // Destination register is encoded in Dd; source registers in Dn and Dm.
+  unsigned OpIdx = 0;
+  Binary |= encodeNEONRd(MI, OpIdx++);
+  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+    ++OpIdx;
+  Binary |= encodeNEONRn(MI, OpIdx++);
+  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+    ++OpIdx;
+  Binary |= encodeNEONRm(MI, OpIdx);
+  if (IsThumb)
+    Binary = convertNEONDataProcToThumb(Binary);
+  // FIXME: This does not handle VMOVDneon or VMOVQ.
   emitWordLE(Binary);
 }
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantIslandPass.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantIslandPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantIslandPass.cpp Fri Jul  2 04:57:13 2010
@@ -407,7 +407,7 @@
     std::vector<CPEntry> CPEs;
     CPEs.push_back(CPEntry(CPEMI, i));
     CPEntries.push_back(CPEs);
-    NumCPEs++;
+    ++NumCPEs;
     DEBUG(errs() << "Moved CPI#" << i << " to end of function as #" << i
                  << "\n");
   }
@@ -492,6 +492,8 @@
     unsigned MBBSize = 0;
     for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
          I != E; ++I) {
+      if (I->isDebugValue())
+        continue;
       // Add instruction size to MBBSize.
       MBBSize += TII->GetInstSizeInBytes(I);
 
@@ -723,7 +725,7 @@
   // correspond to anything in the source.
   unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
   BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
-  NumSplit++;
+  ++NumSplit;
 
   // Update the CFG.  All succs of OrigBB are now succs of NewBB.
   while (!OrigBB->succ_empty()) {
@@ -946,7 +948,7 @@
   if (--CPE->RefCount == 0) {
     RemoveDeadCPEMI(CPEMI);
     CPE->CPEMI = NULL;
-    NumCPEs--;
+    --NumCPEs;
     return true;
   }
   return false;
@@ -1247,7 +1249,7 @@
   U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
                 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
   CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
-  NumCPEs++;
+  ++NumCPEs;
 
   BBOffsets[NewIsland->getNumber()] = BBOffsets[NewMBB->getNumber()];
   // Compensate for .align 2 in thumb mode.
@@ -1370,7 +1372,7 @@
   BBSizes[MBB->getNumber()] += 2;
   AdjustBBOffsetsAfter(MBB, 2);
   HasFarJump = true;
-  NumUBrFixed++;
+  ++NumUBrFixed;
 
   DEBUG(errs() << "  Changed B to long jump " << *MI);
 
@@ -1403,7 +1405,7 @@
   MachineInstr *BMI = &MBB->back();
   bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
 
-  NumCBrFixed++;
+  ++NumCBrFixed;
   if (BMI != MI) {
     if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
         BMI->getOpcode() == Br.UncondBr) {

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp Fri Jul  2 04:57:13 2010
@@ -533,7 +533,6 @@
 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDNode *Op, SDValue N,
                                             SDValue &Base, SDValue &Offset){
   // FIXME dl should come from the parent load or store, not the address
-  DebugLoc dl = Op->getDebugLoc();
   if (N.getOpcode() != ISD::ADD) {
     ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
     if (!NC || !NC->isNullValue())
@@ -1382,13 +1381,11 @@
 
   // Quad registers are handled by load/store of subregs. Find the subreg info.
   unsigned NumElts = 0;
-  int SubregIdx = 0;
   bool Even = false;
   EVT RegVT = VT;
   if (!is64BitVector) {
     RegVT = GetNEONSubregVT(VT);
     NumElts = RegVT.getVectorNumElements();
-    SubregIdx = (Lane < NumElts) ? ARM::dsub_0 : ARM::dsub_1;
     Even = Lane < NumElts;
   }
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp Fri Jul  2 04:57:13 2010
@@ -67,6 +67,11 @@
   cl::desc("Enable / disable ARM interworking (for debugging only)"),
   cl::init(true));
 
+static cl::opt<bool>
+EnableARMCodePlacement("arm-code-placement", cl::Hidden,
+  cl::desc("Enable code placement pass for ARM."),
+  cl::init(false));
+
 static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
@@ -407,16 +412,55 @@
   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
   // Handle atomics directly for ARMv[67] (except for Thumb1), otherwise
   // use the default expansion.
-  TargetLowering::LegalizeAction AtomicAction =
+  bool canHandleAtomics =
     (Subtarget->hasV7Ops() ||
-      (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())) ? Custom : Expand;
-  setOperationAction(ISD::MEMBARRIER, MVT::Other, AtomicAction);
+      (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only()));
+  if (canHandleAtomics) {
+    // membarrier needs custom lowering; the rest are legal and handled
+    // normally.
+    setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+  } else {
+    // Set them all for expansion, which will force libcalls.
+    setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
+    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i8,  Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i16, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
+    // Since the libcalls include locking, fold in the fences
+    setShouldFoldAtomicFences(true);
+  }
+  // 64-bit versions are always libcalls (for now)
+  setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_SWAP,      MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Expand);
 
-  // If the subtarget does not have extract instructions, sign_extend_inreg
-  // needs to be expanded. Extract is available in ARM mode on v6 and up,
-  // and on most Thumb2 implementations.
-  if ((!Subtarget->isThumb() && !Subtarget->hasV6Ops())
-      || (Subtarget->isThumb2() && !Subtarget->hasT2ExtractPack())) {
+  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
+  if (!Subtarget->hasV6Ops()) {
     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
   }
@@ -489,28 +533,10 @@
   else
     setSchedulingPreference(Sched::Hybrid);
 
-  // FIXME: If-converter should use instruction latency to determine
-  // profitability rather than relying on fixed limits.
-  if (Subtarget->getCPUString() == "generic") {
-    // Generic (and overly aggressive) if-conversion limits.
-    setIfCvtBlockSizeLimit(10);
-    setIfCvtDupBlockSizeLimit(2);
-  } else if (Subtarget->hasV7Ops()) {
-    setIfCvtBlockSizeLimit(3);
-    setIfCvtDupBlockSizeLimit(1);
-  } else if (Subtarget->hasV6Ops()) {
-    setIfCvtBlockSizeLimit(2);
-    setIfCvtDupBlockSizeLimit(1);
-  } else {
-    setIfCvtBlockSizeLimit(3);
-    setIfCvtDupBlockSizeLimit(2);
-  }
-
   maxStoresPerMemcpy = 1;   //// temporary - rewrite interface to use type
-  // Do not enable CodePlacementOpt for now: it currently runs after the
-  // ARMConstantIslandPass and messes up branch relaxation and placement
-  // of constant islands.
-  // benefitFromCodePlacementOpt = true;
+
+  if (EnableARMCodePlacement)
+    benefitFromCodePlacementOpt = true;
 }
 
 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
@@ -621,7 +647,7 @@
 
 /// getFunctionAlignment - Return the Log2 alignment of this function.
 unsigned ARMTargetLowering::getFunctionAlignment(const Function *F) const {
-  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 0 : 1;
+  return getTargetMachine().getSubtarget<ARMSubtarget>().isThumb() ? 1 : 2;
 }
 
 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
@@ -1349,7 +1375,6 @@
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                                      SelectionDAG& DAG) const {
-
   const Function *CallerF = DAG.getMachineFunction().getFunction();
   CallingConv::ID CallerCC = CallerF->getCallingConv();
   bool CCMatch = CallerCC == CalleeCC;
@@ -1367,17 +1392,24 @@
   if (isCalleeStructRet || isCallerStructRet)
     return false;
 
-  // On Thumb, for the moment, we can only do this to functions defined in this
-  // compilation, or to indirect calls.  A Thumb B to an ARM function is not
-  // easily fixed up in the linker, unlike BL.
-  if (Subtarget->isThumb()) {
-    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
-      const GlobalValue *GV = G->getGlobal();
-      if (GV->isDeclaration() || GV->isWeakForLinker())
-        return false;
-    } else if (isa<ExternalSymbolSDNode>(Callee)) {
+  // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
+  // emitEpilogue is not ready for them.
+  if (Subtarget->isThumb1Only())
+    return false;
+
+  // For the moment, we can only do this to functions defined in this
+  // compilation, or to indirect calls.  A Thumb B to an ARM function,
+  // or vice versa, is not easily fixed up in the linker unlike BL.
+  // (We could do this by loading the address of the callee into a register;
+  // that is an extra instruction over the direct call and burns a register
+  // as well, so is not likely to be a win.)
+  if (isa<ExternalSymbolSDNode>(Callee))
+      return false;
+
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+    const GlobalValue *GV = G->getGlobal();
+    if (GV->isDeclaration() || GV->isWeakForLinker())
       return false;
-    }
   }
 
   // If the calling conventions do not match, then we'd better make sure the
@@ -1854,7 +1886,6 @@
       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
                   PseudoSourceValue::getConstantPool(), 0,
                   false, false, 0);
-    SDValue Chain = Result.getValue(1);
 
     if (RelocM == Reloc::PIC_) {
       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
@@ -2900,7 +2931,7 @@
   }
 
   default:
-    llvm_unreachable("unexpected size for EncodeNEONModImm");
+    llvm_unreachable("unexpected size for isNEONModifiedImm");
     return SDValue();
   }
 
@@ -4489,14 +4520,13 @@
   if (!Subtarget->hasV6Ops())
     // Pre-v6 does not support unaligned mem access.
     return false;
-  else {
-    // v6+ may or may not support unaligned mem access depending on the system
-    // configuration.
-    // FIXME: This is pretty conservative. Should we provide cmdline option to
-    // control the behaviour?
-    if (!Subtarget->isTargetDarwin())
-      return false;
-  }
+
+  // v6+ may or may not support unaligned mem access depending on the system
+  // configuration.
+  // FIXME: This is pretty conservative. Should we provide cmdline option to
+  // control the behaviour?
+  if (!Subtarget->isTargetDarwin())
+    return false;
 
   switch (VT.getSimpleVT().SimpleTy) {
   default:
@@ -5002,7 +5032,6 @@
 /// vector.  If it is invalid, don't add anything to Ops.
 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                      char Constraint,
-                                                     bool hasMemory,
                                                      std::vector<SDValue>&Ops,
                                                      SelectionDAG &DAG) const {
   SDValue Result(0, 0);
@@ -5151,8 +5180,7 @@
     Ops.push_back(Result);
     return;
   }
-  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
-                                                      Ops, DAG);
+  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
 }
 
 bool

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h Fri Jul  2 04:57:13 2010
@@ -242,7 +242,6 @@
     /// being processed is 'm'.
     virtual void LowerAsmOperandForConstraint(SDValue Op,
                                               char ConstraintLetter,
-                                              bool hasMemory,
                                               std::vector<SDValue> &Ops,
                                               SelectionDAG &DAG) const;
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrFormats.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrFormats.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrFormats.td Fri Jul  2 04:57:13 2010
@@ -50,27 +50,23 @@
 def VFPMiscFrm    : Format<23>;
 
 def ThumbFrm      : Format<24>;
+def MiscFrm       : Format<25>;
 
-def NEONFrm       : Format<25>;
-def NEONGetLnFrm  : Format<26>;
-def NEONSetLnFrm  : Format<27>;
-def NEONDupFrm    : Format<28>;
-
-def MiscFrm       : Format<29>;
-def ThumbMiscFrm  : Format<30>;
-
-def NLdStFrm       : Format<31>;
-def N1RegModImmFrm : Format<32>;
-def N2RegFrm       : Format<33>;
-def NVCVTFrm       : Format<34>;
-def NVDupLnFrm     : Format<35>;
-def N2RegVShLFrm   : Format<36>;
-def N2RegVShRFrm   : Format<37>;
-def N3RegFrm       : Format<38>;
-def N3RegVShFrm    : Format<39>;
-def NVExtFrm       : Format<40>;
-def NVMulSLFrm     : Format<41>;
-def NVTBLFrm       : Format<42>;
+def NGetLnFrm     : Format<26>;
+def NSetLnFrm     : Format<27>;
+def NDupFrm       : Format<28>;
+def NLdStFrm      : Format<29>;
+def N1RegModImmFrm: Format<30>;
+def N2RegFrm      : Format<31>;
+def NVCVTFrm      : Format<32>;
+def NVDupLnFrm    : Format<33>;
+def N2RegVShLFrm  : Format<34>;
+def N2RegVShRFrm  : Format<35>;
+def N3RegFrm      : Format<36>;
+def N3RegVShFrm   : Format<37>;
+def NVExtFrm      : Format<38>;
+def NVMulSLFrm    : Format<39>;
+def NVTBLFrm      : Format<40>;
 
 // Misc flags.
 
@@ -1653,17 +1649,17 @@
 class NVGetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
                 dag oops, dag iops, InstrItinClass itin,
                 string opc, string dt, string asm, list<dag> pattern>
-  : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONGetLnFrm, itin,
+  : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NGetLnFrm, itin,
              opc, dt, asm, pattern>;
 class NVSetLane<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
                 dag oops, dag iops, InstrItinClass itin,
                 string opc, string dt, string asm, list<dag> pattern>
-  : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONSetLnFrm, itin,
+  : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NSetLnFrm, itin,
              opc, dt, asm, pattern>;
 class NVDup<bits<8> opcod1, bits<4> opcod2, bits<2> opcod3,
             dag oops, dag iops, InstrItinClass itin,
             string opc, string dt, string asm, list<dag> pattern>
-  : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NEONDupFrm, itin,
+  : NVLaneOp<opcod1, opcod2, opcod3, oops, iops, NDupFrm, itin,
              opc, dt, asm, pattern>;
 
 // Vector Duplicate Lane (from scalar to all elements)

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td Fri Jul  2 04:57:13 2010
@@ -863,13 +863,13 @@
                     Pseudo, IIC_iALUi,
                     "adr$p\t$dst, #$label", []>;
 
+} // neverHasSideEffects
 def LEApcrelJT : AXI1<0x0, (outs GPR:$dst),
                            (ins i32imm:$label, nohash_imm:$id, pred:$p),
                       Pseudo, IIC_iALUi,
                       "adr$p\t$dst, #${label}_${id}", []> {
     let Inst{25} = 1;
 }
-} // neverHasSideEffects
 
 //===----------------------------------------------------------------------===//
 //  Control Flow Instructions.
@@ -1040,33 +1040,26 @@
               D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
               D27, D28, D29, D30, D31, PC],
       Uses = [SP] in {
-  def TCRETURNdi : AInoP<(outs), (ins i32imm:$dst, variable_ops),
-                     Pseudo, IIC_Br,
-                     "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
-
-  def TCRETURNri : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
-                     Pseudo, IIC_Br,
-                     "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
-
-  def TAILJMPd : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
-                 IIC_Br, "b.w\t$dst  @ TAILCALL",
-                 []>, Requires<[IsDarwin]>;
-
-  def TAILJMPr : AXI<(outs), (ins tcGPR:$dst, variable_ops),
-                   BrMiscFrm, IIC_Br, "bx\t$dst  @ TAILCALL",
-                 []>, Requires<[IsDarwin]> {
-                 let Inst{7-4}   = 0b0001;
-                 let Inst{19-8}  = 0b111111111111;
-                 let Inst{27-20} = 0b00010010;
-                 let Inst{31-28} = 0b1110;
-  }
-
-  // FIXME: This is a hack so that MCInst lowering can preserve the TAILCALL
-  // marker on instructions, while still being able to relax.
-//  let isCodeGenOnly = 1 in {
-//    def TAILJMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
-//                         "jmp\t$dst  @ TAILCALL", []>,
-//                         Requires<[IsARM, IsDarwin]>;
+    def TCRETURNdi : AInoP<(outs), (ins i32imm:$dst, variable_ops),
+                       Pseudo, IIC_Br,
+                       "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
+
+    def TCRETURNri : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
+                       Pseudo, IIC_Br,
+                       "@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
+
+    def TAILJMPd : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+                   IIC_Br, "b.w\t$dst  @ TAILCALL",
+                   []>, Requires<[IsDarwin]>;
+
+    def TAILJMPr : AXI<(outs), (ins tcGPR:$dst, variable_ops),
+                     BrMiscFrm, IIC_Br, "bx\t$dst  @ TAILCALL",
+                   []>, Requires<[IsDarwin]> {
+                   let Inst{7-4}   = 0b0001;
+                   let Inst{19-8}  = 0b111111111111;
+                   let Inst{27-20} = 0b00010010;
+                   let Inst{31-28} = 0b1110;
+    }
   }
 
   // Non-Darwin versions (the difference is R9).
@@ -1075,33 +1068,30 @@
               D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26,
               D27, D28, D29, D30, D31, PC],
       Uses = [SP] in {
-  def TCRETURNdiND : AInoP<(outs), (ins i32imm:$dst, variable_ops),
-                     Pseudo, IIC_Br,
-                     "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
-
-  def TCRETURNriND : AInoP<(outs), (ins tGPR:$dst, variable_ops),
-                     Pseudo, IIC_Br,
-                     "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
-
-  def TAILJMPdND : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
-                 IIC_Br, "b.w\t$dst  @ TAILCALL",
-                 []>, Requires<[IsNotDarwin]>;
-
-  def TAILJMPrND : AXI<(outs), (ins tGPR:$dst, variable_ops),
-                   BrMiscFrm, IIC_Br, "bx\t$dst  @ TAILCALL",
-                 []>, Requires<[IsNotDarwin]> {
-                 let Inst{7-4}   = 0b0001;
-                 let Inst{19-8}  = 0b111111111111;
-                 let Inst{27-20} = 0b00010010;
-                 let Inst{31-28} = 0b1110;
-  }
-
-  // FIXME: This is a hack so that MCInst lowering can preserve the TAILCALL
-  // marker on instructions, while still being able to relax.
-//  let isCodeGenOnly = 1 in {
-//    def TAILJMP_1ND : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
-//                         "jmp\t$dst  @ TAILCALL", []>,
-//                         Requires<[IsARM, IsNotDarwin]>;
+    def TCRETURNdiND : AInoP<(outs), (ins i32imm:$dst, variable_ops),
+                       Pseudo, IIC_Br,
+                       "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
+
+    def TCRETURNriND : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
+                       Pseudo, IIC_Br,
+                       "@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
+
+    def TAILJMPdND : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+                   IIC_Br, "b\t$dst  @ TAILCALL",
+                   []>, Requires<[IsARM, IsNotDarwin]>;
+
+    def TAILJMPdNDt : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
+                   IIC_Br, "b.w\t$dst  @ TAILCALL",
+                   []>, Requires<[IsThumb, IsNotDarwin]>;
+
+    def TAILJMPrND : AXI<(outs), (ins tcGPR:$dst, variable_ops),
+                     BrMiscFrm, IIC_Br, "bx\t$dst  @ TAILCALL",
+                   []>, Requires<[IsNotDarwin]> {
+                   let Inst{7-4}   = 0b0001;
+                   let Inst{19-8}  = 0b111111111111;
+                   let Inst{27-20} = 0b00010010;
+                   let Inst{31-28} = 0b1110;
+    }
   }
 }
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td Fri Jul  2 04:57:13 2010
@@ -2268,7 +2268,7 @@
                      NEONvceq, 1>;
 // For disassembly only.
 defm VCEQz    : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
-                           "$dst, $src, #0">;
+                            "$dst, $src, #0">;
 
 //   VCGE     : Vector Compare Greater Than or Equal
 defm VCGEs    : N3V_QHS<0, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
@@ -2850,9 +2850,6 @@
   return ARM::getNEONModImm(N, 8, true, *CurDAG).getNode() != 0;
 }], VMOV_get_imm64>;
 
-// Note: Some of the cmode bits in the following VMOV instructions need to
-// be encoded based on the immed values.
-
 let isReMaterializable = 1 in {
 def VMOVv8i8  : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
                          (ins nModImm:$SIMM), IIC_VMOVImm,

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb.td Fri Jul  2 04:57:13 2010
@@ -894,11 +894,11 @@
                     "adr$p\t$dst, #$label", []>,
                 T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
 
+} // neverHasSideEffects
 def tLEApcrelJT : T1I<(outs tGPR:$dst),
                       (ins i32imm:$label, nohash_imm:$id, pred:$p),
                       IIC_iALUi, "adr$p\t$dst, #${label}_${id}", []>,
                   T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10
-} // neverHasSideEffects
 
 //===----------------------------------------------------------------------===//
 // TLS Instructions

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb2.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb2.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb2.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrThumb2.td Fri Jul  2 04:57:13 2010
@@ -637,8 +637,7 @@
 multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> {
   def r     : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
                   opc, ".w\t$dst, $src",
-                 [(set GPR:$dst, (opnode GPR:$src))]>,
-                 Requires<[HasT2ExtractPack]> {
+                 [(set GPR:$dst, (opnode GPR:$src))]> {
      let Inst{31-27} = 0b11111;
      let Inst{26-23} = 0b0100;
      let Inst{22-20} = opcod;
@@ -649,8 +648,7 @@
    }
   def r_rot : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$rot), IIC_iUNAsi,
                   opc, ".w\t$dst, $src, ror $rot",
-                 [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]>,
-                 Requires<[HasT2ExtractPack]> {
+                 [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]> {
      let Inst{31-27} = 0b11111;
      let Inst{26-23} = 0b0100;
      let Inst{22-20} = opcod;
@@ -661,8 +659,8 @@
    }
 }
 
-// SXTB16 and UXTB16 do not need the .w qualifier.
-multiclass T2I_unary_rrot_nw<bits<3> opcod, string opc, PatFrag opnode> {
+// UXTB16 - Requires T2ExtractPack, does not need the .w qualifier.
+multiclass T2I_unary_rrot_uxtb16<bits<3> opcod, string opc, PatFrag opnode> {
   def r     : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
                   opc, "\t$dst, $src",
                  [(set GPR:$dst, (opnode GPR:$src))]>,
@@ -689,9 +687,9 @@
    }
 }
 
-// DO variant - disassembly only, no pattern
-
-multiclass T2I_unary_rrot_DO<bits<3> opcod, string opc> {
+// SXTB16 - Requires T2ExtractPack, does not need the .w qualifier, no pattern
+// supported yet.
+multiclass T2I_unary_rrot_sxtb16<bits<3> opcod, string opc> {
   def r     : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr,
                   opc, "\t$dst, $src", []> {
      let Inst{31-27} = 0b11111;
@@ -787,6 +785,7 @@
   let Inst{19-16} = 0b1111; // Rn
   let Inst{15} = 0;
 }
+} // neverHasSideEffects
 def t2LEApcrelJT : T2XI<(outs GPR:$dst),
                         (ins i32imm:$label, nohash_imm:$id, pred:$p), IIC_iALUi,
                         "adr$p.w\t$dst, #${label}_${id}", []> {
@@ -798,7 +797,6 @@
   let Inst{19-16} = 0b1111; // Rn
   let Inst{15} = 0;
 }
-} // neverHasSideEffects
 
 // ADD r, sp, {so_imm|i12}
 def t2ADDrSPi   : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm),
@@ -1330,7 +1328,7 @@
                               UnOpFrag<(sext_inreg node:$Src, i8)>>;
 defm t2SXTH  : T2I_unary_rrot<0b000, "sxth",
                               UnOpFrag<(sext_inreg node:$Src, i16)>>;
-defm t2SXTB16 : T2I_unary_rrot_DO<0b010, "sxtb16">;
+defm t2SXTB16 : T2I_unary_rrot_sxtb16<0b010, "sxtb16">;
 
 defm t2SXTAB : T2I_bin_rrot<0b100, "sxtab",
                         BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>;
@@ -1347,13 +1345,13 @@
                                UnOpFrag<(and node:$Src, 0x000000FF)>>;
 defm t2UXTH   : T2I_unary_rrot<0b001, "uxth",
                                UnOpFrag<(and node:$Src, 0x0000FFFF)>>;
-defm t2UXTB16 : T2I_unary_rrot_nw<0b011, "uxtb16",
+defm t2UXTB16 : T2I_unary_rrot_uxtb16<0b011, "uxtb16",
                                UnOpFrag<(and node:$Src, 0x00FF00FF)>>;
 
 def : T2Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF),
-            (t2UXTB16r_rot GPR:$Src, 24)>;
+            (t2UXTB16r_rot GPR:$Src, 24)>, Requires<[HasT2ExtractPack]>;
 def : T2Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF),
-            (t2UXTB16r_rot GPR:$Src, 8)>;
+            (t2UXTB16r_rot GPR:$Src, 8)>, Requires<[HasT2ExtractPack]>;
 
 defm t2UXTAB : T2I_bin_rrot<0b101, "uxtab",
                            BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
@@ -2528,6 +2526,7 @@
 
 
 // IT block
+let Defs = [ITSTATE] in
 def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask),
                     AddrModeNone, Size2Bytes,  IIC_iALUx,
                     "it$mask\t$cc", "", []> {

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Fri Jul  2 04:57:13 2010
@@ -74,11 +74,14 @@
   private:
     struct MemOpQueueEntry {
       int Offset;
+      unsigned Reg;
+      bool isKill;
       unsigned Position;
       MachineBasicBlock::iterator MBBI;
       bool Merged;
-      MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
-        : Offset(o), Position(p), MBBI(i), Merged(false) {}
+      MemOpQueueEntry(int o, unsigned r, bool k, unsigned p, 
+                      MachineBasicBlock::iterator i)
+        : Offset(o), Reg(r), isKill(k), Position(p), MBBI(i), Merged(false) {}
     };
     typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
     typedef MemOpQueue::iterator MemOpQueueIter;
@@ -128,30 +131,30 @@
 static int getLoadStoreMultipleOpcode(int Opcode) {
   switch (Opcode) {
   case ARM::LDR:
-    NumLDMGened++;
+    ++NumLDMGened;
     return ARM::LDM;
   case ARM::STR:
-    NumSTMGened++;
+    ++NumSTMGened;
     return ARM::STM;
   case ARM::t2LDRi8:
   case ARM::t2LDRi12:
-    NumLDMGened++;
+    ++NumLDMGened;
     return ARM::t2LDM;
   case ARM::t2STRi8:
   case ARM::t2STRi12:
-    NumSTMGened++;
+    ++NumSTMGened;
     return ARM::t2STM;
   case ARM::VLDRS:
-    NumVLDMGened++;
+    ++NumVLDMGened;
     return ARM::VLDMS;
   case ARM::VSTRS:
-    NumVSTMGened++;
+    ++NumVSTMGened;
     return ARM::VSTMS;
   case ARM::VLDRD:
-    NumVLDMGened++;
+    ++NumVLDMGened;
     return ARM::VLDMD;
   case ARM::VSTRD:
-    NumVSTMGened++;
+    ++NumVSTMGened;
     return ARM::VSTMD;
   default: llvm_unreachable("Unhandled opcode!");
   }
@@ -264,45 +267,59 @@
 
 // MergeOpsUpdate - call MergeOps and update MemOps and merges accordingly on
 // success.
-void ARMLoadStoreOpt::
-MergeOpsUpdate(MachineBasicBlock &MBB,
-               MemOpQueue &memOps,
-               unsigned memOpsBegin,
-               unsigned memOpsEnd,
-               unsigned insertAfter,
-               int Offset,
-               unsigned Base,
-               bool BaseKill,
-               int Opcode,
-               ARMCC::CondCodes Pred,
-               unsigned PredReg,
-               unsigned Scratch,
-               DebugLoc dl,
-               SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
+                                     MemOpQueue &memOps,
+                                     unsigned memOpsBegin, unsigned memOpsEnd,
+                                     unsigned insertAfter, int Offset,
+                                     unsigned Base, bool BaseKill,
+                                     int Opcode,
+                                     ARMCC::CondCodes Pred, unsigned PredReg,
+                                     unsigned Scratch,
+                                     DebugLoc dl,
+                          SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
   // First calculate which of the registers should be killed by the merged
   // instruction.
-  SmallVector<std::pair<unsigned, bool>, 8> Regs;
   const unsigned insertPos = memOps[insertAfter].Position;
+
+  SmallSet<unsigned, 4> UnavailRegs;
+  SmallSet<unsigned, 4> KilledRegs;
+  DenseMap<unsigned, unsigned> Killer;
+  for (unsigned i = 0; i < memOpsBegin; ++i) {
+    if (memOps[i].Position < insertPos && memOps[i].isKill) {
+      unsigned Reg = memOps[i].Reg;
+      if (memOps[i].Merged)
+        UnavailRegs.insert(Reg);
+      else {
+        KilledRegs.insert(Reg);
+        Killer[Reg] = i;
+      }
+    }
+  }
+  for (unsigned i = memOpsEnd, e = memOps.size(); i != e; ++i) {
+    if (memOps[i].Position < insertPos && memOps[i].isKill) {
+      unsigned Reg = memOps[i].Reg;
+      KilledRegs.insert(Reg);
+      Killer[Reg] = i;
+    }
+  }
+
+  SmallVector<std::pair<unsigned, bool>, 8> Regs;
   for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
-    const MachineOperand &MO = memOps[i].MBBI->getOperand(0);
-    unsigned Reg = MO.getReg();
-    bool isKill = MO.isKill();
+    unsigned Reg = memOps[i].Reg;
+    if (UnavailRegs.count(Reg))
+      // Register is killed before and it's not easy / possible to update the
+      // kill marker on already merged instructions. Abort.
+      return;
 
     // If we are inserting the merged operation after an unmerged operation that
     // uses the same register, make sure to transfer any kill flag.
-    for (unsigned j = memOpsEnd, e = memOps.size(); !isKill && j != e; ++j)
-      if (memOps[j].Position<insertPos) {
-        const MachineOperand &MOJ = memOps[j].MBBI->getOperand(0);
-        if (MOJ.getReg() == Reg && MOJ.isKill())
-          isKill = true;
-      }
-
+    bool isKill = memOps[i].isKill || KilledRegs.count(Reg);
     Regs.push_back(std::make_pair(Reg, isKill));
   }
 
   // Try to do the merge.
   MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI;
-  Loc++;
+  ++Loc;
   if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode,
                 Pred, PredReg, Scratch, dl, Regs))
     return;
@@ -311,13 +328,13 @@
   Merges.push_back(prior(Loc));
   for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) {
     // Remove kill flags from any unmerged memops that come before insertPos.
-    if (Regs[i-memOpsBegin].second)
-      for (unsigned j = memOpsEnd, e = memOps.size(); j != e; ++j)
-        if (memOps[j].Position<insertPos) {
-          MachineOperand &MOJ = memOps[j].MBBI->getOperand(0);
-          if (MOJ.getReg() == Regs[i-memOpsBegin].first && MOJ.isKill())
-            MOJ.setIsKill(false);
-        }
+    if (Regs[i-memOpsBegin].second) {
+      unsigned Reg = Regs[i-memOpsBegin].first;
+      if (KilledRegs.count(Reg)) {
+        unsigned j = Killer[Reg];
+        memOps[j].MBBI->getOperand(0).setIsKill(false);
+      }
+    }
     MBB.erase(memOps[i].MBBI);
     memOps[i].Merged = true;
   }
@@ -771,18 +788,21 @@
 /// isMemoryOp - Returns true if instruction is a memory operations (that this
 /// pass is capable of operating on).
 static bool isMemoryOp(const MachineInstr *MI) {
-  if (MI->hasOneMemOperand()) {
-    const MachineMemOperand *MMO = *MI->memoperands_begin();
+  // When no memory operands are present, conservatively assume unaligned,
+  // volatile, unfoldable.
+  if (!MI->hasOneMemOperand())
+    return false;
 
-    // Don't touch volatile memory accesses - we may be changing their order.
-    if (MMO->isVolatile())
-      return false;
+  const MachineMemOperand *MMO = *MI->memoperands_begin();
 
-    // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
-    // not.
-    if (MMO->getAlignment() < 4)
-      return false;
-  }
+  // Don't touch volatile memory accesses - we may be changing their order.
+  if (MMO->isVolatile())
+    return false;
+
+  // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
+  // not.
+  if (MMO->getAlignment() < 4)
+    return false;
 
   // str <undef> could probably be eliminated entirely, but for now we just want
   // to avoid making a mess of it.
@@ -910,6 +930,7 @@
     if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
       return false;
 
+    MachineBasicBlock::iterator NewBBI = MBBI;
     bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
     bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
     bool EvenDeadKill = isLd ?
@@ -954,6 +975,7 @@
                   getKillRegState(OddDeadKill)  | getUndefRegState(OddUndef));
         ++NumSTRD2STM;
       }
+      NewBBI = llvm::prior(MBBI);
     } else {
       // Split into two instructions.
       assert((!isT2 || !OffReg) &&
@@ -974,6 +996,7 @@
                       OddReg, OddDeadKill, false,
                       BaseReg, false, BaseUndef, OffReg, false, OffUndef,
                       Pred, PredReg, TII, isT2);
+        NewBBI = llvm::prior(MBBI);
         InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
                       EvenReg, EvenDeadKill, false,
                       BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
@@ -990,6 +1013,7 @@
                       EvenReg, EvenDeadKill, EvenUndef,
                       BaseReg, false, BaseUndef, OffReg, false, OffUndef,
                       Pred, PredReg, TII, isT2);
+        NewBBI = llvm::prior(MBBI);
         InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
                       OddReg, OddDeadKill, OddUndef,
                       BaseReg, BaseKill, BaseUndef, OffReg, OffKill, OffUndef,
@@ -1001,8 +1025,9 @@
         ++NumSTRD2STR;
     }
 
-    MBBI = prior(MBBI);
     MBB.erase(MI);
+    MBBI = NewBBI;
+    return true;
   }
   return false;
 }
@@ -1035,6 +1060,9 @@
     if (isMemOp) {
       int Opcode = MBBI->getOpcode();
       unsigned Size = getLSMultipleTransferSize(MBBI);
+      const MachineOperand &MO = MBBI->getOperand(0);
+      unsigned Reg = MO.getReg();
+      bool isKill = MO.isDef() ? false : MO.isKill();
       unsigned Base = MBBI->getOperand(1).getReg();
       unsigned PredReg = 0;
       ARMCC::CondCodes Pred = llvm::getInstrPredicate(MBBI, PredReg);
@@ -1056,8 +1084,8 @@
         CurrSize = Size;
         CurrPred = Pred;
         CurrPredReg = PredReg;
-        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
-        NumMemOps++;
+        MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill, Position, MBBI));
+        ++NumMemOps;
         Advance = true;
       } else {
         if (Clobber) {
@@ -1069,15 +1097,17 @@
           // No need to match PredReg.
           // Continue adding to the queue.
           if (Offset > MemOps.back().Offset) {
-            MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
-            NumMemOps++;
+            MemOps.push_back(MemOpQueueEntry(Offset, Reg, isKill,
+                                             Position, MBBI));
+            ++NumMemOps;
             Advance = true;
           } else {
             for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
                  I != E; ++I) {
               if (Offset < I->Offset) {
-                MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
-                NumMemOps++;
+                MemOps.insert(I, MemOpQueueEntry(Offset, Reg, isKill,
+                                                 Position, MBBI));
+                ++NumMemOps;
                 Advance = true;
                 break;
               } else if (Offset == I->Offset) {

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMMachineFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMMachineFunctionInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMMachineFunctionInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMMachineFunctionInfo.h Fri Jul  2 04:57:13 2010
@@ -88,6 +88,9 @@
   /// VarArgsFrameIndex - FrameIndex for start of varargs area.
   int VarArgsFrameIndex;
 
+  /// HasITBlocks - True if IT blocks have been inserted.
+  bool HasITBlocks;
+
 public:
   ARMFunctionInfo() :
     isThumb(false),
@@ -97,7 +100,8 @@
     FramePtrSpillOffset(0), GPRCS1Offset(0), GPRCS2Offset(0), DPRCSOffset(0),
     GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
     GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0),
-    JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0) {}
+    JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0),
+    HasITBlocks(false) {}
 
   explicit ARMFunctionInfo(MachineFunction &MF) :
     isThumb(MF.getTarget().getSubtarget<ARMSubtarget>().isThumb()),
@@ -108,7 +112,8 @@
     GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0),
     GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32),
     SpilledCSRegs(MF.getTarget().getRegisterInfo()->getNumRegs()),
-    JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0) {}
+    JumpTableUId(0), ConstPoolEntryUId(0), VarArgsFrameIndex(0),
+    HasITBlocks(false) {}
 
   bool isThumbFunction() const { return isThumb; }
   bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
@@ -229,6 +234,9 @@
 
   int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
   void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+
+  bool hasITBlocks() const { return HasITBlocks; }
+  void setHasITBlocks(bool h) { HasITBlocks = h; }
 };
 } // End llvm namespace
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td Fri Jul  2 04:57:13 2010
@@ -197,9 +197,9 @@
 }
 
 // Current Program Status Register.
-def CPSR  : ARMReg<0, "cpsr">;
-
-def FPSCR : ARMReg<1, "fpscr">;
+def CPSR    : ARMReg<0, "cpsr">;
+def FPSCR   : ARMReg<1, "fpscr">;
+def ITSTATE : ARMReg<2, "itstate">;
 
 // Register classes.
 //
@@ -386,14 +386,9 @@
           return ARM_GPR_NOR9_TC;
         else
           return ARM_GPR_R9_TC;
-      } else {
-        if (Subtarget.isR9Reserved())
-          return ARM_GPR_NOR9_TC;
-        else if (Subtarget.isThumb())
-          return ARM_GPR_R9_TC;
-        else
-          return ARM_GPR_R9_TC;
-      }
+      } else
+        // R9 is either callee-saved or reserved; can't use it.
+        return ARM_GPR_NOR9_TC;
     }
 
     tcGPRClass::iterator
@@ -412,14 +407,9 @@
           I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
         else
           I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
-      } else {
-        if (Subtarget.isR9Reserved())
-          I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
-        else if (Subtarget.isThumb())
-          I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
-        else
-          I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
-      }
+      } else
+        // R9 is either callee-saved or reserved; can't use it.
+        I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
       return I;
     }
   }];
@@ -557,4 +547,3 @@
 
 // Condition code registers.
 def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>;
-

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA8.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA8.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA8.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA8.td Fri Jul  2 04:57:13 2010
@@ -1,10 +1,10 @@
 //=- ARMScheduleA8.td - ARM Cortex-A8 Scheduling Definitions -*- tablegen -*-=//
-// 
+//
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file defines the itinerary class data for the ARM Cortex A8 processors.
@@ -32,50 +32,50 @@
   InstrItinData<IIC_iALUx    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>]>,
   //
   // Binary Instructions that produce a result
-  InstrItinData<IIC_iALUi    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
-  InstrItinData<IIC_iALUr    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 2]>,
-  InstrItinData<IIC_iALUsi   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
-  InstrItinData<IIC_iALUsr   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1, 1]>,
+  InstrItinData<IIC_iALUi ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+  InstrItinData<IIC_iALUr ,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 2]>,
+  InstrItinData<IIC_iALUsi,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1]>,
+  InstrItinData<IIC_iALUsr,[InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2, 1, 1]>,
   //
   // Unary Instructions that produce a result
-  InstrItinData<IIC_iUNAr    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
-  InstrItinData<IIC_iUNAsi   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iUNAsr   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+  InstrItinData<IIC_iUNAr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+  InstrItinData<IIC_iUNAsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iUNAsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
   //
   // Compare instructions
-  InstrItinData<IIC_iCMPi    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
-  InstrItinData<IIC_iCMPr    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
-  InstrItinData<IIC_iCMPsi   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iCMPsr   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+  InstrItinData<IIC_iCMPi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
+  InstrItinData<IIC_iCMPr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 2]>,
+  InstrItinData<IIC_iCMPsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iCMPsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
   //
   // Move instructions, unconditional
-  InstrItinData<IIC_iMOVi    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1]>,
-  InstrItinData<IIC_iMOVr    , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
-  InstrItinData<IIC_iMOVsi   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
-  InstrItinData<IIC_iMOVsr   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1, 1]>,
+  InstrItinData<IIC_iMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1]>,
+  InstrItinData<IIC_iMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+  InstrItinData<IIC_iMOVsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1]>,
+  InstrItinData<IIC_iMOVsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [1, 1, 1]>,
   //
   // Move instructions, conditional
-  InstrItinData<IIC_iCMOVi   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
-  InstrItinData<IIC_iCMOVr   , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iCMOVsi  , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iCMOVsr  , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
+  InstrItinData<IIC_iCMOVi , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2]>,
+  InstrItinData<IIC_iCMOVr , [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iCMOVsi, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iCMOVsr, [InstrStage<1, [A8_Pipe0, A8_Pipe1]>], [2, 1, 1]>,
 
   // Integer multiply pipeline
   // Result written in E5, but that is relative to the last cycle of multicycle,
   // so we use 6 for those cases
   //
   InstrItinData<IIC_iMUL16   , [InstrStage<1, [A8_Pipe0]>], [5, 1, 1]>,
-  InstrItinData<IIC_iMAC16   , [InstrStage<1, [A8_Pipe1], 0>, 
+  InstrItinData<IIC_iMAC16   , [InstrStage<1, [A8_Pipe1], 0>,
                                 InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
-  InstrItinData<IIC_iMUL32   , [InstrStage<1, [A8_Pipe1], 0>, 
+  InstrItinData<IIC_iMUL32   , [InstrStage<1, [A8_Pipe1], 0>,
                                 InstrStage<2, [A8_Pipe0]>], [6, 1, 1]>,
-  InstrItinData<IIC_iMAC32   , [InstrStage<1, [A8_Pipe1], 0>, 
+  InstrItinData<IIC_iMAC32   , [InstrStage<1, [A8_Pipe1], 0>,
                                 InstrStage<2, [A8_Pipe0]>], [6, 1, 1, 4]>,
-  InstrItinData<IIC_iMUL64   , [InstrStage<2, [A8_Pipe1], 0>, 
+  InstrItinData<IIC_iMUL64   , [InstrStage<2, [A8_Pipe1], 0>,
                                 InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
-  InstrItinData<IIC_iMAC64   , [InstrStage<2, [A8_Pipe1], 0>, 
+  InstrItinData<IIC_iMAC64   , [InstrStage<2, [A8_Pipe1], 0>,
                                 InstrStage<3, [A8_Pipe0]>], [6, 6, 1, 1]>,
-  
+
   // Integer load pipeline
   //
   // loads have an extra cycle of latency, but are fully pipelined
@@ -166,7 +166,7 @@
                                 InstrStage<2, [A8_Pipe1]>,
                                 InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                 InstrStage<1, [A8_LdSt0]>]>,
-  
+
   // Branch
   //
   // no delay slots, so the latency of a branch is unimportant
@@ -276,14 +276,14 @@
   //
   // Single-precision FP Load
   // use A8_Issue to enforce the 1 load/store per cycle limit
-  InstrItinData<IIC_fpLoad32, [InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_fpLoad32, [InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>]>,
   //
   // Double-precision FP Load
   // use A8_Issue to enforce the 1 load/store per cycle limit
-  InstrItinData<IIC_fpLoad64, [InstrStage<2, [A8_Issue], 0>, 
+  InstrItinData<IIC_fpLoad64, [InstrStage<2, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0], 0>,
                                InstrStage<1, [A8_Pipe1]>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -292,7 +292,7 @@
   //
   // FP Load Multiple
   // use A8_Issue to enforce the 1 load/store per cycle limit
-  InstrItinData<IIC_fpLoadm,  [InstrStage<3, [A8_Issue], 0>, 
+  InstrItinData<IIC_fpLoadm,  [InstrStage<3, [A8_Issue], 0>,
                                InstrStage<2, [A8_Pipe0], 0>,
                                InstrStage<2, [A8_Pipe1]>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -301,14 +301,14 @@
   //
   // Single-precision FP Store
   // use A8_Issue to enforce the 1 load/store per cycle limit
-  InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_fpStore32,[InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>]>,
   //
   // Double-precision FP Store
   // use A8_Issue to enforce the 1 load/store per cycle limit
-  InstrItinData<IIC_fpStore64,[InstrStage<2, [A8_Issue], 0>, 
+  InstrItinData<IIC_fpStore64,[InstrStage<2, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0], 0>,
                                InstrStage<1, [A8_Pipe1]>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -317,7 +317,7 @@
   //
   // FP Store Multiple
   // use A8_Issue to enforce the 1 load/store per cycle limit
-  InstrItinData<IIC_fpStorem, [InstrStage<3, [A8_Issue], 0>, 
+  InstrItinData<IIC_fpStorem, [InstrStage<3, [A8_Issue], 0>,
                                InstrStage<2, [A8_Pipe0], 0>,
                                InstrStage<2, [A8_Pipe1]>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -329,35 +329,35 @@
   //
   // VLD1
   // FIXME: We don't model this instruction properly
-  InstrItinData<IIC_VLD1,     [InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_VLD1,     [InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>]>,
   //
   // VLD2
   // FIXME: We don't model this instruction properly
-  InstrItinData<IIC_VLD2,     [InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_VLD2,     [InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>], [2, 2, 1]>,
   //
   // VLD3
   // FIXME: We don't model this instruction properly
-  InstrItinData<IIC_VLD3,     [InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_VLD3,     [InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>], [2, 2, 2, 1]>,
   //
   // VLD4
   // FIXME: We don't model this instruction properly
-  InstrItinData<IIC_VLD4,     [InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_VLD4,     [InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>], [2, 2, 2, 2, 1]>,
   //
   // VST
   // FIXME: We don't model this instruction properly
-  InstrItinData<IIC_VST,      [InstrStage<1, [A8_Issue], 0>, 
+  InstrItinData<IIC_VST,      [InstrStage<1, [A8_Issue], 0>,
                                InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_LdSt0], 0>,
                                InstrStage<1, [A8_NLSPipe]>]>,
@@ -600,7 +600,7 @@
   InstrItinData<IIC_VTB4,     [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_NLSPipe]>,
                                InstrStage<1, [A8_NPipe], 0>,
-                               InstrStage<2, [A8_NLSPipe]>], [4, 2, 2, 3, 3, 1]>,
+                               InstrStage<2, [A8_NLSPipe]>],[4, 2, 2, 3, 3, 1]>,
   //
   // VTBX
   InstrItinData<IIC_VTBX1,    [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
@@ -610,9 +610,9 @@
   InstrItinData<IIC_VTBX3,    [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_NLSPipe]>,
                                InstrStage<1, [A8_NPipe], 0>,
-                               InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 1]>,
+                               InstrStage<2, [A8_NLSPipe]>],[4, 1, 2, 2, 3, 1]>,
   InstrItinData<IIC_VTBX4,    [InstrStage<1, [A8_Pipe0, A8_Pipe1]>,
                                InstrStage<1, [A8_NLSPipe]>,
                                InstrStage<1, [A8_NPipe], 0>,
-                               InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
+                            InstrStage<2, [A8_NLSPipe]>], [4, 1, 2, 2, 3, 3, 1]>
 ]>;

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA9.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA9.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA9.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMScheduleA9.td Fri Jul  2 04:57:13 2010
@@ -1,10 +1,10 @@
 //=- ARMScheduleA9.td - ARM Cortex-A9 Scheduling Definitions -*- tablegen -*-=//
-// 
+//
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file defines the itinerary class data for the ARM Cortex A9 processors.
@@ -31,36 +31,36 @@
   // FIXME: There are no operand latencies for these instructions at all!
   //
   // Move instructions, unconditional
-  InstrItinData<IIC_iMOVi    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1]>,
-  InstrItinData<IIC_iMOVr    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
-  InstrItinData<IIC_iMOVsi   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
-  InstrItinData<IIC_iMOVsr   , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
+  InstrItinData<IIC_iMOVi   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1]>,
+  InstrItinData<IIC_iMOVr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
+  InstrItinData<IIC_iMOVsi  , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [1, 1]>,
+  InstrItinData<IIC_iMOVsr  , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
   //
   // No operand cycles
   InstrItinData<IIC_iALUx    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>]>,
   //
   // Binary Instructions that produce a result
-  InstrItinData<IIC_iALUi    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
-  InstrItinData<IIC_iALUr    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2, 2]>,
-  InstrItinData<IIC_iALUsi   , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
-  InstrItinData<IIC_iALUsr   , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1, 1]>,
+  InstrItinData<IIC_iALUi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
+  InstrItinData<IIC_iALUr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2, 2]>,
+  InstrItinData<IIC_iALUsi, [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1]>,
+  InstrItinData<IIC_iALUsr,[InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 2, 1, 1]>,
   //
   // Unary Instructions that produce a result
-  InstrItinData<IIC_iUNAr    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
-  InstrItinData<IIC_iUNAsi   , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iUNAsr   , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+  InstrItinData<IIC_iUNAr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
+  InstrItinData<IIC_iUNAsi  , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iUNAsr  , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
   //
   // Compare instructions
-  InstrItinData<IIC_iCMPi    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
-  InstrItinData<IIC_iCMPr    , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
-  InstrItinData<IIC_iCMPsi   , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iCMPsr   , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+  InstrItinData<IIC_iCMPi   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
+  InstrItinData<IIC_iCMPr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 2]>,
+  InstrItinData<IIC_iCMPsi  , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iCMPsr  , [InstrStage<3, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
   //
   // Move instructions, conditional
-  InstrItinData<IIC_iCMOVi   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
-  InstrItinData<IIC_iCMOVr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iCMOVsi  , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
-  InstrItinData<IIC_iCMOVsr  , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
+  InstrItinData<IIC_iCMOVi  , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2]>,
+  InstrItinData<IIC_iCMOVr  , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iCMOVsi , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>], [2, 1]>,
+  InstrItinData<IIC_iCMOVsr , [InstrStage<2, [A9_Pipe0, A9_Pipe1]>], [2, 1, 1]>,
 
   // Integer multiply pipeline
   //
@@ -152,8 +152,8 @@
   //    register file writeback!).
   // Every NEON instruction does the same but with FUs swapped.
   //
-  // Since the reserved FU cannot be acquired this models precisly "cross-domain"
-  // stalls.
+  // Since the reserved FU cannot be acquired, this models precisely
+  // "cross-domain" stalls.
 
   // VFP
   // Issue through integer pipeline, and execute in NEON unit.
@@ -373,7 +373,7 @@
                                InstrStage<1, [A9_NPipe]>]>,
   // NEON
   // Issue through integer pipeline, and execute in NEON unit.
-  // FIXME: Neon pipeline and LdSt unit are multiplexed. 
+  // FIXME: Neon pipeline and LdSt unit are multiplexed.
   //        Add some syntactic sugar to model this!
   // VLD1
   // FIXME: We don't model this instruction properly
@@ -841,5 +841,5 @@
                                // Extra latency cycles since wbck is 8 cycles
                                InstrStage<9, [A9_DRegsVFP], 0, Reserved>,
                                InstrStage<1, [A9_Pipe1]>,
-                               InstrStage<2, [A9_NPipe]>], [4, 1, 2, 2, 3, 3, 1]>
+                              InstrStage<2, [A9_NPipe]>], [4, 1, 2, 2, 3, 3, 1]>
 ]>;

Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp Fri Jul  2 04:57:13 2010
@@ -27,11 +27,6 @@
   cl::desc("Form IT blocks early before register allocation"),
   cl::init(false));
 
-static cl::opt<bool>
-EarlyIfConvert("arm-early-if-convert", cl::Hidden,
-  cl::desc("Run if-conversion before post-ra scheduling"),
-  cl::init(false));
-
 static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
   Triple TheTriple(TT);
   switch (TheTriple.getOS()) {
@@ -71,8 +66,10 @@
                                    const std::string &FS)
   : ARMBaseTargetMachine(T, TT, FS, false), InstrInfo(Subtarget),
     DataLayout(Subtarget.isAPCS_ABI() ?
-               std::string("e-p:32:32-f64:32:32-i64:32:32-n32") :
-               std::string("e-p:32:32-f64:64:64-i64:64:64-n32")),
+               std::string("e-p:32:32-f64:32:32-i64:32:32-"
+                           "v128:32:128-v64:32:64-n32") :
+               std::string("e-p:32:32-f64:64:64-i64:64:64-"
+                           "v128:64:128-v64:64:64-n32")),
     TLInfo(*this),
     TSInfo(*this) {
 }
@@ -85,9 +82,11 @@
               : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
     DataLayout(Subtarget.isAPCS_ABI() ?
                std::string("e-p:32:32-f64:32:32-i64:32:32-"
-                           "i16:16:32-i8:8:32-i1:8:32-a:0:32-n32") :
+                           "i16:16:32-i8:8:32-i1:8:32-"
+                           "v128:32:128-v64:32:64-a:0:32-n32") :
                std::string("e-p:32:32-f64:64:64-i64:64:64-"
-                           "i16:16:32-i8:8:32-i1:8:32-a:0:32-n32")),
+                           "i16:16:32-i8:8:32-i1:8:32-"
+                           "v128:64:128-v64:64:64-a:0:32-n32")),
     TLInfo(*this),
     TSInfo(*this) {
 }
@@ -110,8 +109,7 @@
   if (OptLevel != CodeGenOpt::None && !Subtarget.isThumb1Only())
     PM.add(createARMLoadStoreOptimizationPass(true));
 
-  if (OptLevel != CodeGenOpt::None && Subtarget.isThumb2() &&
-      EarlyITBlockFormation)
+  if (Subtarget.isThumb2() && EarlyITBlockFormation)
     PM.add(createThumb2ITBlockPass(true));
   return true;
 }
@@ -130,25 +128,20 @@
   // proper scheduling.
   PM.add(createARMExpandPseudoPass());
 
-  if (EarlyIfConvert && OptLevel != CodeGenOpt::None) {
-    if (!Subtarget.isThumb1Only()) 
+  if (OptLevel != CodeGenOpt::None) {
+    if (!Subtarget.isThumb1Only())
       PM.add(createIfConverterPass());
   }
+  if (Subtarget.isThumb2())
+    PM.add(createThumb2ITBlockPass());
 
   return true;
 }
 
 bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
                                           CodeGenOpt::Level OptLevel) {
-  if (!EarlyIfConvert && OptLevel != CodeGenOpt::None) {
-    if (!Subtarget.isThumb1Only())
-      PM.add(createIfConverterPass());
-  }
-
-  if (Subtarget.isThumb2()) {
-    PM.add(createThumb2ITBlockPass());
+  if (Subtarget.isThumb2())
     PM.add(createThumb2SizeReductionPass());
-  }
 
   PM.add(createARMConstantIslandPass());
   return true;

Modified: llvm/branches/wendling/eh/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/AsmParser/ARMAsmParser.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/AsmParser/ARMAsmParser.cpp Fri Jul  2 04:57:13 2010
@@ -665,7 +665,6 @@
   
   Operands.push_back(Op.take());
 
-  SMLoc Loc = Parser.getTok().getLoc();
   if (getLexer().isNot(AsmToken::EndOfStatement)) {
 
     // Read the first operand.
@@ -763,15 +762,10 @@
   if (Tok.isNot(AsmToken::Identifier))
     return Error(L, "unexpected token in .syntax directive");
   const StringRef &Mode = Tok.getString();
-  bool unified_syntax;
-  if (Mode == "unified" || Mode == "UNIFIED") {
+  if (Mode == "unified" || Mode == "UNIFIED")
     Parser.Lex();
-    unified_syntax = true;
-  }
-  else if (Mode == "divided" || Mode == "DIVIDED") {
+  else if (Mode == "divided" || Mode == "DIVIDED")
     Parser.Lex();
-    unified_syntax = false;
-  }
   else
     return Error(L, "unrecognized syntax mode in .syntax directive");
 
@@ -791,15 +785,10 @@
   if (Tok.isNot(AsmToken::Integer))
     return Error(L, "unexpected token in .code directive");
   int64_t Val = Parser.getTok().getIntVal();
-  bool thumb_mode;
-  if (Val == 16) {
+  if (Val == 16)
     Parser.Lex();
-    thumb_mode = true;
-  }
-  else if (Val == 32) {
+  else if (Val == 32)
     Parser.Lex();
-    thumb_mode = false;
-  }
   else
     return Error(L, "invalid operand to .code directive");
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/CMakeLists.txt?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/CMakeLists.txt Fri Jul  2 04:57:13 2010
@@ -33,6 +33,7 @@
   NEONPreAllocPass.cpp
   Thumb1InstrInfo.cpp
   Thumb1RegisterInfo.cpp
+  Thumb2HazardRecognizer.cpp
   Thumb2ITBlockPass.cpp
   Thumb2InstrInfo.cpp
   Thumb2RegisterInfo.cpp

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp Fri Jul  2 04:57:13 2010
@@ -2290,7 +2290,7 @@
            "Reg operand expected");
 
     RegClass = OpInfo[OpIdx].RegClass;
-    while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
+    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
       MI.addOperand(MCOperand::CreateReg(
                       getRegisterEnum(B, RegClass, Rd,
                                       UseDRegPair(Opcode))));
@@ -2310,7 +2310,7 @@
     // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
     RegClass = OpInfo[0].RegClass;
 
-    while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
+    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
       MI.addOperand(MCOperand::CreateReg(
                       getRegisterEnum(B, RegClass, Rd,
                                       UseDRegPair(Opcode))));
@@ -2336,7 +2336,7 @@
       ++OpIdx;
     }
 
-    while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
+    while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
       assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
              "Tied to operand expected");
       MI.addOperand(MCOperand::CreateReg(0));
@@ -2839,15 +2839,9 @@
   return true;
 }
 
-static bool DisassembleNEONFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
-    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
-  assert(0 && "Unreachable code!");
-  return false;
-}
-
 // Vector Get Lane (move scalar to ARM core register) Instructions.
 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
-static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
   const TargetInstrDesc &TID = ARMInsts[Opcode];
@@ -2881,7 +2875,7 @@
 
 // Vector Set Lane (move ARM core register to scalar) Instructions.
 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
-static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
   const TargetInstrDesc &TID = ARMInsts[Opcode];
@@ -2920,7 +2914,7 @@
 
 // Vector Duplicate Instructions (from ARM core register to all elements).
 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
-static bool DisassembleNEONDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
   const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
@@ -3060,13 +3054,6 @@
   return false;
 }
 
-static bool DisassembleThumbMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
-    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
-
-  assert(0 && "Unexpected thumb misc. instruction!");
-  return false;
-}
-
 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
 /// We divide the disassembly task into different categories, with each one
 /// corresponding to a specific instruction encoding format.  There could be
@@ -3098,12 +3085,10 @@
   &DisassembleVFPLdStMulFrm,
   &DisassembleVFPMiscFrm,
   &DisassembleThumbFrm,
-  &DisassembleNEONFrm,
-  &DisassembleNEONGetLnFrm,
-  &DisassembleNEONSetLnFrm,
-  &DisassembleNEONDupFrm,
   &DisassembleMiscFrm,
-  &DisassembleThumbMiscFrm,
+  &DisassembleNGetLnFrm,
+  &DisassembleNSetLnFrm,
+  &DisassembleNDupFrm,
 
   // VLD and VST (including one lane) Instructions.
   &DisassembleNLdSt,

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp Fri Jul  2 04:57:13 2010
@@ -175,10 +175,10 @@
         isKill = false;
     }
 
-    if (isKill) {
+    if (isKill)
       MBB.addLiveIn(Reg);
-      MIB.addReg(Reg, RegState::Kill);
-    }
+
+    MIB.addReg(Reg, getKillRegState(isKill));
   }
   return true;
 }

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb1RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb1RegisterInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb1RegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb1RegisterInfo.cpp Fri Jul  2 04:57:13 2010
@@ -410,6 +410,8 @@
   // before that instead and adjust the UseMI.
   bool done = false;
   for (MachineBasicBlock::iterator II = I; !done && II != UseMI ; ++II) {
+    if (II->isDebugValue())
+      continue;
     // If this instruction affects R12, adjust our restore point.
     for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
       const MachineOperand &MO = II->getOperand(i);

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp Fri Jul  2 04:57:13 2010
@@ -31,6 +31,7 @@
       MachineFunctionPass(&ID), PreRegAlloc(PreRA) {}
 
     const Thumb2InstrInfo *TII;
+    const TargetRegisterInfo *TRI;
     ARMFunctionInfo *AFI;
 
     virtual bool runOnMachineFunction(MachineFunction &Fn);
@@ -52,18 +53,15 @@
                            SmallVector<MachineInstr*,4> &LastUses);
     bool InsertITBlock(MachineInstr *First, MachineInstr *Last);
     bool InsertITBlocks(MachineBasicBlock &MBB);
+    bool MoveCopyOutOfITBlock(MachineInstr *MI,
+                              ARMCC::CondCodes CC, ARMCC::CondCodes OCC,
+                              SmallSet<unsigned, 4> &Defs,
+                              SmallSet<unsigned, 4> &Uses);
     bool InsertITInstructions(MachineBasicBlock &MBB);
   };
   char Thumb2ITBlockPass::ID = 0;
 }
 
-static ARMCC::CondCodes getPredicate(const MachineInstr *MI, unsigned &PredReg){
-  unsigned Opc = MI->getOpcode();
-  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
-    return ARMCC::AL;
-  return llvm::getInstrPredicate(MI, PredReg);
-}
-
 bool
 Thumb2ITBlockPass::MoveCPSRUseUp(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MBBI,
@@ -77,7 +75,7 @@
   for (unsigned i = 0; i < 4; ++i) {
     MachineInstr *MI = &*I;
     unsigned MPredReg = 0;
-    ARMCC::CondCodes MCC = getPredicate(MI, MPredReg);
+    ARMCC::CondCodes MCC = llvm::getITInstrPredicate(MI, MPredReg);
     if (MCC != ARMCC::AL) {
       if (MPredReg != PredReg || (MCC != CC && MCC != OCC))
         return false;
@@ -143,7 +141,6 @@
                                        SmallVector<MachineInstr*,4> &FirstUses,
                                        SmallVector<MachineInstr*,4> &LastUses) {
   bool SeenUse = false;
-  MachineOperand *LastDef = 0;
   MachineOperand *LastUse = 0;
   MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
   while (MBBI != E) {
@@ -177,7 +174,6 @@
         LastUses.push_back(LastUse->getParent());
         LastUse = 0;
       }
-      LastDef = Def;
       SeenUse = false;
     }
   }
@@ -204,7 +200,7 @@
     return false;
 
   unsigned PredReg = 0;
-  ARMCC::CondCodes CC = getPredicate(First, PredReg);
+  ARMCC::CondCodes CC = llvm::getITInstrPredicate(First, PredReg);
   if (CC == ARMCC::AL)
     return Modified;
 
@@ -217,7 +213,7 @@
       return Modified;
     MachineInstr *NMI = &*MBBI;
     unsigned NPredReg = 0;
-    ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
+    ARMCC::CondCodes NCC = llvm::getITInstrPredicate(NMI, NPredReg);
     if (NCC != CC && NCC != OCC) {
       if (NCC != ARMCC::AL)
         return Modified;
@@ -249,20 +245,79 @@
   return Modified;
 }
 
-static void TrackDefUses(MachineInstr *MI, SmallSet<unsigned, 4> &Defs,
-                         SmallSet<unsigned, 4> &Uses) {
+/// TrackDefUses - Tracking what registers are being defined and used by
+/// instructions in the IT block. This also tracks "dependencies", i.e. uses
+/// in the IT block that are defined before the IT instruction.
+static void TrackDefUses(MachineInstr *MI,
+                         SmallSet<unsigned, 4> &Defs,
+                         SmallSet<unsigned, 4> &Uses,
+                         const TargetRegisterInfo *TRI) {
+  SmallVector<unsigned, 4> LocalDefs;
+  SmallVector<unsigned, 4> LocalUses;
+
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg())
       continue;
     unsigned Reg = MO.getReg();
-    if (!Reg)
+    if (!Reg || Reg == ARM::ITSTATE || Reg == ARM::SP)
       continue;
-    if (MO.isDef())
-      Defs.insert(Reg);
+    if (MO.isUse())
+      LocalUses.push_back(Reg);
     else
-      Uses.insert(Reg);
+      LocalDefs.push_back(Reg);
+  }
+
+  for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
+    unsigned Reg = LocalUses[i];
+    Uses.insert(Reg);
+    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+         *Subreg; ++Subreg)
+      Uses.insert(*Subreg);
+  }
+
+  for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
+    unsigned Reg = LocalDefs[i];
+    Defs.insert(Reg);
+    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+         *Subreg; ++Subreg)
+      Defs.insert(*Subreg);
+    if (Reg == ARM::CPSR)
+      continue;
+  }
+}
+
+bool
+Thumb2ITBlockPass::MoveCopyOutOfITBlock(MachineInstr *MI,
+                                      ARMCC::CondCodes CC, ARMCC::CondCodes OCC,
+                                        SmallSet<unsigned, 4> &Defs,
+                                        SmallSet<unsigned, 4> &Uses) {
+  unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
+  if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
+    assert(SrcSubIdx == 0 && DstSubIdx == 0 &&
+           "Sub-register indices still around?");
+    // llvm models select's as two-address instructions. That means a copy
+    // is inserted before a t2MOVccr, etc. If the copy is scheduled in
+    // between selects we would end up creating multiple IT blocks.
+
+    // First check if it's safe to move it.
+    if (Uses.count(DstReg) || Defs.count(SrcReg))
+      return false;
+
+    // Then peek at the next instruction to see if it's predicated on CC or OCC.
+    // If not, then there is nothing to be gained by moving the copy.
+    MachineBasicBlock::iterator I = MI; ++I;
+    MachineBasicBlock::iterator E = MI->getParent()->end();
+    while (I != E && I->isDebugValue())
+      ++I;
+    if (I != E) {
+      unsigned NPredReg = 0;
+      ARMCC::CondCodes NCC = llvm::getITInstrPredicate(I, NPredReg);
+      if (NCC == CC || NCC == OCC)
+        return true;
+    }
   }
+  return false;
 }
 
 bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
@@ -275,7 +330,7 @@
     MachineInstr *MI = &*MBBI;
     DebugLoc dl = MI->getDebugLoc();
     unsigned PredReg = 0;
-    ARMCC::CondCodes CC = getPredicate(MI, PredReg);
+    ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
     if (CC == ARMCC::AL) {
       ++MBBI;
       continue;
@@ -283,15 +338,21 @@
 
     Defs.clear();
     Uses.clear();
-    TrackDefUses(MI, Defs, Uses);
+    TrackDefUses(MI, Defs, Uses, TRI);
 
     // Insert an IT instruction.
     MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
       .addImm(CC);
+
+    // Add implicit use of ITSTATE to IT block instructions.
+    MI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*ifDef*/,
+                                             true/*isImp*/, false/*isKill*/));
+
+    MachineInstr *LastITMI = MI;
     MachineBasicBlock::iterator InsertPos = MIB;
     ++MBBI;
 
-    // Finalize IT mask.
+    // Form IT block.
     ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
     unsigned Mask = 0, Pos = 3;
     // Branches, including tricky ones like LDM_RET, need to end an IT
@@ -305,36 +366,37 @@
       MI = NMI;
 
       unsigned NPredReg = 0;
-      ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
-      if (NCC == CC || NCC == OCC)
+      ARMCC::CondCodes NCC = llvm::getITInstrPredicate(NMI, NPredReg);
+      if (NCC == CC || NCC == OCC) {
         Mask |= (NCC & 1) << Pos;
-      else {
-        unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
+        // Add implicit use of ITSTATE.
+        NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*ifDef*/,
+                                               true/*isImp*/, false/*isKill*/));
+        LastITMI = NMI;
+      } else {
         if (NCC == ARMCC::AL &&
-            TII->isMoveInstr(*NMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
-          assert(SrcSubIdx == 0 && DstSubIdx == 0 &&
-                 "Sub-register indices still around?");
-          // llvm models select's as two-address instructions. That means a copy
-          // is inserted before a t2MOVccr, etc. If the copy is scheduled in
-          // between selects we would end up creating multiple IT blocks.
-          if (!Uses.count(DstReg) && !Defs.count(SrcReg)) {
-            --MBBI;
-            MBB.remove(NMI);
-            MBB.insert(InsertPos, NMI);
-            ++NumMovedInsts;
-            continue;
-          }
+            MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) {
+          --MBBI;
+          MBB.remove(NMI);
+          MBB.insert(InsertPos, NMI);
+          ++NumMovedInsts;
+          continue;
         }
         break;
       }
-      TrackDefUses(NMI, Defs, Uses);
+      TrackDefUses(NMI, Defs, Uses, TRI);
       --Pos;
     }
 
+    // Finalize IT mask.
     Mask |= (1 << Pos);
     // Tag along (firstcond[0] << 4) with the mask.
     Mask |= (CC & 1) << 4;
     MIB.addImm(Mask);
+
+    // Last instruction in IT block kills ITSTATE.
+    LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill();
+
     Modified = true;
     ++NumITs;
   }
@@ -346,6 +408,7 @@
   const TargetMachine &TM = Fn.getTarget();
   AFI = Fn.getInfo<ARMFunctionInfo>();
   TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
+  TRI = TM.getRegisterInfo();
 
   if (!AFI->isThumbFunction())
     return false;
@@ -360,6 +423,9 @@
       Modified |= InsertITInstructions(MBB);
   }
 
+  if (Modified && !PreRegAlloc)
+    AFI->setHasITBlocks(true);
+
   return Modified;
 }
 

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp Fri Jul  2 04:57:13 2010
@@ -17,15 +17,27 @@
 #include "ARMAddressingModes.h"
 #include "ARMGenInstrInfo.inc"
 #include "ARMMachineFunctionInfo.h"
+#include "Thumb2HazardRecognizer.h"
+#include "Thumb2InstrInfo.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/ADT/SmallVector.h"
-#include "Thumb2InstrInfo.h"
+#include "llvm/Support/CommandLine.h"
 
 using namespace llvm;
 
+static cl::opt<unsigned>
+IfCvtLimit("thumb2-ifcvt-limit", cl::Hidden,
+           cl::desc("Thumb2 if-conversion limit (default 3)"),
+           cl::init(3));
+
+static cl::opt<unsigned>
+IfCvtDiamondLimit("thumb2-ifcvt-diamond-limit", cl::Hidden,
+                  cl::desc("Thumb2 diamond if-conversion limit (default 3)"),
+                  cl::init(3));
+
 Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
   : ARMBaseInstrInfo(STI), RI(*this, STI) {
 }
@@ -35,6 +47,79 @@
   return 0;
 }
 
+void
+Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+                                         MachineBasicBlock *NewDest) const {
+  MachineBasicBlock *MBB = Tail->getParent();
+  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
+  if (!AFI->hasITBlocks()) {
+    TargetInstrInfoImpl::ReplaceTailWithBranchTo(Tail, NewDest);
+    return;
+  }
+
+  // If the first instruction of Tail is predicated, we may have to update
+  // the IT instruction.
+  unsigned PredReg = 0;
+  ARMCC::CondCodes CC = llvm::getInstrPredicate(Tail, PredReg);
+  MachineBasicBlock::iterator MBBI = Tail;
+  if (CC != ARMCC::AL)
+    // Expecting at least the t2IT instruction before it.
+    --MBBI;
+
+  // Actually replace the tail.
+  TargetInstrInfoImpl::ReplaceTailWithBranchTo(Tail, NewDest);
+
+  // Fix up IT.
+  if (CC != ARMCC::AL) {
+    MachineBasicBlock::iterator E = MBB->begin();
+    unsigned Count = 4; // At most 4 instructions in an IT block.
+    while (Count && MBBI != E) {
+      if (MBBI->isDebugValue()) {
+        --MBBI;
+        continue;
+      }
+      if (MBBI->getOpcode() == ARM::t2IT) {
+        unsigned Mask = MBBI->getOperand(1).getImm();
+        if (Count == 4)
+          MBBI->eraseFromParent();
+        else {
+          unsigned MaskOn = 1 << Count;
+          unsigned MaskOff = ~(MaskOn - 1);
+          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
+        }
+        return;
+      }
+      --MBBI;
+      --Count;
+    }
+
+    // Ctrl flow can reach here if branch folding is run before IT block
+    // formation pass.
+  }
+}
+
+bool
+Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator MBBI) const {
+  unsigned PredReg = 0;
+  return llvm::getITInstrPredicate(MBBI, PredReg) == ARMCC::AL;
+}
+
+bool Thumb2InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
+                                          unsigned NumInstrs) const {
+  return NumInstrs && NumInstrs <= IfCvtLimit;
+}
+  
+bool Thumb2InstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
+                    MachineBasicBlock &FMBB, unsigned NumF) const {
+  // FIXME: Catch optimization such as:
+  //        r0 = movne
+  //        r0 = moveq
+  return NumT && NumF &&
+    NumT <= (IfCvtDiamondLimit) && NumF <= (IfCvtDiamondLimit);
+}
+
 bool
 Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
@@ -116,6 +201,11 @@
   ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
 }
 
+ScheduleHazardRecognizer *Thumb2InstrInfo::
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
+  return (ScheduleHazardRecognizer *)new Thumb2HazardRecognizer(II);
+}
+
 void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                                unsigned DestReg, unsigned BaseReg, int NumBytes,
@@ -134,14 +224,14 @@
       // Use a movw to materialize the 16-bit constant.
       BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
         .addImm(NumBytes)
-        .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
+        .addImm((unsigned)Pred).addReg(PredReg);
       Fits = true;
     } else if ((NumBytes & 0xffff) == 0) {
       // Use a movt to materialize the 32-bit constant.
       BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
         .addReg(DestReg)
         .addImm(NumBytes >> 16)
-        .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
+        .addImm((unsigned)Pred).addReg(PredReg);
       Fits = true;
     }
 
@@ -548,3 +638,11 @@
     MBB->insert(++MBBI, SrcMI);
   }
 }
+
+ARMCC::CondCodes
+llvm::getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
+  unsigned Opc = MI->getOpcode();
+  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
+    return ARMCC::AL;
+  return llvm::getInstrPredicate(MI, PredReg);
+}

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h Fri Jul  2 04:57:13 2010
@@ -20,7 +20,8 @@
 #include "Thumb2RegisterInfo.h"
 
 namespace llvm {
-  class ARMSubtarget;
+class ARMSubtarget;
+class ScheduleHazardRecognizer;
 
 class Thumb2InstrInfo : public ARMBaseInstrInfo {
   Thumb2RegisterInfo RI;
@@ -31,6 +32,17 @@
   // if there is not such an opcode.
   unsigned getUnindexedOpcode(unsigned Opc) const;
 
+  void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+                               MachineBasicBlock *NewDest) const;
+
+  bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MBBI) const;
+
+  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs) const;
+  
+  bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
+                           MachineBasicBlock &FMBB, unsigned NumFInstrs) const;
+
   bool copyRegToReg(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator I,
                     unsigned DestReg, unsigned SrcReg,
@@ -60,7 +72,17 @@
   /// always be able to get register info as well (through this method).
   ///
   const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
+
+  ScheduleHazardRecognizer *
+  CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const;
 };
+
+/// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical
+/// to llvm::getInstrPredicate except it returns AL for conditional branch
+/// instructions which are "predicated", but are not in IT blocks.
+ARMCC::CondCodes getITInstrPredicate(const MachineInstr *MI, unsigned &PredReg);
+
+
 }
 
 #endif // THUMB2INSTRUCTIONINFO_H

Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2SizeReduction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2SizeReduction.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2SizeReduction.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2SizeReduction.cpp Fri Jul  2 04:57:13 2010
@@ -451,11 +451,18 @@
   if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
     return false;
 
-  const TargetInstrDesc &TID = MI->getDesc();
   unsigned Reg0 = MI->getOperand(0).getReg();
   unsigned Reg1 = MI->getOperand(1).getReg();
-  if (Reg0 != Reg1)
-    return false;
+  if (Reg0 != Reg1) {
+    // Try to commute the operands to make it a 2-address instruction.
+    unsigned CommOpIdx1, CommOpIdx2;
+    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
+        CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
+      return false;
+    MachineInstr *CommutedMI = TII->commuteInstruction(MI);
+    if (!CommutedMI)
+      return false;
+  }
   if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
     return false;
   if (Entry.Imm2Limit) {
@@ -484,6 +491,7 @@
 
   bool HasCC = false;
   bool CCDead = false;
+  const TargetInstrDesc &TID = MI->getDesc();
   if (TID.hasOptionalDef()) {
     unsigned NumOps = TID.getNumOperands();
     HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);

Modified: llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrFormats.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrFormats.td (original)
+++ llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrFormats.td Fri Jul  2 04:57:13 2010
@@ -182,7 +182,7 @@
   bits<5> Rb;
   bits<7> Function = fun;
 
-//  let isTwoAddress = 1;
+//  let Constraints = "$RFALSE = $RDEST";
   let Inst{25-21} = Ra;
   let Inst{20-16} = Rb;
   let Inst{15-13} = 0;
@@ -223,7 +223,7 @@
   bits<8> LIT;
   bits<7> Function = fun;
 
-//  let isTwoAddress = 1;
+//  let Constraints = "$RFALSE = $RDEST";
   let Inst{25-21} = Ra;
   let Inst{20-13} = LIT;
   let Inst{12} = 1;

Modified: llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.td Fri Jul  2 04:57:13 2010
@@ -680,18 +680,32 @@
 }
 
 //conditional moves, floats
-let OutOperandList = (outs F4RC:$RDEST), InOperandList = (ins F4RC:$RFALSE, F4RC:$RTRUE, F8RC:$RCOND),
-    isTwoAddress = 1 in {
-def FCMOVEQS : FPForm<0x17, 0x02A, "fcmoveq $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if = zero
-def FCMOVGES : FPForm<0x17, 0x02D, "fcmovge $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if >= zero
-def FCMOVGTS : FPForm<0x17, 0x02F, "fcmovgt $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if > zero
-def FCMOVLES : FPForm<0x17, 0x02E, "fcmovle $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if <= zero
-def FCMOVLTS : FPForm<0x17, 0x02C, "fcmovlt $RCOND,$RTRUE,$RDEST",[], s_fcmov>; // FCMOVE if < zero
-def FCMOVNES : FPForm<0x17, 0x02B, "fcmovne $RCOND,$RTRUE,$RDEST",[], s_fcmov>; //FCMOVE if != zero
+let OutOperandList = (outs F4RC:$RDEST),
+    InOperandList = (ins F4RC:$RFALSE, F4RC:$RTRUE, F8RC:$RCOND),
+    Constraints = "$RTRUE = $RDEST" in {
+def FCMOVEQS : FPForm<0x17, 0x02A, 
+                      "fcmoveq $RCOND,$RTRUE,$RDEST",
+                      [], s_fcmov>; //FCMOVE if = zero
+def FCMOVGES : FPForm<0x17, 0x02D, 
+                      "fcmovge $RCOND,$RTRUE,$RDEST",
+                      [], s_fcmov>; //FCMOVE if >= zero
+def FCMOVGTS : FPForm<0x17, 0x02F, 
+                      "fcmovgt $RCOND,$RTRUE,$RDEST",
+                      [], s_fcmov>; //FCMOVE if > zero
+def FCMOVLES : FPForm<0x17, 0x02E, 
+                      "fcmovle $RCOND,$RTRUE,$RDEST",
+                      [], s_fcmov>; //FCMOVE if <= zero
+def FCMOVLTS : FPForm<0x17, 0x02C,
+                      "fcmovlt $RCOND,$RTRUE,$RDEST",
+                      [], s_fcmov>; // FCMOVE if < zero
+def FCMOVNES : FPForm<0x17, 0x02B, 
+                      "fcmovne $RCOND,$RTRUE,$RDEST",
+                      [], s_fcmov>; //FCMOVE if != zero
 }
 //conditional moves, doubles
-let OutOperandList = (outs F8RC:$RDEST), InOperandList = (ins F8RC:$RFALSE, F8RC:$RTRUE, F8RC:$RCOND),
-    isTwoAddress = 1 in {
+let OutOperandList = (outs F8RC:$RDEST), 
+    InOperandList = (ins F8RC:$RFALSE, F8RC:$RTRUE, F8RC:$RCOND),
+    Constraints = "$RTRUE = $RDEST" in {
 def FCMOVEQT : FPForm<0x17, 0x02A, "fcmoveq $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
 def FCMOVGET : FPForm<0x17, 0x02D, "fcmovge $RCOND,$RTRUE,$RDEST", [], s_fcmov>;
 def FCMOVGTT : FPForm<0x17, 0x02F, "fcmovgt $RCOND,$RTRUE,$RDEST", [], s_fcmov>;

Modified: llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.td Fri Jul  2 04:57:13 2010
@@ -488,7 +488,7 @@
              "$dst = $src;",
              []>;
 
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
 def MOVEcc: F1<(outs DP:$dst), (ins DP:$src1, DP:$src2, AnyCC:$cc),
                "if $cc $dst = $src2;",
                [(set DP:$dst, (select AnyCC:$cc, DP:$src2, DP:$src1))]>;
@@ -645,7 +645,7 @@
 // Table C-15. Bit Operations Instructions
 //===----------------------------------------------------------------------===//
 
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
 def BITCLR: F1<(outs D:$dst), (ins D:$src1, uimm5imask:$src2),
               "bitclr($dst, $src2);",
               [(set D:$dst, (and D:$src1, uimm5imask:$src2))]>;
@@ -691,7 +691,7 @@
 }
 
 let Defs = [AZ, AN, V, VS],
-    isTwoAddress = 1 in {
+    Constraints = "$src = $dst" in {
 defm SRA : SHIFT32<sra, ">>>">;
 defm SRL : SHIFT32<srl, ">>">;
 defm SLL : SHIFT32<shl, "<<">;
@@ -748,7 +748,7 @@
               "$dst = $src1 + $src2;",
               [(set D16:$dst, (add D16:$src1, D16:$src2))]>;
 
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
 def ADDimm7: F1<(outs D:$dst), (ins D:$src1, i32imm:$src2),
                 "$dst += $src2;",
                 [(set D:$dst, (add D:$src1, imm7:$src2))]>;
@@ -775,7 +775,7 @@
 def ADDpp: F1<(outs P:$dst), (ins P:$src1, P:$src2),
               "$dst = $src1 + $src2;", []>;
 
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
 def ADDpp_imm7: F1<(outs P:$dst), (ins P:$src1, i32imm:$src2),
                 "$dst += $src2;", []>;
 
@@ -802,7 +802,7 @@
 }
 
 
-let isTwoAddress = 1 in
+let Constraints = "$src1 = $dst" in
 def MUL32: F1<(outs D:$dst), (ins D:$src1, D:$src2),
             "$dst *= $src2;",
             [(set D:$dst, (mul D:$src1, D:$src2))]>;

Modified: llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp Fri Jul  2 04:57:13 2010
@@ -2889,7 +2889,7 @@
   bool hasByVal = I.hasByValArgument();
   bool isStructRet = I.hasStructRetAttr();
   if (isStructRet) {
-    writeOperandDeref(I.getOperand(1));
+    writeOperandDeref(I.getArgOperand(0));
     Out << " = ";
   }
   
@@ -2944,8 +2944,8 @@
   }
 
   unsigned NumDeclaredParams = FTy->getNumParams();
-
-  CallSite::arg_iterator AI = I.op_begin()+1, AE = I.op_end();
+  CallSite CS(&I);
+  CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
   unsigned ArgNo = 0;
   if (isStructRet) {   // Skip struct return argument.
     ++AI;
@@ -2999,7 +2999,7 @@
     Out << "0; ";
       
     Out << "va_start(*(va_list*)";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ", ";
     // Output the last argument to the enclosing function.
     if (I.getParent()->getParent()->arg_empty())
@@ -3009,9 +3009,9 @@
     Out << ')';
     return true;
   case Intrinsic::vaend:
-    if (!isa<ConstantPointerNull>(I.getOperand(1))) {
+    if (!isa<ConstantPointerNull>(I.getArgOperand(0))) {
       Out << "0; va_end(*(va_list*)";
-      writeOperand(I.getOperand(1));
+      writeOperand(I.getArgOperand(0));
       Out << ')';
     } else {
       Out << "va_end(*(va_list*)0)";
@@ -3020,47 +3020,47 @@
   case Intrinsic::vacopy:
     Out << "0; ";
     Out << "va_copy(*(va_list*)";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ", *(va_list*)";
-    writeOperand(I.getOperand(2));
+    writeOperand(I.getArgOperand(1));
     Out << ')';
     return true;
   case Intrinsic::returnaddress:
     Out << "__builtin_return_address(";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ')';
     return true;
   case Intrinsic::frameaddress:
     Out << "__builtin_frame_address(";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ')';
     return true;
   case Intrinsic::powi:
     Out << "__builtin_powi(";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ", ";
-    writeOperand(I.getOperand(2));
+    writeOperand(I.getArgOperand(1));
     Out << ')';
     return true;
   case Intrinsic::setjmp:
     Out << "setjmp(*(jmp_buf*)";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ')';
     return true;
   case Intrinsic::longjmp:
     Out << "longjmp(*(jmp_buf*)";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ", ";
-    writeOperand(I.getOperand(2));
+    writeOperand(I.getArgOperand(1));
     Out << ')';
     return true;
   case Intrinsic::prefetch:
     Out << "LLVM_PREFETCH((const void *)";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ", ";
-    writeOperand(I.getOperand(2));
+    writeOperand(I.getArgOperand(1));
     Out << ", ";
-    writeOperand(I.getOperand(3));
+    writeOperand(I.getArgOperand(2));
     Out << ")";
     return true;
   case Intrinsic::stacksave:
@@ -3077,7 +3077,7 @@
     printType(Out, I.getType());
     Out << ')';  
     // Multiple GCC builtins multiplex onto this intrinsic.
-    switch (cast<ConstantInt>(I.getOperand(3))->getZExtValue()) {
+    switch (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue()) {
     default: llvm_unreachable("Invalid llvm.x86.sse.cmp!");
     case 0: Out << "__builtin_ia32_cmpeq"; break;
     case 1: Out << "__builtin_ia32_cmplt"; break;
@@ -3098,9 +3098,9 @@
       Out << 'd';
       
     Out << "(";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ", ";
-    writeOperand(I.getOperand(2));
+    writeOperand(I.getArgOperand(1));
     Out << ")";
     return true;
   case Intrinsic::ppc_altivec_lvsl:
@@ -3108,7 +3108,7 @@
     printType(Out, I.getType());
     Out << ')';  
     Out << "__builtin_altivec_lvsl(0, (void*)";
-    writeOperand(I.getOperand(1));
+    writeOperand(I.getArgOperand(0));
     Out << ")";
     return true;
   }
@@ -3221,7 +3221,7 @@
       DestVal = ResultVals[ValueCount].first;
       DestValNo = ResultVals[ValueCount].second;
     } else
-      DestVal = CI.getOperand(ValueCount-ResultVals.size()+1);
+      DestVal = CI.getArgOperand(ValueCount-ResultVals.size());
 
     if (I->isEarlyClobber)
       C = "&"+C;
@@ -3255,7 +3255,7 @@
     }
     
     assert(ValueCount >= ResultVals.size() && "Input can't refer to result");
-    Value *SrcVal = CI.getOperand(ValueCount-ResultVals.size()+1);
+    Value *SrcVal = CI.getArgOperand(ValueCount-ResultVals.size());
     
     Out << "\"" << C << "\"(";
     if (!I->isIndirect)

Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelDAGToDAG.cpp Fri Jul  2 04:57:13 2010
@@ -275,7 +275,6 @@
 
     SDNode *emitBuildVector(SDNode *bvNode) {
       EVT vecVT = bvNode->getValueType(0);
-      EVT eltVT = vecVT.getVectorElementType();
       DebugLoc dl = bvNode->getDebugLoc();
 
       // Check to see if this vector can be represented as a CellSPU immediate

Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp Fri Jul  2 04:57:13 2010
@@ -1746,15 +1746,20 @@
   unsigned V0Elt = 0;
   bool monotonic = true;
   bool rotate = true;
+  EVT maskVT;             // which of the c?d instructions to use
 
   if (EltVT == MVT::i8) {
     V2EltIdx0 = 16;
+    maskVT = MVT::v16i8; 
   } else if (EltVT == MVT::i16) {
     V2EltIdx0 = 8;
+    maskVT = MVT::v8i16;
   } else if (EltVT == MVT::i32 || EltVT == MVT::f32) {
     V2EltIdx0 = 4;
+    maskVT = MVT::v4i32;
   } else if (EltVT == MVT::i64 || EltVT == MVT::f64) {
     V2EltIdx0 = 2;
+    maskVT = MVT::v2i64;
   } else
     llvm_unreachable("Unhandled vector type in LowerVECTOR_SHUFFLE");
 
@@ -1786,7 +1791,7 @@
         } else {
           rotate = false;
         }
-      } else if (PrevElt == 0) {
+      } else if (i == 0) {
         // First time through, need to keep track of previous element
         PrevElt = SrcElt;
       } else {
@@ -1798,18 +1803,16 @@
 
   if (EltsFromV2 == 1 && monotonic) {
     // Compute mask and shuffle
-    MachineFunction &MF = DAG.getMachineFunction();
-    MachineRegisterInfo &RegInfo = MF.getRegInfo();
-    unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass);
     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-    // Initialize temporary register to 0
-    SDValue InitTempReg =
-      DAG.getCopyToReg(DAG.getEntryNode(), dl, VReg, DAG.getConstant(0, PtrVT));
-    // Copy register's contents as index in SHUFFLE_MASK:
-    SDValue ShufMaskOp =
-      DAG.getNode(SPUISD::SHUFFLE_MASK, dl, MVT::v4i32,
-                  DAG.getTargetConstant(V2Elt, MVT::i32),
-                  DAG.getCopyFromReg(InitTempReg, dl, VReg, PtrVT));
+
+    // As SHUFFLE_MASK becomes a c?d instruction, feed it an address
+    // R1 ($sp) is used here only as it is guaranteed to have last bits zero
+    SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+                                DAG.getRegister(SPU::R1, PtrVT),
+                                DAG.getConstant(V2Elt, MVT::i32));
+    SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, 
+                                     maskVT, Pointer);
+
     // Use shuffle mask in SHUFB synthetic instruction:
     return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
                        ShufMaskOp);
@@ -3061,12 +3064,10 @@
 void
 SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                 char ConstraintLetter,
-                                                bool hasMemory,
                                                 std::vector<SDValue> &Ops,
                                                 SelectionDAG &DAG) const {
   // Default, for the time being, to the base class handler
-  TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, hasMemory,
-                                               Ops, DAG);
+  TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG);
 }
 
 /// isLegalAddressImmediate - Return true if the integer value can be used

Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.h (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.h Fri Jul  2 04:57:13 2010
@@ -134,7 +134,6 @@
                                    EVT VT) const;
 
     void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
-                                      bool hasMemory,
                                       std::vector<SDValue> &Ops,
                                       SelectionDAG &DAG) const;
 

Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp Fri Jul  2 04:57:13 2010
@@ -164,11 +164,9 @@
            MI.getOperand(0).isReg() &&
            MI.getOperand(1).isReg() &&
            "invalid SPU OR<type>_<vec> or LR instruction!");
-    if (MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
       sourceReg = MI.getOperand(1).getReg();
       destReg = MI.getOperand(0).getReg();
       return true;
-    }
     break;
   }
   case SPU::ORv16i8:

Modified: llvm/branches/wendling/eh/lib/Target/CppBackend/CPPBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CppBackend/CPPBackend.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CppBackend/CPPBackend.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CppBackend/CPPBackend.cpp Fri Jul  2 04:57:13 2010
@@ -99,11 +99,12 @@
     ValueSet DefinedValues;
     ForwardRefMap ForwardRefs;
     bool is_inline;
+    unsigned indent_level;
 
   public:
     static char ID;
     explicit CppWriter(formatted_raw_ostream &o) :
-      ModulePass(&ID), Out(o), uniqueNum(0), is_inline(false) {}
+      ModulePass(&ID), Out(o), uniqueNum(0), is_inline(false), indent_level(0){}
 
     virtual const char *getPassName() const { return "C++ backend"; }
 
@@ -120,6 +121,11 @@
 
     void error(const std::string& msg);
 
+    
+    formatted_raw_ostream& nl(formatted_raw_ostream &Out, int delta = 0);
+    inline void in() { indent_level++; }
+    inline void out() { if (indent_level >0) indent_level--; }
+    
   private:
     void printLinkageType(GlobalValue::LinkageTypes LT);
     void printVisibilityType(GlobalValue::VisibilityTypes VisTypes);
@@ -153,1859 +159,1855 @@
 
     void printModuleBody();
   };
+} // end anonymous namespace.
 
-  static unsigned indent_level = 0;
-  inline formatted_raw_ostream& nl(formatted_raw_ostream& Out, int delta = 0) {
-    Out << "\n";
-    if (delta >= 0 || indent_level >= unsigned(-delta))
-      indent_level += delta;
-    for (unsigned i = 0; i < indent_level; ++i)
-      Out << "  ";
-    return Out;
-  }
-
-  inline void in() { indent_level++; }
-  inline void out() { if (indent_level >0) indent_level--; }
-
-  inline void
-  sanitize(std::string& str) {
-    for (size_t i = 0; i < str.length(); ++i)
-      if (!isalnum(str[i]) && str[i] != '_')
-        str[i] = '_';
+formatted_raw_ostream &CppWriter::nl(formatted_raw_ostream &Out, int delta) {
+  Out << '\n';
+  if (delta >= 0 || indent_level >= unsigned(-delta))
+    indent_level += delta;
+  Out.indent(indent_level);
+  return Out;
+}
+
+static inline void sanitize(std::string &str) {
+  for (size_t i = 0; i < str.length(); ++i)
+    if (!isalnum(str[i]) && str[i] != '_')
+      str[i] = '_';
+}
+
+static std::string getTypePrefix(const Type *Ty) {
+  switch (Ty->getTypeID()) {
+  case Type::VoidTyID:     return "void_";
+  case Type::IntegerTyID:
+    return "int" + utostr(cast<IntegerType>(Ty)->getBitWidth()) + "_";
+  case Type::FloatTyID:    return "float_";
+  case Type::DoubleTyID:   return "double_";
+  case Type::LabelTyID:    return "label_";
+  case Type::FunctionTyID: return "func_";
+  case Type::StructTyID:   return "struct_";
+  case Type::ArrayTyID:    return "array_";
+  case Type::PointerTyID:  return "ptr_";
+  case Type::VectorTyID:   return "packed_";
+  case Type::OpaqueTyID:   return "opaque_";
+  default:                 return "other_";
   }
+  return "unknown_";
+}
 
-  inline std::string
-  getTypePrefix(const Type* Ty ) {
-    switch (Ty->getTypeID()) {
-    case Type::VoidTyID:     return "void_";
-    case Type::IntegerTyID:
-      return std::string("int") + utostr(cast<IntegerType>(Ty)->getBitWidth()) +
-        "_";
-    case Type::FloatTyID:    return "float_";
-    case Type::DoubleTyID:   return "double_";
-    case Type::LabelTyID:    return "label_";
-    case Type::FunctionTyID: return "func_";
-    case Type::StructTyID:   return "struct_";
-    case Type::ArrayTyID:    return "array_";
-    case Type::PointerTyID:  return "ptr_";
-    case Type::VectorTyID:   return "packed_";
-    case Type::OpaqueTyID:   return "opaque_";
-    default:                 return "other_";
-    }
-    return "unknown_";
-  }
-
-  // Looks up the type in the symbol table and returns a pointer to its name or
-  // a null pointer if it wasn't found. Note that this isn't the same as the
-  // Mode::getTypeName function which will return an empty string, not a null
-  // pointer if the name is not found.
-  inline const std::string*
-  findTypeName(const TypeSymbolTable& ST, const Type* Ty) {
-    TypeSymbolTable::const_iterator TI = ST.begin();
-    TypeSymbolTable::const_iterator TE = ST.end();
-    for (;TI != TE; ++TI)
-      if (TI->second == Ty)
-        return &(TI->first);
-    return 0;
-  }
-
-  void CppWriter::error(const std::string& msg) {
-    report_fatal_error(msg);
-  }
-
-  // printCFP - Print a floating point constant .. very carefully :)
-  // This makes sure that conversion to/from floating yields the same binary
-  // result so that we don't lose precision.
-  void CppWriter::printCFP(const ConstantFP *CFP) {
-    bool ignored;
-    APFloat APF = APFloat(CFP->getValueAPF());  // copy
-    if (CFP->getType() == Type::getFloatTy(CFP->getContext()))
-      APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
-    Out << "ConstantFP::get(mod->getContext(), ";
-    Out << "APFloat(";
+// Looks up the type in the symbol table and returns a pointer to its name or
+// a null pointer if it wasn't found. Note that this isn't the same as the
+// Mode::getTypeName function which will return an empty string, not a null
+// pointer if the name is not found.
+static const std::string *
+findTypeName(const TypeSymbolTable& ST, const Type* Ty) {
+  TypeSymbolTable::const_iterator TI = ST.begin();
+  TypeSymbolTable::const_iterator TE = ST.end();
+  for (;TI != TE; ++TI)
+    if (TI->second == Ty)
+      return &(TI->first);
+  return 0;
+}
+
+void CppWriter::error(const std::string& msg) {
+  report_fatal_error(msg);
+}
+
+// printCFP - Print a floating point constant .. very carefully :)
+// This makes sure that conversion to/from floating yields the same binary
+// result so that we don't lose precision.
+void CppWriter::printCFP(const ConstantFP *CFP) {
+  bool ignored;
+  APFloat APF = APFloat(CFP->getValueAPF());  // copy
+  if (CFP->getType() == Type::getFloatTy(CFP->getContext()))
+    APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &ignored);
+  Out << "ConstantFP::get(mod->getContext(), ";
+  Out << "APFloat(";
 #if HAVE_PRINTF_A
-    char Buffer[100];
-    sprintf(Buffer, "%A", APF.convertToDouble());
-    if ((!strncmp(Buffer, "0x", 2) ||
-         !strncmp(Buffer, "-0x", 3) ||
-         !strncmp(Buffer, "+0x", 3)) &&
-        APF.bitwiseIsEqual(APFloat(atof(Buffer)))) {
-      if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
-        Out << "BitsToDouble(" << Buffer << ")";
-      else
-        Out << "BitsToFloat((float)" << Buffer << ")";
-      Out << ")";
-    } else {
+  char Buffer[100];
+  sprintf(Buffer, "%A", APF.convertToDouble());
+  if ((!strncmp(Buffer, "0x", 2) ||
+       !strncmp(Buffer, "-0x", 3) ||
+       !strncmp(Buffer, "+0x", 3)) &&
+      APF.bitwiseIsEqual(APFloat(atof(Buffer)))) {
+    if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
+      Out << "BitsToDouble(" << Buffer << ")";
+    else
+      Out << "BitsToFloat((float)" << Buffer << ")";
+    Out << ")";
+  } else {
 #endif
-      std::string StrVal = ftostr(CFP->getValueAPF());
+    std::string StrVal = ftostr(CFP->getValueAPF());
 
-      while (StrVal[0] == ' ')
-        StrVal.erase(StrVal.begin());
+    while (StrVal[0] == ' ')
+      StrVal.erase(StrVal.begin());
 
-      // Check to make sure that the stringized number is not some string like
-      // "Inf" or NaN.  Check that the string matches the "[-+]?[0-9]" regex.
-      if (((StrVal[0] >= '0' && StrVal[0] <= '9') ||
-           ((StrVal[0] == '-' || StrVal[0] == '+') &&
-            (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
-          (CFP->isExactlyValue(atof(StrVal.c_str())))) {
-        if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
-          Out <<  StrVal;
-        else
-          Out << StrVal << "f";
-      } else if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
-        Out << "BitsToDouble(0x"
-            << utohexstr(CFP->getValueAPF().bitcastToAPInt().getZExtValue())
-            << "ULL) /* " << StrVal << " */";
+    // Check to make sure that the stringized number is not some string like
+    // "Inf" or NaN.  Check that the string matches the "[-+]?[0-9]" regex.
+    if (((StrVal[0] >= '0' && StrVal[0] <= '9') ||
+         ((StrVal[0] == '-' || StrVal[0] == '+') &&
+          (StrVal[1] >= '0' && StrVal[1] <= '9'))) &&
+        (CFP->isExactlyValue(atof(StrVal.c_str())))) {
+      if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
+        Out <<  StrVal;
       else
-        Out << "BitsToFloat(0x"
-            << utohexstr((uint32_t)CFP->getValueAPF().
-                                        bitcastToAPInt().getZExtValue())
-            << "U) /* " << StrVal << " */";
-      Out << ")";
-#if HAVE_PRINTF_A
-    }
-#endif
+        Out << StrVal << "f";
+    } else if (CFP->getType() == Type::getDoubleTy(CFP->getContext()))
+      Out << "BitsToDouble(0x"
+          << utohexstr(CFP->getValueAPF().bitcastToAPInt().getZExtValue())
+          << "ULL) /* " << StrVal << " */";
+    else
+      Out << "BitsToFloat(0x"
+          << utohexstr((uint32_t)CFP->getValueAPF().
+                                      bitcastToAPInt().getZExtValue())
+          << "U) /* " << StrVal << " */";
     Out << ")";
+#if HAVE_PRINTF_A
   }
+#endif
+  Out << ")";
+}
 
-  void CppWriter::printCallingConv(CallingConv::ID cc){
-    // Print the calling convention.
-    switch (cc) {
-    case CallingConv::C:     Out << "CallingConv::C"; break;
-    case CallingConv::Fast:  Out << "CallingConv::Fast"; break;
-    case CallingConv::Cold:  Out << "CallingConv::Cold"; break;
-    case CallingConv::FirstTargetCC: Out << "CallingConv::FirstTargetCC"; break;
-    default:                 Out << cc; break;
-    }
+void CppWriter::printCallingConv(CallingConv::ID cc){
+  // Print the calling convention.
+  switch (cc) {
+  case CallingConv::C:     Out << "CallingConv::C"; break;
+  case CallingConv::Fast:  Out << "CallingConv::Fast"; break;
+  case CallingConv::Cold:  Out << "CallingConv::Cold"; break;
+  case CallingConv::FirstTargetCC: Out << "CallingConv::FirstTargetCC"; break;
+  default:                 Out << cc; break;
   }
+}
 
-  void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
-    switch (LT) {
-    case GlobalValue::InternalLinkage:
-      Out << "GlobalValue::InternalLinkage"; break;
-    case GlobalValue::PrivateLinkage:
-      Out << "GlobalValue::PrivateLinkage"; break;
-    case GlobalValue::LinkerPrivateLinkage:
-      Out << "GlobalValue::LinkerPrivateLinkage"; break;
-    case GlobalValue::AvailableExternallyLinkage:
-      Out << "GlobalValue::AvailableExternallyLinkage "; break;
-    case GlobalValue::LinkOnceAnyLinkage:
-      Out << "GlobalValue::LinkOnceAnyLinkage "; break;
-    case GlobalValue::LinkOnceODRLinkage:
-      Out << "GlobalValue::LinkOnceODRLinkage "; break;
-    case GlobalValue::WeakAnyLinkage:
-      Out << "GlobalValue::WeakAnyLinkage"; break;
-    case GlobalValue::WeakODRLinkage:
-      Out << "GlobalValue::WeakODRLinkage"; break;
-    case GlobalValue::AppendingLinkage:
-      Out << "GlobalValue::AppendingLinkage"; break;
-    case GlobalValue::ExternalLinkage:
-      Out << "GlobalValue::ExternalLinkage"; break;
-    case GlobalValue::DLLImportLinkage:
-      Out << "GlobalValue::DLLImportLinkage"; break;
-    case GlobalValue::DLLExportLinkage:
-      Out << "GlobalValue::DLLExportLinkage"; break;
-    case GlobalValue::ExternalWeakLinkage:
-      Out << "GlobalValue::ExternalWeakLinkage"; break;
-    case GlobalValue::CommonLinkage:
-      Out << "GlobalValue::CommonLinkage"; break;
-    }
+void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) {
+  switch (LT) {
+  case GlobalValue::InternalLinkage:
+    Out << "GlobalValue::InternalLinkage"; break;
+  case GlobalValue::PrivateLinkage:
+    Out << "GlobalValue::PrivateLinkage"; break;
+  case GlobalValue::LinkerPrivateLinkage:
+    Out << "GlobalValue::LinkerPrivateLinkage"; break;
+  case GlobalValue::LinkerPrivateWeakLinkage:
+    Out << "GlobalValue::LinkerPrivateWeakLinkage"; break;
+  case GlobalValue::AvailableExternallyLinkage:
+    Out << "GlobalValue::AvailableExternallyLinkage "; break;
+  case GlobalValue::LinkOnceAnyLinkage:
+    Out << "GlobalValue::LinkOnceAnyLinkage "; break;
+  case GlobalValue::LinkOnceODRLinkage:
+    Out << "GlobalValue::LinkOnceODRLinkage "; break;
+  case GlobalValue::WeakAnyLinkage:
+    Out << "GlobalValue::WeakAnyLinkage"; break;
+  case GlobalValue::WeakODRLinkage:
+    Out << "GlobalValue::WeakODRLinkage"; break;
+  case GlobalValue::AppendingLinkage:
+    Out << "GlobalValue::AppendingLinkage"; break;
+  case GlobalValue::ExternalLinkage:
+    Out << "GlobalValue::ExternalLinkage"; break;
+  case GlobalValue::DLLImportLinkage:
+    Out << "GlobalValue::DLLImportLinkage"; break;
+  case GlobalValue::DLLExportLinkage:
+    Out << "GlobalValue::DLLExportLinkage"; break;
+  case GlobalValue::ExternalWeakLinkage:
+    Out << "GlobalValue::ExternalWeakLinkage"; break;
+  case GlobalValue::CommonLinkage:
+    Out << "GlobalValue::CommonLinkage"; break;
   }
+}
 
-  void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
-    switch (VisType) {
-    default: llvm_unreachable("Unknown GVar visibility");
-    case GlobalValue::DefaultVisibility:
-      Out << "GlobalValue::DefaultVisibility";
-      break;
-    case GlobalValue::HiddenVisibility:
-      Out << "GlobalValue::HiddenVisibility";
-      break;
-    case GlobalValue::ProtectedVisibility:
-      Out << "GlobalValue::ProtectedVisibility";
-      break;
-    }
+void CppWriter::printVisibilityType(GlobalValue::VisibilityTypes VisType) {
+  switch (VisType) {
+  default: llvm_unreachable("Unknown GVar visibility");
+  case GlobalValue::DefaultVisibility:
+    Out << "GlobalValue::DefaultVisibility";
+    break;
+  case GlobalValue::HiddenVisibility:
+    Out << "GlobalValue::HiddenVisibility";
+    break;
+  case GlobalValue::ProtectedVisibility:
+    Out << "GlobalValue::ProtectedVisibility";
+    break;
   }
+}
 
-  // printEscapedString - Print each character of the specified string, escaping
-  // it if it is not printable or if it is an escape char.
-  void CppWriter::printEscapedString(const std::string &Str) {
-    for (unsigned i = 0, e = Str.size(); i != e; ++i) {
-      unsigned char C = Str[i];
-      if (isprint(C) && C != '"' && C != '\\') {
-        Out << C;
-      } else {
-        Out << "\\x"
-            << (char) ((C/16  < 10) ? ( C/16 +'0') : ( C/16 -10+'A'))
-            << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
-      }
+// printEscapedString - Print each character of the specified string, escaping
+// it if it is not printable or if it is an escape char.
+void CppWriter::printEscapedString(const std::string &Str) {
+  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+    unsigned char C = Str[i];
+    if (isprint(C) && C != '"' && C != '\\') {
+      Out << C;
+    } else {
+      Out << "\\x"
+          << (char) ((C/16  < 10) ? ( C/16 +'0') : ( C/16 -10+'A'))
+          << (char)(((C&15) < 10) ? ((C&15)+'0') : ((C&15)-10+'A'));
     }
   }
+}
 
-  std::string CppWriter::getCppName(const Type* Ty) {
-    // First, handle the primitive types .. easy
-    if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
-      switch (Ty->getTypeID()) {
-      case Type::VoidTyID:   return "Type::getVoidTy(mod->getContext())";
-      case Type::IntegerTyID: {
-        unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
-        return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
-      }
-      case Type::X86_FP80TyID: return "Type::getX86_FP80Ty(mod->getContext())";
-      case Type::FloatTyID:    return "Type::getFloatTy(mod->getContext())";
-      case Type::DoubleTyID:   return "Type::getDoubleTy(mod->getContext())";
-      case Type::LabelTyID:    return "Type::getLabelTy(mod->getContext())";
-      default:
-        error("Invalid primitive type");
-        break;
-      }
-      // shouldn't be returned, but make it sensible
-      return "Type::getVoidTy(mod->getContext())";
-    }
-
-    // Now, see if we've seen the type before and return that
-    TypeMap::iterator I = TypeNames.find(Ty);
-    if (I != TypeNames.end())
-      return I->second;
-
-    // Okay, let's build a new name for this type. Start with a prefix
-    const char* prefix = 0;
+std::string CppWriter::getCppName(const Type* Ty) {
+  // First, handle the primitive types .. easy
+  if (Ty->isPrimitiveType() || Ty->isIntegerTy()) {
     switch (Ty->getTypeID()) {
-    case Type::FunctionTyID:    prefix = "FuncTy_"; break;
-    case Type::StructTyID:      prefix = "StructTy_"; break;
-    case Type::ArrayTyID:       prefix = "ArrayTy_"; break;
-    case Type::PointerTyID:     prefix = "PointerTy_"; break;
-    case Type::OpaqueTyID:      prefix = "OpaqueTy_"; break;
-    case Type::VectorTyID:      prefix = "VectorTy_"; break;
-    default:                    prefix = "OtherTy_"; break; // prevent breakage
+    case Type::VoidTyID:   return "Type::getVoidTy(mod->getContext())";
+    case Type::IntegerTyID: {
+      unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
+      return "IntegerType::get(mod->getContext(), " + utostr(BitWidth) + ")";
+    }
+    case Type::X86_FP80TyID: return "Type::getX86_FP80Ty(mod->getContext())";
+    case Type::FloatTyID:    return "Type::getFloatTy(mod->getContext())";
+    case Type::DoubleTyID:   return "Type::getDoubleTy(mod->getContext())";
+    case Type::LabelTyID:    return "Type::getLabelTy(mod->getContext())";
+    default:
+      error("Invalid primitive type");
+      break;
     }
+    // shouldn't be returned, but make it sensible
+    return "Type::getVoidTy(mod->getContext())";
+  }
 
-    // See if the type has a name in the symboltable and build accordingly
-    const std::string* tName = findTypeName(TheModule->getTypeSymbolTable(), Ty);
-    std::string name;
-    if (tName)
-      name = std::string(prefix) + *tName;
-    else
-      name = std::string(prefix) + utostr(uniqueNum++);
-    sanitize(name);
+  // Now, see if we've seen the type before and return that
+  TypeMap::iterator I = TypeNames.find(Ty);
+  if (I != TypeNames.end())
+    return I->second;
+
+  // Okay, let's build a new name for this type. Start with a prefix
+  const char* prefix = 0;
+  switch (Ty->getTypeID()) {
+  case Type::FunctionTyID:    prefix = "FuncTy_"; break;
+  case Type::StructTyID:      prefix = "StructTy_"; break;
+  case Type::ArrayTyID:       prefix = "ArrayTy_"; break;
+  case Type::PointerTyID:     prefix = "PointerTy_"; break;
+  case Type::OpaqueTyID:      prefix = "OpaqueTy_"; break;
+  case Type::VectorTyID:      prefix = "VectorTy_"; break;
+  default:                    prefix = "OtherTy_"; break; // prevent breakage
+  }
+
+  // See if the type has a name in the symboltable and build accordingly
+  const std::string* tName = findTypeName(TheModule->getTypeSymbolTable(), Ty);
+  std::string name;
+  if (tName)
+    name = std::string(prefix) + *tName;
+  else
+    name = std::string(prefix) + utostr(uniqueNum++);
+  sanitize(name);
 
-    // Save the name
-    return TypeNames[Ty] = name;
-  }
+  // Save the name
+  return TypeNames[Ty] = name;
+}
 
-  void CppWriter::printCppName(const Type* Ty) {
-    printEscapedString(getCppName(Ty));
-  }
-
-  std::string CppWriter::getCppName(const Value* val) {
-    std::string name;
-    ValueMap::iterator I = ValueNames.find(val);
-    if (I != ValueNames.end() && I->first == val)
-      return  I->second;
-
-    if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(val)) {
-      name = std::string("gvar_") +
-        getTypePrefix(GV->getType()->getElementType());
-    } else if (isa<Function>(val)) {
-      name = std::string("func_");
-    } else if (const Constant* C = dyn_cast<Constant>(val)) {
-      name = std::string("const_") + getTypePrefix(C->getType());
-    } else if (const Argument* Arg = dyn_cast<Argument>(val)) {
-      if (is_inline) {
-        unsigned argNum = std::distance(Arg->getParent()->arg_begin(),
-                                        Function::const_arg_iterator(Arg)) + 1;
-        name = std::string("arg_") + utostr(argNum);
-        NameSet::iterator NI = UsedNames.find(name);
-        if (NI != UsedNames.end())
-          name += std::string("_") + utostr(uniqueNum++);
-        UsedNames.insert(name);
-        return ValueNames[val] = name;
-      } else {
-        name = getTypePrefix(val->getType());
-      }
+void CppWriter::printCppName(const Type* Ty) {
+  printEscapedString(getCppName(Ty));
+}
+
+std::string CppWriter::getCppName(const Value* val) {
+  std::string name;
+  ValueMap::iterator I = ValueNames.find(val);
+  if (I != ValueNames.end() && I->first == val)
+    return  I->second;
+
+  if (const GlobalVariable* GV = dyn_cast<GlobalVariable>(val)) {
+    name = std::string("gvar_") +
+      getTypePrefix(GV->getType()->getElementType());
+  } else if (isa<Function>(val)) {
+    name = std::string("func_");
+  } else if (const Constant* C = dyn_cast<Constant>(val)) {
+    name = std::string("const_") + getTypePrefix(C->getType());
+  } else if (const Argument* Arg = dyn_cast<Argument>(val)) {
+    if (is_inline) {
+      unsigned argNum = std::distance(Arg->getParent()->arg_begin(),
+                                      Function::const_arg_iterator(Arg)) + 1;
+      name = std::string("arg_") + utostr(argNum);
+      NameSet::iterator NI = UsedNames.find(name);
+      if (NI != UsedNames.end())
+        name += std::string("_") + utostr(uniqueNum++);
+      UsedNames.insert(name);
+      return ValueNames[val] = name;
     } else {
       name = getTypePrefix(val->getType());
     }
-    if (val->hasName())
-      name += val->getName();
-    else
-      name += utostr(uniqueNum++);
-    sanitize(name);
-    NameSet::iterator NI = UsedNames.find(name);
-    if (NI != UsedNames.end())
-      name += std::string("_") + utostr(uniqueNum++);
-    UsedNames.insert(name);
-    return ValueNames[val] = name;
-  }
-
-  void CppWriter::printCppName(const Value* val) {
-    printEscapedString(getCppName(val));
-  }
-
-  void CppWriter::printAttributes(const AttrListPtr &PAL,
-                                  const std::string &name) {
-    Out << "AttrListPtr " << name << "_PAL;";
-    nl(Out);
-    if (!PAL.isEmpty()) {
-      Out << '{'; in(); nl(Out);
-      Out << "SmallVector<AttributeWithIndex, 4> Attrs;"; nl(Out);
-      Out << "AttributeWithIndex PAWI;"; nl(Out);
-      for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
-        unsigned index = PAL.getSlot(i).Index;
-        Attributes attrs = PAL.getSlot(i).Attrs;
-        Out << "PAWI.Index = " << index << "U; PAWI.Attrs = 0 ";
+  } else {
+    name = getTypePrefix(val->getType());
+  }
+  if (val->hasName())
+    name += val->getName();
+  else
+    name += utostr(uniqueNum++);
+  sanitize(name);
+  NameSet::iterator NI = UsedNames.find(name);
+  if (NI != UsedNames.end())
+    name += std::string("_") + utostr(uniqueNum++);
+  UsedNames.insert(name);
+  return ValueNames[val] = name;
+}
+
+void CppWriter::printCppName(const Value* val) {
+  printEscapedString(getCppName(val));
+}
+
+void CppWriter::printAttributes(const AttrListPtr &PAL,
+                                const std::string &name) {
+  Out << "AttrListPtr " << name << "_PAL;";
+  nl(Out);
+  if (!PAL.isEmpty()) {
+    Out << '{'; in(); nl(Out);
+    Out << "SmallVector<AttributeWithIndex, 4> Attrs;"; nl(Out);
+    Out << "AttributeWithIndex PAWI;"; nl(Out);
+    for (unsigned i = 0; i < PAL.getNumSlots(); ++i) {
+      unsigned index = PAL.getSlot(i).Index;
+      Attributes attrs = PAL.getSlot(i).Attrs;
+      Out << "PAWI.Index = " << index << "U; PAWI.Attrs = 0 ";
 #define HANDLE_ATTR(X)                 \
-        if (attrs & Attribute::X)      \
-          Out << " | Attribute::" #X;  \
-        attrs &= ~Attribute::X;
-        
-        HANDLE_ATTR(SExt);
-        HANDLE_ATTR(ZExt);
-        HANDLE_ATTR(NoReturn);
-        HANDLE_ATTR(InReg);
-        HANDLE_ATTR(StructRet);
-        HANDLE_ATTR(NoUnwind);
-        HANDLE_ATTR(NoAlias);
-        HANDLE_ATTR(ByVal);
-        HANDLE_ATTR(Nest);
-        HANDLE_ATTR(ReadNone);
-        HANDLE_ATTR(ReadOnly);
-        HANDLE_ATTR(InlineHint);
-        HANDLE_ATTR(NoInline);
-        HANDLE_ATTR(AlwaysInline);
-        HANDLE_ATTR(OptimizeForSize);
-        HANDLE_ATTR(StackProtect);
-        HANDLE_ATTR(StackProtectReq);
-        HANDLE_ATTR(NoCapture);
+      if (attrs & Attribute::X)      \
+        Out << " | Attribute::" #X;  \
+      attrs &= ~Attribute::X;
+      
+      HANDLE_ATTR(SExt);
+      HANDLE_ATTR(ZExt);
+      HANDLE_ATTR(NoReturn);
+      HANDLE_ATTR(InReg);
+      HANDLE_ATTR(StructRet);
+      HANDLE_ATTR(NoUnwind);
+      HANDLE_ATTR(NoAlias);
+      HANDLE_ATTR(ByVal);
+      HANDLE_ATTR(Nest);
+      HANDLE_ATTR(ReadNone);
+      HANDLE_ATTR(ReadOnly);
+      HANDLE_ATTR(InlineHint);
+      HANDLE_ATTR(NoInline);
+      HANDLE_ATTR(AlwaysInline);
+      HANDLE_ATTR(OptimizeForSize);
+      HANDLE_ATTR(StackProtect);
+      HANDLE_ATTR(StackProtectReq);
+      HANDLE_ATTR(NoCapture);
 #undef HANDLE_ATTR
-        assert(attrs == 0 && "Unhandled attribute!");
-        Out << ";";
-        nl(Out);
-        Out << "Attrs.push_back(PAWI);";
-        nl(Out);
-      }
-      Out << name << "_PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());";
+      assert(attrs == 0 && "Unhandled attribute!");
+      Out << ";";
+      nl(Out);
+      Out << "Attrs.push_back(PAWI);";
       nl(Out);
-      out(); nl(Out);
-      Out << '}'; nl(Out);
     }
+    Out << name << "_PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());";
+    nl(Out);
+    out(); nl(Out);
+    Out << '}'; nl(Out);
   }
+}
 
-  bool CppWriter::printTypeInternal(const Type* Ty) {
-    // We don't print definitions for primitive types
-    if (Ty->isPrimitiveType() || Ty->isIntegerTy())
-      return false;
-
-    // If we already defined this type, we don't need to define it again.
-    if (DefinedTypes.find(Ty) != DefinedTypes.end())
-      return false;
-
-    // Everything below needs the name for the type so get it now.
-    std::string typeName(getCppName(Ty));
-
-    // Search the type stack for recursion. If we find it, then generate this
-    // as an OpaqueType, but make sure not to do this multiple times because
-    // the type could appear in multiple places on the stack. Once the opaque
-    // definition is issued, it must not be re-issued. Consequently we have to
-    // check the UnresolvedTypes list as well.
-    TypeList::const_iterator TI = std::find(TypeStack.begin(), TypeStack.end(),
-                                            Ty);
-    if (TI != TypeStack.end()) {
-      TypeMap::const_iterator I = UnresolvedTypes.find(Ty);
-      if (I == UnresolvedTypes.end()) {
-        Out << "PATypeHolder " << typeName;
-        Out << "_fwd = OpaqueType::get(mod->getContext());";
-        nl(Out);
-        UnresolvedTypes[Ty] = typeName;
-      }
-      return true;
-    }
+bool CppWriter::printTypeInternal(const Type* Ty) {
+  // We don't print definitions for primitive types
+  if (Ty->isPrimitiveType() || Ty->isIntegerTy())
+    return false;
 
-    // We're going to print a derived type which, by definition, contains other
-    // types. So, push this one we're printing onto the type stack to assist with
-    // recursive definitions.
-    TypeStack.push_back(Ty);
+  // If we already defined this type, we don't need to define it again.
+  if (DefinedTypes.find(Ty) != DefinedTypes.end())
+    return false;
 
-    // Print the type definition
-    switch (Ty->getTypeID()) {
-    case Type::FunctionTyID:  {
-      const FunctionType* FT = cast<FunctionType>(Ty);
-      Out << "std::vector<const Type*>" << typeName << "_args;";
-      nl(Out);
-      FunctionType::param_iterator PI = FT->param_begin();
-      FunctionType::param_iterator PE = FT->param_end();
-      for (; PI != PE; ++PI) {
-        const Type* argTy = static_cast<const Type*>(*PI);
-        bool isForward = printTypeInternal(argTy);
-        std::string argName(getCppName(argTy));
-        Out << typeName << "_args.push_back(" << argName;
-        if (isForward)
-          Out << "_fwd";
-        Out << ");";
-        nl(Out);
-      }
-      bool isForward = printTypeInternal(FT->getReturnType());
-      std::string retTypeName(getCppName(FT->getReturnType()));
-      Out << "FunctionType* " << typeName << " = FunctionType::get(";
-      in(); nl(Out) << "/*Result=*/" << retTypeName;
+  // Everything below needs the name for the type so get it now.
+  std::string typeName(getCppName(Ty));
+
+  // Search the type stack for recursion. If we find it, then generate this
+  // as an OpaqueType, but make sure not to do this multiple times because
+  // the type could appear in multiple places on the stack. Once the opaque
+  // definition is issued, it must not be re-issued. Consequently we have to
+  // check the UnresolvedTypes list as well.
+  TypeList::const_iterator TI = std::find(TypeStack.begin(), TypeStack.end(),
+                                          Ty);
+  if (TI != TypeStack.end()) {
+    TypeMap::const_iterator I = UnresolvedTypes.find(Ty);
+    if (I == UnresolvedTypes.end()) {
+      Out << "PATypeHolder " << typeName;
+      Out << "_fwd = OpaqueType::get(mod->getContext());";
+      nl(Out);
+      UnresolvedTypes[Ty] = typeName;
+    }
+    return true;
+  }
+
+  // We're going to print a derived type which, by definition, contains other
+  // types. So, push this one we're printing onto the type stack to assist with
+  // recursive definitions.
+  TypeStack.push_back(Ty);
+
+  // Print the type definition
+  switch (Ty->getTypeID()) {
+  case Type::FunctionTyID:  {
+    const FunctionType* FT = cast<FunctionType>(Ty);
+    Out << "std::vector<const Type*>" << typeName << "_args;";
+    nl(Out);
+    FunctionType::param_iterator PI = FT->param_begin();
+    FunctionType::param_iterator PE = FT->param_end();
+    for (; PI != PE; ++PI) {
+      const Type* argTy = static_cast<const Type*>(*PI);
+      bool isForward = printTypeInternal(argTy);
+      std::string argName(getCppName(argTy));
+      Out << typeName << "_args.push_back(" << argName;
       if (isForward)
         Out << "_fwd";
-      Out << ",";
-      nl(Out) << "/*Params=*/" << typeName << "_args,";
-      nl(Out) << "/*isVarArg=*/" << (FT->isVarArg() ? "true" : "false") << ");";
-      out();
-      nl(Out);
-      break;
-    }
-    case Type::StructTyID: {
-      const StructType* ST = cast<StructType>(Ty);
-      Out << "std::vector<const Type*>" << typeName << "_fields;";
-      nl(Out);
-      StructType::element_iterator EI = ST->element_begin();
-      StructType::element_iterator EE = ST->element_end();
-      for (; EI != EE; ++EI) {
-        const Type* fieldTy = static_cast<const Type*>(*EI);
-        bool isForward = printTypeInternal(fieldTy);
-        std::string fieldName(getCppName(fieldTy));
-        Out << typeName << "_fields.push_back(" << fieldName;
-        if (isForward)
-          Out << "_fwd";
-        Out << ");";
-        nl(Out);
-      }
-      Out << "StructType* " << typeName << " = StructType::get("
-          << "mod->getContext(), "
-          << typeName << "_fields, /*isPacked=*/"
-          << (ST->isPacked() ? "true" : "false") << ");";
-      nl(Out);
-      break;
-    }
-    case Type::ArrayTyID: {
-      const ArrayType* AT = cast<ArrayType>(Ty);
-      const Type* ET = AT->getElementType();
-      bool isForward = printTypeInternal(ET);
-      std::string elemName(getCppName(ET));
-      Out << "ArrayType* " << typeName << " = ArrayType::get("
-          << elemName << (isForward ? "_fwd" : "")
-          << ", " << utostr(AT->getNumElements()) << ");";
-      nl(Out);
-      break;
-    }
-    case Type::PointerTyID: {
-      const PointerType* PT = cast<PointerType>(Ty);
-      const Type* ET = PT->getElementType();
-      bool isForward = printTypeInternal(ET);
-      std::string elemName(getCppName(ET));
-      Out << "PointerType* " << typeName << " = PointerType::get("
-          << elemName << (isForward ? "_fwd" : "")
-          << ", " << utostr(PT->getAddressSpace()) << ");";
-      nl(Out);
-      break;
-    }
-    case Type::VectorTyID: {
-      const VectorType* PT = cast<VectorType>(Ty);
-      const Type* ET = PT->getElementType();
-      bool isForward = printTypeInternal(ET);
-      std::string elemName(getCppName(ET));
-      Out << "VectorType* " << typeName << " = VectorType::get("
-          << elemName << (isForward ? "_fwd" : "")
-          << ", " << utostr(PT->getNumElements()) << ");";
+      Out << ");";
       nl(Out);
-      break;
     }
-    case Type::OpaqueTyID: {
-      Out << "OpaqueType* " << typeName;
-      Out << " = OpaqueType::get(mod->getContext());";
+    bool isForward = printTypeInternal(FT->getReturnType());
+    std::string retTypeName(getCppName(FT->getReturnType()));
+    Out << "FunctionType* " << typeName << " = FunctionType::get(";
+    in(); nl(Out) << "/*Result=*/" << retTypeName;
+    if (isForward)
+      Out << "_fwd";
+    Out << ",";
+    nl(Out) << "/*Params=*/" << typeName << "_args,";
+    nl(Out) << "/*isVarArg=*/" << (FT->isVarArg() ? "true" : "false") << ");";
+    out();
+    nl(Out);
+    break;
+  }
+  case Type::StructTyID: {
+    const StructType* ST = cast<StructType>(Ty);
+    Out << "std::vector<const Type*>" << typeName << "_fields;";
+    nl(Out);
+    StructType::element_iterator EI = ST->element_begin();
+    StructType::element_iterator EE = ST->element_end();
+    for (; EI != EE; ++EI) {
+      const Type* fieldTy = static_cast<const Type*>(*EI);
+      bool isForward = printTypeInternal(fieldTy);
+      std::string fieldName(getCppName(fieldTy));
+      Out << typeName << "_fields.push_back(" << fieldName;
+      if (isForward)
+        Out << "_fwd";
+      Out << ");";
       nl(Out);
-      break;
-    }
-    default:
-      error("Invalid TypeID");
     }
+    Out << "StructType* " << typeName << " = StructType::get("
+        << "mod->getContext(), "
+        << typeName << "_fields, /*isPacked=*/"
+        << (ST->isPacked() ? "true" : "false") << ");";
+    nl(Out);
+    break;
+  }
+  case Type::ArrayTyID: {
+    const ArrayType* AT = cast<ArrayType>(Ty);
+    const Type* ET = AT->getElementType();
+    bool isForward = printTypeInternal(ET);
+    std::string elemName(getCppName(ET));
+    Out << "ArrayType* " << typeName << " = ArrayType::get("
+        << elemName << (isForward ? "_fwd" : "")
+        << ", " << utostr(AT->getNumElements()) << ");";
+    nl(Out);
+    break;
+  }
+  case Type::PointerTyID: {
+    const PointerType* PT = cast<PointerType>(Ty);
+    const Type* ET = PT->getElementType();
+    bool isForward = printTypeInternal(ET);
+    std::string elemName(getCppName(ET));
+    Out << "PointerType* " << typeName << " = PointerType::get("
+        << elemName << (isForward ? "_fwd" : "")
+        << ", " << utostr(PT->getAddressSpace()) << ");";
+    nl(Out);
+    break;
+  }
+  case Type::VectorTyID: {
+    const VectorType* PT = cast<VectorType>(Ty);
+    const Type* ET = PT->getElementType();
+    bool isForward = printTypeInternal(ET);
+    std::string elemName(getCppName(ET));
+    Out << "VectorType* " << typeName << " = VectorType::get("
+        << elemName << (isForward ? "_fwd" : "")
+        << ", " << utostr(PT->getNumElements()) << ");";
+    nl(Out);
+    break;
+  }
+  case Type::OpaqueTyID: {
+    Out << "OpaqueType* " << typeName;
+    Out << " = OpaqueType::get(mod->getContext());";
+    nl(Out);
+    break;
+  }
+  default:
+    error("Invalid TypeID");
+  }
 
-    // If the type had a name, make sure we recreate it.
-    const std::string* progTypeName =
-      findTypeName(TheModule->getTypeSymbolTable(),Ty);
-    if (progTypeName) {
-      Out << "mod->addTypeName(\"" << *progTypeName << "\", "
-          << typeName << ");";
-      nl(Out);
-    }
+  // If the type had a name, make sure we recreate it.
+  const std::string* progTypeName =
+    findTypeName(TheModule->getTypeSymbolTable(),Ty);
+  if (progTypeName) {
+    Out << "mod->addTypeName(\"" << *progTypeName << "\", "
+        << typeName << ");";
+    nl(Out);
+  }
 
-    // Pop us off the type stack
-    TypeStack.pop_back();
-
-    // Indicate that this type is now defined.
-    DefinedTypes.insert(Ty);
-
-    // Early resolve as many unresolved types as possible. Search the unresolved
-    // types map for the type we just printed. Now that its definition is complete
-    // we can resolve any previous references to it. This prevents a cascade of
-    // unresolved types.
-    TypeMap::iterator I = UnresolvedTypes.find(Ty);
-    if (I != UnresolvedTypes.end()) {
-      Out << "cast<OpaqueType>(" << I->second
-          << "_fwd.get())->refineAbstractTypeTo(" << I->second << ");";
-      nl(Out);
-      Out << I->second << " = cast<";
-      switch (Ty->getTypeID()) {
-      case Type::FunctionTyID: Out << "FunctionType"; break;
-      case Type::ArrayTyID:    Out << "ArrayType"; break;
-      case Type::StructTyID:   Out << "StructType"; break;
-      case Type::VectorTyID:   Out << "VectorType"; break;
-      case Type::PointerTyID:  Out << "PointerType"; break;
-      case Type::OpaqueTyID:   Out << "OpaqueType"; break;
-      default:                 Out << "NoSuchDerivedType"; break;
-      }
-      Out << ">(" << I->second << "_fwd.get());";
-      nl(Out); nl(Out);
-      UnresolvedTypes.erase(I);
-    }
+  // Pop us off the type stack
+  TypeStack.pop_back();
 
-    // Finally, separate the type definition from other with a newline.
-    nl(Out);
+  // Indicate that this type is now defined.
+  DefinedTypes.insert(Ty);
 
-    // We weren't a recursive type
-    return false;
+  // Early resolve as many unresolved types as possible. Search the unresolved
+  // types map for the type we just printed. Now that its definition is complete
+  // we can resolve any previous references to it. This prevents a cascade of
+  // unresolved types.
+  TypeMap::iterator I = UnresolvedTypes.find(Ty);
+  if (I != UnresolvedTypes.end()) {
+    Out << "cast<OpaqueType>(" << I->second
+        << "_fwd.get())->refineAbstractTypeTo(" << I->second << ");";
+    nl(Out);
+    Out << I->second << " = cast<";
+    switch (Ty->getTypeID()) {
+    case Type::FunctionTyID: Out << "FunctionType"; break;
+    case Type::ArrayTyID:    Out << "ArrayType"; break;
+    case Type::StructTyID:   Out << "StructType"; break;
+    case Type::VectorTyID:   Out << "VectorType"; break;
+    case Type::PointerTyID:  Out << "PointerType"; break;
+    case Type::OpaqueTyID:   Out << "OpaqueType"; break;
+    default:                 Out << "NoSuchDerivedType"; break;
+    }
+    Out << ">(" << I->second << "_fwd.get());";
+    nl(Out); nl(Out);
+    UnresolvedTypes.erase(I);
   }
 
-  // Prints a type definition. Returns true if it could not resolve all the
-  // types in the definition but had to use a forward reference.
-  void CppWriter::printType(const Type* Ty) {
-    assert(TypeStack.empty());
-    TypeStack.clear();
-    printTypeInternal(Ty);
-    assert(TypeStack.empty());
-  }
-
-  void CppWriter::printTypes(const Module* M) {
-    // Walk the symbol table and print out all its types
-    const TypeSymbolTable& symtab = M->getTypeSymbolTable();
-    for (TypeSymbolTable::const_iterator TI = symtab.begin(), TE = symtab.end();
-         TI != TE; ++TI) {
-
-      // For primitive types and types already defined, just add a name
-      TypeMap::const_iterator TNI = TypeNames.find(TI->second);
-      if (TI->second->isIntegerTy() || TI->second->isPrimitiveType() ||
-          TNI != TypeNames.end()) {
-        Out << "mod->addTypeName(\"";
-        printEscapedString(TI->first);
-        Out << "\", " << getCppName(TI->second) << ");";
-        nl(Out);
-        // For everything else, define the type
-      } else {
-        printType(TI->second);
-      }
-    }
+  // Finally, separate the type definition from other with a newline.
+  nl(Out);
 
-    // Add all of the global variables to the value table...
-    for (Module::const_global_iterator I = TheModule->global_begin(),
-           E = TheModule->global_end(); I != E; ++I) {
-      if (I->hasInitializer())
-        printType(I->getInitializer()->getType());
-      printType(I->getType());
-    }
+  // We weren't a recursive type
+  return false;
+}
 
-    // Add all the functions to the table
-    for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
-         FI != FE; ++FI) {
-      printType(FI->getReturnType());
-      printType(FI->getFunctionType());
-      // Add all the function arguments
-      for (Function::const_arg_iterator AI = FI->arg_begin(),
-             AE = FI->arg_end(); AI != AE; ++AI) {
-        printType(AI->getType());
-      }
+// Prints a type definition. Returns true if it could not resolve all the
+// types in the definition but had to use a forward reference.
+void CppWriter::printType(const Type* Ty) {
+  assert(TypeStack.empty());
+  TypeStack.clear();
+  printTypeInternal(Ty);
+  assert(TypeStack.empty());
+}
 
-      // Add all of the basic blocks and instructions
-      for (Function::const_iterator BB = FI->begin(),
-             E = FI->end(); BB != E; ++BB) {
-        printType(BB->getType());
-        for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
-             ++I) {
-          printType(I->getType());
-          for (unsigned i = 0; i < I->getNumOperands(); ++i)
-            printType(I->getOperand(i)->getType());
-        }
+void CppWriter::printTypes(const Module* M) {
+  // Walk the symbol table and print out all its types
+  const TypeSymbolTable& symtab = M->getTypeSymbolTable();
+  for (TypeSymbolTable::const_iterator TI = symtab.begin(), TE = symtab.end();
+       TI != TE; ++TI) {
+
+    // For primitive types and types already defined, just add a name
+    TypeMap::const_iterator TNI = TypeNames.find(TI->second);
+    if (TI->second->isIntegerTy() || TI->second->isPrimitiveType() ||
+        TNI != TypeNames.end()) {
+      Out << "mod->addTypeName(\"";
+      printEscapedString(TI->first);
+      Out << "\", " << getCppName(TI->second) << ");";
+      nl(Out);
+      // For everything else, define the type
+    } else {
+      printType(TI->second);
+    }
+  }
+
+  // Add all of the global variables to the value table...
+  for (Module::const_global_iterator I = TheModule->global_begin(),
+         E = TheModule->global_end(); I != E; ++I) {
+    if (I->hasInitializer())
+      printType(I->getInitializer()->getType());
+    printType(I->getType());
+  }
+
+  // Add all the functions to the table
+  for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
+       FI != FE; ++FI) {
+    printType(FI->getReturnType());
+    printType(FI->getFunctionType());
+    // Add all the function arguments
+    for (Function::const_arg_iterator AI = FI->arg_begin(),
+           AE = FI->arg_end(); AI != AE; ++AI) {
+      printType(AI->getType());
+    }
+
+    // Add all of the basic blocks and instructions
+    for (Function::const_iterator BB = FI->begin(),
+           E = FI->end(); BB != E; ++BB) {
+      printType(BB->getType());
+      for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
+           ++I) {
+        printType(I->getType());
+        for (unsigned i = 0; i < I->getNumOperands(); ++i)
+          printType(I->getOperand(i)->getType());
       }
     }
   }
+}
 
 
-  // printConstant - Print out a constant pool entry...
-  void CppWriter::printConstant(const Constant *CV) {
-    // First, if the constant is actually a GlobalValue (variable or function)
-    // or its already in the constant list then we've printed it already and we
-    // can just return.
-    if (isa<GlobalValue>(CV) || ValueNames.find(CV) != ValueNames.end())
-      return;
-
-    std::string constName(getCppName(CV));
-    std::string typeName(getCppName(CV->getType()));
-
-    if (isa<GlobalValue>(CV)) {
-      // Skip variables and functions, we emit them elsewhere
-      return;
-    }
-
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
-      std::string constValue = CI->getValue().toString(10, true);
-      Out << "ConstantInt* " << constName
-          << " = ConstantInt::get(mod->getContext(), APInt("
-          << cast<IntegerType>(CI->getType())->getBitWidth()
-          << ", StringRef(\"" <<  constValue << "\"), 10));";
-    } else if (isa<ConstantAggregateZero>(CV)) {
-      Out << "ConstantAggregateZero* " << constName
-          << " = ConstantAggregateZero::get(" << typeName << ");";
-    } else if (isa<ConstantPointerNull>(CV)) {
-      Out << "ConstantPointerNull* " << constName
-          << " = ConstantPointerNull::get(" << typeName << ");";
-    } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
-      Out << "ConstantFP* " << constName << " = ";
-      printCFP(CFP);
-      Out << ";";
-    } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
-      if (CA->isString() &&
-          CA->getType()->getElementType() ==
-              Type::getInt8Ty(CA->getContext())) {
-        Out << "Constant* " << constName <<
-               " = ConstantArray::get(mod->getContext(), \"";
-        std::string tmp = CA->getAsString();
-        bool nullTerminate = false;
-        if (tmp[tmp.length()-1] == 0) {
-          tmp.erase(tmp.length()-1);
-          nullTerminate = true;
-        }
-        printEscapedString(tmp);
-        // Determine if we want null termination or not.
-        if (nullTerminate)
-          Out << "\", true"; // Indicate that the null terminator should be
-                             // added.
-        else
-          Out << "\", false";// No null terminator
-        Out << ");";
-      } else {
-        Out << "std::vector<Constant*> " << constName << "_elems;";
-        nl(Out);
-        unsigned N = CA->getNumOperands();
-        for (unsigned i = 0; i < N; ++i) {
-          printConstant(CA->getOperand(i)); // recurse to print operands
-          Out << constName << "_elems.push_back("
-              << getCppName(CA->getOperand(i)) << ");";
-          nl(Out);
-        }
-        Out << "Constant* " << constName << " = ConstantArray::get("
-            << typeName << ", " << constName << "_elems);";
-      }
-    } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
-      Out << "std::vector<Constant*> " << constName << "_fields;";
-      nl(Out);
-      unsigned N = CS->getNumOperands();
-      for (unsigned i = 0; i < N; i++) {
-        printConstant(CS->getOperand(i));
-        Out << constName << "_fields.push_back("
-            << getCppName(CS->getOperand(i)) << ");";
-        nl(Out);
-      }
-      Out << "Constant* " << constName << " = ConstantStruct::get("
-          << typeName << ", " << constName << "_fields);";
-    } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
+// printConstant - Print out a constant pool entry...
+void CppWriter::printConstant(const Constant *CV) {
+  // First, if the constant is actually a GlobalValue (variable or function)
+  // or its already in the constant list then we've printed it already and we
+  // can just return.
+  if (isa<GlobalValue>(CV) || ValueNames.find(CV) != ValueNames.end())
+    return;
+
+  std::string constName(getCppName(CV));
+  std::string typeName(getCppName(CV->getType()));
+
+  if (isa<GlobalValue>(CV)) {
+    // Skip variables and functions, we emit them elsewhere
+    return;
+  }
+
+  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+    std::string constValue = CI->getValue().toString(10, true);
+    Out << "ConstantInt* " << constName
+        << " = ConstantInt::get(mod->getContext(), APInt("
+        << cast<IntegerType>(CI->getType())->getBitWidth()
+        << ", StringRef(\"" <<  constValue << "\"), 10));";
+  } else if (isa<ConstantAggregateZero>(CV)) {
+    Out << "ConstantAggregateZero* " << constName
+        << " = ConstantAggregateZero::get(" << typeName << ");";
+  } else if (isa<ConstantPointerNull>(CV)) {
+    Out << "ConstantPointerNull* " << constName
+        << " = ConstantPointerNull::get(" << typeName << ");";
+  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+    Out << "ConstantFP* " << constName << " = ";
+    printCFP(CFP);
+    Out << ";";
+  } else if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+    if (CA->isString() &&
+        CA->getType()->getElementType() ==
+            Type::getInt8Ty(CA->getContext())) {
+      Out << "Constant* " << constName <<
+             " = ConstantArray::get(mod->getContext(), \"";
+      std::string tmp = CA->getAsString();
+      bool nullTerminate = false;
+      if (tmp[tmp.length()-1] == 0) {
+        tmp.erase(tmp.length()-1);
+        nullTerminate = true;
+      }
+      printEscapedString(tmp);
+      // Determine if we want null termination or not.
+      if (nullTerminate)
+        Out << "\", true"; // Indicate that the null terminator should be
+                           // added.
+      else
+        Out << "\", false";// No null terminator
+      Out << ");";
+    } else {
       Out << "std::vector<Constant*> " << constName << "_elems;";
       nl(Out);
-      unsigned N = CP->getNumOperands();
+      unsigned N = CA->getNumOperands();
       for (unsigned i = 0; i < N; ++i) {
-        printConstant(CP->getOperand(i));
+        printConstant(CA->getOperand(i)); // recurse to print operands
         Out << constName << "_elems.push_back("
-            << getCppName(CP->getOperand(i)) << ");";
+            << getCppName(CA->getOperand(i)) << ");";
         nl(Out);
       }
-      Out << "Constant* " << constName << " = ConstantVector::get("
+      Out << "Constant* " << constName << " = ConstantArray::get("
           << typeName << ", " << constName << "_elems);";
-    } else if (isa<UndefValue>(CV)) {
-      Out << "UndefValue* " << constName << " = UndefValue::get("
-          << typeName << ");";
-    } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
-      if (CE->getOpcode() == Instruction::GetElementPtr) {
-        Out << "std::vector<Constant*> " << constName << "_indices;";
-        nl(Out);
-        printConstant(CE->getOperand(0));
-        for (unsigned i = 1; i < CE->getNumOperands(); ++i ) {
-          printConstant(CE->getOperand(i));
-          Out << constName << "_indices.push_back("
-              << getCppName(CE->getOperand(i)) << ");";
-          nl(Out);
-        }
-        Out << "Constant* " << constName
-            << " = ConstantExpr::getGetElementPtr("
-            << getCppName(CE->getOperand(0)) << ", "
-            << "&" << constName << "_indices[0], "
-            << constName << "_indices.size()"
-            << ");";
-      } else if (CE->isCast()) {
-        printConstant(CE->getOperand(0));
-        Out << "Constant* " << constName << " = ConstantExpr::getCast(";
-        switch (CE->getOpcode()) {
-        default: llvm_unreachable("Invalid cast opcode");
-        case Instruction::Trunc: Out << "Instruction::Trunc"; break;
-        case Instruction::ZExt:  Out << "Instruction::ZExt"; break;
-        case Instruction::SExt:  Out << "Instruction::SExt"; break;
-        case Instruction::FPTrunc:  Out << "Instruction::FPTrunc"; break;
-        case Instruction::FPExt:  Out << "Instruction::FPExt"; break;
-        case Instruction::FPToUI:  Out << "Instruction::FPToUI"; break;
-        case Instruction::FPToSI:  Out << "Instruction::FPToSI"; break;
-        case Instruction::UIToFP:  Out << "Instruction::UIToFP"; break;
-        case Instruction::SIToFP:  Out << "Instruction::SIToFP"; break;
-        case Instruction::PtrToInt:  Out << "Instruction::PtrToInt"; break;
-        case Instruction::IntToPtr:  Out << "Instruction::IntToPtr"; break;
-        case Instruction::BitCast:  Out << "Instruction::BitCast"; break;
-        }
-        Out << ", " << getCppName(CE->getOperand(0)) << ", "
-            << getCppName(CE->getType()) << ");";
-      } else {
-        unsigned N = CE->getNumOperands();
-        for (unsigned i = 0; i < N; ++i ) {
-          printConstant(CE->getOperand(i));
+    }
+  } else if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+    Out << "std::vector<Constant*> " << constName << "_fields;";
+    nl(Out);
+    unsigned N = CS->getNumOperands();
+    for (unsigned i = 0; i < N; i++) {
+      printConstant(CS->getOperand(i));
+      Out << constName << "_fields.push_back("
+          << getCppName(CS->getOperand(i)) << ");";
+      nl(Out);
+    }
+    Out << "Constant* " << constName << " = ConstantStruct::get("
+        << typeName << ", " << constName << "_fields);";
+  } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
+    Out << "std::vector<Constant*> " << constName << "_elems;";
+    nl(Out);
+    unsigned N = CP->getNumOperands();
+    for (unsigned i = 0; i < N; ++i) {
+      printConstant(CP->getOperand(i));
+      Out << constName << "_elems.push_back("
+          << getCppName(CP->getOperand(i)) << ");";
+      nl(Out);
+    }
+    Out << "Constant* " << constName << " = ConstantVector::get("
+        << typeName << ", " << constName << "_elems);";
+  } else if (isa<UndefValue>(CV)) {
+    Out << "UndefValue* " << constName << " = UndefValue::get("
+        << typeName << ");";
+  } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+    if (CE->getOpcode() == Instruction::GetElementPtr) {
+      Out << "std::vector<Constant*> " << constName << "_indices;";
+      nl(Out);
+      printConstant(CE->getOperand(0));
+      for (unsigned i = 1; i < CE->getNumOperands(); ++i ) {
+        printConstant(CE->getOperand(i));
+        Out << constName << "_indices.push_back("
+            << getCppName(CE->getOperand(i)) << ");";
+        nl(Out);
+      }
+      Out << "Constant* " << constName
+          << " = ConstantExpr::getGetElementPtr("
+          << getCppName(CE->getOperand(0)) << ", "
+          << "&" << constName << "_indices[0], "
+          << constName << "_indices.size()"
+          << ");";
+    } else if (CE->isCast()) {
+      printConstant(CE->getOperand(0));
+      Out << "Constant* " << constName << " = ConstantExpr::getCast(";
+      switch (CE->getOpcode()) {
+      default: llvm_unreachable("Invalid cast opcode");
+      case Instruction::Trunc: Out << "Instruction::Trunc"; break;
+      case Instruction::ZExt:  Out << "Instruction::ZExt"; break;
+      case Instruction::SExt:  Out << "Instruction::SExt"; break;
+      case Instruction::FPTrunc:  Out << "Instruction::FPTrunc"; break;
+      case Instruction::FPExt:  Out << "Instruction::FPExt"; break;
+      case Instruction::FPToUI:  Out << "Instruction::FPToUI"; break;
+      case Instruction::FPToSI:  Out << "Instruction::FPToSI"; break;
+      case Instruction::UIToFP:  Out << "Instruction::UIToFP"; break;
+      case Instruction::SIToFP:  Out << "Instruction::SIToFP"; break;
+      case Instruction::PtrToInt:  Out << "Instruction::PtrToInt"; break;
+      case Instruction::IntToPtr:  Out << "Instruction::IntToPtr"; break;
+      case Instruction::BitCast:  Out << "Instruction::BitCast"; break;
+      }
+      Out << ", " << getCppName(CE->getOperand(0)) << ", "
+          << getCppName(CE->getType()) << ");";
+    } else {
+      unsigned N = CE->getNumOperands();
+      for (unsigned i = 0; i < N; ++i ) {
+        printConstant(CE->getOperand(i));
+      }
+      Out << "Constant* " << constName << " = ConstantExpr::";
+      switch (CE->getOpcode()) {
+      case Instruction::Add:    Out << "getAdd(";  break;
+      case Instruction::FAdd:   Out << "getFAdd(";  break;
+      case Instruction::Sub:    Out << "getSub("; break;
+      case Instruction::FSub:   Out << "getFSub("; break;
+      case Instruction::Mul:    Out << "getMul("; break;
+      case Instruction::FMul:   Out << "getFMul("; break;
+      case Instruction::UDiv:   Out << "getUDiv("; break;
+      case Instruction::SDiv:   Out << "getSDiv("; break;
+      case Instruction::FDiv:   Out << "getFDiv("; break;
+      case Instruction::URem:   Out << "getURem("; break;
+      case Instruction::SRem:   Out << "getSRem("; break;
+      case Instruction::FRem:   Out << "getFRem("; break;
+      case Instruction::And:    Out << "getAnd("; break;
+      case Instruction::Or:     Out << "getOr("; break;
+      case Instruction::Xor:    Out << "getXor("; break;
+      case Instruction::ICmp:
+        Out << "getICmp(ICmpInst::ICMP_";
+        switch (CE->getPredicate()) {
+        case ICmpInst::ICMP_EQ:  Out << "EQ"; break;
+        case ICmpInst::ICMP_NE:  Out << "NE"; break;
+        case ICmpInst::ICMP_SLT: Out << "SLT"; break;
+        case ICmpInst::ICMP_ULT: Out << "ULT"; break;
+        case ICmpInst::ICMP_SGT: Out << "SGT"; break;
+        case ICmpInst::ICMP_UGT: Out << "UGT"; break;
+        case ICmpInst::ICMP_SLE: Out << "SLE"; break;
+        case ICmpInst::ICMP_ULE: Out << "ULE"; break;
+        case ICmpInst::ICMP_SGE: Out << "SGE"; break;
+        case ICmpInst::ICMP_UGE: Out << "UGE"; break;
+        default: error("Invalid ICmp Predicate");
         }
-        Out << "Constant* " << constName << " = ConstantExpr::";
-        switch (CE->getOpcode()) {
-        case Instruction::Add:    Out << "getAdd(";  break;
-        case Instruction::FAdd:   Out << "getFAdd(";  break;
-        case Instruction::Sub:    Out << "getSub("; break;
-        case Instruction::FSub:   Out << "getFSub("; break;
-        case Instruction::Mul:    Out << "getMul("; break;
-        case Instruction::FMul:   Out << "getFMul("; break;
-        case Instruction::UDiv:   Out << "getUDiv("; break;
-        case Instruction::SDiv:   Out << "getSDiv("; break;
-        case Instruction::FDiv:   Out << "getFDiv("; break;
-        case Instruction::URem:   Out << "getURem("; break;
-        case Instruction::SRem:   Out << "getSRem("; break;
-        case Instruction::FRem:   Out << "getFRem("; break;
-        case Instruction::And:    Out << "getAnd("; break;
-        case Instruction::Or:     Out << "getOr("; break;
-        case Instruction::Xor:    Out << "getXor("; break;
-        case Instruction::ICmp:
-          Out << "getICmp(ICmpInst::ICMP_";
-          switch (CE->getPredicate()) {
-          case ICmpInst::ICMP_EQ:  Out << "EQ"; break;
-          case ICmpInst::ICMP_NE:  Out << "NE"; break;
-          case ICmpInst::ICMP_SLT: Out << "SLT"; break;
-          case ICmpInst::ICMP_ULT: Out << "ULT"; break;
-          case ICmpInst::ICMP_SGT: Out << "SGT"; break;
-          case ICmpInst::ICMP_UGT: Out << "UGT"; break;
-          case ICmpInst::ICMP_SLE: Out << "SLE"; break;
-          case ICmpInst::ICMP_ULE: Out << "ULE"; break;
-          case ICmpInst::ICMP_SGE: Out << "SGE"; break;
-          case ICmpInst::ICMP_UGE: Out << "UGE"; break;
-          default: error("Invalid ICmp Predicate");
-          }
-          break;
-        case Instruction::FCmp:
-          Out << "getFCmp(FCmpInst::FCMP_";
-          switch (CE->getPredicate()) {
-          case FCmpInst::FCMP_FALSE: Out << "FALSE"; break;
-          case FCmpInst::FCMP_ORD:   Out << "ORD"; break;
-          case FCmpInst::FCMP_UNO:   Out << "UNO"; break;
-          case FCmpInst::FCMP_OEQ:   Out << "OEQ"; break;
-          case FCmpInst::FCMP_UEQ:   Out << "UEQ"; break;
-          case FCmpInst::FCMP_ONE:   Out << "ONE"; break;
-          case FCmpInst::FCMP_UNE:   Out << "UNE"; break;
-          case FCmpInst::FCMP_OLT:   Out << "OLT"; break;
-          case FCmpInst::FCMP_ULT:   Out << "ULT"; break;
-          case FCmpInst::FCMP_OGT:   Out << "OGT"; break;
-          case FCmpInst::FCMP_UGT:   Out << "UGT"; break;
-          case FCmpInst::FCMP_OLE:   Out << "OLE"; break;
-          case FCmpInst::FCMP_ULE:   Out << "ULE"; break;
-          case FCmpInst::FCMP_OGE:   Out << "OGE"; break;
-          case FCmpInst::FCMP_UGE:   Out << "UGE"; break;
-          case FCmpInst::FCMP_TRUE:  Out << "TRUE"; break;
-          default: error("Invalid FCmp Predicate");
-          }
-          break;
-        case Instruction::Shl:     Out << "getShl("; break;
-        case Instruction::LShr:    Out << "getLShr("; break;
-        case Instruction::AShr:    Out << "getAShr("; break;
-        case Instruction::Select:  Out << "getSelect("; break;
-        case Instruction::ExtractElement: Out << "getExtractElement("; break;
-        case Instruction::InsertElement:  Out << "getInsertElement("; break;
-        case Instruction::ShuffleVector:  Out << "getShuffleVector("; break;
-        default:
-          error("Invalid constant expression");
-          break;
+        break;
+      case Instruction::FCmp:
+        Out << "getFCmp(FCmpInst::FCMP_";
+        switch (CE->getPredicate()) {
+        case FCmpInst::FCMP_FALSE: Out << "FALSE"; break;
+        case FCmpInst::FCMP_ORD:   Out << "ORD"; break;
+        case FCmpInst::FCMP_UNO:   Out << "UNO"; break;
+        case FCmpInst::FCMP_OEQ:   Out << "OEQ"; break;
+        case FCmpInst::FCMP_UEQ:   Out << "UEQ"; break;
+        case FCmpInst::FCMP_ONE:   Out << "ONE"; break;
+        case FCmpInst::FCMP_UNE:   Out << "UNE"; break;
+        case FCmpInst::FCMP_OLT:   Out << "OLT"; break;
+        case FCmpInst::FCMP_ULT:   Out << "ULT"; break;
+        case FCmpInst::FCMP_OGT:   Out << "OGT"; break;
+        case FCmpInst::FCMP_UGT:   Out << "UGT"; break;
+        case FCmpInst::FCMP_OLE:   Out << "OLE"; break;
+        case FCmpInst::FCMP_ULE:   Out << "ULE"; break;
+        case FCmpInst::FCMP_OGE:   Out << "OGE"; break;
+        case FCmpInst::FCMP_UGE:   Out << "UGE"; break;
+        case FCmpInst::FCMP_TRUE:  Out << "TRUE"; break;
+        default: error("Invalid FCmp Predicate");
         }
-        Out << getCppName(CE->getOperand(0));
-        for (unsigned i = 1; i < CE->getNumOperands(); ++i)
-          Out << ", " << getCppName(CE->getOperand(i));
-        Out << ");";
+        break;
+      case Instruction::Shl:     Out << "getShl("; break;
+      case Instruction::LShr:    Out << "getLShr("; break;
+      case Instruction::AShr:    Out << "getAShr("; break;
+      case Instruction::Select:  Out << "getSelect("; break;
+      case Instruction::ExtractElement: Out << "getExtractElement("; break;
+      case Instruction::InsertElement:  Out << "getInsertElement("; break;
+      case Instruction::ShuffleVector:  Out << "getShuffleVector("; break;
+      default:
+        error("Invalid constant expression");
+        break;
       }
-    } else {
-      error("Bad Constant");
-      Out << "Constant* " << constName << " = 0; ";
+      Out << getCppName(CE->getOperand(0));
+      for (unsigned i = 1; i < CE->getNumOperands(); ++i)
+        Out << ", " << getCppName(CE->getOperand(i));
+      Out << ");";
     }
-    nl(Out);
+  } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
+    Out << "Constant* " << constName << " = ";
+    Out << "BlockAddress::get(" << getOpName(BA->getBasicBlock()) << ");";
+  } else {
+    error("Bad Constant");
+    Out << "Constant* " << constName << " = 0; ";
   }
+  nl(Out);
+}
 
-  void CppWriter::printConstants(const Module* M) {
-    // Traverse all the global variables looking for constant initializers
-    for (Module::const_global_iterator I = TheModule->global_begin(),
-           E = TheModule->global_end(); I != E; ++I)
-      if (I->hasInitializer())
-        printConstant(I->getInitializer());
-
-    // Traverse the LLVM functions looking for constants
-    for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
-         FI != FE; ++FI) {
-      // Add all of the basic blocks and instructions
-      for (Function::const_iterator BB = FI->begin(),
-             E = FI->end(); BB != E; ++BB) {
-        for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
-             ++I) {
-          for (unsigned i = 0; i < I->getNumOperands(); ++i) {
-            if (Constant* C = dyn_cast<Constant>(I->getOperand(i))) {
-              printConstant(C);
-            }
+void CppWriter::printConstants(const Module* M) {
+  // Traverse all the global variables looking for constant initializers
+  for (Module::const_global_iterator I = TheModule->global_begin(),
+         E = TheModule->global_end(); I != E; ++I)
+    if (I->hasInitializer())
+      printConstant(I->getInitializer());
+
+  // Traverse the LLVM functions looking for constants
+  for (Module::const_iterator FI = TheModule->begin(), FE = TheModule->end();
+       FI != FE; ++FI) {
+    // Add all of the basic blocks and instructions
+    for (Function::const_iterator BB = FI->begin(),
+           E = FI->end(); BB != E; ++BB) {
+      for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;
+           ++I) {
+        for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+          if (Constant* C = dyn_cast<Constant>(I->getOperand(i))) {
+            printConstant(C);
           }
         }
       }
     }
   }
+}
 
-  void CppWriter::printVariableUses(const GlobalVariable *GV) {
-    nl(Out) << "// Type Definitions";
-    nl(Out);
-    printType(GV->getType());
-    if (GV->hasInitializer()) {
-      Constant *Init = GV->getInitializer();
-      printType(Init->getType());
-      if (Function *F = dyn_cast<Function>(Init)) {
-        nl(Out)<< "/ Function Declarations"; nl(Out);
-        printFunctionHead(F);
-      } else if (GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
-        nl(Out) << "// Global Variable Declarations"; nl(Out);
-        printVariableHead(gv);
-        
-        nl(Out) << "// Global Variable Definitions"; nl(Out);
-        printVariableBody(gv);
-      } else  {
-        nl(Out) << "// Constant Definitions"; nl(Out);
-        printConstant(Init);
-      }
+void CppWriter::printVariableUses(const GlobalVariable *GV) {
+  nl(Out) << "// Type Definitions";
+  nl(Out);
+  printType(GV->getType());
+  if (GV->hasInitializer()) {
+    Constant *Init = GV->getInitializer();
+    printType(Init->getType());
+    if (Function *F = dyn_cast<Function>(Init)) {
+      nl(Out)<< "/ Function Declarations"; nl(Out);
+      printFunctionHead(F);
+    } else if (GlobalVariable* gv = dyn_cast<GlobalVariable>(Init)) {
+      nl(Out) << "// Global Variable Declarations"; nl(Out);
+      printVariableHead(gv);
+      
+      nl(Out) << "// Global Variable Definitions"; nl(Out);
+      printVariableBody(gv);
+    } else  {
+      nl(Out) << "// Constant Definitions"; nl(Out);
+      printConstant(Init);
     }
   }
+}
 
-  void CppWriter::printVariableHead(const GlobalVariable *GV) {
-    nl(Out) << "GlobalVariable* " << getCppName(GV);
-    if (is_inline) {
-      Out << " = mod->getGlobalVariable(mod->getContext(), ";
-      printEscapedString(GV->getName());
-      Out << ", " << getCppName(GV->getType()->getElementType()) << ",true)";
-      nl(Out) << "if (!" << getCppName(GV) << ") {";
-      in(); nl(Out) << getCppName(GV);
-    }
-    Out << " = new GlobalVariable(/*Module=*/*mod, ";
-    nl(Out) << "/*Type=*/";
-    printCppName(GV->getType()->getElementType());
-    Out << ",";
-    nl(Out) << "/*isConstant=*/" << (GV->isConstant()?"true":"false");
-    Out << ",";
-    nl(Out) << "/*Linkage=*/";
-    printLinkageType(GV->getLinkage());
-    Out << ",";
-    nl(Out) << "/*Initializer=*/0, ";
-    if (GV->hasInitializer()) {
-      Out << "// has initializer, specified below";
-    }
-    nl(Out) << "/*Name=*/\"";
+void CppWriter::printVariableHead(const GlobalVariable *GV) {
+  nl(Out) << "GlobalVariable* " << getCppName(GV);
+  if (is_inline) {
+    Out << " = mod->getGlobalVariable(mod->getContext(), ";
     printEscapedString(GV->getName());
+    Out << ", " << getCppName(GV->getType()->getElementType()) << ",true)";
+    nl(Out) << "if (!" << getCppName(GV) << ") {";
+    in(); nl(Out) << getCppName(GV);
+  }
+  Out << " = new GlobalVariable(/*Module=*/*mod, ";
+  nl(Out) << "/*Type=*/";
+  printCppName(GV->getType()->getElementType());
+  Out << ",";
+  nl(Out) << "/*isConstant=*/" << (GV->isConstant()?"true":"false");
+  Out << ",";
+  nl(Out) << "/*Linkage=*/";
+  printLinkageType(GV->getLinkage());
+  Out << ",";
+  nl(Out) << "/*Initializer=*/0, ";
+  if (GV->hasInitializer()) {
+    Out << "// has initializer, specified below";
+  }
+  nl(Out) << "/*Name=*/\"";
+  printEscapedString(GV->getName());
+  Out << "\");";
+  nl(Out);
+
+  if (GV->hasSection()) {
+    printCppName(GV);
+    Out << "->setSection(\"";
+    printEscapedString(GV->getSection());
     Out << "\");";
     nl(Out);
-
-    if (GV->hasSection()) {
-      printCppName(GV);
-      Out << "->setSection(\"";
-      printEscapedString(GV->getSection());
-      Out << "\");";
-      nl(Out);
-    }
-    if (GV->getAlignment()) {
-      printCppName(GV);
-      Out << "->setAlignment(" << utostr(GV->getAlignment()) << ");";
-      nl(Out);
-    }
-    if (GV->getVisibility() != GlobalValue::DefaultVisibility) {
-      printCppName(GV);
-      Out << "->setVisibility(";
-      printVisibilityType(GV->getVisibility());
-      Out << ");";
-      nl(Out);
-    }
-    if (GV->isThreadLocal()) {
-      printCppName(GV);
-      Out << "->setThreadLocal(true);";
-      nl(Out);
-    }
-    if (is_inline) {
-      out(); Out << "}"; nl(Out);
-    }
   }
-
-  void CppWriter::printVariableBody(const GlobalVariable *GV) {
-    if (GV->hasInitializer()) {
-      printCppName(GV);
-      Out << "->setInitializer(";
-      Out << getCppName(GV->getInitializer()) << ");";
-      nl(Out);
-    }
+  if (GV->getAlignment()) {
+    printCppName(GV);
+    Out << "->setAlignment(" << utostr(GV->getAlignment()) << ");";
+    nl(Out);
   }
-
-  std::string CppWriter::getOpName(Value* V) {
-    if (!isa<Instruction>(V) || DefinedValues.find(V) != DefinedValues.end())
-      return getCppName(V);
-
-    // See if its alread in the map of forward references, if so just return the
-    // name we already set up for it
-    ForwardRefMap::const_iterator I = ForwardRefs.find(V);
-    if (I != ForwardRefs.end())
-      return I->second;
-
-    // This is a new forward reference. Generate a unique name for it
-    std::string result(std::string("fwdref_") + utostr(uniqueNum++));
-
-    // Yes, this is a hack. An Argument is the smallest instantiable value that
-    // we can make as a placeholder for the real value. We'll replace these
-    // Argument instances later.
-    Out << "Argument* " << result << " = new Argument("
-        << getCppName(V->getType()) << ");";
+  if (GV->getVisibility() != GlobalValue::DefaultVisibility) {
+    printCppName(GV);
+    Out << "->setVisibility(";
+    printVisibilityType(GV->getVisibility());
+    Out << ");";
     nl(Out);
-    ForwardRefs[V] = result;
-    return result;
   }
+  if (GV->isThreadLocal()) {
+    printCppName(GV);
+    Out << "->setThreadLocal(true);";
+    nl(Out);
+  }
+  if (is_inline) {
+    out(); Out << "}"; nl(Out);
+  }
+}
 
-  // printInstruction - This member is called for each Instruction in a function.
-  void CppWriter::printInstruction(const Instruction *I,
-                                   const std::string& bbname) {
-    std::string iName(getCppName(I));
-
-    // Before we emit this instruction, we need to take care of generating any
-    // forward references. So, we get the names of all the operands in advance
-    const unsigned Ops(I->getNumOperands());
-    std::string* opNames = new std::string[Ops];
-    for (unsigned i = 0; i < Ops; i++) {
-      opNames[i] = getOpName(I->getOperand(i));
-    }
+void CppWriter::printVariableBody(const GlobalVariable *GV) {
+  if (GV->hasInitializer()) {
+    printCppName(GV);
+    Out << "->setInitializer(";
+    Out << getCppName(GV->getInitializer()) << ");";
+    nl(Out);
+  }
+}
 
-    switch (I->getOpcode()) {
-    default:
-      error("Invalid instruction");
-      break;
+std::string CppWriter::getOpName(Value* V) {
+  if (!isa<Instruction>(V) || DefinedValues.find(V) != DefinedValues.end())
+    return getCppName(V);
+
+  // See if its alread in the map of forward references, if so just return the
+  // name we already set up for it
+  ForwardRefMap::const_iterator I = ForwardRefs.find(V);
+  if (I != ForwardRefs.end())
+    return I->second;
+
+  // This is a new forward reference. Generate a unique name for it
+  std::string result(std::string("fwdref_") + utostr(uniqueNum++));
+
+  // Yes, this is a hack. An Argument is the smallest instantiable value that
+  // we can make as a placeholder for the real value. We'll replace these
+  // Argument instances later.
+  Out << "Argument* " << result << " = new Argument("
+      << getCppName(V->getType()) << ");";
+  nl(Out);
+  ForwardRefs[V] = result;
+  return result;
+}
 
-    case Instruction::Ret: {
-      const ReturnInst* ret =  cast<ReturnInst>(I);
-      Out << "ReturnInst::Create(mod->getContext(), "
-          << (ret->getReturnValue() ? opNames[0] + ", " : "") << bbname << ");";
-      break;
-    }
-    case Instruction::Br: {
-      const BranchInst* br = cast<BranchInst>(I);
-      Out << "BranchInst::Create(" ;
-      if (br->getNumOperands() == 3 ) {
-        Out << opNames[2] << ", "
-            << opNames[1] << ", "
-            << opNames[0] << ", ";
-
-      } else if (br->getNumOperands() == 1) {
-        Out << opNames[0] << ", ";
-      } else {
-        error("Branch with 2 operands?");
-      }
-      Out << bbname << ");";
-      break;
-    }
-    case Instruction::Switch: {
-      const SwitchInst *SI = cast<SwitchInst>(I);
-      Out << "SwitchInst* " << iName << " = SwitchInst::Create("
-          << opNames[0] << ", "
+// printInstruction - This member is called for each Instruction in a function.
+void CppWriter::printInstruction(const Instruction *I,
+                                 const std::string& bbname) {
+  std::string iName(getCppName(I));
+
+  // Before we emit this instruction, we need to take care of generating any
+  // forward references. So, we get the names of all the operands in advance
+  const unsigned Ops(I->getNumOperands());
+  std::string* opNames = new std::string[Ops];
+  for (unsigned i = 0; i < Ops; i++)
+    opNames[i] = getOpName(I->getOperand(i));
+
+  switch (I->getOpcode()) {
+  default:
+    error("Invalid instruction");
+    break;
+
+  case Instruction::Ret: {
+    const ReturnInst* ret =  cast<ReturnInst>(I);
+    Out << "ReturnInst::Create(mod->getContext(), "
+        << (ret->getReturnValue() ? opNames[0] + ", " : "") << bbname << ");";
+    break;
+  }
+  case Instruction::Br: {
+    const BranchInst* br = cast<BranchInst>(I);
+    Out << "BranchInst::Create(" ;
+    if (br->getNumOperands() == 3) {
+      Out << opNames[2] << ", "
           << opNames[1] << ", "
-          << SI->getNumCases() << ", " << bbname << ");";
-      nl(Out);
-      for (unsigned i = 2; i != SI->getNumOperands(); i += 2) {
-        Out << iName << "->addCase("
-            << opNames[i] << ", "
-            << opNames[i+1] << ");";
-        nl(Out);
-      }
-      break;
+          << opNames[0] << ", ";
+
+    } else if (br->getNumOperands() == 1) {
+      Out << opNames[0] << ", ";
+    } else {
+      error("Branch with 2 operands?");
     }
-    case Instruction::IndirectBr: {
-      const IndirectBrInst *IBI = cast<IndirectBrInst>(I);
-      Out << "IndirectBrInst *" << iName << " = IndirectBrInst::Create("
-          << opNames[0] << ", " << IBI->getNumDestinations() << ");";
+    Out << bbname << ");";
+    break;
+  }
+  case Instruction::Switch: {
+    const SwitchInst *SI = cast<SwitchInst>(I);
+    Out << "SwitchInst* " << iName << " = SwitchInst::Create("
+        << opNames[0] << ", "
+        << opNames[1] << ", "
+        << SI->getNumCases() << ", " << bbname << ");";
+    nl(Out);
+    for (unsigned i = 2; i != SI->getNumOperands(); i += 2) {
+      Out << iName << "->addCase("
+          << opNames[i] << ", "
+          << opNames[i+1] << ");";
       nl(Out);
-      for (unsigned i = 1; i != IBI->getNumOperands(); ++i) {
-        Out << iName << "->addDestination(" << opNames[i] << ");";
-        nl(Out);
-      }
-      break;
     }
-    case Instruction::Invoke: {
-      const InvokeInst* inv = cast<InvokeInst>(I);
-      Out << "std::vector<Value*> " << iName << "_params;";
-      nl(Out);
-      for (unsigned i = 0; i < inv->getNumArgOperands(); ++i) {
-        Out << iName << "_params.push_back("
-            << getOpName(inv->getArgOperand(i)) << ");";
-        nl(Out);
-      }
-      // FIXME: This shouldn't use magic numbers -3, -2, and -1.
-      Out << "InvokeInst *" << iName << " = InvokeInst::Create("
-          << getOpName(inv->getCalledFunction()) << ", "
-          << getOpName(inv->getNormalDest()) << ", "
-          << getOpName(inv->getUnwindDest()) << ", "
-          << iName << "_params.begin(), "
-          << iName << "_params.end(), \"";
-      printEscapedString(inv->getName());
-      Out << "\", " << bbname << ");";
-      nl(Out) << iName << "->setCallingConv(";
-      printCallingConv(inv->getCallingConv());
-      Out << ");";
-      printAttributes(inv->getAttributes(), iName);
-      Out << iName << "->setAttributes(" << iName << "_PAL);";
+    break;
+  }
+  case Instruction::IndirectBr: {
+    const IndirectBrInst *IBI = cast<IndirectBrInst>(I);
+    Out << "IndirectBrInst *" << iName << " = IndirectBrInst::Create("
+        << opNames[0] << ", " << IBI->getNumDestinations() << ");";
+    nl(Out);
+    for (unsigned i = 1; i != IBI->getNumOperands(); ++i) {
+      Out << iName << "->addDestination(" << opNames[i] << ");";
       nl(Out);
-      break;
-    }
-    case Instruction::Unwind: {
-      Out << "new UnwindInst("
-          << bbname << ");";
-      break;
-    }
-    case Instruction::Unreachable: {
-      Out << "new UnreachableInst("
-          << "mod->getContext(), "
-          << bbname << ");";
-      break;
-    }
-    case Instruction::Add:
-    case Instruction::FAdd:
-    case Instruction::Sub:
-    case Instruction::FSub:
-    case Instruction::Mul:
-    case Instruction::FMul:
-    case Instruction::UDiv:
-    case Instruction::SDiv:
-    case Instruction::FDiv:
-    case Instruction::URem:
-    case Instruction::SRem:
-    case Instruction::FRem:
-    case Instruction::And:
-    case Instruction::Or:
-    case Instruction::Xor:
-    case Instruction::Shl:
-    case Instruction::LShr:
-    case Instruction::AShr:{
-      Out << "BinaryOperator* " << iName << " = BinaryOperator::Create(";
-      switch (I->getOpcode()) {
-      case Instruction::Add: Out << "Instruction::Add"; break;
-      case Instruction::FAdd: Out << "Instruction::FAdd"; break;
-      case Instruction::Sub: Out << "Instruction::Sub"; break;
-      case Instruction::FSub: Out << "Instruction::FSub"; break;
-      case Instruction::Mul: Out << "Instruction::Mul"; break;
-      case Instruction::FMul: Out << "Instruction::FMul"; break;
-      case Instruction::UDiv:Out << "Instruction::UDiv"; break;
-      case Instruction::SDiv:Out << "Instruction::SDiv"; break;
-      case Instruction::FDiv:Out << "Instruction::FDiv"; break;
-      case Instruction::URem:Out << "Instruction::URem"; break;
-      case Instruction::SRem:Out << "Instruction::SRem"; break;
-      case Instruction::FRem:Out << "Instruction::FRem"; break;
-      case Instruction::And: Out << "Instruction::And"; break;
-      case Instruction::Or:  Out << "Instruction::Or";  break;
-      case Instruction::Xor: Out << "Instruction::Xor"; break;
-      case Instruction::Shl: Out << "Instruction::Shl"; break;
-      case Instruction::LShr:Out << "Instruction::LShr"; break;
-      case Instruction::AShr:Out << "Instruction::AShr"; break;
-      default: Out << "Instruction::BadOpCode"; break;
-      }
-      Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
-      printEscapedString(I->getName());
-      Out << "\", " << bbname << ");";
-      break;
     }
-    case Instruction::FCmp: {
-      Out << "FCmpInst* " << iName << " = new FCmpInst(*" << bbname << ", ";
-      switch (cast<FCmpInst>(I)->getPredicate()) {
-      case FCmpInst::FCMP_FALSE: Out << "FCmpInst::FCMP_FALSE"; break;
-      case FCmpInst::FCMP_OEQ  : Out << "FCmpInst::FCMP_OEQ"; break;
-      case FCmpInst::FCMP_OGT  : Out << "FCmpInst::FCMP_OGT"; break;
-      case FCmpInst::FCMP_OGE  : Out << "FCmpInst::FCMP_OGE"; break;
-      case FCmpInst::FCMP_OLT  : Out << "FCmpInst::FCMP_OLT"; break;
-      case FCmpInst::FCMP_OLE  : Out << "FCmpInst::FCMP_OLE"; break;
-      case FCmpInst::FCMP_ONE  : Out << "FCmpInst::FCMP_ONE"; break;
-      case FCmpInst::FCMP_ORD  : Out << "FCmpInst::FCMP_ORD"; break;
-      case FCmpInst::FCMP_UNO  : Out << "FCmpInst::FCMP_UNO"; break;
-      case FCmpInst::FCMP_UEQ  : Out << "FCmpInst::FCMP_UEQ"; break;
-      case FCmpInst::FCMP_UGT  : Out << "FCmpInst::FCMP_UGT"; break;
-      case FCmpInst::FCMP_UGE  : Out << "FCmpInst::FCMP_UGE"; break;
-      case FCmpInst::FCMP_ULT  : Out << "FCmpInst::FCMP_ULT"; break;
-      case FCmpInst::FCMP_ULE  : Out << "FCmpInst::FCMP_ULE"; break;
-      case FCmpInst::FCMP_UNE  : Out << "FCmpInst::FCMP_UNE"; break;
-      case FCmpInst::FCMP_TRUE : Out << "FCmpInst::FCMP_TRUE"; break;
-      default: Out << "FCmpInst::BAD_ICMP_PREDICATE"; break;
-      }
-      Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
-      printEscapedString(I->getName());
-      Out << "\");";
-      break;
-    }
-    case Instruction::ICmp: {
-      Out << "ICmpInst* " << iName << " = new ICmpInst(*" << bbname << ", ";
-      switch (cast<ICmpInst>(I)->getPredicate()) {
-      case ICmpInst::ICMP_EQ:  Out << "ICmpInst::ICMP_EQ";  break;
-      case ICmpInst::ICMP_NE:  Out << "ICmpInst::ICMP_NE";  break;
-      case ICmpInst::ICMP_ULE: Out << "ICmpInst::ICMP_ULE"; break;
-      case ICmpInst::ICMP_SLE: Out << "ICmpInst::ICMP_SLE"; break;
-      case ICmpInst::ICMP_UGE: Out << "ICmpInst::ICMP_UGE"; break;
-      case ICmpInst::ICMP_SGE: Out << "ICmpInst::ICMP_SGE"; break;
-      case ICmpInst::ICMP_ULT: Out << "ICmpInst::ICMP_ULT"; break;
-      case ICmpInst::ICMP_SLT: Out << "ICmpInst::ICMP_SLT"; break;
-      case ICmpInst::ICMP_UGT: Out << "ICmpInst::ICMP_UGT"; break;
-      case ICmpInst::ICMP_SGT: Out << "ICmpInst::ICMP_SGT"; break;
-      default: Out << "ICmpInst::BAD_ICMP_PREDICATE"; break;
-      }
-      Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
-      printEscapedString(I->getName());
-      Out << "\");";
-      break;
-    }
-    case Instruction::Alloca: {
-      const AllocaInst* allocaI = cast<AllocaInst>(I);
-      Out << "AllocaInst* " << iName << " = new AllocaInst("
-          << getCppName(allocaI->getAllocatedType()) << ", ";
-      if (allocaI->isArrayAllocation())
-        Out << opNames[0] << ", ";
-      Out << "\"";
-      printEscapedString(allocaI->getName());
-      Out << "\", " << bbname << ");";
-      if (allocaI->getAlignment())
-        nl(Out) << iName << "->setAlignment("
-            << allocaI->getAlignment() << ");";
-      break;
-    }
-    case Instruction::Load:{
-      const LoadInst* load = cast<LoadInst>(I);
-      Out << "LoadInst* " << iName << " = new LoadInst("
-          << opNames[0] << ", \"";
-      printEscapedString(load->getName());
-      Out << "\", " << (load->isVolatile() ? "true" : "false" )
-          << ", " << bbname << ");";
-      break;
-    }
-    case Instruction::Store: {
-      const StoreInst* store = cast<StoreInst>(I);
-      Out << " new StoreInst("
-          << opNames[0] << ", "
-          << opNames[1] << ", "
-          << (store->isVolatile() ? "true" : "false")
-          << ", " << bbname << ");";
-      break;
+    break;
+  }
+  case Instruction::Invoke: {
+    const InvokeInst* inv = cast<InvokeInst>(I);
+    Out << "std::vector<Value*> " << iName << "_params;";
+    nl(Out);
+    for (unsigned i = 0; i < inv->getNumArgOperands(); ++i) {
+      Out << iName << "_params.push_back("
+          << getOpName(inv->getArgOperand(i)) << ");";
+      nl(Out);
+    }
+    // FIXME: This shouldn't use magic numbers -3, -2, and -1.
+    Out << "InvokeInst *" << iName << " = InvokeInst::Create("
+        << getOpName(inv->getCalledFunction()) << ", "
+        << getOpName(inv->getNormalDest()) << ", "
+        << getOpName(inv->getUnwindDest()) << ", "
+        << iName << "_params.begin(), "
+        << iName << "_params.end(), \"";
+    printEscapedString(inv->getName());
+    Out << "\", " << bbname << ");";
+    nl(Out) << iName << "->setCallingConv(";
+    printCallingConv(inv->getCallingConv());
+    Out << ");";
+    printAttributes(inv->getAttributes(), iName);
+    Out << iName << "->setAttributes(" << iName << "_PAL);";
+    nl(Out);
+    break;
+  }
+  case Instruction::Unwind: {
+    Out << "new UnwindInst("
+        << bbname << ");";
+    break;
+  }
+  case Instruction::Unreachable: {
+    Out << "new UnreachableInst("
+        << "mod->getContext(), "
+        << bbname << ");";
+    break;
+  }
+  case Instruction::Add:
+  case Instruction::FAdd:
+  case Instruction::Sub:
+  case Instruction::FSub:
+  case Instruction::Mul:
+  case Instruction::FMul:
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::FDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+  case Instruction::FRem:
+  case Instruction::And:
+  case Instruction::Or:
+  case Instruction::Xor:
+  case Instruction::Shl:
+  case Instruction::LShr:
+  case Instruction::AShr:{
+    Out << "BinaryOperator* " << iName << " = BinaryOperator::Create(";
+    switch (I->getOpcode()) {
+    case Instruction::Add: Out << "Instruction::Add"; break;
+    case Instruction::FAdd: Out << "Instruction::FAdd"; break;
+    case Instruction::Sub: Out << "Instruction::Sub"; break;
+    case Instruction::FSub: Out << "Instruction::FSub"; break;
+    case Instruction::Mul: Out << "Instruction::Mul"; break;
+    case Instruction::FMul: Out << "Instruction::FMul"; break;
+    case Instruction::UDiv:Out << "Instruction::UDiv"; break;
+    case Instruction::SDiv:Out << "Instruction::SDiv"; break;
+    case Instruction::FDiv:Out << "Instruction::FDiv"; break;
+    case Instruction::URem:Out << "Instruction::URem"; break;
+    case Instruction::SRem:Out << "Instruction::SRem"; break;
+    case Instruction::FRem:Out << "Instruction::FRem"; break;
+    case Instruction::And: Out << "Instruction::And"; break;
+    case Instruction::Or:  Out << "Instruction::Or";  break;
+    case Instruction::Xor: Out << "Instruction::Xor"; break;
+    case Instruction::Shl: Out << "Instruction::Shl"; break;
+    case Instruction::LShr:Out << "Instruction::LShr"; break;
+    case Instruction::AShr:Out << "Instruction::AShr"; break;
+    default: Out << "Instruction::BadOpCode"; break;
+    }
+    Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+    printEscapedString(I->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::FCmp: {
+    Out << "FCmpInst* " << iName << " = new FCmpInst(*" << bbname << ", ";
+    switch (cast<FCmpInst>(I)->getPredicate()) {
+    case FCmpInst::FCMP_FALSE: Out << "FCmpInst::FCMP_FALSE"; break;
+    case FCmpInst::FCMP_OEQ  : Out << "FCmpInst::FCMP_OEQ"; break;
+    case FCmpInst::FCMP_OGT  : Out << "FCmpInst::FCMP_OGT"; break;
+    case FCmpInst::FCMP_OGE  : Out << "FCmpInst::FCMP_OGE"; break;
+    case FCmpInst::FCMP_OLT  : Out << "FCmpInst::FCMP_OLT"; break;
+    case FCmpInst::FCMP_OLE  : Out << "FCmpInst::FCMP_OLE"; break;
+    case FCmpInst::FCMP_ONE  : Out << "FCmpInst::FCMP_ONE"; break;
+    case FCmpInst::FCMP_ORD  : Out << "FCmpInst::FCMP_ORD"; break;
+    case FCmpInst::FCMP_UNO  : Out << "FCmpInst::FCMP_UNO"; break;
+    case FCmpInst::FCMP_UEQ  : Out << "FCmpInst::FCMP_UEQ"; break;
+    case FCmpInst::FCMP_UGT  : Out << "FCmpInst::FCMP_UGT"; break;
+    case FCmpInst::FCMP_UGE  : Out << "FCmpInst::FCMP_UGE"; break;
+    case FCmpInst::FCMP_ULT  : Out << "FCmpInst::FCMP_ULT"; break;
+    case FCmpInst::FCMP_ULE  : Out << "FCmpInst::FCMP_ULE"; break;
+    case FCmpInst::FCMP_UNE  : Out << "FCmpInst::FCMP_UNE"; break;
+    case FCmpInst::FCMP_TRUE : Out << "FCmpInst::FCMP_TRUE"; break;
+    default: Out << "FCmpInst::BAD_ICMP_PREDICATE"; break;
     }
-    case Instruction::GetElementPtr: {
-      const GetElementPtrInst* gep = cast<GetElementPtrInst>(I);
-      if (gep->getNumOperands() <= 2) {
-        Out << "GetElementPtrInst* " << iName << " = GetElementPtrInst::Create("
-            << opNames[0];
-        if (gep->getNumOperands() == 2)
-          Out << ", " << opNames[1];
-      } else {
-        Out << "std::vector<Value*> " << iName << "_indices;";
-        nl(Out);
-        for (unsigned i = 1; i < gep->getNumOperands(); ++i ) {
-          Out << iName << "_indices.push_back("
-              << opNames[i] << ");";
-          nl(Out);
-        }
-        Out << "Instruction* " << iName << " = GetElementPtrInst::Create("
-            << opNames[0] << ", " << iName << "_indices.begin(), "
-            << iName << "_indices.end()";
-      }
-      Out << ", \"";
-      printEscapedString(gep->getName());
-      Out << "\", " << bbname << ");";
-      break;
+    Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+    printEscapedString(I->getName());
+    Out << "\");";
+    break;
+  }
+  case Instruction::ICmp: {
+    Out << "ICmpInst* " << iName << " = new ICmpInst(*" << bbname << ", ";
+    switch (cast<ICmpInst>(I)->getPredicate()) {
+    case ICmpInst::ICMP_EQ:  Out << "ICmpInst::ICMP_EQ";  break;
+    case ICmpInst::ICMP_NE:  Out << "ICmpInst::ICMP_NE";  break;
+    case ICmpInst::ICMP_ULE: Out << "ICmpInst::ICMP_ULE"; break;
+    case ICmpInst::ICMP_SLE: Out << "ICmpInst::ICMP_SLE"; break;
+    case ICmpInst::ICMP_UGE: Out << "ICmpInst::ICMP_UGE"; break;
+    case ICmpInst::ICMP_SGE: Out << "ICmpInst::ICMP_SGE"; break;
+    case ICmpInst::ICMP_ULT: Out << "ICmpInst::ICMP_ULT"; break;
+    case ICmpInst::ICMP_SLT: Out << "ICmpInst::ICMP_SLT"; break;
+    case ICmpInst::ICMP_UGT: Out << "ICmpInst::ICMP_UGT"; break;
+    case ICmpInst::ICMP_SGT: Out << "ICmpInst::ICMP_SGT"; break;
+    default: Out << "ICmpInst::BAD_ICMP_PREDICATE"; break;
     }
-    case Instruction::PHI: {
-      const PHINode* phi = cast<PHINode>(I);
-
-      Out << "PHINode* " << iName << " = PHINode::Create("
-          << getCppName(phi->getType()) << ", \"";
-      printEscapedString(phi->getName());
-      Out << "\", " << bbname << ");";
-      nl(Out) << iName << "->reserveOperandSpace("
-        << phi->getNumIncomingValues()
-          << ");";
+    Out << ", " << opNames[0] << ", " << opNames[1] << ", \"";
+    printEscapedString(I->getName());
+    Out << "\");";
+    break;
+  }
+  case Instruction::Alloca: {
+    const AllocaInst* allocaI = cast<AllocaInst>(I);
+    Out << "AllocaInst* " << iName << " = new AllocaInst("
+        << getCppName(allocaI->getAllocatedType()) << ", ";
+    if (allocaI->isArrayAllocation())
+      Out << opNames[0] << ", ";
+    Out << "\"";
+    printEscapedString(allocaI->getName());
+    Out << "\", " << bbname << ");";
+    if (allocaI->getAlignment())
+      nl(Out) << iName << "->setAlignment("
+          << allocaI->getAlignment() << ");";
+    break;
+  }
+  case Instruction::Load: {
+    const LoadInst* load = cast<LoadInst>(I);
+    Out << "LoadInst* " << iName << " = new LoadInst("
+        << opNames[0] << ", \"";
+    printEscapedString(load->getName());
+    Out << "\", " << (load->isVolatile() ? "true" : "false" )
+        << ", " << bbname << ");";
+    break;
+  }
+  case Instruction::Store: {
+    const StoreInst* store = cast<StoreInst>(I);
+    Out << " new StoreInst("
+        << opNames[0] << ", "
+        << opNames[1] << ", "
+        << (store->isVolatile() ? "true" : "false")
+        << ", " << bbname << ");";
+    break;
+  }
+  case Instruction::GetElementPtr: {
+    const GetElementPtrInst* gep = cast<GetElementPtrInst>(I);
+    if (gep->getNumOperands() <= 2) {
+      Out << "GetElementPtrInst* " << iName << " = GetElementPtrInst::Create("
+          << opNames[0];
+      if (gep->getNumOperands() == 2)
+        Out << ", " << opNames[1];
+    } else {
+      Out << "std::vector<Value*> " << iName << "_indices;";
       nl(Out);
-      for (unsigned i = 0; i < phi->getNumOperands(); i+=2) {
-        Out << iName << "->addIncoming("
-            << opNames[i] << ", " << opNames[i+1] << ");";
-        nl(Out);
-      }
-      break;
-    }
-    case Instruction::Trunc:
-    case Instruction::ZExt:
-    case Instruction::SExt:
-    case Instruction::FPTrunc:
-    case Instruction::FPExt:
-    case Instruction::FPToUI:
-    case Instruction::FPToSI:
-    case Instruction::UIToFP:
-    case Instruction::SIToFP:
-    case Instruction::PtrToInt:
-    case Instruction::IntToPtr:
-    case Instruction::BitCast: {
-      const CastInst* cst = cast<CastInst>(I);
-      Out << "CastInst* " << iName << " = new ";
-      switch (I->getOpcode()) {
-      case Instruction::Trunc:    Out << "TruncInst"; break;
-      case Instruction::ZExt:     Out << "ZExtInst"; break;
-      case Instruction::SExt:     Out << "SExtInst"; break;
-      case Instruction::FPTrunc:  Out << "FPTruncInst"; break;
-      case Instruction::FPExt:    Out << "FPExtInst"; break;
-      case Instruction::FPToUI:   Out << "FPToUIInst"; break;
-      case Instruction::FPToSI:   Out << "FPToSIInst"; break;
-      case Instruction::UIToFP:   Out << "UIToFPInst"; break;
-      case Instruction::SIToFP:   Out << "SIToFPInst"; break;
-      case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
-      case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
-      case Instruction::BitCast:  Out << "BitCastInst"; break;
-      default: assert(!"Unreachable"); break;
-      }
-      Out << "(" << opNames[0] << ", "
-          << getCppName(cst->getType()) << ", \"";
-      printEscapedString(cst->getName());
-      Out << "\", " << bbname << ");";
-      break;
-    }
-    case Instruction::Call:{
-      const CallInst* call = cast<CallInst>(I);
-      if (const InlineAsm* ila = dyn_cast<InlineAsm>(call->getCalledValue())) {
-        Out << "InlineAsm* " << getCppName(ila) << " = InlineAsm::get("
-            << getCppName(ila->getFunctionType()) << ", \""
-            << ila->getAsmString() << "\", \""
-            << ila->getConstraintString() << "\","
-            << (ila->hasSideEffects() ? "true" : "false") << ");";
-        nl(Out);
-      }
-      if (call->getNumOperands() > 2) {
-        Out << "std::vector<Value*> " << iName << "_params;";
+      for (unsigned i = 1; i < gep->getNumOperands(); ++i ) {
+        Out << iName << "_indices.push_back("
+            << opNames[i] << ");";
         nl(Out);
-        for (unsigned i = 1; i < call->getNumOperands(); ++i) {
-          Out << iName << "_params.push_back(" << opNames[i] << ");";
-          nl(Out);
-        }
-        Out << "CallInst* " << iName << " = CallInst::Create("
-            << opNames[0] << ", " << iName << "_params.begin(), "
-            << iName << "_params.end(), \"";
-      } else if (call->getNumOperands() == 2) {
-        Out << "CallInst* " << iName << " = CallInst::Create("
-            << opNames[0] << ", " << opNames[1] << ", \"";
-      } else {
-        Out << "CallInst* " << iName << " = CallInst::Create(" << opNames[0]
-            << ", \"";
       }
-      printEscapedString(call->getName());
-      Out << "\", " << bbname << ");";
-      nl(Out) << iName << "->setCallingConv(";
-      printCallingConv(call->getCallingConv());
-      Out << ");";
-      nl(Out) << iName << "->setTailCall("
-          << (call->isTailCall() ? "true":"false");
-      Out << ");";
-      printAttributes(call->getAttributes(), iName);
-      Out << iName << "->setAttributes(" << iName << "_PAL);";
+      Out << "Instruction* " << iName << " = GetElementPtrInst::Create("
+          << opNames[0] << ", " << iName << "_indices.begin(), "
+          << iName << "_indices.end()";
+    }
+    Out << ", \"";
+    printEscapedString(gep->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::PHI: {
+    const PHINode* phi = cast<PHINode>(I);
+
+    Out << "PHINode* " << iName << " = PHINode::Create("
+        << getCppName(phi->getType()) << ", \"";
+    printEscapedString(phi->getName());
+    Out << "\", " << bbname << ");";
+    nl(Out) << iName << "->reserveOperandSpace("
+      << phi->getNumIncomingValues()
+        << ");";
+    nl(Out);
+    for (unsigned i = 0; i < phi->getNumOperands(); i+=2) {
+      Out << iName << "->addIncoming("
+          << opNames[i] << ", " << opNames[i+1] << ");";
+      nl(Out);
+    }
+    break;
+  }
+  case Instruction::Trunc:
+  case Instruction::ZExt:
+  case Instruction::SExt:
+  case Instruction::FPTrunc:
+  case Instruction::FPExt:
+  case Instruction::FPToUI:
+  case Instruction::FPToSI:
+  case Instruction::UIToFP:
+  case Instruction::SIToFP:
+  case Instruction::PtrToInt:
+  case Instruction::IntToPtr:
+  case Instruction::BitCast: {
+    const CastInst* cst = cast<CastInst>(I);
+    Out << "CastInst* " << iName << " = new ";
+    switch (I->getOpcode()) {
+    case Instruction::Trunc:    Out << "TruncInst"; break;
+    case Instruction::ZExt:     Out << "ZExtInst"; break;
+    case Instruction::SExt:     Out << "SExtInst"; break;
+    case Instruction::FPTrunc:  Out << "FPTruncInst"; break;
+    case Instruction::FPExt:    Out << "FPExtInst"; break;
+    case Instruction::FPToUI:   Out << "FPToUIInst"; break;
+    case Instruction::FPToSI:   Out << "FPToSIInst"; break;
+    case Instruction::UIToFP:   Out << "UIToFPInst"; break;
+    case Instruction::SIToFP:   Out << "SIToFPInst"; break;
+    case Instruction::PtrToInt: Out << "PtrToIntInst"; break;
+    case Instruction::IntToPtr: Out << "IntToPtrInst"; break;
+    case Instruction::BitCast:  Out << "BitCastInst"; break;
+    default: assert(!"Unreachable"); break;
+    }
+    Out << "(" << opNames[0] << ", "
+        << getCppName(cst->getType()) << ", \"";
+    printEscapedString(cst->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::Call: {
+    const CallInst* call = cast<CallInst>(I);
+    if (const InlineAsm* ila = dyn_cast<InlineAsm>(call->getCalledValue())) {
+      Out << "InlineAsm* " << getCppName(ila) << " = InlineAsm::get("
+          << getCppName(ila->getFunctionType()) << ", \""
+          << ila->getAsmString() << "\", \""
+          << ila->getConstraintString() << "\","
+          << (ila->hasSideEffects() ? "true" : "false") << ");";
       nl(Out);
-      break;
-    }
-    case Instruction::Select: {
-      const SelectInst* sel = cast<SelectInst>(I);
-      Out << "SelectInst* " << getCppName(sel) << " = SelectInst::Create(";
-      Out << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", \"";
-      printEscapedString(sel->getName());
-      Out << "\", " << bbname << ");";
-      break;
-    }
-    case Instruction::UserOp1:
-      /// FALL THROUGH
-    case Instruction::UserOp2: {
-      /// FIXME: What should be done here?
-      break;
-    }
-    case Instruction::VAArg: {
-      const VAArgInst* va = cast<VAArgInst>(I);
-      Out << "VAArgInst* " << getCppName(va) << " = new VAArgInst("
-          << opNames[0] << ", " << getCppName(va->getType()) << ", \"";
-      printEscapedString(va->getName());
-      Out << "\", " << bbname << ");";
-      break;
-    }
-    case Instruction::ExtractElement: {
-      const ExtractElementInst* eei = cast<ExtractElementInst>(I);
-      Out << "ExtractElementInst* " << getCppName(eei)
-          << " = new ExtractElementInst(" << opNames[0]
-          << ", " << opNames[1] << ", \"";
-      printEscapedString(eei->getName());
-      Out << "\", " << bbname << ");";
-      break;
-    }
-    case Instruction::InsertElement: {
-      const InsertElementInst* iei = cast<InsertElementInst>(I);
-      Out << "InsertElementInst* " << getCppName(iei)
-          << " = InsertElementInst::Create(" << opNames[0]
-          << ", " << opNames[1] << ", " << opNames[2] << ", \"";
-      printEscapedString(iei->getName());
-      Out << "\", " << bbname << ");";
-      break;
     }
-    case Instruction::ShuffleVector: {
-      const ShuffleVectorInst* svi = cast<ShuffleVectorInst>(I);
-      Out << "ShuffleVectorInst* " << getCppName(svi)
-          << " = new ShuffleVectorInst(" << opNames[0]
-          << ", " << opNames[1] << ", " << opNames[2] << ", \"";
-      printEscapedString(svi->getName());
-      Out << "\", " << bbname << ");";
-      break;
-    }
-    case Instruction::ExtractValue: {
-      const ExtractValueInst *evi = cast<ExtractValueInst>(I);
-      Out << "std::vector<unsigned> " << iName << "_indices;";
+    if (call->getNumArgOperands() > 1) {
+      Out << "std::vector<Value*> " << iName << "_params;";
       nl(Out);
-      for (unsigned i = 0; i < evi->getNumIndices(); ++i) {
-        Out << iName << "_indices.push_back("
-            << evi->idx_begin()[i] << ");";
+      for (unsigned i = 1; i < call->getNumOperands(); ++i) {
+        Out << iName << "_params.push_back(" << opNames[i] << ");";
         nl(Out);
       }
-      Out << "ExtractValueInst* " << getCppName(evi)
-          << " = ExtractValueInst::Create(" << opNames[0]
-          << ", "
-          << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
-      printEscapedString(evi->getName());
-      Out << "\", " << bbname << ");";
-      break;
+      Out << "CallInst* " << iName << " = CallInst::Create("
+          << opNames[0] << ", " << iName << "_params.begin(), "
+          << iName << "_params.end(), \"";
+    } else if (call->getNumArgOperands() == 1) {
+      Out << "CallInst* " << iName << " = CallInst::Create("
+          << opNames[0] << ", " << opNames[1] << ", \"";
+    } else {
+      Out << "CallInst* " << iName << " = CallInst::Create(" << opNames[0]
+          << ", \"";
     }
-    case Instruction::InsertValue: {
-      const InsertValueInst *ivi = cast<InsertValueInst>(I);
-      Out << "std::vector<unsigned> " << iName << "_indices;";
+    printEscapedString(call->getName());
+    Out << "\", " << bbname << ");";
+    nl(Out) << iName << "->setCallingConv(";
+    printCallingConv(call->getCallingConv());
+    Out << ");";
+    nl(Out) << iName << "->setTailCall("
+        << (call->isTailCall() ? "true" : "false");
+    Out << ");";
+    printAttributes(call->getAttributes(), iName);
+    Out << iName << "->setAttributes(" << iName << "_PAL);";
+    nl(Out);
+    break;
+  }
+  case Instruction::Select: {
+    const SelectInst* sel = cast<SelectInst>(I);
+    Out << "SelectInst* " << getCppName(sel) << " = SelectInst::Create(";
+    Out << opNames[0] << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+    printEscapedString(sel->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::UserOp1:
+    /// FALL THROUGH
+  case Instruction::UserOp2: {
+    /// FIXME: What should be done here?
+    break;
+  }
+  case Instruction::VAArg: {
+    const VAArgInst* va = cast<VAArgInst>(I);
+    Out << "VAArgInst* " << getCppName(va) << " = new VAArgInst("
+        << opNames[0] << ", " << getCppName(va->getType()) << ", \"";
+    printEscapedString(va->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::ExtractElement: {
+    const ExtractElementInst* eei = cast<ExtractElementInst>(I);
+    Out << "ExtractElementInst* " << getCppName(eei)
+        << " = new ExtractElementInst(" << opNames[0]
+        << ", " << opNames[1] << ", \"";
+    printEscapedString(eei->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::InsertElement: {
+    const InsertElementInst* iei = cast<InsertElementInst>(I);
+    Out << "InsertElementInst* " << getCppName(iei)
+        << " = InsertElementInst::Create(" << opNames[0]
+        << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+    printEscapedString(iei->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::ShuffleVector: {
+    const ShuffleVectorInst* svi = cast<ShuffleVectorInst>(I);
+    Out << "ShuffleVectorInst* " << getCppName(svi)
+        << " = new ShuffleVectorInst(" << opNames[0]
+        << ", " << opNames[1] << ", " << opNames[2] << ", \"";
+    printEscapedString(svi->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::ExtractValue: {
+    const ExtractValueInst *evi = cast<ExtractValueInst>(I);
+    Out << "std::vector<unsigned> " << iName << "_indices;";
+    nl(Out);
+    for (unsigned i = 0; i < evi->getNumIndices(); ++i) {
+      Out << iName << "_indices.push_back("
+          << evi->idx_begin()[i] << ");";
+      nl(Out);
+    }
+    Out << "ExtractValueInst* " << getCppName(evi)
+        << " = ExtractValueInst::Create(" << opNames[0]
+        << ", "
+        << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
+    printEscapedString(evi->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
+  case Instruction::InsertValue: {
+    const InsertValueInst *ivi = cast<InsertValueInst>(I);
+    Out << "std::vector<unsigned> " << iName << "_indices;";
+    nl(Out);
+    for (unsigned i = 0; i < ivi->getNumIndices(); ++i) {
+      Out << iName << "_indices.push_back("
+          << ivi->idx_begin()[i] << ");";
       nl(Out);
-      for (unsigned i = 0; i < ivi->getNumIndices(); ++i) {
-        Out << iName << "_indices.push_back("
-            << ivi->idx_begin()[i] << ");";
-        nl(Out);
-      }
-      Out << "InsertValueInst* " << getCppName(ivi)
-          << " = InsertValueInst::Create(" << opNames[0]
-          << ", " << opNames[1] << ", "
-          << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
-      printEscapedString(ivi->getName());
-      Out << "\", " << bbname << ");";
-      break;
     }
+    Out << "InsertValueInst* " << getCppName(ivi)
+        << " = InsertValueInst::Create(" << opNames[0]
+        << ", " << opNames[1] << ", "
+        << iName << "_indices.begin(), " << iName << "_indices.end(), \"";
+    printEscapedString(ivi->getName());
+    Out << "\", " << bbname << ");";
+    break;
+  }
   }
   DefinedValues.insert(I);
   nl(Out);
   delete [] opNames;
 }
 
-  // Print out the types, constants and declarations needed by one function
-  void CppWriter::printFunctionUses(const Function* F) {
-    nl(Out) << "// Type Definitions"; nl(Out);
-    if (!is_inline) {
-      // Print the function's return type
-      printType(F->getReturnType());
-
-      // Print the function's function type
-      printType(F->getFunctionType());
-
-      // Print the types of each of the function's arguments
-      for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
-           AI != AE; ++AI) {
-        printType(AI->getType());
-      }
-    }
+// Print out the types, constants and declarations needed by one function
+void CppWriter::printFunctionUses(const Function* F) {
+  nl(Out) << "// Type Definitions"; nl(Out);
+  if (!is_inline) {
+    // Print the function's return type
+    printType(F->getReturnType());
 
-    // Print type definitions for every type referenced by an instruction and
-    // make a note of any global values or constants that are referenced
-    SmallPtrSet<GlobalValue*,64> gvs;
-    SmallPtrSet<Constant*,64> consts;
-    for (Function::const_iterator BB = F->begin(), BE = F->end();
-         BB != BE; ++BB){
-      for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
-           I != E; ++I) {
-        // Print the type of the instruction itself
-        printType(I->getType());
+    // Print the function's function type
+    printType(F->getFunctionType());
 
-        // Print the type of each of the instruction's operands
-        for (unsigned i = 0; i < I->getNumOperands(); ++i) {
-          Value* operand = I->getOperand(i);
-          printType(operand->getType());
-
-          // If the operand references a GVal or Constant, make a note of it
-          if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
-            gvs.insert(GV);
-            if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
-              if (GVar->hasInitializer())
-                consts.insert(GVar->getInitializer());
-          } else if (Constant* C = dyn_cast<Constant>(operand))
-            consts.insert(C);
-        }
-      }
+    // Print the types of each of the function's arguments
+    for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+         AI != AE; ++AI) {
+      printType(AI->getType());
     }
+  }
 
-    // Print the function declarations for any functions encountered
-    nl(Out) << "// Function Declarations"; nl(Out);
-    for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+  // Print type definitions for every type referenced by an instruction and
+  // make a note of any global values or constants that are referenced
+  SmallPtrSet<GlobalValue*,64> gvs;
+  SmallPtrSet<Constant*,64> consts;
+  for (Function::const_iterator BB = F->begin(), BE = F->end();
+       BB != BE; ++BB){
+    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
          I != E; ++I) {
-      if (Function* Fun = dyn_cast<Function>(*I)) {
-        if (!is_inline || Fun != F)
-          printFunctionHead(Fun);
+      // Print the type of the instruction itself
+      printType(I->getType());
+
+      // Print the type of each of the instruction's operands
+      for (unsigned i = 0; i < I->getNumOperands(); ++i) {
+        Value* operand = I->getOperand(i);
+        printType(operand->getType());
+
+        // If the operand references a GVal or Constant, make a note of it
+        if (GlobalValue* GV = dyn_cast<GlobalValue>(operand)) {
+          gvs.insert(GV);
+          if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+            if (GVar->hasInitializer())
+              consts.insert(GVar->getInitializer());
+        } else if (Constant* C = dyn_cast<Constant>(operand))
+          consts.insert(C);
       }
     }
+  }
 
-    // Print the global variable declarations for any variables encountered
-    nl(Out) << "// Global Variable Declarations"; nl(Out);
-    for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
-         I != E; ++I) {
-      if (GlobalVariable* F = dyn_cast<GlobalVariable>(*I))
-        printVariableHead(F);
+  // Print the function declarations for any functions encountered
+  nl(Out) << "// Function Declarations"; nl(Out);
+  for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+       I != E; ++I) {
+    if (Function* Fun = dyn_cast<Function>(*I)) {
+      if (!is_inline || Fun != F)
+        printFunctionHead(Fun);
     }
+  }
 
-  // Print the constants found
-    nl(Out) << "// Constant Definitions"; nl(Out);
-    for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
-           E = consts.end(); I != E; ++I) {
-      printConstant(*I);
-    }
-
-    // Process the global variables definitions now that all the constants have
-    // been emitted. These definitions just couple the gvars with their constant
-    // initializers.
-    nl(Out) << "// Global Variable Definitions"; nl(Out);
-    for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
-         I != E; ++I) {
-      if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
-        printVariableBody(GV);
-    }
+  // Print the global variable declarations for any variables encountered
+  nl(Out) << "// Global Variable Declarations"; nl(Out);
+  for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+       I != E; ++I) {
+    if (GlobalVariable* F = dyn_cast<GlobalVariable>(*I))
+      printVariableHead(F);
+  }
+
+  // Print the constants found
+  nl(Out) << "// Constant Definitions"; nl(Out);
+  for (SmallPtrSet<Constant*,64>::iterator I = consts.begin(),
+         E = consts.end(); I != E; ++I) {
+    printConstant(*I);
+  }
+
+  // Process the global variables definitions now that all the constants have
+  // been emitted. These definitions just couple the gvars with their constant
+  // initializers.
+  nl(Out) << "// Global Variable Definitions"; nl(Out);
+  for (SmallPtrSet<GlobalValue*,64>::iterator I = gvs.begin(), E = gvs.end();
+       I != E; ++I) {
+    if (GlobalVariable* GV = dyn_cast<GlobalVariable>(*I))
+      printVariableBody(GV);
   }
+}
 
-  void CppWriter::printFunctionHead(const Function* F) {
-    nl(Out) << "Function* " << getCppName(F);
-    if (is_inline) {
-      Out << " = mod->getFunction(\"";
-      printEscapedString(F->getName());
-      Out << "\", " << getCppName(F->getFunctionType()) << ");";
-      nl(Out) << "if (!" << getCppName(F) << ") {";
-      nl(Out) << getCppName(F);
-    }
-    Out<< " = Function::Create(";
-    nl(Out,1) << "/*Type=*/" << getCppName(F->getFunctionType()) << ",";
-    nl(Out) << "/*Linkage=*/";
-    printLinkageType(F->getLinkage());
-    Out << ",";
-    nl(Out) << "/*Name=*/\"";
+void CppWriter::printFunctionHead(const Function* F) {
+  nl(Out) << "Function* " << getCppName(F);
+  if (is_inline) {
+    Out << " = mod->getFunction(\"";
     printEscapedString(F->getName());
-    Out << "\", mod); " << (F->isDeclaration()? "// (external, no body)" : "");
-    nl(Out,-1);
+    Out << "\", " << getCppName(F->getFunctionType()) << ");";
+    nl(Out) << "if (!" << getCppName(F) << ") {";
+    nl(Out) << getCppName(F);
+  }
+  Out<< " = Function::Create(";
+  nl(Out,1) << "/*Type=*/" << getCppName(F->getFunctionType()) << ",";
+  nl(Out) << "/*Linkage=*/";
+  printLinkageType(F->getLinkage());
+  Out << ",";
+  nl(Out) << "/*Name=*/\"";
+  printEscapedString(F->getName());
+  Out << "\", mod); " << (F->isDeclaration()? "// (external, no body)" : "");
+  nl(Out,-1);
+  printCppName(F);
+  Out << "->setCallingConv(";
+  printCallingConv(F->getCallingConv());
+  Out << ");";
+  nl(Out);
+  if (F->hasSection()) {
+    printCppName(F);
+    Out << "->setSection(\"" << F->getSection() << "\");";
+    nl(Out);
+  }
+  if (F->getAlignment()) {
+    printCppName(F);
+    Out << "->setAlignment(" << F->getAlignment() << ");";
+    nl(Out);
+  }
+  if (F->getVisibility() != GlobalValue::DefaultVisibility) {
     printCppName(F);
-    Out << "->setCallingConv(";
-    printCallingConv(F->getCallingConv());
+    Out << "->setVisibility(";
+    printVisibilityType(F->getVisibility());
     Out << ");";
     nl(Out);
-    if (F->hasSection()) {
-      printCppName(F);
-      Out << "->setSection(\"" << F->getSection() << "\");";
-      nl(Out);
-    }
-    if (F->getAlignment()) {
-      printCppName(F);
-      Out << "->setAlignment(" << F->getAlignment() << ");";
-      nl(Out);
-    }
-    if (F->getVisibility() != GlobalValue::DefaultVisibility) {
-      printCppName(F);
-      Out << "->setVisibility(";
-      printVisibilityType(F->getVisibility());
-      Out << ");";
-      nl(Out);
-    }
-    if (F->hasGC()) {
-      printCppName(F);
-      Out << "->setGC(\"" << F->getGC() << "\");";
-      nl(Out);
-    }
-    if (is_inline) {
-      Out << "}";
-      nl(Out);
-    }
-    printAttributes(F->getAttributes(), getCppName(F));
+  }
+  if (F->hasGC()) {
     printCppName(F);
-    Out << "->setAttributes(" << getCppName(F) << "_PAL);";
+    Out << "->setGC(\"" << F->getGC() << "\");";
     nl(Out);
   }
+  if (is_inline) {
+    Out << "}";
+    nl(Out);
+  }
+  printAttributes(F->getAttributes(), getCppName(F));
+  printCppName(F);
+  Out << "->setAttributes(" << getCppName(F) << "_PAL);";
+  nl(Out);
+}
 
-  void CppWriter::printFunctionBody(const Function *F) {
-    if (F->isDeclaration())
-      return; // external functions have no bodies.
-
-    // Clear the DefinedValues and ForwardRefs maps because we can't have
-    // cross-function forward refs
-    ForwardRefs.clear();
-    DefinedValues.clear();
-
-    // Create all the argument values
-    if (!is_inline) {
-      if (!F->arg_empty()) {
-        Out << "Function::arg_iterator args = " << getCppName(F)
-            << "->arg_begin();";
-        nl(Out);
-      }
-      for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
-           AI != AE; ++AI) {
-        Out << "Value* " << getCppName(AI) << " = args++;";
+void CppWriter::printFunctionBody(const Function *F) {
+  if (F->isDeclaration())
+    return; // external functions have no bodies.
+
+  // Clear the DefinedValues and ForwardRefs maps because we can't have
+  // cross-function forward refs
+  ForwardRefs.clear();
+  DefinedValues.clear();
+
+  // Create all the argument values
+  if (!is_inline) {
+    if (!F->arg_empty()) {
+      Out << "Function::arg_iterator args = " << getCppName(F)
+          << "->arg_begin();";
+      nl(Out);
+    }
+    for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+         AI != AE; ++AI) {
+      Out << "Value* " << getCppName(AI) << " = args++;";
+      nl(Out);
+      if (AI->hasName()) {
+        Out << getCppName(AI) << "->setName(\"" << AI->getName() << "\");";
         nl(Out);
-        if (AI->hasName()) {
-          Out << getCppName(AI) << "->setName(\"" << AI->getName() << "\");";
-          nl(Out);
-        }
       }
     }
+  }
 
-    // Create all the basic blocks
+  // Create all the basic blocks
+  nl(Out);
+  for (Function::const_iterator BI = F->begin(), BE = F->end();
+       BI != BE; ++BI) {
+    std::string bbname(getCppName(BI));
+    Out << "BasicBlock* " << bbname <<
+           " = BasicBlock::Create(mod->getContext(), \"";
+    if (BI->hasName())
+      printEscapedString(BI->getName());
+    Out << "\"," << getCppName(BI->getParent()) << ",0);";
     nl(Out);
-    for (Function::const_iterator BI = F->begin(), BE = F->end();
-         BI != BE; ++BI) {
-      std::string bbname(getCppName(BI));
-      Out << "BasicBlock* " << bbname <<
-             " = BasicBlock::Create(mod->getContext(), \"";
-      if (BI->hasName())
-        printEscapedString(BI->getName());
-      Out << "\"," << getCppName(BI->getParent()) << ",0);";
-      nl(Out);
-    }
-
-    // Output all of its basic blocks... for the function
-    for (Function::const_iterator BI = F->begin(), BE = F->end();
-         BI != BE; ++BI) {
-      std::string bbname(getCppName(BI));
-      nl(Out) << "// Block " << BI->getName() << " (" << bbname << ")";
-      nl(Out);
-
-      // Output all of the instructions in the basic block...
-      for (BasicBlock::const_iterator I = BI->begin(), E = BI->end();
-           I != E; ++I) {
-        printInstruction(I,bbname);
-      }
-    }
+  }
 
-    // Loop over the ForwardRefs and resolve them now that all instructions
-    // are generated.
-    if (!ForwardRefs.empty()) {
-      nl(Out) << "// Resolve Forward References";
-      nl(Out);
-    }
+  // Output all of its basic blocks... for the function
+  for (Function::const_iterator BI = F->begin(), BE = F->end();
+       BI != BE; ++BI) {
+    std::string bbname(getCppName(BI));
+    nl(Out) << "// Block " << BI->getName() << " (" << bbname << ")";
+    nl(Out);
 
-    while (!ForwardRefs.empty()) {
-      ForwardRefMap::iterator I = ForwardRefs.begin();
-      Out << I->second << "->replaceAllUsesWith("
-          << getCppName(I->first) << "); delete " << I->second << ";";
-      nl(Out);
-      ForwardRefs.erase(I);
+    // Output all of the instructions in the basic block...
+    for (BasicBlock::const_iterator I = BI->begin(), E = BI->end();
+         I != E; ++I) {
+      printInstruction(I,bbname);
     }
   }
 
-  void CppWriter::printInline(const std::string& fname,
-                              const std::string& func) {
-    const Function* F = TheModule->getFunction(func);
-    if (!F) {
-      error(std::string("Function '") + func + "' not found in input module");
-      return;
-    }
-    if (F->isDeclaration()) {
-      error(std::string("Function '") + func + "' is external!");
-      return;
-    }
-    nl(Out) << "BasicBlock* " << fname << "(Module* mod, Function *"
-            << getCppName(F);
-    unsigned arg_count = 1;
-    for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
-         AI != AE; ++AI) {
-      Out << ", Value* arg_" << arg_count;
-    }
-    Out << ") {";
+  // Loop over the ForwardRefs and resolve them now that all instructions
+  // are generated.
+  if (!ForwardRefs.empty()) {
+    nl(Out) << "// Resolve Forward References";
     nl(Out);
-    is_inline = true;
-    printFunctionUses(F);
-    printFunctionBody(F);
-    is_inline = false;
-    Out << "return " << getCppName(F->begin()) << ";";
-    nl(Out) << "}";
+  }
+
+  while (!ForwardRefs.empty()) {
+    ForwardRefMap::iterator I = ForwardRefs.begin();
+    Out << I->second << "->replaceAllUsesWith("
+        << getCppName(I->first) << "); delete " << I->second << ";";
     nl(Out);
+    ForwardRefs.erase(I);
   }
+}
 
-  void CppWriter::printModuleBody() {
-    // Print out all the type definitions
-    nl(Out) << "// Type Definitions"; nl(Out);
-    printTypes(TheModule);
-
-    // Functions can call each other and global variables can reference them so
-    // define all the functions first before emitting their function bodies.
-    nl(Out) << "// Function Declarations"; nl(Out);
-    for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
-         I != E; ++I)
-      printFunctionHead(I);
-
-    // Process the global variables declarations. We can't initialze them until
-    // after the constants are printed so just print a header for each global
-    nl(Out) << "// Global Variable Declarations\n"; nl(Out);
-    for (Module::const_global_iterator I = TheModule->global_begin(),
-           E = TheModule->global_end(); I != E; ++I) {
-      printVariableHead(I);
-    }
-
-    // Print out all the constants definitions. Constants don't recurse except
-    // through GlobalValues. All GlobalValues have been declared at this point
-    // so we can proceed to generate the constants.
-    nl(Out) << "// Constant Definitions"; nl(Out);
-    printConstants(TheModule);
-
-    // Process the global variables definitions now that all the constants have
-    // been emitted. These definitions just couple the gvars with their constant
-    // initializers.
-    nl(Out) << "// Global Variable Definitions"; nl(Out);
-    for (Module::const_global_iterator I = TheModule->global_begin(),
-           E = TheModule->global_end(); I != E; ++I) {
-      printVariableBody(I);
-    }
-
-    // Finally, we can safely put out all of the function bodies.
-    nl(Out) << "// Function Definitions"; nl(Out);
-    for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
-         I != E; ++I) {
-      if (!I->isDeclaration()) {
-        nl(Out) << "// Function: " << I->getName() << " (" << getCppName(I)
-                << ")";
-        nl(Out) << "{";
-        nl(Out,1);
-        printFunctionBody(I);
-        nl(Out,-1) << "}";
-        nl(Out);
-      }
+void CppWriter::printInline(const std::string& fname,
+                            const std::string& func) {
+  const Function* F = TheModule->getFunction(func);
+  if (!F) {
+    error(std::string("Function '") + func + "' not found in input module");
+    return;
+  }
+  if (F->isDeclaration()) {
+    error(std::string("Function '") + func + "' is external!");
+    return;
+  }
+  nl(Out) << "BasicBlock* " << fname << "(Module* mod, Function *"
+          << getCppName(F);
+  unsigned arg_count = 1;
+  for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
+       AI != AE; ++AI) {
+    Out << ", Value* arg_" << arg_count;
+  }
+  Out << ") {";
+  nl(Out);
+  is_inline = true;
+  printFunctionUses(F);
+  printFunctionBody(F);
+  is_inline = false;
+  Out << "return " << getCppName(F->begin()) << ";";
+  nl(Out) << "}";
+  nl(Out);
+}
+
+void CppWriter::printModuleBody() {
+  // Print out all the type definitions
+  nl(Out) << "// Type Definitions"; nl(Out);
+  printTypes(TheModule);
+
+  // Functions can call each other and global variables can reference them so
+  // define all the functions first before emitting their function bodies.
+  nl(Out) << "// Function Declarations"; nl(Out);
+  for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
+       I != E; ++I)
+    printFunctionHead(I);
+
+  // Process the global variables declarations. We can't initialize them until
+  // after the constants are printed so just print a header for each global
+  nl(Out) << "// Global Variable Declarations\n"; nl(Out);
+  for (Module::const_global_iterator I = TheModule->global_begin(),
+         E = TheModule->global_end(); I != E; ++I) {
+    printVariableHead(I);
+  }
+
+  // Print out all the constants definitions. Constants don't recurse except
+  // through GlobalValues. All GlobalValues have been declared at this point
+  // so we can proceed to generate the constants.
+  nl(Out) << "// Constant Definitions"; nl(Out);
+  printConstants(TheModule);
+
+  // Process the global variables definitions now that all the constants have
+  // been emitted. These definitions just couple the gvars with their constant
+  // initializers.
+  nl(Out) << "// Global Variable Definitions"; nl(Out);
+  for (Module::const_global_iterator I = TheModule->global_begin(),
+         E = TheModule->global_end(); I != E; ++I) {
+    printVariableBody(I);
+  }
+
+  // Finally, we can safely put out all of the function bodies.
+  nl(Out) << "// Function Definitions"; nl(Out);
+  for (Module::const_iterator I = TheModule->begin(), E = TheModule->end();
+       I != E; ++I) {
+    if (!I->isDeclaration()) {
+      nl(Out) << "// Function: " << I->getName() << " (" << getCppName(I)
+              << ")";
+      nl(Out) << "{";
+      nl(Out,1);
+      printFunctionBody(I);
+      nl(Out,-1) << "}";
+      nl(Out);
     }
   }
+}
 
-  void CppWriter::printProgram(const std::string& fname,
-                               const std::string& mName) {
-    Out << "#include <llvm/LLVMContext.h>\n";
-    Out << "#include <llvm/Module.h>\n";
-    Out << "#include <llvm/DerivedTypes.h>\n";
-    Out << "#include <llvm/Constants.h>\n";
-    Out << "#include <llvm/GlobalVariable.h>\n";
-    Out << "#include <llvm/Function.h>\n";
-    Out << "#include <llvm/CallingConv.h>\n";
-    Out << "#include <llvm/BasicBlock.h>\n";
-    Out << "#include <llvm/Instructions.h>\n";
-    Out << "#include <llvm/InlineAsm.h>\n";
-    Out << "#include <llvm/Support/FormattedStream.h>\n";
-    Out << "#include <llvm/Support/MathExtras.h>\n";
-    Out << "#include <llvm/Pass.h>\n";
-    Out << "#include <llvm/PassManager.h>\n";
-    Out << "#include <llvm/ADT/SmallVector.h>\n";
-    Out << "#include <llvm/Analysis/Verifier.h>\n";
-    Out << "#include <llvm/Assembly/PrintModulePass.h>\n";
-    Out << "#include <algorithm>\n";
-    Out << "using namespace llvm;\n\n";
-    Out << "Module* " << fname << "();\n\n";
-    Out << "int main(int argc, char**argv) {\n";
-    Out << "  Module* Mod = " << fname << "();\n";
-    Out << "  verifyModule(*Mod, PrintMessageAction);\n";
-    Out << "  PassManager PM;\n";
-    Out << "  PM.add(createPrintModulePass(&outs()));\n";
-    Out << "  PM.run(*Mod);\n";
-    Out << "  return 0;\n";
-    Out << "}\n\n";
-    printModule(fname,mName);
+void CppWriter::printProgram(const std::string& fname,
+                             const std::string& mName) {
+  Out << "#include <llvm/LLVMContext.h>\n";
+  Out << "#include <llvm/Module.h>\n";
+  Out << "#include <llvm/DerivedTypes.h>\n";
+  Out << "#include <llvm/Constants.h>\n";
+  Out << "#include <llvm/GlobalVariable.h>\n";
+  Out << "#include <llvm/Function.h>\n";
+  Out << "#include <llvm/CallingConv.h>\n";
+  Out << "#include <llvm/BasicBlock.h>\n";
+  Out << "#include <llvm/Instructions.h>\n";
+  Out << "#include <llvm/InlineAsm.h>\n";
+  Out << "#include <llvm/Support/FormattedStream.h>\n";
+  Out << "#include <llvm/Support/MathExtras.h>\n";
+  Out << "#include <llvm/Pass.h>\n";
+  Out << "#include <llvm/PassManager.h>\n";
+  Out << "#include <llvm/ADT/SmallVector.h>\n";
+  Out << "#include <llvm/Analysis/Verifier.h>\n";
+  Out << "#include <llvm/Assembly/PrintModulePass.h>\n";
+  Out << "#include <algorithm>\n";
+  Out << "using namespace llvm;\n\n";
+  Out << "Module* " << fname << "();\n\n";
+  Out << "int main(int argc, char**argv) {\n";
+  Out << "  Module* Mod = " << fname << "();\n";
+  Out << "  verifyModule(*Mod, PrintMessageAction);\n";
+  Out << "  PassManager PM;\n";
+  Out << "  PM.add(createPrintModulePass(&outs()));\n";
+  Out << "  PM.run(*Mod);\n";
+  Out << "  return 0;\n";
+  Out << "}\n\n";
+  printModule(fname,mName);
+}
+
+void CppWriter::printModule(const std::string& fname,
+                            const std::string& mName) {
+  nl(Out) << "Module* " << fname << "() {";
+  nl(Out,1) << "// Module Construction";
+  nl(Out) << "Module* mod = new Module(\"";
+  printEscapedString(mName);
+  Out << "\", getGlobalContext());";
+  if (!TheModule->getTargetTriple().empty()) {
+    nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
+  }
+  if (!TheModule->getTargetTriple().empty()) {
+    nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
+            << "\");";
+  }
+
+  if (!TheModule->getModuleInlineAsm().empty()) {
+    nl(Out) << "mod->setModuleInlineAsm(\"";
+    printEscapedString(TheModule->getModuleInlineAsm());
+    Out << "\");";
   }
+  nl(Out);
 
-  void CppWriter::printModule(const std::string& fname,
+  // Loop over the dependent libraries and emit them.
+  Module::lib_iterator LI = TheModule->lib_begin();
+  Module::lib_iterator LE = TheModule->lib_end();
+  while (LI != LE) {
+    Out << "mod->addLibrary(\"" << *LI << "\");";
+    nl(Out);
+    ++LI;
+  }
+  printModuleBody();
+  nl(Out) << "return mod;";
+  nl(Out,-1) << "}";
+  nl(Out);
+}
+
+void CppWriter::printContents(const std::string& fname,
                               const std::string& mName) {
-    nl(Out) << "Module* " << fname << "() {";
-    nl(Out,1) << "// Module Construction";
-    nl(Out) << "Module* mod = new Module(\"";
-    printEscapedString(mName);
-    Out << "\", getGlobalContext());";
-    if (!TheModule->getTargetTriple().empty()) {
-      nl(Out) << "mod->setDataLayout(\"" << TheModule->getDataLayout() << "\");";
-    }
-    if (!TheModule->getTargetTriple().empty()) {
-      nl(Out) << "mod->setTargetTriple(\"" << TheModule->getTargetTriple()
-              << "\");";
-    }
-
-    if (!TheModule->getModuleInlineAsm().empty()) {
-      nl(Out) << "mod->setModuleInlineAsm(\"";
-      printEscapedString(TheModule->getModuleInlineAsm());
-      Out << "\");";
-    }
-    nl(Out);
-
-    // Loop over the dependent libraries and emit them.
-    Module::lib_iterator LI = TheModule->lib_begin();
-    Module::lib_iterator LE = TheModule->lib_end();
-    while (LI != LE) {
-      Out << "mod->addLibrary(\"" << *LI << "\");";
-      nl(Out);
-      ++LI;
-    }
-    printModuleBody();
-    nl(Out) << "return mod;";
-    nl(Out,-1) << "}";
-    nl(Out);
-  }
-
-  void CppWriter::printContents(const std::string& fname,
-                                const std::string& mName) {
-    Out << "\nModule* " << fname << "(Module *mod) {\n";
-    Out << "\nmod->setModuleIdentifier(\"";
-    printEscapedString(mName);
-    Out << "\");\n";
-    printModuleBody();
-    Out << "\nreturn mod;\n";
-    Out << "\n}\n";
-  }
-
-  void CppWriter::printFunction(const std::string& fname,
-                                const std::string& funcName) {
-    const Function* F = TheModule->getFunction(funcName);
-    if (!F) {
-      error(std::string("Function '") + funcName + "' not found in input module");
-      return;
-    }
-    Out << "\nFunction* " << fname << "(Module *mod) {\n";
-    printFunctionUses(F);
-    printFunctionHead(F);
-    printFunctionBody(F);
-    Out << "return " << getCppName(F) << ";\n";
-    Out << "}\n";
-  }
-
-  void CppWriter::printFunctions() {
-    const Module::FunctionListType &funcs = TheModule->getFunctionList();
-    Module::const_iterator I  = funcs.begin();
-    Module::const_iterator IE = funcs.end();
-
-    for (; I != IE; ++I) {
-      const Function &func = *I;
-      if (!func.isDeclaration()) {
-        std::string name("define_");
-        name += func.getName();
-        printFunction(name, func.getName());
-      }
+  Out << "\nModule* " << fname << "(Module *mod) {\n";
+  Out << "\nmod->setModuleIdentifier(\"";
+  printEscapedString(mName);
+  Out << "\");\n";
+  printModuleBody();
+  Out << "\nreturn mod;\n";
+  Out << "\n}\n";
+}
+
+void CppWriter::printFunction(const std::string& fname,
+                              const std::string& funcName) {
+  const Function* F = TheModule->getFunction(funcName);
+  if (!F) {
+    error(std::string("Function '") + funcName + "' not found in input module");
+    return;
+  }
+  Out << "\nFunction* " << fname << "(Module *mod) {\n";
+  printFunctionUses(F);
+  printFunctionHead(F);
+  printFunctionBody(F);
+  Out << "return " << getCppName(F) << ";\n";
+  Out << "}\n";
+}
+
+void CppWriter::printFunctions() {
+  const Module::FunctionListType &funcs = TheModule->getFunctionList();
+  Module::const_iterator I  = funcs.begin();
+  Module::const_iterator IE = funcs.end();
+
+  for (; I != IE; ++I) {
+    const Function &func = *I;
+    if (!func.isDeclaration()) {
+      std::string name("define_");
+      name += func.getName();
+      printFunction(name, func.getName());
     }
   }
+}
 
-  void CppWriter::printVariable(const std::string& fname,
-                                const std::string& varName) {
-    const GlobalVariable* GV = TheModule->getNamedGlobal(varName);
-
-    if (!GV) {
-      error(std::string("Variable '") + varName + "' not found in input module");
-      return;
-    }
-    Out << "\nGlobalVariable* " << fname << "(Module *mod) {\n";
-    printVariableUses(GV);
-    printVariableHead(GV);
-    printVariableBody(GV);
-    Out << "return " << getCppName(GV) << ";\n";
-    Out << "}\n";
-  }
-
-  void CppWriter::printType(const std::string& fname,
-                            const std::string& typeName) {
-    const Type* Ty = TheModule->getTypeByName(typeName);
-    if (!Ty) {
-      error(std::string("Type '") + typeName + "' not found in input module");
-      return;
-    }
-    Out << "\nType* " << fname << "(Module *mod) {\n";
-    printType(Ty);
-    Out << "return " << getCppName(Ty) << ";\n";
-    Out << "}\n";
-  }
-
-  bool CppWriter::runOnModule(Module &M) {
-    TheModule = &M;
-
-    // Emit a header
-    Out << "// Generated by llvm2cpp - DO NOT MODIFY!\n\n";
-
-    // Get the name of the function we're supposed to generate
-    std::string fname = FuncName.getValue();
-
-    // Get the name of the thing we are to generate
-    std::string tgtname = NameToGenerate.getValue();
-    if (GenerationType == GenModule ||
-        GenerationType == GenContents ||
-        GenerationType == GenProgram ||
-        GenerationType == GenFunctions) {
-      if (tgtname == "!bad!") {
-        if (M.getModuleIdentifier() == "-")
-          tgtname = "<stdin>";
-        else
-          tgtname = M.getModuleIdentifier();
-      }
-    } else if (tgtname == "!bad!")
-      error("You must use the -for option with -gen-{function,variable,type}");
+void CppWriter::printVariable(const std::string& fname,
+                              const std::string& varName) {
+  const GlobalVariable* GV = TheModule->getNamedGlobal(varName);
+
+  if (!GV) {
+    error(std::string("Variable '") + varName + "' not found in input module");
+    return;
+  }
+  Out << "\nGlobalVariable* " << fname << "(Module *mod) {\n";
+  printVariableUses(GV);
+  printVariableHead(GV);
+  printVariableBody(GV);
+  Out << "return " << getCppName(GV) << ";\n";
+  Out << "}\n";
+}
 
-    switch (WhatToGenerate(GenerationType)) {
-     case GenProgram:
-      if (fname.empty())
-        fname = "makeLLVMModule";
-      printProgram(fname,tgtname);
-      break;
-     case GenModule:
-      if (fname.empty())
-        fname = "makeLLVMModule";
-      printModule(fname,tgtname);
-      break;
-     case GenContents:
-      if (fname.empty())
-        fname = "makeLLVMModuleContents";
-      printContents(fname,tgtname);
-      break;
-     case GenFunction:
-      if (fname.empty())
-        fname = "makeLLVMFunction";
-      printFunction(fname,tgtname);
-      break;
-     case GenFunctions:
-      printFunctions();
-      break;
-     case GenInline:
-      if (fname.empty())
-        fname = "makeLLVMInline";
-      printInline(fname,tgtname);
-      break;
-     case GenVariable:
-      if (fname.empty())
-        fname = "makeLLVMVariable";
-      printVariable(fname,tgtname);
-      break;
-     case GenType:
-      if (fname.empty())
-        fname = "makeLLVMType";
-      printType(fname,tgtname);
-      break;
-     default:
-      error("Invalid generation option");
+void CppWriter::printType(const std::string& fname,
+                          const std::string& typeName) {
+  const Type* Ty = TheModule->getTypeByName(typeName);
+  if (!Ty) {
+    error(std::string("Type '") + typeName + "' not found in input module");
+    return;
+  }
+  Out << "\nType* " << fname << "(Module *mod) {\n";
+  printType(Ty);
+  Out << "return " << getCppName(Ty) << ";\n";
+  Out << "}\n";
+}
+
+bool CppWriter::runOnModule(Module &M) {
+  TheModule = &M;
+
+  // Emit a header
+  Out << "// Generated by llvm2cpp - DO NOT MODIFY!\n\n";
+
+  // Get the name of the function we're supposed to generate
+  std::string fname = FuncName.getValue();
+
+  // Get the name of the thing we are to generate
+  std::string tgtname = NameToGenerate.getValue();
+  if (GenerationType == GenModule ||
+      GenerationType == GenContents ||
+      GenerationType == GenProgram ||
+      GenerationType == GenFunctions) {
+    if (tgtname == "!bad!") {
+      if (M.getModuleIdentifier() == "-")
+        tgtname = "<stdin>";
+      else
+        tgtname = M.getModuleIdentifier();
     }
+  } else if (tgtname == "!bad!")
+    error("You must use the -for option with -gen-{function,variable,type}");
 
-    return false;
+  switch (WhatToGenerate(GenerationType)) {
+   case GenProgram:
+    if (fname.empty())
+      fname = "makeLLVMModule";
+    printProgram(fname,tgtname);
+    break;
+   case GenModule:
+    if (fname.empty())
+      fname = "makeLLVMModule";
+    printModule(fname,tgtname);
+    break;
+   case GenContents:
+    if (fname.empty())
+      fname = "makeLLVMModuleContents";
+    printContents(fname,tgtname);
+    break;
+   case GenFunction:
+    if (fname.empty())
+      fname = "makeLLVMFunction";
+    printFunction(fname,tgtname);
+    break;
+   case GenFunctions:
+    printFunctions();
+    break;
+   case GenInline:
+    if (fname.empty())
+      fname = "makeLLVMInline";
+    printInline(fname,tgtname);
+    break;
+   case GenVariable:
+    if (fname.empty())
+      fname = "makeLLVMVariable";
+    printVariable(fname,tgtname);
+    break;
+   case GenType:
+    if (fname.empty())
+      fname = "makeLLVMType";
+    printType(fname,tgtname);
+    break;
+   default:
+    error("Invalid generation option");
   }
+
+  return false;
 }
 
 char CppWriter::ID = 0;

Modified: llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeISelLowering.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeISelLowering.cpp Fri Jul  2 04:57:13 2010
@@ -439,10 +439,8 @@
 SDValue MBlazeTargetLowering::
 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
   SDValue ResNode;
-  EVT PtrVT = Op.getValueType();
   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
   const Constant *C = N->getConstVal();
-  SDValue Zero = DAG.getConstant(0, PtrVT);
   DebugLoc dl = Op.getDebugLoc();
 
   SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),

Modified: llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.td Fri Jul  2 04:57:13 2010
@@ -25,13 +25,16 @@
 def SDT_MSP430Call         : SDTypeProfile<0, -1, [SDTCisVT<0, iPTR>]>;
 def SDT_MSP430CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i16>]>;
 def SDT_MSP430CallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, i16>, SDTCisVT<1, i16>]>;
-def SDT_MSP430Wrapper      : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def SDT_MSP430Wrapper      : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+                                                  SDTCisPtrTy<0>]>;
 def SDT_MSP430Cmp          : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
 def SDT_MSP430BrCC         : SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>,
                                                   SDTCisVT<1, i8>]>;
-def SDT_MSP430SelectCC     : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, 
+def SDT_MSP430SelectCC     : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
+                                                  SDTCisSameAs<1, 2>, 
                                                   SDTCisVT<3, i8>]>;
-def SDT_MSP430Shift        : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisI8<2>]>;
+def SDT_MSP430Shift        : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
+                                                  SDTCisI8<2>]>;
 
 //===----------------------------------------------------------------------===//
 // MSP430 Specific Node Definitions.
@@ -46,7 +49,7 @@
 def MSP430rrc     : SDNode<"MSP430ISD::RRC", SDTIntUnaryOp, []>;
 
 def MSP430call    : SDNode<"MSP430ISD::CALL", SDT_MSP430Call,
-                     [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag]>;
+                     [SDNPHasChain, SDNPOutFlag, SDNPOptInFlag, SDNPVariadic]>;
 def MSP430callseq_start :
                  SDNode<"ISD::CALLSEQ_START", SDT_MSP430CallSeqStart,
                         [SDNPHasChain, SDNPOutFlag]>;
@@ -55,8 +58,10 @@
                         [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
 def MSP430Wrapper : SDNode<"MSP430ISD::Wrapper", SDT_MSP430Wrapper>;
 def MSP430cmp     : SDNode<"MSP430ISD::CMP", SDT_MSP430Cmp, [SDNPOutFlag]>;
-def MSP430brcc    : SDNode<"MSP430ISD::BR_CC", SDT_MSP430BrCC, [SDNPHasChain, SDNPInFlag]>;
-def MSP430selectcc: SDNode<"MSP430ISD::SELECT_CC", SDT_MSP430SelectCC, [SDNPInFlag]>;
+def MSP430brcc    : SDNode<"MSP430ISD::BR_CC", SDT_MSP430BrCC,
+                            [SDNPHasChain, SDNPInFlag]>;
+def MSP430selectcc: SDNode<"MSP430ISD::SELECT_CC", SDT_MSP430SelectCC,
+                            [SDNPInFlag]>;
 def MSP430shl     : SDNode<"MSP430ISD::SHL", SDT_MSP430Shift, []>;
 def MSP430sra     : SDNode<"MSP430ISD::SRA", SDT_MSP430Shift, []>;
 def MSP430srl     : SDNode<"MSP430ISD::SRL", SDT_MSP430Shift, []>;
@@ -117,14 +122,14 @@
 }
 
 let usesCustomInserter = 1 in {
-  def Select8  : Pseudo<(outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cc),
+  def Select8  : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$src2, i8imm:$cc),
                         "# Select8 PSEUDO",
                         [(set GR8:$dst,
-                          (MSP430selectcc GR8:$src1, GR8:$src2, imm:$cc))]>;
-  def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cc),
+                          (MSP430selectcc GR8:$src, GR8:$src2, imm:$cc))]>;
+  def Select16 : Pseudo<(outs GR16:$dst), (ins GR16:$src, GR16:$src2, i8imm:$cc),
                         "# Select16 PSEUDO",
                         [(set GR16:$dst,
-                          (MSP430selectcc GR16:$src1, GR16:$src2, imm:$cc))]>;
+                          (MSP430selectcc GR16:$src, GR16:$src2, imm:$cc))]>;
   let Defs = [SRW] in {
   def Shl8     : Pseudo<(outs GR8:$dst), (ins GR8:$src, GR8:$cnt),
                         "# Shl8 PSEUDO",
@@ -330,60 +335,60 @@
 //===----------------------------------------------------------------------===//
 // Arithmetic Instructions
 
-let isTwoAddress = 1 in {
+let Constraints = "$src = $dst" in {
 
 let Defs = [SRW] in {
 
 let isCommutable = 1 in { // X = ADD Y, Z  == X = ADD Z, Y
 
 def ADD8rr  : I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "add.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (add GR8:$src1, GR8:$src2)),
+                   [(set GR8:$dst, (add GR8:$src, GR8:$src2)),
                     (implicit SRW)]>;
 def ADD16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "add.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (add GR16:$src1, GR16:$src2)),
+                    [(set GR16:$dst, (add GR16:$src, GR16:$src2)),
                      (implicit SRW)]>;
 }
 
 def ADD8rm  : I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "add.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (add GR8:$src1, (load addr:$src2))),
+                   [(set GR8:$dst, (add GR8:$src, (load addr:$src2))),
                     (implicit SRW)]>;
 def ADD16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "add.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (add GR16:$src1, (load addr:$src2))),
+                    [(set GR16:$dst, (add GR16:$src, (load addr:$src2))),
                      (implicit SRW)]>;
 
 let mayLoad = 1, hasExtraDefRegAllocReq = 1, 
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
 def ADD8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
                          (outs GR8:$dst, GR16:$base_wb),
-                         (ins GR8:$src1, GR16:$base),
+                         (ins GR8:$src, GR16:$base),
                          "add.b\t{@$base+, $dst}", []>;
 def ADD16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
                            (outs GR16:$dst, GR16:$base_wb),
-                           (ins GR16:$src1, GR16:$base),
+                           (ins GR16:$src, GR16:$base),
                           "add.w\t{@$base+, $dst}", []>;
 }
 
 
 def ADD8ri  : I8ri<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                    "add.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (add GR8:$src1, imm:$src2)),
+                   [(set GR8:$dst, (add GR8:$src, imm:$src2)),
                     (implicit SRW)]>;
 def ADD16ri : I16ri<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                     "add.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (add GR16:$src1, imm:$src2)),
+                    [(set GR16:$dst, (add GR16:$src, imm:$src2)),
                      (implicit SRW)]>;
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def ADD8mr  : I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "add.b\t{$src, $dst}",
@@ -424,40 +429,40 @@
 
 let isCommutable = 1 in { // X = ADDC Y, Z  == X = ADDC Z, Y
 def ADC8rr  : I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "addc.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (adde GR8:$src1, GR8:$src2)),
+                   [(set GR8:$dst, (adde GR8:$src, GR8:$src2)),
                     (implicit SRW)]>;
 def ADC16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "addc.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (adde GR16:$src1, GR16:$src2)),
+                    [(set GR16:$dst, (adde GR16:$src, GR16:$src2)),
                      (implicit SRW)]>;
 } // isCommutable
 
 def ADC8ri  : I8ri<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                    "addc.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (adde GR8:$src1, imm:$src2)),
+                   [(set GR8:$dst, (adde GR8:$src, imm:$src2)),
                     (implicit SRW)]>;
 def ADC16ri : I16ri<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                     "addc.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (adde GR16:$src1, imm:$src2)),
+                    [(set GR16:$dst, (adde GR16:$src, imm:$src2)),
                      (implicit SRW)]>;
 
 def ADC8rm  : I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "addc.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (adde GR8:$src1, (load addr:$src2))),
+                   [(set GR8:$dst, (adde GR8:$src, (load addr:$src2))),
                     (implicit SRW)]>;
 def ADC16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "addc.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (adde GR16:$src1, (load addr:$src2))),
+                    [(set GR16:$dst, (adde GR16:$src, (load addr:$src2))),
                      (implicit SRW)]>;
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def ADC8mr  : I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "addc.b\t{$src, $dst}",
@@ -498,52 +503,52 @@
 
 let isCommutable = 1 in { // X = AND Y, Z  == X = AND Z, Y
 def AND8rr  : I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "and.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (and GR8:$src1, GR8:$src2)),
+                   [(set GR8:$dst, (and GR8:$src, GR8:$src2)),
                     (implicit SRW)]>;
 def AND16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "and.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (and GR16:$src1, GR16:$src2)),
+                    [(set GR16:$dst, (and GR16:$src, GR16:$src2)),
                      (implicit SRW)]>;
 }
 
 def AND8ri  : I8ri<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                    "and.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (and GR8:$src1, imm:$src2)),
+                   [(set GR8:$dst, (and GR8:$src, imm:$src2)),
                     (implicit SRW)]>;
 def AND16ri : I16ri<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                     "and.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (and GR16:$src1, imm:$src2)),
+                    [(set GR16:$dst, (and GR16:$src, imm:$src2)),
                      (implicit SRW)]>;
 
 def AND8rm  : I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "and.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (and GR8:$src1, (load addr:$src2))),
+                   [(set GR8:$dst, (and GR8:$src, (load addr:$src2))),
                     (implicit SRW)]>;
 def AND16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "and.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (and GR16:$src1, (load addr:$src2))),
+                    [(set GR16:$dst, (and GR16:$src, (load addr:$src2))),
                      (implicit SRW)]>;
 
 let mayLoad = 1, hasExtraDefRegAllocReq = 1, 
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
 def AND8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
                          (outs GR8:$dst, GR16:$base_wb),
-                         (ins GR8:$src1, GR16:$base),
+                         (ins GR8:$src, GR16:$base),
                          "and.b\t{@$base+, $dst}", []>;
 def AND16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
                            (outs GR16:$dst, GR16:$base_wb),
-                           (ins GR16:$src1, GR16:$base),
+                           (ins GR16:$src, GR16:$base),
                            "and.w\t{@$base+, $dst}", []>;
 }
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def AND8mr  : I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "and.b\t{$src, $dst}",
@@ -582,46 +587,46 @@
 
 let isCommutable = 1 in { // X = OR Y, Z  == X = OR Z, Y
 def OR8rr  : I8rr<0x0,
-                  (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                  (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                   "bis.b\t{$src2, $dst}",
-                  [(set GR8:$dst, (or GR8:$src1, GR8:$src2))]>;
+                  [(set GR8:$dst, (or GR8:$src, GR8:$src2))]>;
 def OR16rr : I16rr<0x0,
-                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                   (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                    "bis.w\t{$src2, $dst}",
-                   [(set GR16:$dst, (or GR16:$src1, GR16:$src2))]>;
+                   [(set GR16:$dst, (or GR16:$src, GR16:$src2))]>;
 }
 
 def OR8ri  : I8ri<0x0,
-                  (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                  (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                   "bis.b\t{$src2, $dst}",
-                  [(set GR8:$dst, (or GR8:$src1, imm:$src2))]>;
+                  [(set GR8:$dst, (or GR8:$src, imm:$src2))]>;
 def OR16ri : I16ri<0x0,
-                   (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                   (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                    "bis.w\t{$src2, $dst}",
-                   [(set GR16:$dst, (or GR16:$src1, imm:$src2))]>;
+                   [(set GR16:$dst, (or GR16:$src, imm:$src2))]>;
 
 def OR8rm  : I8rm<0x0,
-                  (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                  (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                   "bis.b\t{$src2, $dst}",
-                  [(set GR8:$dst, (or GR8:$src1, (load addr:$src2)))]>;
+                  [(set GR8:$dst, (or GR8:$src, (load addr:$src2)))]>;
 def OR16rm : I16rm<0x0,
-                   (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                   (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                    "bis.w\t{$src2, $dst}",
-                   [(set GR16:$dst, (or GR16:$src1, (load addr:$src2)))]>;
+                   [(set GR16:$dst, (or GR16:$src, (load addr:$src2)))]>;
 
 let mayLoad = 1, hasExtraDefRegAllocReq = 1, 
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
 def OR8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
                         (outs GR8:$dst, GR16:$base_wb),
-                        (ins GR8:$src1, GR16:$base),
+                        (ins GR8:$src, GR16:$base),
                         "bis.b\t{@$base+, $dst}", []>;
 def OR16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
                           (outs GR16:$dst, GR16:$base_wb),
-                          (ins GR16:$src1, GR16:$base),
+                          (ins GR16:$src, GR16:$base),
                           "bis.w\t{@$base+, $dst}", []>;
 }
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def OR8mr  : I8mr<0x0,
                   (outs), (ins memdst:$dst, GR8:$src),
                   "bis.b\t{$src, $dst}",
@@ -654,24 +659,24 @@
 
 // bic does not modify condition codes
 def BIC8rr :  I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "bic.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (and GR8:$src1, (not GR8:$src2)))]>;
+                   [(set GR8:$dst, (and GR8:$src, (not GR8:$src2)))]>;
 def BIC16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "bic.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (and GR16:$src1, (not GR16:$src2)))]>;
+                    [(set GR16:$dst, (and GR16:$src, (not GR16:$src2)))]>;
 
 def BIC8rm :  I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "bic.b\t{$src2, $dst}",
-                    [(set GR8:$dst, (and GR8:$src1, (not (i8 (load addr:$src2)))))]>;
+                    [(set GR8:$dst, (and GR8:$src, (not (i8 (load addr:$src2)))))]>;
 def BIC16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "bic.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (and GR16:$src1, (not (i16 (load addr:$src2)))))]>;
+                    [(set GR16:$dst, (and GR16:$src, (not (i16 (load addr:$src2)))))]>;
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def BIC8mr :  I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "bic.b\t{$src, $dst}",
@@ -695,52 +700,52 @@
 
 let isCommutable = 1 in { // X = XOR Y, Z  == X = XOR Z, Y
 def XOR8rr  : I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "xor.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (xor GR8:$src1, GR8:$src2)),
+                   [(set GR8:$dst, (xor GR8:$src, GR8:$src2)),
                     (implicit SRW)]>;
 def XOR16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "xor.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (xor GR16:$src1, GR16:$src2)),
+                    [(set GR16:$dst, (xor GR16:$src, GR16:$src2)),
                      (implicit SRW)]>;
 }
 
 def XOR8ri  : I8ri<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                    "xor.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (xor GR8:$src1, imm:$src2)),
+                   [(set GR8:$dst, (xor GR8:$src, imm:$src2)),
                     (implicit SRW)]>;
 def XOR16ri : I16ri<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                     "xor.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (xor GR16:$src1, imm:$src2)),
+                    [(set GR16:$dst, (xor GR16:$src, imm:$src2)),
                      (implicit SRW)]>;
 
 def XOR8rm  : I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "xor.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (xor GR8:$src1, (load addr:$src2))),
+                   [(set GR8:$dst, (xor GR8:$src, (load addr:$src2))),
                     (implicit SRW)]>;
 def XOR16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "xor.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (xor GR16:$src1, (load addr:$src2))),
+                    [(set GR16:$dst, (xor GR16:$src, (load addr:$src2))),
                      (implicit SRW)]>;
 
 let mayLoad = 1, hasExtraDefRegAllocReq = 1, 
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
 def XOR8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
                          (outs GR8:$dst, GR16:$base_wb),
-                         (ins GR8:$src1, GR16:$base),
+                         (ins GR8:$src, GR16:$base),
                          "xor.b\t{@$base+, $dst}", []>;
 def XOR16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
                            (outs GR16:$dst, GR16:$base_wb),
-                           (ins GR16:$src1, GR16:$base),
+                           (ins GR16:$src, GR16:$base),
                            "xor.w\t{@$base+, $dst}", []>;
 }
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def XOR8mr  : I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "xor.b\t{$src, $dst}",
@@ -777,51 +782,51 @@
 
 
 def SUB8rr  : I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "sub.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (sub GR8:$src1, GR8:$src2)),
+                   [(set GR8:$dst, (sub GR8:$src, GR8:$src2)),
                     (implicit SRW)]>;
 def SUB16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "sub.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (sub GR16:$src1, GR16:$src2)),
+                    [(set GR16:$dst, (sub GR16:$src, GR16:$src2)),
                      (implicit SRW)]>;
 
 def SUB8ri  : I8ri<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                    "sub.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (sub GR8:$src1, imm:$src2)),
+                   [(set GR8:$dst, (sub GR8:$src, imm:$src2)),
                     (implicit SRW)]>;
 def SUB16ri : I16ri<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                     "sub.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (sub GR16:$src1, imm:$src2)),
+                    [(set GR16:$dst, (sub GR16:$src, imm:$src2)),
                      (implicit SRW)]>;
 
 def SUB8rm  : I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "sub.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (sub GR8:$src1, (load addr:$src2))),
+                   [(set GR8:$dst, (sub GR8:$src, (load addr:$src2))),
                     (implicit SRW)]>;
 def SUB16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "sub.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (sub GR16:$src1, (load addr:$src2))),
+                    [(set GR16:$dst, (sub GR16:$src, (load addr:$src2))),
                      (implicit SRW)]>;
 
 let mayLoad = 1, hasExtraDefRegAllocReq = 1, 
-Constraints = "$base = $base_wb, $src1 = $dst" in {
+Constraints = "$base = $base_wb, $src = $dst" in {
 def SUB8rm_POST : IForm8<0x0, DstReg, SrcPostInc, Size2Bytes,
                          (outs GR8:$dst, GR16:$base_wb),
-                         (ins GR8:$src1, GR16:$base),
+                         (ins GR8:$src, GR16:$base),
                          "sub.b\t{@$base+, $dst}", []>;
 def SUB16rm_POST : IForm16<0x0, DstReg, SrcPostInc, Size2Bytes,
                           (outs GR16:$dst, GR16:$base_wb),
-                          (ins GR16:$src1, GR16:$base),
+                          (ins GR16:$src, GR16:$base),
                           "sub.w\t{@$base+, $dst}", []>;
 }
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def SUB8mr  : I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "sub.b\t{$src, $dst}",
@@ -860,39 +865,39 @@
 
 let Uses = [SRW] in {
 def SBC8rr  : I8rr<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, GR8:$src2),
                    "subc.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (sube GR8:$src1, GR8:$src2)),
+                   [(set GR8:$dst, (sube GR8:$src, GR8:$src2)),
                     (implicit SRW)]>;
 def SBC16rr : I16rr<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, GR16:$src2),
                     "subc.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (sube GR16:$src1, GR16:$src2)),
+                    [(set GR16:$dst, (sube GR16:$src, GR16:$src2)),
                      (implicit SRW)]>;
 
 def SBC8ri  : I8ri<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, i8imm:$src2),
                    "subc.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (sube GR8:$src1, imm:$src2)),
+                   [(set GR8:$dst, (sube GR8:$src, imm:$src2)),
                     (implicit SRW)]>;
 def SBC16ri : I16ri<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, i16imm:$src2),
                     "subc.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (sube GR16:$src1, imm:$src2)),
+                    [(set GR16:$dst, (sube GR16:$src, imm:$src2)),
                      (implicit SRW)]>;
 
 def SBC8rm  : I8rm<0x0,
-                   (outs GR8:$dst), (ins GR8:$src1, memsrc:$src2),
+                   (outs GR8:$dst), (ins GR8:$src, memsrc:$src2),
                    "subc.b\t{$src2, $dst}",
-                   [(set GR8:$dst, (sube GR8:$src1, (load addr:$src2))),
+                   [(set GR8:$dst, (sube GR8:$src, (load addr:$src2))),
                     (implicit SRW)]>;
 def SBC16rm : I16rm<0x0,
-                    (outs GR16:$dst), (ins GR16:$src1, memsrc:$src2),
+                    (outs GR16:$dst), (ins GR16:$src, memsrc:$src2),
                     "subc.w\t{$src2, $dst}",
-                    [(set GR16:$dst, (sube GR16:$src1, (load addr:$src2))),
+                    [(set GR16:$dst, (sube GR16:$src, (load addr:$src2))),
                      (implicit SRW)]>;
 
-let isTwoAddress = 0 in {
+let Constraints = "" in {
 def SBC8mr  : I8mr<0x0,
                    (outs), (ins memdst:$dst, GR8:$src),
                    "subc.b\t{$src, $dst}",
@@ -985,59 +990,59 @@
                     "swpb\t$dst",
                     [(set GR16:$dst, (bswap GR16:$src))]>;
 
-} // isTwoAddress = 1
+} // Constraints = "$src = $dst"
 
 // Integer comparisons
 let Defs = [SRW] in {
 def CMP8rr  : I8rr<0x0,
-                   (outs), (ins GR8:$src1, GR8:$src2),
-                   "cmp.b\t{$src2, $src1}",
-                   [(MSP430cmp GR8:$src1, GR8:$src2), (implicit SRW)]>;
+                   (outs), (ins GR8:$src, GR8:$src2),
+                   "cmp.b\t{$src2, $src}",
+                   [(MSP430cmp GR8:$src, GR8:$src2), (implicit SRW)]>;
 def CMP16rr : I16rr<0x0,
-                    (outs), (ins GR16:$src1, GR16:$src2),
-                    "cmp.w\t{$src2, $src1}",
-                    [(MSP430cmp GR16:$src1, GR16:$src2), (implicit SRW)]>;
+                    (outs), (ins GR16:$src, GR16:$src2),
+                    "cmp.w\t{$src2, $src}",
+                    [(MSP430cmp GR16:$src, GR16:$src2), (implicit SRW)]>;
 
 def CMP8ri  : I8ri<0x0,
-                   (outs), (ins GR8:$src1, i8imm:$src2),
-                   "cmp.b\t{$src2, $src1}",
-                   [(MSP430cmp GR8:$src1, imm:$src2), (implicit SRW)]>;
+                   (outs), (ins GR8:$src, i8imm:$src2),
+                   "cmp.b\t{$src2, $src}",
+                   [(MSP430cmp GR8:$src, imm:$src2), (implicit SRW)]>;
 def CMP16ri : I16ri<0x0,
-                    (outs), (ins GR16:$src1, i16imm:$src2),
-                    "cmp.w\t{$src2, $src1}",
-                    [(MSP430cmp GR16:$src1, imm:$src2), (implicit SRW)]>;
+                    (outs), (ins GR16:$src, i16imm:$src2),
+                    "cmp.w\t{$src2, $src}",
+                    [(MSP430cmp GR16:$src, imm:$src2), (implicit SRW)]>;
 
 def CMP8mi  : I8mi<0x0,
-                   (outs), (ins memsrc:$src1, i8imm:$src2),
-                   "cmp.b\t{$src2, $src1}",
-                   [(MSP430cmp (load addr:$src1),
+                   (outs), (ins memsrc:$src, i8imm:$src2),
+                   "cmp.b\t{$src2, $src}",
+                   [(MSP430cmp (load addr:$src),
                                (i8 imm:$src2)), (implicit SRW)]>;
 def CMP16mi : I16mi<0x0,
-                    (outs), (ins memsrc:$src1, i16imm:$src2),
-                    "cmp.w\t{$src2, $src1}",
-                     [(MSP430cmp (load addr:$src1),
+                    (outs), (ins memsrc:$src, i16imm:$src2),
+                    "cmp.w\t{$src2, $src}",
+                     [(MSP430cmp (load addr:$src),
                                  (i16 imm:$src2)), (implicit SRW)]>;
 
 def CMP8rm  : I8rm<0x0,
-                   (outs), (ins GR8:$src1, memsrc:$src2),
-                   "cmp.b\t{$src2, $src1}",
-                   [(MSP430cmp GR8:$src1, (load addr:$src2)), 
+                   (outs), (ins GR8:$src, memsrc:$src2),
+                   "cmp.b\t{$src2, $src}",
+                   [(MSP430cmp GR8:$src, (load addr:$src2)), 
                     (implicit SRW)]>;
 def CMP16rm : I16rm<0x0,
-                    (outs), (ins GR16:$src1, memsrc:$src2),
-                    "cmp.w\t{$src2, $src1}",
-                    [(MSP430cmp GR16:$src1, (load addr:$src2)),
+                    (outs), (ins GR16:$src, memsrc:$src2),
+                    "cmp.w\t{$src2, $src}",
+                    [(MSP430cmp GR16:$src, (load addr:$src2)),
                      (implicit SRW)]>;
 
 def CMP8mr  : I8mr<0x0,
-                   (outs), (ins memsrc:$src1, GR8:$src2),
-                   "cmp.b\t{$src2, $src1}",
-                   [(MSP430cmp (load addr:$src1), GR8:$src2),
+                   (outs), (ins memsrc:$src, GR8:$src2),
+                   "cmp.b\t{$src2, $src}",
+                   [(MSP430cmp (load addr:$src), GR8:$src2),
                     (implicit SRW)]>;
 def CMP16mr : I16mr<0x0,
-                    (outs), (ins memsrc:$src1, GR16:$src2),
-                    "cmp.w\t{$src2, $src1}",
-                    [(MSP430cmp (load addr:$src1), GR16:$src2), 
+                    (outs), (ins memsrc:$src, GR16:$src2),
+                    "cmp.w\t{$src2, $src}",
+                    [(MSP430cmp (load addr:$src), GR16:$src2), 
                      (implicit SRW)]>;
 
 
@@ -1045,71 +1050,71 @@
 // Note that the C condition is set differently than when using CMP.
 let isCommutable = 1 in {
 def BIT8rr  : I8rr<0x0,
-                   (outs), (ins GR8:$src1, GR8:$src2),
-                   "bit.b\t{$src2, $src1}",
-                   [(MSP430cmp (and_su GR8:$src1, GR8:$src2), 0),
+                   (outs), (ins GR8:$src, GR8:$src2),
+                   "bit.b\t{$src2, $src}",
+                   [(MSP430cmp (and_su GR8:$src, GR8:$src2), 0),
                     (implicit SRW)]>;
 def BIT16rr : I16rr<0x0,
-                    (outs), (ins GR16:$src1, GR16:$src2),
-                    "bit.w\t{$src2, $src1}",
-                    [(MSP430cmp (and_su GR16:$src1, GR16:$src2), 0),
+                    (outs), (ins GR16:$src, GR16:$src2),
+                    "bit.w\t{$src2, $src}",
+                    [(MSP430cmp (and_su GR16:$src, GR16:$src2), 0),
                      (implicit SRW)]>;
 }
 def BIT8ri  : I8ri<0x0,
-                   (outs), (ins GR8:$src1, i8imm:$src2),
-                   "bit.b\t{$src2, $src1}",
-                   [(MSP430cmp (and_su GR8:$src1, imm:$src2), 0),
+                   (outs), (ins GR8:$src, i8imm:$src2),
+                   "bit.b\t{$src2, $src}",
+                   [(MSP430cmp (and_su GR8:$src, imm:$src2), 0),
                     (implicit SRW)]>;
 def BIT16ri : I16ri<0x0,
-                    (outs), (ins GR16:$src1, i16imm:$src2),
-                    "bit.w\t{$src2, $src1}",
-                    [(MSP430cmp (and_su GR16:$src1, imm:$src2), 0),
+                    (outs), (ins GR16:$src, i16imm:$src2),
+                    "bit.w\t{$src2, $src}",
+                    [(MSP430cmp (and_su GR16:$src, imm:$src2), 0),
                      (implicit SRW)]>;
 
 def BIT8rm  : I8rm<0x0,
-                   (outs), (ins GR8:$src1, memdst:$src2),
-                   "bit.b\t{$src2, $src1}",
-                   [(MSP430cmp (and_su GR8:$src1,  (load addr:$src2)), 0),
+                   (outs), (ins GR8:$src, memdst:$src2),
+                   "bit.b\t{$src2, $src}",
+                   [(MSP430cmp (and_su GR8:$src,  (load addr:$src2)), 0),
                     (implicit SRW)]>;
 def BIT16rm : I16rm<0x0,
-                    (outs), (ins GR16:$src1, memdst:$src2),
-                    "bit.w\t{$src2, $src1}",
-                    [(MSP430cmp (and_su GR16:$src1,  (load addr:$src2)), 0),
+                    (outs), (ins GR16:$src, memdst:$src2),
+                    "bit.w\t{$src2, $src}",
+                    [(MSP430cmp (and_su GR16:$src,  (load addr:$src2)), 0),
                      (implicit SRW)]>;
 
 def BIT8mr  : I8mr<0x0,
-                  (outs), (ins memsrc:$src1, GR8:$src2),
-                  "bit.b\t{$src2, $src1}",
-                  [(MSP430cmp (and_su (load addr:$src1), GR8:$src2), 0),
+                  (outs), (ins memsrc:$src, GR8:$src2),
+                  "bit.b\t{$src2, $src}",
+                  [(MSP430cmp (and_su (load addr:$src), GR8:$src2), 0),
                    (implicit SRW)]>;
 def BIT16mr : I16mr<0x0,
-                    (outs), (ins memsrc:$src1, GR16:$src2),
-                    "bit.w\t{$src2, $src1}",
-                    [(MSP430cmp (and_su (load addr:$src1), GR16:$src2), 0),
+                    (outs), (ins memsrc:$src, GR16:$src2),
+                    "bit.w\t{$src2, $src}",
+                    [(MSP430cmp (and_su (load addr:$src), GR16:$src2), 0),
                      (implicit SRW)]>;
 
 def BIT8mi  : I8mi<0x0,
-                   (outs), (ins memsrc:$src1, i8imm:$src2),
-                   "bit.b\t{$src2, $src1}",
-                   [(MSP430cmp (and_su (load addr:$src1), (i8 imm:$src2)), 0),
+                   (outs), (ins memsrc:$src, i8imm:$src2),
+                   "bit.b\t{$src2, $src}",
+                   [(MSP430cmp (and_su (load addr:$src), (i8 imm:$src2)), 0),
                     (implicit SRW)]>;
 def BIT16mi : I16mi<0x0,
-                    (outs), (ins memsrc:$src1, i16imm:$src2),
-                    "bit.w\t{$src2, $src1}",
-                    [(MSP430cmp (and_su (load addr:$src1), (i16 imm:$src2)), 0),
+                    (outs), (ins memsrc:$src, i16imm:$src2),
+                    "bit.w\t{$src2, $src}",
+                    [(MSP430cmp (and_su (load addr:$src), (i16 imm:$src2)), 0),
                      (implicit SRW)]>;
 
 def BIT8mm  : I8mm<0x0,
-                   (outs), (ins memsrc:$src1, memsrc:$src2),
-                   "bit.b\t{$src2, $src1}",
-                   [(MSP430cmp (and_su (i8 (load addr:$src1)),
+                   (outs), (ins memsrc:$src, memsrc:$src2),
+                   "bit.b\t{$src2, $src}",
+                   [(MSP430cmp (and_su (i8 (load addr:$src)),
                                        (load addr:$src2)),
                                  0),
                       (implicit SRW)]>;
 def BIT16mm : I16mm<0x0,
-                    (outs), (ins memsrc:$src1, memsrc:$src2),
-                    "bit.w\t{$src2, $src1}",
-                    [(MSP430cmp (and_su (i16 (load addr:$src1)),
+                    (outs), (ins memsrc:$src, memsrc:$src2),
+                    "bit.w\t{$src2, $src}",
+                    [(MSP430cmp (and_su (i16 (load addr:$src)),
                                         (load addr:$src2)),
                                  0),
                      (implicit SRW)]>;
@@ -1134,12 +1139,12 @@
 def : Pat<(i16 (MSP430Wrapper texternalsym:$dst)), (MOV16ri texternalsym:$dst)>;
 def : Pat<(i16 (MSP430Wrapper tblockaddress:$dst)), (MOV16ri tblockaddress:$dst)>;
 
-def : Pat<(add GR16:$src1, (MSP430Wrapper tglobaladdr :$src2)),
-          (ADD16ri GR16:$src1, tglobaladdr:$src2)>;
-def : Pat<(add GR16:$src1, (MSP430Wrapper texternalsym:$src2)),
-          (ADD16ri GR16:$src1, texternalsym:$src2)>;
-def : Pat<(add GR16:$src1, (MSP430Wrapper tblockaddress:$src2)),
-          (ADD16ri GR16:$src1, tblockaddress:$src2)>;
+def : Pat<(add GR16:$src, (MSP430Wrapper tglobaladdr :$src2)),
+          (ADD16ri GR16:$src, tglobaladdr:$src2)>;
+def : Pat<(add GR16:$src, (MSP430Wrapper texternalsym:$src2)),
+          (ADD16ri GR16:$src, texternalsym:$src2)>;
+def : Pat<(add GR16:$src, (MSP430Wrapper tblockaddress:$src2)),
+          (ADD16ri GR16:$src, tblockaddress:$src2)>;
 
 def : Pat<(store (i16 (MSP430Wrapper tglobaladdr:$src)), addr:$dst),
           (MOV16mi addr:$dst, tglobaladdr:$src)>;
@@ -1155,45 +1160,45 @@
           (CALLi texternalsym:$dst)>;
 
 // add and sub always produce carry
-def : Pat<(addc GR16:$src1, GR16:$src2),
-          (ADD16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(addc GR16:$src1, (load addr:$src2)),
-          (ADD16rm GR16:$src1, addr:$src2)>;
-def : Pat<(addc GR16:$src1, imm:$src2),
-          (ADD16ri GR16:$src1, imm:$src2)>;
+def : Pat<(addc GR16:$src, GR16:$src2),
+          (ADD16rr GR16:$src, GR16:$src2)>;
+def : Pat<(addc GR16:$src, (load addr:$src2)),
+          (ADD16rm GR16:$src, addr:$src2)>;
+def : Pat<(addc GR16:$src, imm:$src2),
+          (ADD16ri GR16:$src, imm:$src2)>;
 def : Pat<(store (addc (load addr:$dst), GR16:$src), addr:$dst),
           (ADD16mr addr:$dst, GR16:$src)>;
 def : Pat<(store (addc (load addr:$dst), (i16 (load addr:$src))), addr:$dst),
           (ADD16mm addr:$dst, addr:$src)>;
 
-def : Pat<(addc GR8:$src1, GR8:$src2),
-          (ADD8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(addc GR8:$src1, (load addr:$src2)),
-          (ADD8rm GR8:$src1, addr:$src2)>;
-def : Pat<(addc GR8:$src1, imm:$src2),
-          (ADD8ri GR8:$src1, imm:$src2)>;
+def : Pat<(addc GR8:$src, GR8:$src2),
+          (ADD8rr GR8:$src, GR8:$src2)>;
+def : Pat<(addc GR8:$src, (load addr:$src2)),
+          (ADD8rm GR8:$src, addr:$src2)>;
+def : Pat<(addc GR8:$src, imm:$src2),
+          (ADD8ri GR8:$src, imm:$src2)>;
 def : Pat<(store (addc (load addr:$dst), GR8:$src), addr:$dst),
           (ADD8mr addr:$dst, GR8:$src)>;
 def : Pat<(store (addc (load addr:$dst), (i8 (load addr:$src))), addr:$dst),
           (ADD8mm addr:$dst, addr:$src)>;
 
-def : Pat<(subc GR16:$src1, GR16:$src2),
-          (SUB16rr GR16:$src1, GR16:$src2)>;
-def : Pat<(subc GR16:$src1, (load addr:$src2)),
-          (SUB16rm GR16:$src1, addr:$src2)>;
-def : Pat<(subc GR16:$src1, imm:$src2),
-          (SUB16ri GR16:$src1, imm:$src2)>;
+def : Pat<(subc GR16:$src, GR16:$src2),
+          (SUB16rr GR16:$src, GR16:$src2)>;
+def : Pat<(subc GR16:$src, (load addr:$src2)),
+          (SUB16rm GR16:$src, addr:$src2)>;
+def : Pat<(subc GR16:$src, imm:$src2),
+          (SUB16ri GR16:$src, imm:$src2)>;
 def : Pat<(store (subc (load addr:$dst), GR16:$src), addr:$dst),
           (SUB16mr addr:$dst, GR16:$src)>;
 def : Pat<(store (subc (load addr:$dst), (i16 (load addr:$src))), addr:$dst),
           (SUB16mm addr:$dst, addr:$src)>;
 
-def : Pat<(subc GR8:$src1, GR8:$src2),
-          (SUB8rr GR8:$src1, GR8:$src2)>;
-def : Pat<(subc GR8:$src1, (load addr:$src2)),
-          (SUB8rm GR8:$src1, addr:$src2)>;
-def : Pat<(subc GR8:$src1, imm:$src2),
-          (SUB8ri GR8:$src1, imm:$src2)>;
+def : Pat<(subc GR8:$src, GR8:$src2),
+          (SUB8rr GR8:$src, GR8:$src2)>;
+def : Pat<(subc GR8:$src, (load addr:$src2)),
+          (SUB8rm GR8:$src, addr:$src2)>;
+def : Pat<(subc GR8:$src, imm:$src2),
+          (SUB8ri GR8:$src, imm:$src2)>;
 def : Pat<(store (subc (load addr:$dst), GR8:$src), addr:$dst),
           (SUB8mr addr:$dst, GR8:$src)>;
 def : Pat<(store (subc (load addr:$dst), (i8 (load addr:$src))), addr:$dst),
@@ -1201,6 +1206,6 @@
 
 // peephole patterns
 def : Pat<(and GR16:$src, 255), (ZEXT16r GR16:$src)>;
-def : Pat<(MSP430cmp (trunc (and_su GR16:$src1, GR16:$src2)), 0),
-          (BIT8rr (EXTRACT_SUBREG GR16:$src1, subreg_8bit),
+def : Pat<(MSP430cmp (trunc (and_su GR16:$src, GR16:$src2)), 0),
+          (BIT8rr (EXTRACT_SUBREG GR16:$src, subreg_8bit),
                   (EXTRACT_SUBREG GR16:$src2, subreg_8bit))>;

Modified: llvm/branches/wendling/eh/lib/Target/Mangler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Mangler.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Mangler.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Mangler.cpp Fri Jul  2 04:57:13 2010
@@ -180,7 +180,7 @@
   ManglerPrefixTy PrefixTy = Mangler::Default;
   if (GV->hasPrivateLinkage() || isImplicitlyPrivate)
     PrefixTy = Mangler::Private;
-  else if (GV->hasLinkerPrivateLinkage())
+  else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage())
     PrefixTy = Mangler::LinkerPrivate;
   
   // If this global has a name, handle it simply.

Modified: llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.td Fri Jul  2 04:57:13 2010
@@ -541,7 +541,7 @@
 def MIPS_CMOV_ZERO  : PatLeaf<(i32 0)>;
 def MIPS_CMOV_NZERO : PatLeaf<(i32 1)>;
 
-let Predicates = [HasCondMov], isTwoAddress = 1 in {
+let Predicates = [HasCondMov], Constraints = "$F = $dst" in {
   def MOVN : CondMov<0x0a, "movn", MIPS_CMOV_NZERO>;
   def MOVZ : CondMov<0x0b, "movz", MIPS_CMOV_ZERO>;
 }

Modified: llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.td Fri Jul  2 04:57:13 2010
@@ -134,7 +134,7 @@
 //===----------------------------------------------------------------------===//
 
 // W = W Op F : Load the value from F and do Op to W.
-let isTwoAddress = 1, mayLoad = 1 in
+let Constraints = "$src = $dst", mayLoad = 1 in
 class BinOpFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
   ByteFormat<OpCode, (outs GPR:$dst),
              (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
@@ -160,7 +160,7 @@
                                              )]>;
 
 // W = W Op L : Do Op of L with W and place result in W.
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 class BinOpWL<bits<6> opcode, string OpcStr, SDNode OpNode> :
   LiteralFormat<opcode, (outs GPR:$dst),
                 (ins GPR:$src, i8imm:$literal),
@@ -220,7 +220,7 @@
              "movwf ${fsr}L",
              []>;
 
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 def set_fsrhi:
   ByteFormat<0, (outs FSR16:$dst), 
              (ins FSR16:$src, GPR:$val),
@@ -362,7 +362,7 @@
 }
 
 // W -= [F] ; load from F and sub the value from W.
-let isTwoAddress = 1, mayLoad = 1 in
+let Constraints = "$src = $dst", mayLoad = 1 in
 class SUBFW<bits<6> OpCode, string OpcStr, SDNode OpNode>:
   ByteFormat<OpCode, (outs GPR:$dst),
              (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi),
@@ -418,7 +418,7 @@
 
 // sublw 
 // W = C - W ; sub W from literal. (Without borrow).
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 class SUBLW<bits<6> opcode, string OpcStr, SDNode OpNode> :
   LiteralFormat<opcode, (outs GPR:$dst),
                 (ins GPR:$src, i8imm:$literal),
@@ -426,7 +426,7 @@
                 [(set GPR:$dst, (OpNode (i8 imm:$literal), GPR:$src))]>;
 // subwl 
 // W = W - C ; sub literal from W  (Without borrow).
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 class SUBWL<bits<6> opcode, string OpcStr, SDNode OpNode> :
   LiteralFormat<opcode, (outs GPR:$dst),
                 (ins GPR:$src, i8imm:$literal),

Modified: llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.cpp Fri Jul  2 04:57:13 2010
@@ -150,8 +150,8 @@
 
 
 // For PIC16, automatic variables of a function are emitted as globals.
-// Clone the auto variables of a function  and put them in ValueMap, 
-// this ValueMap will be used while
+// Clone the auto variables of a function  and put them in VMap, 
+// this VMap will be used while
 // Cloning the code of function itself.
 //
 void PIC16Cloner::CloneAutos(Function *F) {
@@ -160,11 +160,11 @@
   Module *M = F->getParent();
   Module::GlobalListType &Globals = M->getGlobalList();
 
-  // Clear the leftovers in ValueMap by any previous cloning.
-  ValueMap.clear();
+  // Clear the leftovers in VMap by any previous cloning.
+  VMap.clear();
 
   // Find the auto globls for this function and clone them, and put them
-  // in ValueMap.
+  // in VMap.
   std::string FnName = F->getName().str();
   std::string VarName, ClonedVarName;
   for (Module::global_iterator I = M->global_begin(), E = M->global_end();
@@ -182,8 +182,8 @@
       // Add these new globals to module's globals list.
       Globals.push_back(ClonedGV);
  
-      // Update ValueMap.
-      ValueMap[GV] = ClonedGV;
+      // Update VMap.
+      VMap[GV] = ClonedGV;
      }
   }
 }
@@ -236,10 +236,10 @@
 }
 
 // Clone the given function and return it.
-// Note: it uses the ValueMap member of the class, which is already populated
+// Note: it uses the VMap member of the class, which is already populated
 // by cloneAutos by the time we reach here. 
-// FIXME: Should we just pass ValueMap's ref as a parameter here? rather
-// than keeping the ValueMap as a member.
+// FIXME: Should we just pass VMap's ref as a parameter here? rather
+// than keeping the VMap as a member.
 Function *
 PIC16Cloner::cloneFunction(Function *OrgF) {
    Function *ClonedF;
@@ -252,11 +252,11 @@
    }
 
    // Clone does not exist. 
-   // First clone the autos, and populate ValueMap.
+   // First clone the autos, and populate VMap.
    CloneAutos(OrgF);
 
    // Now create the clone.
-   ClonedF = CloneFunction(OrgF, ValueMap);
+   ClonedF = CloneFunction(OrgF, VMap);
 
    // The new function should be for interrupt line. Therefore should have 
    // the name suffixed with IL and section attribute marked with IL. 

Modified: llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h (original)
+++ llvm/branches/wendling/eh/lib/Target/PIC16/PIC16Passes/PIC16Cloner.h Fri Jul  2 04:57:13 2010
@@ -15,7 +15,7 @@
 #ifndef PIC16CLONER_H
 #define PIC16CLONER_H
 
-#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ValueMap.h"
 
 using namespace llvm;
 using std::vector;
@@ -72,7 +72,7 @@
     // the corresponding cloned auto variable of the cloned function. 
     // This value map is passed during the function cloning so that all the
     // uses of auto variables be updated properly. 
-    DenseMap<const Value*, Value*> ValueMap;
+    ValueMap<const Value*, Value*> VMap;
 
     // Map of a already cloned functions. 
     map<Function *, Function *> ClonedFunctionMap;

Modified: llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp Fri Jul  2 04:57:13 2010
@@ -2737,7 +2737,6 @@
   assert((CallConv == CallingConv::C ||
           CallConv == CallingConv::Fast) && "Unknown calling convention!");
 
-  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   unsigned PtrByteSize = 4;
 
   MachineFunction &MF = DAG.getMachineFunction();
@@ -5380,11 +5379,8 @@
 
 
 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-/// vector.  If it is invalid, don't add anything to Ops. If hasMemory is true
-/// it means one of the asm constraint of the inline asm instruction being
-/// processed is 'm'.
+/// vector.  If it is invalid, don't add anything to Ops.
 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter,
-                                                     bool hasMemory,
                                                      std::vector<SDValue>&Ops,
                                                      SelectionDAG &DAG) const {
   SDValue Result(0,0);
@@ -5443,7 +5439,7 @@
   }
 
   // Handle standard constraint letters.
-  TargetLowering::LowerAsmOperandForConstraint(Op, Letter, hasMemory, Ops, DAG);
+  TargetLowering::LowerAsmOperandForConstraint(Op, Letter, Ops, DAG);
 }
 
 // isLegalAddressingMode - Return true if the addressing mode represented

Modified: llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.h?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.h (original)
+++ llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.h Fri Jul  2 04:57:13 2010
@@ -318,12 +318,9 @@
     unsigned getByValTypeAlignment(const Type *Ty) const;
 
     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
-    /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is
-    /// true it means one of the asm constraint of the inline asm instruction
-    /// being processed is 'm'.
+    /// vector.  If it is invalid, don't add anything to Ops.
     virtual void LowerAsmOperandForConstraint(SDValue Op,
                                               char ConstraintLetter,
-                                              bool hasMemory,
                                               std::vector<SDValue> &Ops,
                                               SelectionDAG &DAG) const;
     

Modified: llvm/branches/wendling/eh/lib/Target/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/README.txt?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/README.txt (original)
+++ llvm/branches/wendling/eh/lib/Target/README.txt Fri Jul  2 04:57:13 2010
@@ -300,6 +300,14 @@
     return v ^ (t >> 8);
 }
 
+Neither is this (very standard idiom):
+
+int f(int n)
+{
+  return (((n) << 24) | (((n) & 0xff00) << 8) 
+       | (((n) >> 8) & 0xff00) | ((n) >> 24));
+}
+
 //===---------------------------------------------------------------------===//
 
 [LOOP RECOGNITION]

Modified: llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.td Fri Jul  2 04:57:13 2010
@@ -665,7 +665,7 @@
 //===----------------------------------------------------------------------===//
 
 // V9 Conditional Moves.
-let Predicates = [HasV9], isTwoAddress = 1 in {
+let Predicates = [HasV9], Constraints = "$T = $dst" in {
   // Move Integer Register on Condition (MOVcc) p. 194 of the V9 manual.
   // FIXME: Add instruction encodings for the JIT some day.
   def MOVICCrr

Modified: llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrFP.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrFP.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrFP.td (original)
+++ llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrFP.td Fri Jul  2 04:57:13 2010
@@ -126,7 +126,7 @@
                         (implicit PSW)]>;
 }
 
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
 let Defs = [PSW] in {
 let isCommutable = 1 in { // X = ADD Y, Z  == X = ADD Z, Y
 def FADD32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src1, FP32:$src2),
@@ -237,7 +237,7 @@
                        "ddb\t{$dst, $src2}",
                        [(set FP64:$dst, (fdiv FP64:$src1, (load rriaddr12:$src2)))]>;
 
-} // isTwoAddress = 1
+} // Constraints = "$src1 = $dst"
 
 def FSQRT32rr : Pseudo<(outs FP32:$dst), (ins FP32:$src),
                        "sqebr\t{$dst, $src}",

Modified: llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.td?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.td Fri Jul  2 04:57:13 2010
@@ -478,7 +478,8 @@
                      "lmg\t{$from, $to, $dst}",
                      []>;
 
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isTwoAddress = 1 in {
+let isReMaterializable = 1, isAsCheapAsAMove = 1,
+    Constraints = "$src = $dst" in {
 def MOV64Pr0_even : Pseudo<(outs GR64P:$dst), (ins GR64P:$src),
                            "lhi\t${dst:subreg_even}, 0",
                            []>;
@@ -537,7 +538,7 @@
                       (implicit PSW)]>;
 }
 
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
 
 let Defs = [PSW] in {
 
@@ -924,12 +925,12 @@
                       "dlg\t{$dst, $src2}",
                       []>;
 } // mayLoad
-} // isTwoAddress = 1
+} // Constraints = "$src1 = $dst"
 
 //===----------------------------------------------------------------------===//
 // Shifts
 
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 def SRL32rri : RSI<0x88,
                    (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
                    "srl\t{$src, $amt}",
@@ -939,7 +940,7 @@
                     "srlg\t{$dst, $src, $amt}",
                     [(set GR64:$dst, (srl GR64:$src, riaddr:$amt))]>;
 
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 def SHL32rri : RSI<0x89,
                    (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
                    "sll\t{$src, $amt}",
@@ -950,7 +951,7 @@
                     [(set GR64:$dst, (shl GR64:$src, riaddr:$amt))]>;
 
 let Defs = [PSW] in {
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
 def SRA32rri : RSI<0x8A,
                    (outs GR32:$dst), (ins GR32:$src, riaddr32:$amt),
                    "sra\t{$src, $amt}",

Modified: llvm/branches/wendling/eh/lib/Target/TargetRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/TargetRegisterInfo.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/TargetRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/TargetRegisterInfo.cpp Fri Jul  2 04:57:13 2010
@@ -63,7 +63,7 @@
 /// getMinimalPhysRegClass - Returns the Register Class of a physical
 /// register of the given type.
 const TargetRegisterClass *
-TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg) const {
+TargetRegisterInfo::getMinimalPhysRegClass(unsigned reg, EVT VT) const {
   assert(isPhysicalRegister(reg) && "reg must be a physical register");
 
   // Pick the most sub register class of the right type that contains
@@ -71,7 +71,8 @@
   const TargetRegisterClass* BestRC = 0;
   for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
     const TargetRegisterClass* RC = *I;
-    if (RC->contains(reg) && (!BestRC || BestRC->hasSubClass(RC)))
+    if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
+        (!BestRC || BestRC->hasSubClass(RC)))
       BestRC = RC;
   }
 

Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmLexer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmLexer.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmLexer.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmLexer.cpp Fri Jul  2 04:57:13 2010
@@ -33,13 +33,11 @@
   }
   
   const AsmToken &lexDefinite() {
-    if(tentativeIsValid) {
+    if (tentativeIsValid) {
       tentativeIsValid = false;
       return tentativeToken;
     }
-    else {
-      return getLexer()->Lex();
-    }
+    return getLexer()->Lex();
   }
   
   AsmToken LexTokenATT();
@@ -72,38 +70,65 @@
 static unsigned MatchRegisterName(StringRef Name);
 
 AsmToken X86AsmLexer::LexTokenATT() {
-  const AsmToken lexedToken = lexDefinite();
+  AsmToken lexedToken = lexDefinite();
   
   switch (lexedToken.getKind()) {
   default:
-    return AsmToken(lexedToken);
+    return lexedToken;
   case AsmToken::Error:
     SetError(Lexer->getErrLoc(), Lexer->getErr());
-    return AsmToken(lexedToken);
-  case AsmToken::Percent:
-  {
+    return lexedToken;
+      
+  case AsmToken::Percent: {
     const AsmToken &nextToken = lexTentative();
-    if (nextToken.getKind() == AsmToken::Identifier) {
-      unsigned regID = MatchRegisterName(nextToken.getString());
+    if (nextToken.getKind() != AsmToken::Identifier)
+      return lexedToken;
+
       
-      if (regID) {
-        lexDefinite();
+    if (unsigned regID = MatchRegisterName(nextToken.getString())) {
+      lexDefinite();
         
+      // FIXME: This is completely wrong when there is a space or other
+      // punctuation between the % and the register name.
+      StringRef regStr(lexedToken.getString().data(),
+                       lexedToken.getString().size() + 
+                       nextToken.getString().size());
+      
+      return AsmToken(AsmToken::Register, regStr, 
+                      static_cast<int64_t>(regID));
+    }
+    
+    // Match register name failed.  If this is "db[0-7]", match it as an alias
+    // for dr[0-7].
+    if (nextToken.getString().size() == 3 &&
+        nextToken.getString().startswith("db")) {
+      int RegNo = -1;
+      switch (nextToken.getString()[2]) {
+      case '0': RegNo = X86::DR0; break;
+      case '1': RegNo = X86::DR1; break;
+      case '2': RegNo = X86::DR2; break;
+      case '3': RegNo = X86::DR3; break;
+      case '4': RegNo = X86::DR4; break;
+      case '5': RegNo = X86::DR5; break;
+      case '6': RegNo = X86::DR6; break;
+      case '7': RegNo = X86::DR7; break;
+      }
+      
+      if (RegNo != -1) {
+        lexDefinite();
+
+        // FIXME: This is completely wrong when there is a space or other
+        // punctuation between the % and the register name.
         StringRef regStr(lexedToken.getString().data(),
                          lexedToken.getString().size() + 
                          nextToken.getString().size());
-        
-        return AsmToken(AsmToken::Register, 
-                        regStr, 
-                        static_cast<int64_t>(regID));
-      }
-      else {
-        return AsmToken(lexedToken);
+        return AsmToken(AsmToken::Register, regStr, 
+                        static_cast<int64_t>(RegNo));
       }
     }
-    else {
-      return AsmToken(lexedToken);
-    }
+      
+   
+    return lexedToken;
   }    
   }
 }
@@ -113,26 +138,22 @@
   
   switch(lexedToken.getKind()) {
   default:
-    return AsmToken(lexedToken);
+    return lexedToken;
   case AsmToken::Error:
     SetError(Lexer->getErrLoc(), Lexer->getErr());
-    return AsmToken(lexedToken);
-  case AsmToken::Identifier:
-  {
+    return lexedToken;
+  case AsmToken::Identifier: {
     std::string upperCase = lexedToken.getString().str();
     std::string lowerCase = LowercaseString(upperCase);
     StringRef lowerRef(lowerCase);
     
     unsigned regID = MatchRegisterName(lowerRef);
     
-    if (regID) {
+    if (regID)
       return AsmToken(AsmToken::Register,
                       lexedToken.getString(),
                       static_cast<int64_t>(regID));
-    }
-    else {
-      return AsmToken(lexedToken);
-    }
+    return lexedToken;
   }
   }
 }

Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmParser.cpp?rev=107465&r1=107464&r2=107465&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmParser.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmParser/X86AsmParser.cpp Fri Jul  2 04:57:13 2010
@@ -412,6 +412,28 @@
     return false;
   }
   
+  // If this is "db[0-7]", match it as an alias
+  // for dr[0-7].
+  if (RegNo == 0 && Tok.getString().size() == 3 &&
+      Tok.getString().startswith("db")) {
+    switch (Tok.getString()[2]) {
+    case '0': RegNo = X86::DR0; break;
+    case '1': RegNo = X86::DR1; break;
+    case '2': RegNo = X86::DR2; break;
+    case '3': RegNo = X86::DR3; break;
+    case '4': RegNo = X86::DR4; break;
+    case '5': RegNo = X86::DR5; break;
+    case '6': RegNo = X86::DR6; break;
+    case '7': RegNo = X86::DR7; break;
+    }
+    
+    if (RegNo != 0) {
+      EndLoc = Tok.getLoc();
+      Parser.Lex(); // Eat it.
+      return false;
+    }
+  }
+  
   if (RegNo == 0)
     return Error(Tok.getLoc(), "invalid register name");
 
@@ -664,11 +686,13 @@
 
   // FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
   const MCExpr *ExtraImmOp = 0;
-  if (PatchedName.startswith("cmp") &&
+  if ((PatchedName.startswith("cmp") || PatchedName.startswith("vcmp")) &&
       (PatchedName.endswith("ss") || PatchedName.endswith("sd") ||
        PatchedName.endswith("ps") || PatchedName.endswith("pd"))) {
+    bool IsVCMP = PatchedName.startswith("vcmp");
+    unsigned SSECCIdx = IsVCMP ? 4 : 3;
     unsigned SSEComparisonCode = StringSwitch<unsigned>(
-      PatchedName.slice(3, PatchedName.size() - 2))
+      PatchedName.slice(SSECCIdx, PatchedName.size() - 2))
       .Case("eq", 0)
       .Case("lt", 1)
       .Case("le", 2)
@@ -682,14 +706,14 @@
       ExtraImmOp = MCConstantExpr::Create(SSEComparisonCode,
                                           getParser().getContext());
       if (PatchedName.endswith("ss")) {
-        PatchedName = "cmpss";
+        PatchedName = IsVCMP ? "vcmpss" : "cmpss";
       } else if (PatchedName.endswith("sd")) {
-        PatchedName = "cmpsd";
+        PatchedName = IsVCMP ? "vcmpsd" : "cmpsd";
       } else if (PatchedName.endswith("ps")) {
-        PatchedName = "cmpps";
+        PatchedName = IsVCMP ? "vcmpps" : "cmpps";
       } else {
         assert(PatchedName.endswith("pd") && "Unexpected mnemonic!");
-        PatchedName = "cmppd";
+        PatchedName = IsVCMP ? "vcmppd" : "cmppd";
       }
     }
   }





More information about the llvm-branch-commits mailing list