[llvm-branch-commits] [llvm-branch] r134363 [1/2] - in /llvm/branches/type-system-rewrite: ./ autoconf/ cmake/ cmake/modules/ docs/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/Bitcode/ include/llvm/CodeGen/ include/llvm/Config/ include/llvm/MC/ include/llvm/Object/ include/llvm/Support/ include/llvm/Target/ lib/Analysis/ lib/AsmParser/ lib/Bitcode/Reader/ lib/Bitcode/Writer/ lib/CodeGen/ lib/CodeGen/AsmPrinter/ lib/CodeGen/SelectionDAG/ lib/ExecutionEngine/ lib/MC/ lib/MC/MCDisassembler/ lib/MC/MCParse...

Chris Lattner <sabre at nondot.org>
Sat Jul 2 20:28:10 PDT 2011


Author: lattner
Date: Sat Jul  2 22:28:07 2011
New Revision: 134363

URL: http://llvm.org/viewvc/llvm-project?rev=134363&view=rev
Log:
merge up to mainline (up to and including r134362).


Added:
    llvm/branches/type-system-rewrite/include/llvm/Analysis/BlockFrequency.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Analysis/BlockFrequency.h
    llvm/branches/type-system-rewrite/include/llvm/Analysis/BlockFrequencyImpl.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Analysis/BlockFrequencyImpl.h
    llvm/branches/type-system-rewrite/include/llvm/MC/MCInstrDesc.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/MC/MCInstrDesc.h
    llvm/branches/type-system-rewrite/include/llvm/MC/MCInstrInfo.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/MC/MCInstrInfo.h
    llvm/branches/type-system-rewrite/include/llvm/MC/MCInstrItineraries.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/MC/MCInstrItineraries.h
    llvm/branches/type-system-rewrite/include/llvm/MC/MCRegisterInfo.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/MC/MCRegisterInfo.h
    llvm/branches/type-system-rewrite/include/llvm/MC/MCSubtargetInfo.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/MC/MCSubtargetInfo.h
    llvm/branches/type-system-rewrite/include/llvm/MC/SubtargetFeature.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/MC/SubtargetFeature.h
    llvm/branches/type-system-rewrite/include/llvm/Object/Binary.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Object/Binary.h
    llvm/branches/type-system-rewrite/include/llvm/Object/COFF.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Object/COFF.h
    llvm/branches/type-system-rewrite/include/llvm/Object/Error.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Object/Error.h
    llvm/branches/type-system-rewrite/include/llvm/Support/FEnv.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Support/FEnv.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetSubtargetInfo.h
      - copied unchanged from r134362, llvm/trunk/include/llvm/Target/TargetSubtargetInfo.h
    llvm/branches/type-system-rewrite/lib/Analysis/BlockFrequency.cpp
      - copied unchanged from r134362, llvm/trunk/lib/Analysis/BlockFrequency.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegisterCoalescer.h
      - copied unchanged from r134362, llvm/trunk/lib/CodeGen/RegisterCoalescer.h
    llvm/branches/type-system-rewrite/lib/MC/MCSubtargetInfo.cpp
      - copied unchanged from r134362, llvm/trunk/lib/MC/MCSubtargetInfo.cpp
    llvm/branches/type-system-rewrite/lib/MC/SubtargetFeature.cpp
      - copied unchanged from r134362, llvm/trunk/lib/MC/SubtargetFeature.cpp
    llvm/branches/type-system-rewrite/lib/Object/Binary.cpp
      - copied unchanged from r134362, llvm/trunk/lib/Object/Binary.cpp
    llvm/branches/type-system-rewrite/lib/Object/Error.cpp
      - copied unchanged from r134362, llvm/trunk/lib/Object/Error.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMMachObjectWriter.cpp
      - copied unchanged from r134362, llvm/trunk/lib/Target/ARM/ARMMachObjectWriter.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/generate-register-td.py
      - copied unchanged from r134362, llvm/trunk/lib/Target/PTX/generate-register-td.py
    llvm/branches/type-system-rewrite/lib/Target/TargetSubtargetInfo.cpp
      - copied unchanged from r134362, llvm/trunk/lib/Target/TargetSubtargetInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/MCTargetDesc/
      - copied from r134362, llvm/trunk/lib/Target/X86/MCTargetDesc/
    llvm/branches/type-system-rewrite/lib/Target/X86/MCTargetDesc/CMakeLists.txt
      - copied unchanged from r134362, llvm/trunk/lib/Target/X86/MCTargetDesc/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/X86/MCTargetDesc/Makefile
      - copied unchanged from r134362, llvm/trunk/lib/Target/X86/MCTargetDesc/Makefile
    llvm/branches/type-system-rewrite/lib/Target/X86/MCTargetDesc/X86TargetDesc.cpp
      - copied unchanged from r134362, llvm/trunk/lib/Target/X86/MCTargetDesc/X86TargetDesc.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/MCTargetDesc/X86TargetDesc.h
      - copied unchanged from r134362, llvm/trunk/lib/Target/X86/MCTargetDesc/X86TargetDesc.h
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/2011-06-29-MergeGlobalsAlign.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/ARM/2011-06-29-MergeGlobalsAlign.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/debug-info-blocks.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/ARM/debug-info-blocks.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/vcvt_combine.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/ARM/vcvt_combine.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/vdiv_combine.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/ARM/vdiv_combine.ll
    llvm/branches/type-system-rewrite/test/CodeGen/CBackend/2011-06-08-addWithOverflow.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/CBackend/2011-06-08-addWithOverflow.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/inlineasmmemop.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/Mips/inlineasmmemop.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/aggregates.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/PTX/aggregates.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PowerPC/ppc32-vaarg.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/PowerPC/ppc32-vaarg.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PowerPC/ppc64-32bit-addic.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/PowerPC/ppc64-32bit-addic.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb/inlineasm-thumb.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/Thumb/inlineasm-thumb.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/atomic-or.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/X86/atomic-or.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/dbg-i128-const.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/X86/dbg-i128-const.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/fp-stack-O0.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/X86/fp-stack-O0.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/promote-trunc.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/X86/promote-trunc.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/sibcall-byval.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/X86/sibcall-byval.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/tail-dup-addr.ll
      - copied unchanged from r134362, llvm/trunk/test/CodeGen/X86/tail-dup-addr.ll
    llvm/branches/type-system-rewrite/test/MC/ARM/arm-arithmetic-aliases.s
      - copied unchanged from r134362, llvm/trunk/test/MC/ARM/arm-arithmetic-aliases.s
    llvm/branches/type-system-rewrite/test/MC/ARM/thumb2-movt-fixup.s
      - copied unchanged from r134362, llvm/trunk/test/MC/ARM/thumb2-movt-fixup.s
    llvm/branches/type-system-rewrite/test/MC/ARM/vpush-vpop.s
      - copied unchanged from r134362, llvm/trunk/test/MC/ARM/vpush-vpop.s
    llvm/branches/type-system-rewrite/test/Transforms/GVN/2011-04-27-phioperands.ll
      - copied unchanged from r134362, llvm/trunk/test/Transforms/GVN/2011-04-27-phioperands.ll
    llvm/branches/type-system-rewrite/test/Transforms/InstSimplify/undef.ll
      - copied unchanged from r134362, llvm/trunk/test/Transforms/InstSimplify/undef.ll
    llvm/branches/type-system-rewrite/test/Transforms/LoopDeletion/2011-06-21-phioperands.ll
      - copied unchanged from r134362, llvm/trunk/test/Transforms/LoopDeletion/2011-06-21-phioperands.ll
    llvm/branches/type-system-rewrite/test/Transforms/SimplifyCFG/lifetime.ll
      - copied unchanged from r134362, llvm/trunk/test/Transforms/SimplifyCFG/lifetime.ll
    llvm/branches/type-system-rewrite/utils/TableGen/Error.cpp
      - copied unchanged from r134362, llvm/trunk/utils/TableGen/Error.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/Error.h
      - copied unchanged from r134362, llvm/trunk/utils/TableGen/Error.h
Removed:
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/RegisterCoalescer.h
    llvm/branches/type-system-rewrite/include/llvm/Target/SubtargetFeature.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrDesc.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrItineraries.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetSubtarget.h
    llvm/branches/type-system-rewrite/lib/CodeGen/PreAllocSplitting.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.h
    llvm/branches/type-system-rewrite/lib/Target/SubtargetFeature.cpp
    llvm/branches/type-system-rewrite/lib/Target/TargetSubtarget.cpp
    llvm/branches/type-system-rewrite/test/CodeGen/Generic/legalize-dbg-value.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/inline-asm-fpstack2.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/inline-asm-fpstack3.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/inline-asm-fpstack4.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/inline-asm-fpstack5.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split1.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split10.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split11.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split4.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split5.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split6.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split7.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split8.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pre-split9.ll
Modified:
    llvm/branches/type-system-rewrite/   (props changed)
    llvm/branches/type-system-rewrite/Makefile.rules
    llvm/branches/type-system-rewrite/autoconf/configure.ac
    llvm/branches/type-system-rewrite/cmake/config-ix.cmake
    llvm/branches/type-system-rewrite/cmake/modules/HandleLLVMOptions.cmake
    llvm/branches/type-system-rewrite/cmake/modules/LLVMLibDeps.cmake
    llvm/branches/type-system-rewrite/configure
    llvm/branches/type-system-rewrite/docs/ExtendingLLVM.html
    llvm/branches/type-system-rewrite/docs/LangRef.html
    llvm/branches/type-system-rewrite/include/llvm/ADT/ArrayRef.h
    llvm/branches/type-system-rewrite/include/llvm/ADT/StringMap.h
    llvm/branches/type-system-rewrite/include/llvm/ADT/Triple.h
    llvm/branches/type-system-rewrite/include/llvm/Analysis/DIBuilder.h
    llvm/branches/type-system-rewrite/include/llvm/Analysis/IVUsers.h
    llvm/branches/type-system-rewrite/include/llvm/Analysis/ScalarEvolutionExpander.h
    llvm/branches/type-system-rewrite/include/llvm/Analysis/ValueTracking.h
    llvm/branches/type-system-rewrite/include/llvm/BasicBlock.h
    llvm/branches/type-system-rewrite/include/llvm/Bitcode/BitstreamReader.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/LinkAllCodegenComponents.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineFunction.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstr.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstrBuilder.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineOperand.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineRegisterInfo.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/Passes.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScheduleDAG.h
    llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
    llvm/branches/type-system-rewrite/include/llvm/Config/config.h.cmake
    llvm/branches/type-system-rewrite/include/llvm/Constants.h
    llvm/branches/type-system-rewrite/include/llvm/InitializePasses.h
    llvm/branches/type-system-rewrite/include/llvm/InlineAsm.h
    llvm/branches/type-system-rewrite/include/llvm/Instructions.h
    llvm/branches/type-system-rewrite/include/llvm/Intrinsics.td
    llvm/branches/type-system-rewrite/include/llvm/MC/MCMachObjectWriter.h
    llvm/branches/type-system-rewrite/include/llvm/Object/ObjectFile.h
    llvm/branches/type-system-rewrite/include/llvm/Support/BranchProbability.h
    llvm/branches/type-system-rewrite/include/llvm/Support/CFG.h
    llvm/branches/type-system-rewrite/include/llvm/Support/Endian.h
    llvm/branches/type-system-rewrite/include/llvm/Support/IRBuilder.h
    llvm/branches/type-system-rewrite/include/llvm/Support/system_error.h
    llvm/branches/type-system-rewrite/include/llvm/Target/Target.td
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetAsmInfo.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrInfo.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetLowering.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetLoweringObjectFile.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetMachine.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetOptions.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegisterInfo.h
    llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegistry.h
    llvm/branches/type-system-rewrite/include/llvm/Use.h
    llvm/branches/type-system-rewrite/lib/Analysis/Analysis.cpp
    llvm/branches/type-system-rewrite/lib/Analysis/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Analysis/DIBuilder.cpp
    llvm/branches/type-system-rewrite/lib/Analysis/IVUsers.cpp
    llvm/branches/type-system-rewrite/lib/Analysis/InstructionSimplify.cpp
    llvm/branches/type-system-rewrite/lib/Analysis/ScalarEvolutionExpander.cpp
    llvm/branches/type-system-rewrite/lib/Analysis/ValueTracking.cpp
    llvm/branches/type-system-rewrite/lib/AsmParser/LLParser.cpp
    llvm/branches/type-system-rewrite/lib/Bitcode/Reader/BitcodeReader.cpp
    llvm/branches/type-system-rewrite/lib/Bitcode/Writer/BitcodeWriter.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.h
    llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
    llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfException.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/BranchFolding.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/CodeGen/CalcSpillWeights.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/CodeGen.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/CriticalAntiDepBreaker.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/DeadMachineInstructionElim.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/ExpandISelPseudos.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/IfConversion.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/InlineSpiller.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineBasicBlock.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineCSE.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineFunction.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineInstr.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineLICM.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/MachineVerifier.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/PeepholeOptimizer.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/PostRASchedulerList.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/PrologEpilogInserter.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocBasic.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocFast.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocGreedy.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocLinearScan.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocPBQP.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/RegisterClassInfo.h
    llvm/branches/type-system-rewrite/lib/CodeGen/RegisterCoalescer.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAG.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAGInstrs.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/ScoreboardHazardRecognizer.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/FastISel.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.h
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/ShadowStackGC.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.h
    llvm/branches/type-system-rewrite/lib/CodeGen/Splitter.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/StackSlotColoring.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/TailDuplication.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/TargetInstrInfoImpl.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/TwoAddressInstructionPass.cpp
    llvm/branches/type-system-rewrite/lib/CodeGen/VirtRegRewriter.cpp
    llvm/branches/type-system-rewrite/lib/ExecutionEngine/TargetSelect.cpp
    llvm/branches/type-system-rewrite/lib/MC/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/MC/MCAsmStreamer.cpp
    llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/Disassembler.cpp
    llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/EDDisassembler.cpp
    llvm/branches/type-system-rewrite/lib/MC/MCDwarf.cpp
    llvm/branches/type-system-rewrite/lib/MC/MCParser/AsmParser.cpp
    llvm/branches/type-system-rewrite/lib/MC/MachObjectWriter.cpp
    llvm/branches/type-system-rewrite/lib/Object/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Object/COFFObjectFile.cpp
    llvm/branches/type-system-rewrite/lib/Object/ELFObjectFile.cpp
    llvm/branches/type-system-rewrite/lib/Object/MachOObjectFile.cpp
    llvm/branches/type-system-rewrite/lib/Object/Object.cpp
    llvm/branches/type-system-rewrite/lib/Object/ObjectFile.cpp
    llvm/branches/type-system-rewrite/lib/Support/ConstantRange.cpp
    llvm/branches/type-system-rewrite/lib/Support/Triple.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.td
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmBackend.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmPrinter.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInfo.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMCodeEmitter.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMConstantIslandPass.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMExpandPseudoInsts.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFastISel.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFrameLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMGlobalMerge.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMHazardRecognizer.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelDAGToDAG.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.td
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb.td
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb2.td
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrVFP.td
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMMCCodeEmitter.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMRegisterInfo.td
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
    llvm/branches/type-system-rewrite/lib/Target/ARM/MLxExpansionPass.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Makefile
    llvm/branches/type-system-rewrite/lib/Target/ARM/Thumb1FrameLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Thumb1InstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Thumb1RegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Thumb2ITBlockPass.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Thumb2InstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/ARM/Thumb2SizeReduction.cpp
    llvm/branches/type-system-rewrite/lib/Target/Alpha/Alpha.h
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/Alpha/AlphaTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/Alpha/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/Alpha/Makefile
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/Blackfin.h
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinRegisterInfo.td
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/BlackfinTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/Blackfin/Makefile
    llvm/branches/type-system-rewrite/lib/Target/CBackend/CBackend.cpp
    llvm/branches/type-system-rewrite/lib/Target/CBackend/CTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/Makefile
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPU.h
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPUInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPUInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPURegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPURegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPURegisterNames.h
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPUSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPUSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPUTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/CellSPU/SPUTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/CppBackend/CPPBackend.cpp
    llvm/branches/type-system-rewrite/lib/Target/CppBackend/CPPTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/AsmParser/MBlazeAsmLexer.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/Disassembler/MBlazeDisassembler.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlaze.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeDelaySlotFiller.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeMCCodeEmitter.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/MBlazeTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/MBlaze/Makefile
    llvm/branches/type-system-rewrite/lib/Target/MSP430/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430.h
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430InstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430InstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430RegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430RegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430Subtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430Subtarget.h
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430TargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/MSP430/MSP430TargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/MSP430/Makefile
    llvm/branches/type-system-rewrite/lib/Target/Mips/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/Mips/Makefile
    llvm/branches/type-system-rewrite/lib/Target/Mips/Mips.h
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsAsmPrinter.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsCallingConv.td
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsDelaySlotFiller.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsExpandPseudo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsISelDAGToDAG.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsInstrInfo.td
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsMCAsmInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsMachineFunction.h
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/Mips/MipsTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/PTX/Makefile
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTX.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTX.td
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXAsmPrinter.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXCallingConv.td
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXISelDAGToDAG.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXInstrInfo.td
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXMachineFunctionInfo.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXRegisterInfo.td
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/PTX/PTXTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/Makefile
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPC.h
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCAsmBackend.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCHazardRecognizers.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/PowerPC/PPCTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/Sparc/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/Sparc/DelaySlotFiller.cpp
    llvm/branches/type-system-rewrite/lib/Target/Sparc/Makefile
    llvm/branches/type-system-rewrite/lib/Target/Sparc/Sparc.h
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/Sparc/SparcTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/Makefile
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZ.h
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZInstrBuilder.h
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/SystemZ/SystemZTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/TargetInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/TargetLoweringObjectFile.cpp
    llvm/branches/type-system-rewrite/lib/Target/TargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/TargetRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/X86/Disassembler/X86Disassembler.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/InstPrinter/X86InstComments.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/Makefile
    llvm/branches/type-system-rewrite/lib/Target/X86/X86.h
    llvm/branches/type-system-rewrite/lib/Target/X86/X86CallingConv.td
    llvm/branches/type-system-rewrite/lib/Target/X86/X86CodeEmitter.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86FastISel.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86FloatingPoint.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86ISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86ISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/X86/X86InstrBuilder.h
    llvm/branches/type-system-rewrite/lib/Target/X86/X86InstrFPStack.td
    llvm/branches/type-system-rewrite/lib/Target/X86/X86InstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86InstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/X86/X86InstrSSE.td
    llvm/branches/type-system-rewrite/lib/Target/X86/X86InstrSystem.td
    llvm/branches/type-system-rewrite/lib/Target/X86/X86MCCodeEmitter.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86MachObjectWriter.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86RegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86RegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/X86/X86RegisterInfo.td
    llvm/branches/type-system-rewrite/lib/Target/X86/X86Subtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86Subtarget.h
    llvm/branches/type-system-rewrite/lib/Target/X86/X86TargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/X86/X86TargetMachine.h
    llvm/branches/type-system-rewrite/lib/Target/XCore/CMakeLists.txt
    llvm/branches/type-system-rewrite/lib/Target/XCore/Makefile
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCore.h
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreISelLowering.cpp
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreISelLowering.h
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreInstrInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreInstrInfo.h
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreRegisterInfo.cpp
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreRegisterInfo.h
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreSubtarget.cpp
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreSubtarget.h
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreTargetMachine.cpp
    llvm/branches/type-system-rewrite/lib/Target/XCore/XCoreTargetMachine.h
    llvm/branches/type-system-rewrite/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Instrumentation/GCOVProfiling.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/GVN.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/IndVarSimplify.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/JumpThreading.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/LoopDeletion.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/LoopRotation.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/LoopStrengthReduce.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/LoopUnswitch.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/ObjCARC.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Scalar/ScalarReplAggregates.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/BasicBlockUtils.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/BreakCriticalEdges.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/CloneFunction.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/InlineFunction.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/Local.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/LoopUnroll.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/SimplifyCFG.cpp
    llvm/branches/type-system-rewrite/lib/Transforms/Utils/ValueMapper.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/AsmWriter.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/BasicBlock.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/ConstantFold.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/Constants.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/ConstantsContext.h
    llvm/branches/type-system-rewrite/lib/VMCore/Core.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/Instructions.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/LLVMContextImpl.h
    llvm/branches/type-system-rewrite/lib/VMCore/Use.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/User.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/Value.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/ValueTypes.cpp
    llvm/branches/type-system-rewrite/lib/VMCore/Verifier.cpp
    llvm/branches/type-system-rewrite/runtime/libprofile/GCDAProfiling.c
    llvm/branches/type-system-rewrite/test/Archive/extract.ll
    llvm/branches/type-system-rewrite/test/CMakeLists.txt
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/arm-modifier.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/carry.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/constants.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/inlineasm3.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/rev.ll
    llvm/branches/type-system-rewrite/test/CodeGen/ARM/section.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/2008-07-15-SmallSection.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/alloca.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/i64arg.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/internalfunc.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/largeimmprinting.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Mips/o32_cc_byval.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/add.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/bitwise.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/bra.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/cvt.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/fdiv-sm10.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/fdiv-sm13.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/fneg.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/intrinsic.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/ld.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/llvm-intrinsic.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/mad.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/mov.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/mul.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/options.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/parameter-order.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/selp.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/setp.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/shl.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/shr.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/st.ll
    llvm/branches/type-system-rewrite/test/CodeGen/PTX/sub.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/thumb2-add.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/thumb2-mulhi.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/thumb2-sbc.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/thumb2-smla.ll
    llvm/branches/type-system-rewrite/test/CodeGen/Thumb2/thumb2-smul.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2006-11-12-CSRetCC.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2007-02-23-DAGCombine-Miscompile.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2007-03-24-InlineAsmXConstraint.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2007-09-17-ObjcFrameEH.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2008-04-26-Asm-Optimize-Imm.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2009-02-12-InlineAsm-nieZ-constraints.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/2011-06-14-PreschedRegalias.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/adde-carry.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/asm-global-imm.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/bswap.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/change-compare-stride-0.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/change-compare-stride-1.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/change-compare-stride-trickiness-1.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/dag-rauw-cse.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/fold-add.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/fp-stack-ret.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/h-registers-2.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/inline-asm-error.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/inline-asm-fpstack.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/isel-sink.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/longlong-deadload.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/loop-strength-reduce2.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/lsr-nonaffine.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/lsr-redundant-addressing.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pic_jumptable.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pr1505b.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pr2182.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/pr3216.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/shift-codegen.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/testl-commute.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/vec_insert-2.ll
    llvm/branches/type-system-rewrite/test/CodeGen/X86/vec_set-A.ll
    llvm/branches/type-system-rewrite/test/FrontendC/ARM/inline-asm-multichar.c
    llvm/branches/type-system-rewrite/test/Linker/2003-01-30-LinkerRename.ll
    llvm/branches/type-system-rewrite/test/Linker/2003-05-31-LinkerRename.ll
    llvm/branches/type-system-rewrite/test/MC/ARM/arm_instructions.s
    llvm/branches/type-system-rewrite/test/MC/ARM/thumb2.s
    llvm/branches/type-system-rewrite/test/MC/AsmParser/exprs-invalid.s
    llvm/branches/type-system-rewrite/test/MC/X86/padlock.s
    llvm/branches/type-system-rewrite/test/MC/X86/x86-32-coverage.s
    llvm/branches/type-system-rewrite/test/MC/X86/x86-64.s
    llvm/branches/type-system-rewrite/test/Makefile
    llvm/branches/type-system-rewrite/test/Transforms/IndVarSimplify/ada-loops.ll
    llvm/branches/type-system-rewrite/test/Transforms/IndVarSimplify/iv-zext.ll
    llvm/branches/type-system-rewrite/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
    llvm/branches/type-system-rewrite/test/Transforms/IndVarSimplify/preserve-signed-wrap.ll
    llvm/branches/type-system-rewrite/test/Transforms/InstCombine/select-crash.ll
    llvm/branches/type-system-rewrite/test/Transforms/ScalarRepl/memcpy-from-global.ll
    llvm/branches/type-system-rewrite/test/Unit/lit.cfg
    llvm/branches/type-system-rewrite/test/lit.cfg
    llvm/branches/type-system-rewrite/test/lit.site.cfg.in
    llvm/branches/type-system-rewrite/tools/llc/llc.cpp
    llvm/branches/type-system-rewrite/tools/llvm-mc/llvm-mc.cpp
    llvm/branches/type-system-rewrite/tools/llvm-nm/llvm-nm.cpp
    llvm/branches/type-system-rewrite/tools/llvm-objdump/llvm-objdump.cpp
    llvm/branches/type-system-rewrite/tools/llvmc/src/Hooks.cpp
    llvm/branches/type-system-rewrite/tools/lto/LTOCodeGenerator.cpp
    llvm/branches/type-system-rewrite/tools/lto/LTOModule.cpp
    llvm/branches/type-system-rewrite/unittests/Support/ConstantRangeTest.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/ARMDecoderEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/AsmMatcherEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/AsmWriterEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/CMakeLists.txt
    llvm/branches/type-system-rewrite/utils/TableGen/CodeGenDAGPatterns.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/CodeGenInstruction.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/CodeGenRegisters.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/CodeGenTarget.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/CodeGenTarget.h
    llvm/branches/type-system-rewrite/utils/TableGen/DAGISelMatcherGen.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/DisassemblerEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/EDEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/FastISelEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/FixedLenDecoderEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/InstrInfoEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/InstrInfoEmitter.h
    llvm/branches/type-system-rewrite/utils/TableGen/NeonEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/Record.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/Record.h
    llvm/branches/type-system-rewrite/utils/TableGen/RegisterInfoEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/RegisterInfoEmitter.h
    llvm/branches/type-system-rewrite/utils/TableGen/SetTheory.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/SubtargetEmitter.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/SubtargetEmitter.h
    llvm/branches/type-system-rewrite/utils/TableGen/TGLexer.cpp
    llvm/branches/type-system-rewrite/utils/TableGen/TGLexer.h
    llvm/branches/type-system-rewrite/utils/TableGen/TGParser.h
    llvm/branches/type-system-rewrite/utils/TableGen/TableGen.cpp
    llvm/branches/type-system-rewrite/utils/lit/lit/TestRunner.py
    llvm/branches/type-system-rewrite/utils/lit/lit/TestingConfig.py
    llvm/branches/type-system-rewrite/utils/lit/lit/Util.py

Propchange: llvm/branches/type-system-rewrite/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Sat Jul  2 22:28:07 2011
@@ -1 +1,2 @@
 /llvm/branches/Apple/Pertwee:110850,110961
+/llvm/trunk:133420-134362

Modified: llvm/branches/type-system-rewrite/Makefile.rules
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/Makefile.rules?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/Makefile.rules (original)
+++ llvm/branches/type-system-rewrite/Makefile.rules Sat Jul  2 22:28:07 2011
@@ -1377,7 +1377,7 @@
 #---------------------------------------------------------
 
 ifeq ($(HOST_OS),Darwin)
-ifdef TOOL_ORDER_FINE
+ifdef TOOL_ORDER_FILE
 
 LD.Flags += -Wl,-order_file,$(TOOL_ORDER_FILE)
 
@@ -1720,30 +1720,15 @@
 # All of these files depend on tblgen and the .td files.
 $(INCTMPFiles) : $(TBLGEN) $(TDFiles)
 
-$(TARGET:%=$(ObjDir)/%GenRegisterNames.inc.tmp): \
-$(ObjDir)/%GenRegisterNames.inc.tmp : %.td $(ObjDir)/.dir
-	$(Echo) "Building $(<F) register names with tblgen"
-	$(Verb) $(TableGen) -gen-register-enums -o $(call SYSPATH, $@) $<
-
-$(TARGET:%=$(ObjDir)/%GenRegisterInfo.h.inc.tmp): \
-$(ObjDir)/%GenRegisterInfo.h.inc.tmp : %.td $(ObjDir)/.dir
-	$(Echo) "Building $(<F) register information header with tblgen"
-	$(Verb) $(TableGen) -gen-register-desc-header -o $(call SYSPATH, $@) $<
-
 $(TARGET:%=$(ObjDir)/%GenRegisterInfo.inc.tmp): \
 $(ObjDir)/%GenRegisterInfo.inc.tmp : %.td $(ObjDir)/.dir
 	$(Echo) "Building $(<F) register info implementation with tblgen"
-	$(Verb) $(TableGen) -gen-register-desc -o $(call SYSPATH, $@) $<
-
-$(TARGET:%=$(ObjDir)/%GenInstrNames.inc.tmp): \
-$(ObjDir)/%GenInstrNames.inc.tmp : %.td $(ObjDir)/.dir
-	$(Echo) "Building $(<F) instruction names with tblgen"
-	$(Verb) $(TableGen) -gen-instr-enums -o $(call SYSPATH, $@) $<
+	$(Verb) $(TableGen) -gen-register-info -o $(call SYSPATH, $@) $<
 
 $(TARGET:%=$(ObjDir)/%GenInstrInfo.inc.tmp): \
 $(ObjDir)/%GenInstrInfo.inc.tmp : %.td $(ObjDir)/.dir
 	$(Echo) "Building $(<F) instruction information with tblgen"
-	$(Verb) $(TableGen) -gen-instr-desc -o $(call SYSPATH, $@) $<
+	$(Verb) $(TableGen) -gen-instr-info -o $(call SYSPATH, $@) $<
 
 $(TARGET:%=$(ObjDir)/%GenAsmWriter.inc.tmp): \
 $(ObjDir)/%GenAsmWriter.inc.tmp : %.td $(ObjDir)/.dir
@@ -1790,8 +1775,8 @@
 	$(Echo) "Building $(<F) \"fast\" instruction selector implementation with tblgen"
 	$(Verb) $(TableGen) -gen-fast-isel -o $(call SYSPATH, $@) $<
 
-$(TARGET:%=$(ObjDir)/%GenSubtarget.inc.tmp): \
-$(ObjDir)/%GenSubtarget.inc.tmp : %.td $(ObjDir)/.dir
+$(TARGET:%=$(ObjDir)/%GenSubtargetInfo.inc.tmp): \
+$(ObjDir)/%GenSubtargetInfo.inc.tmp : %.td $(ObjDir)/.dir
 	$(Echo) "Building $(<F) subtarget information with tblgen"
 	$(Verb) $(TableGen) -gen-subtarget -o $(call SYSPATH, $@) $<
 

Modified: llvm/branches/type-system-rewrite/autoconf/configure.ac
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/autoconf/configure.ac?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/autoconf/configure.ac (original)
+++ llvm/branches/type-system-rewrite/autoconf/configure.ac Sat Jul  2 22:28:07 2011
@@ -297,6 +297,8 @@
     llvm_cv_target_os_type="MingW" ;;
   *-*-haiku*)
     llvm_cv_target_os_type="Haiku" ;;
+  *-*-rtems*)
+    llvm_cv_target_os_type="RTEMS" ;;
   *-unknown-eabi*)
     llvm_cv_target_os_type="Freestanding" ;;
   *)

Modified: llvm/branches/type-system-rewrite/cmake/config-ix.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/cmake/config-ix.cmake?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/cmake/config-ix.cmake (original)
+++ llvm/branches/type-system-rewrite/cmake/config-ix.cmake Sat Jul  2 22:28:07 2011
@@ -349,7 +349,6 @@
 
 if( MSVC )
   set(error_t int)
-  set(mode_t "unsigned short")
   set(LTDL_SHLIBPATH_VAR "PATH")
   set(LTDL_SYSSEARCHPATH "")
   set(LTDL_DLOPEN_DEPLIBS 1)

Modified: llvm/branches/type-system-rewrite/cmake/modules/HandleLLVMOptions.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/cmake/modules/HandleLLVMOptions.cmake?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/cmake/modules/HandleLLVMOptions.cmake (original)
+++ llvm/branches/type-system-rewrite/cmake/modules/HandleLLVMOptions.cmake Sat Jul  2 22:28:07 2011
@@ -36,13 +36,8 @@
   # explicitly undefine it:
   if( uppercase_CMAKE_BUILD_TYPE STREQUAL "RELEASE" )
     add_definitions( -UNDEBUG )
-    set(LLVM_BUILD_MODE "Release")
-  else()
-    set(LLVM_BUILD_MODE "Debug")
   endif()
-  set(LLVM_BUILD_MODE "${LLVM_BUILD_MODE}+Asserts")
 else()
-  set(LLVM_BUILD_MODE "Release")
   if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "RELEASE" )
     if( NOT MSVC_IDE AND NOT XCODE )
       add_definitions( -DNDEBUG )

Modified: llvm/branches/type-system-rewrite/cmake/modules/LLVMLibDeps.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/cmake/modules/LLVMLibDeps.cmake?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/cmake/modules/LLVMLibDeps.cmake (original)
+++ llvm/branches/type-system-rewrite/cmake/modules/LLVMLibDeps.cmake Sat Jul  2 22:28:07 2011
@@ -42,7 +42,7 @@
 set(MSVC_LIB_DEPS_LLVMMipsCodeGen LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMMipsInfo LLVMSelectionDAG LLVMSupport LLVMTarget)
 set(MSVC_LIB_DEPS_LLVMMipsInfo LLVMMC LLVMSupport)
 set(MSVC_LIB_DEPS_LLVMObject LLVMSupport)
-set(MSVC_LIB_DEPS_LLVMPTXCodeGen LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPTXInfo LLVMSelectionDAG LLVMSupport LLVMTarget)
+set(MSVC_LIB_DEPS_LLVMPTXCodeGen LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPTXInfo LLVMSelectionDAG LLVMSupport LLVMTarget)
 set(MSVC_LIB_DEPS_LLVMPTXInfo LLVMMC LLVMSupport)
 set(MSVC_LIB_DEPS_LLVMPowerPCAsmPrinter LLVMMC LLVMSupport)
 set(MSVC_LIB_DEPS_LLVMPowerPCCodeGen LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMPowerPCAsmPrinter LLVMPowerPCInfo LLVMSelectionDAG LLVMSupport LLVMTarget)
@@ -60,6 +60,7 @@
 set(MSVC_LIB_DEPS_LLVMX86AsmParser LLVMMC LLVMMCParser LLVMSupport LLVMTarget LLVMX86Info)
 set(MSVC_LIB_DEPS_LLVMX86AsmPrinter LLVMMC LLVMSupport LLVMX86Utils)
 set(MSVC_LIB_DEPS_LLVMX86CodeGen LLVMAnalysis LLVMAsmPrinter LLVMCodeGen LLVMCore LLVMMC LLVMSelectionDAG LLVMSupport LLVMTarget LLVMX86AsmPrinter LLVMX86Info LLVMX86Utils)
+set(MSVC_LIB_DEPS_LLVMX86Desc LLVMX86Info)
 set(MSVC_LIB_DEPS_LLVMX86Disassembler LLVMMC LLVMSupport LLVMX86Info)
 set(MSVC_LIB_DEPS_LLVMX86Info LLVMMC LLVMSupport)
 set(MSVC_LIB_DEPS_LLVMX86Utils LLVMCore LLVMSupport)

Modified: llvm/branches/type-system-rewrite/configure
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/configure?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/configure (original)
+++ llvm/branches/type-system-rewrite/configure Sat Jul  2 22:28:07 2011
@@ -2337,6 +2337,8 @@
     llvm_cv_target_os_type="MingW" ;;
   *-*-haiku*)
     llvm_cv_target_os_type="Haiku" ;;
+  *-*-rtems*)
+    llvm_cv_target_os_type="RTEMS" ;;
   *-unknown-eabi*)
     llvm_cv_target_os_type="Freestanding" ;;
   *)

Modified: llvm/branches/type-system-rewrite/docs/ExtendingLLVM.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/docs/ExtendingLLVM.html?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/docs/ExtendingLLVM.html (original)
+++ llvm/branches/type-system-rewrite/docs/ExtendingLLVM.html Sat Jul  2 22:28:07 2011
@@ -146,7 +146,7 @@
 complicated behavior in a single node (rotate).</p>
 
 <ol>
-<li><tt>include/llvm/CodeGen/SelectionDAGNodes.h</tt>:
+<li><tt>include/llvm/CodeGen/ISDOpcodes.h</tt>:
     Add an enum value for the new SelectionDAG node.</li>
 <li><tt>lib/CodeGen/SelectionDAG/SelectionDAG.cpp</tt>:
     Add code to print the node to <tt>getOperationName</tt>.  If your new node

Modified: llvm/branches/type-system-rewrite/docs/LangRef.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/docs/LangRef.html?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/docs/LangRef.html (original)
+++ llvm/branches/type-system-rewrite/docs/LangRef.html Sat Jul  2 22:28:07 2011
@@ -6576,7 +6576,8 @@
 
 <h5>Syntax:</h5>
 <p>This is an overloaded intrinsic. You can use llvm.ctpop on any integer bit
-   width. Not all targets support all bit widths however.</p>
+   width, or on any vector with integer elements. Not all targets support all
+  bit widths or vector types, however.</p>
 
 <pre>
   declare i8 @llvm.ctpop.i8(i8  <src>)
@@ -6584,6 +6585,7 @@
   declare i32 @llvm.ctpop.i32(i32 <src>)
   declare i64 @llvm.ctpop.i64(i64 <src>)
   declare i256 @llvm.ctpop.i256(i256 <src>)
+  declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32> <src>)
 </pre>
 
 <h5>Overview:</h5>
@@ -6592,10 +6594,12 @@
 
 <h5>Arguments:</h5>
 <p>The only argument is the value to be counted.  The argument may be of any
-   integer type.  The return type must match the argument type.</p>
+   integer type, or a vector with integer elements.
+   The return type must match the argument type.</p>
 
 <h5>Semantics:</h5>
-<p>The '<tt>llvm.ctpop</tt>' intrinsic counts the 1's in a variable.</p>
+<p>The '<tt>llvm.ctpop</tt>' intrinsic counts the 1's in a variable, or within each
+   element of a vector.</p>
 
 </div>
 
@@ -6608,7 +6612,8 @@
 
 <h5>Syntax:</h5>
 <p>This is an overloaded intrinsic. You can use <tt>llvm.ctlz</tt> on any
-   integer bit width. Not all targets support all bit widths however.</p>
+   integer bit width, or any vector whose elements are integers. Not all
+   targets support all bit widths or vector types, however.</p>
 
 <pre>
   declare i8 @llvm.ctlz.i8 (i8  <src>)
@@ -6616,6 +6621,7 @@
   declare i32 @llvm.ctlz.i32(i32 <src>)
   declare i64 @llvm.ctlz.i64(i64 <src>)
   declare i256 @llvm.ctlz.i256(i256 <src>)
+  declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32> <src>)
 </pre>
 
 <h5>Overview:</h5>
@@ -6624,11 +6630,13 @@
 
 <h5>Arguments:</h5>
 <p>The only argument is the value to be counted.  The argument may be of any
-   integer type. The return type must match the argument type.</p>
+   integer type, or any vector type with integer element type.
+   The return type must match the argument type.</p>
 
 <h5>Semantics:</h5>
 <p>The '<tt>llvm.ctlz</tt>' intrinsic counts the leading (most significant)
-   zeros in a variable.  If the src == 0 then the result is the size in bits of
+   zeros in a variable, or within each element of the vector if the operation
+   is of vector type.  If the src == 0 then the result is the size in bits of
    the type of src. For example, <tt>llvm.ctlz(i32 2) = 30</tt>.</p>
 
 </div>
@@ -6642,7 +6650,8 @@
 
 <h5>Syntax:</h5>
 <p>This is an overloaded intrinsic. You can use <tt>llvm.cttz</tt> on any
-   integer bit width. Not all targets support all bit widths however.</p>
+   integer bit width, or any vector of integer elements. Not all targets
+   support all bit widths or vector types, however.</p>
 
 <pre>
   declare i8 @llvm.cttz.i8 (i8  <src>)
@@ -6650,6 +6659,7 @@
   declare i32 @llvm.cttz.i32(i32 <src>)
   declare i64 @llvm.cttz.i64(i64 <src>)
   declare i256 @llvm.cttz.i256(i256 <src>)
+  declare <2 x i32> @llvm.cttz.v2i32(<2 x i32> <src>)
 </pre>
 
 <h5>Overview:</h5>
@@ -6658,11 +6668,13 @@
 
 <h5>Arguments:</h5>
 <p>The only argument is the value to be counted.  The argument may be of any
-   integer type.  The return type must match the argument type.</p>
+   integer type, or a vector with integer element type.  The return type
+   must match the argument type.</p>
 
 <h5>Semantics:</h5>
 <p>The '<tt>llvm.cttz</tt>' intrinsic counts the trailing (least significant)
-   zeros in a variable.  If the src == 0 then the result is the size in bits of
+   zeros in a variable, or within each element of a vector.
+   If the src == 0 then the result is the size in bits of
    the type of src.  For example, <tt>llvm.cttz(2) = 1</tt>.</p>
 
 </div>
@@ -7259,7 +7271,7 @@
             store i32 4, %ptr
 
 %result1  = load i32* %ptr      <i>; yields {i32}:result1 = 4</i>
-            call void @llvm.memory.barrier(i1 false, i1 true, i1 false, i1 false)
+            call void @llvm.memory.barrier(i1 false, i1 true, i1 false, i1 false, i1 true)
                                 <i>; guarantee the above finishes</i>
             store i32 8, %ptr   <i>; before this begins</i>
 </pre>

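The LangRef hunks above extend llvm.ctpop, llvm.ctlz and llvm.cttz to vectors of integers, with the count applied independently to each lane. As a plain C++ sketch of that element-wise semantics (ordinary host code, not LLVM API; the helper name is invented and 32-bit unsigned is assumed):

    #include <bitset>
    #include <vector>

    // Mirrors what <2 x i32> @llvm.ctpop.v2i32 computes: a population count
    // for every element of the input, producing a result of the same shape.
    std::vector<unsigned> ctpopPerElement(const std::vector<unsigned> &Src) {
      std::vector<unsigned> Result(Src.size());
      for (unsigned i = 0, e = Src.size(); i != e; ++i)
        Result[i] = (unsigned)std::bitset<32>(Src[i]).count();
      return Result;
    }
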
Modified: llvm/branches/type-system-rewrite/include/llvm/ADT/ArrayRef.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/ADT/ArrayRef.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/ADT/ArrayRef.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/ADT/ArrayRef.h Sat Jul  2 22:28:07 2011
@@ -125,6 +125,13 @@
     }
     
     /// @}
+    /// @name Conversion operators
+    /// @{
+    operator std::vector<T>() const {
+      return std::vector<T>(Data, Data+Length);
+    }
+    
+    /// @}
   };
   
   // ArrayRefs can be treated like a POD type.

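The ArrayRef change above adds an implicit conversion to std::vector. A minimal sketch of a call site that wants an owning copy of the referenced data (the function name is hypothetical):

    #include "llvm/ADT/ArrayRef.h"
    #include <vector>

    // Copy the elements an ArrayRef points at into a container the caller
    // owns, via the conversion operator added in the hunk above.
    std::vector<int> copyOut(llvm::ArrayRef<int> Vals) {
      std::vector<int> Owned = Vals;
      return Owned;
    }
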
Modified: llvm/branches/type-system-rewrite/include/llvm/ADT/StringMap.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/ADT/StringMap.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/ADT/StringMap.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/ADT/StringMap.h Sat Jul  2 22:28:07 2011
@@ -381,18 +381,6 @@
     return GetOrCreateValue(Key, ValueTy());
   }
 
-  // FIXME: Remove this method.
-  template <typename InitTy>
-  MapEntryTy &GetOrCreateValue(const char *KeyStart, const char *KeyEnd,
-                               InitTy Val) {
-    return GetOrCreateValue(StringRef(KeyStart, KeyEnd - KeyStart), Val);
-  }
-
-  // FIXME: Remove this method.
-  MapEntryTy &GetOrCreateValue(const char *KeyStart, const char *KeyEnd) {
-    return GetOrCreateValue(StringRef(KeyStart, KeyEnd - KeyStart));
-  }
-
   /// remove - Remove the specified key/value pair from the map, but do not
   /// erase it.  This aborts if the key is not in the map.
   void remove(MapEntryTy *KeyValue) {

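The two GetOrCreateValue overloads deleted above were thin wrappers; any remaining caller can build the StringRef itself. A rough sketch of the equivalent call (the map type and names are examples):

    #include "llvm/ADT/StringMap.h"
    #include "llvm/ADT/StringRef.h"

    // Equivalent of the removed GetOrCreateValue(KeyStart, KeyEnd, Val) form.
    void recordToken(llvm::StringMap<unsigned> &Map,
                     const char *KeyStart, const char *KeyEnd) {
      Map.GetOrCreateValue(llvm::StringRef(KeyStart, KeyEnd - KeyStart), 0u);
    }
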
Modified: llvm/branches/type-system-rewrite/include/llvm/ADT/Triple.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/ADT/Triple.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/ADT/Triple.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/ADT/Triple.h Sat Jul  2 22:28:07 2011
@@ -95,7 +95,8 @@
     Solaris,
     Win32,
     Haiku,
-    Minix
+    Minix,
+    RTEMS
   };
   enum EnvironmentType {
     UnknownEnvironment,
@@ -237,19 +238,10 @@
   /// specialized because it is a common query.
   unsigned getOSMajorVersion() const {
     unsigned Maj, Min, Micro;
-    getDarwinNumber(Maj, Min, Micro);
+    getOSVersion(Maj, Min, Micro);
     return Maj;
   }
 
-  void getDarwinNumber(unsigned &Major, unsigned &Minor,
-                       unsigned &Micro) const {
-    return getOSVersion(Major, Minor, Micro);
-  }
-
-  unsigned getDarwinMajorNumber() const {
-    return getOSMajorVersion();
-  }
-
   /// isOSVersionLT - Helper function for doing comparisons against version
   /// numbers included in the target triple.
   bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
@@ -275,7 +267,7 @@
 
   /// isOSDarwin - Is this a "Darwin" OS (OS X or iOS).
   bool isOSDarwin() const {
-    return isMacOSX() ||getOS() == Triple::IOS;
+    return isMacOSX() || getOS() == Triple::IOS;
   }
 
   /// isOSWindows - Is this a "Windows" OS.
@@ -288,7 +280,7 @@
   /// compatibility, which handles supporting skewed version numbering schemes
   /// used by the "darwin" triples.
   unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
-                          unsigned Micro = 0) const {
+                             unsigned Micro = 0) const {
     assert(isMacOSX() && "Not an OS X triple!");
 
     // If this is OS X, expect a sane version number.
@@ -299,7 +291,7 @@
     assert(Major == 10 && "Unexpected major version");
     return isOSVersionLT(Minor + 4, Micro, 0);
   }
-    
+
   /// @}
   /// @name Mutators
   /// @{

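With the Darwin-specific accessors gone and RTEMS added as an OS type, clients query the generic getOSVersion directly. A small sketch, assuming a caller that already has a Triple (the helper name is invented):

    #include "llvm/ADT/Triple.h"

    // True if the triple targets RTEMS with at least the given major version.
    bool isRTEMSAtLeast(const llvm::Triple &T, unsigned Major) {
      if (T.getOS() != llvm::Triple::RTEMS)
        return false;
      unsigned Maj, Min, Micro;
      T.getOSVersion(Maj, Min, Micro); // replaces the removed getDarwinNumber
      return Maj >= Major;
    }
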
Modified: llvm/branches/type-system-rewrite/include/llvm/Analysis/DIBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Analysis/DIBuilder.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Analysis/DIBuilder.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Analysis/DIBuilder.h Sat Jul  2 22:28:07 2011
@@ -135,6 +135,7 @@
                              unsigned Flags);
 
     /// createMemberType - Create debugging information entry for a member.
+    /// @param Scope        Member scope.
     /// @param Name         Member name.
     /// @param File         File where this member is defined.
     /// @param LineNo       Line number.
@@ -143,7 +144,7 @@
     /// @param OffsetInBits Member offset.
     /// @param Flags        Flags to encode member attribute, e.g. private
     /// @param Ty           Parent type.
-    DIType createMemberType(StringRef Name, DIFile File,
+    DIType createMemberType(DIDescriptor Scope, StringRef Name, DIFile File,
                             unsigned LineNo, uint64_t SizeInBits, 
                             uint64_t AlignInBits, uint64_t OffsetInBits, 
                             unsigned Flags, DIType Ty);

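createMemberType now takes the member's enclosing scope as its first argument. A hedged sketch of an updated call site (the wrapper name, sizes and flags are illustrative only):

    #include "llvm/Analysis/DIBuilder.h"
    #include "llvm/Analysis/DebugInfo.h"

    // Emit debug info for one 32-bit member of an aggregate; Scope is the
    // descriptor of the enclosing struct/class, per the new parameter.
    llvm::DIType emitMember(llvm::DIBuilder &DBuilder, llvm::DIDescriptor Scope,
                            llvm::StringRef Name, llvm::DIFile File,
                            unsigned Line, llvm::DIType Ty) {
      return DBuilder.createMemberType(Scope, Name, File, Line,
                                       /*SizeInBits=*/32, /*AlignInBits=*/32,
                                       /*OffsetInBits=*/0, /*Flags=*/0, Ty);
    }
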
Modified: llvm/branches/type-system-rewrite/include/llvm/Analysis/IVUsers.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Analysis/IVUsers.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Analysis/IVUsers.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Analysis/IVUsers.h Sat Jul  2 22:28:07 2011
@@ -37,8 +37,8 @@
 class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
   friend class IVUsers;
 public:
-  IVStrideUse(IVUsers *P, Instruction* U, Value *O, Value *PN)
-    : CallbackVH(U), Parent(P), OperandValToReplace(O), Phi(PN) {
+  IVStrideUse(IVUsers *P, Instruction* U, Value *O)
+    : CallbackVH(U), Parent(P), OperandValToReplace(O) {
   }
 
   /// getUser - Return the user instruction for this use.
@@ -51,11 +51,6 @@
     setValPtr(NewUser);
   }
 
-  /// getPhi - Return the phi node that represents this IV.
-  PHINode *getPhi() const {
-    return cast<PHINode>(Phi);
-  }
-
   /// getOperandValToReplace - Return the Value of the operand in the user
   /// instruction that this IVStrideUse is representing.
   Value *getOperandValToReplace() const {
@@ -86,9 +81,6 @@
   /// that this IVStrideUse is representing.
   WeakVH OperandValToReplace;
 
-  /// Phi - The loop header phi that represents this IV.
-  WeakVH Phi;
-
   /// PostIncLoops - The set of loops for which Expr has been adjusted to
   /// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
   PostIncLoopSet PostIncLoops;
@@ -151,9 +143,9 @@
   /// AddUsersIfInteresting - Inspect the specified Instruction.  If it is a
   /// reducible SCEV, recursively add its users to the IVUsesByStride set and
   /// return true.  Otherwise, return false.
-  bool AddUsersIfInteresting(Instruction *I, PHINode *Phi);
+  bool AddUsersIfInteresting(Instruction *I);
 
-  IVStrideUse &AddUser(Instruction *User, Value *Operand, PHINode *Phi);
+  IVStrideUse &AddUser(Instruction *User, Value *Operand);
 
   /// getReplacementExpr - Return a SCEV expression which computes the
   /// value of the OperandValToReplace of the given IVStrideUse.

Modified: llvm/branches/type-system-rewrite/include/llvm/Analysis/ScalarEvolutionExpander.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Analysis/ScalarEvolutionExpander.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Analysis/ScalarEvolutionExpander.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Analysis/ScalarEvolutionExpander.h Sat Jul  2 22:28:07 2011
@@ -30,6 +30,10 @@
   /// memory.
   class SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
     ScalarEvolution &SE;
+
+    // New instructions receive a name to identify them with the current pass.
+    const char* IVName;
+
     std::map<std::pair<const SCEV *, Instruction *>, AssertingVH<Value> >
       InsertedExpressions;
     std::set<AssertingVH<Value> > InsertedValues;
@@ -67,8 +71,8 @@
 
   public:
     /// SCEVExpander - Construct a SCEVExpander in "canonical" mode.
-    explicit SCEVExpander(ScalarEvolution &se)
-      : SE(se), IVIncInsertLoop(0), CanonicalMode(true),
+    explicit SCEVExpander(ScalarEvolution &se, const char *name)
+      : SE(se), IVName(name), IVIncInsertLoop(0), CanonicalMode(true),
         Builder(se.getContext(), TargetFolder(se.TD)) {}
 
     /// clear - Erase the contents of the InsertedExpressions map so that users

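SCEVExpander's constructor now carries a name that is attached to the instructions it creates, which makes it easy to tell which pass introduced a given IV. A minimal sketch (the pass name string is arbitrary):

    #include "llvm/Analysis/ScalarEvolutionExpander.h"

    void makeExpander(llvm::ScalarEvolution &SE) {
      // The second argument is the new IVName; expanded instructions get
      // names derived from it.
      llvm::SCEVExpander Rewriter(SE, "example");
      (void)Rewriter;
    }
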
Modified: llvm/branches/type-system-rewrite/include/llvm/Analysis/ValueTracking.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Analysis/ValueTracking.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Analysis/ValueTracking.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Analysis/ValueTracking.h Sat Jul  2 22:28:07 2011
@@ -158,6 +158,10 @@
     return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
   }
 
+  /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
+  /// are lifetime markers.
+  bool onlyUsedByLifetimeMarkers(const Value *V);
+
 } // end namespace llvm
 
 #endif

Modified: llvm/branches/type-system-rewrite/include/llvm/BasicBlock.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/BasicBlock.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/BasicBlock.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/BasicBlock.h Sat Jul  2 22:28:07 2011
@@ -110,7 +110,7 @@
         Function *getParent()       { return Parent; }
 
   /// use_back - Specialize the methods defined in Value, as we know that a
-  /// BasicBlock can only be used by Users (specifically PHI nodes, terminators,
+  /// BasicBlock can only be used by Users (specifically terminators
   /// and BlockAddress's).
   User       *use_back()       { return cast<User>(*use_begin());}
   const User *use_back() const { return cast<User>(*use_begin());}
@@ -138,6 +138,12 @@
     return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbg();
   }
 
+  // Same as above, but also skip lifetime intrinsics.
+  Instruction* getFirstNonPHIOrDbgOrLifetime();
+  const Instruction* getFirstNonPHIOrDbgOrLifetime() const {
+    return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbgOrLifetime();
+  }
+
   /// removeFromParent - This method unlinks 'this' from the containing
   /// function, but does not delete it.
   ///
@@ -248,6 +254,10 @@
   /// other than direct branches, switches, etc. to it.
   bool hasAddressTaken() const { return getSubclassDataFromValue() != 0; }
 
+  /// replaceSuccessorsPhiUsesWith - Update all phi nodes in all our successors
+  /// to refer to basic block New instead of to us.
+  void replaceSuccessorsPhiUsesWith(BasicBlock *New);
+
 private:
   /// AdjustBlockAddressRefCount - BasicBlock stores the number of BlockAddress
   /// objects using it.  This is almost always 0, sometimes one, possibly but

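The two BasicBlock additions above pair naturally in CFG-restructuring code: after a block is split or replaced, the successors' PHI nodes must be retargeted, and scans over the new block usually want to step over PHIs, debug intrinsics and the new lifetime markers. A rough sketch (the helper is hypothetical):

    #include "llvm/BasicBlock.h"
    #include "llvm/Instruction.h"

    // Retarget successor PHIs from Old to New, then return the first "real"
    // instruction of New, skipping PHIs, llvm.dbg.* calls and lifetime
    // intrinsics via the new accessor.
    llvm::Instruction *retargetAndSkip(llvm::BasicBlock *Old,
                                       llvm::BasicBlock *New) {
      Old->replaceSuccessorsPhiUsesWith(New);
      return New->getFirstNonPHIOrDbgOrLifetime();
    }
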
Modified: llvm/branches/type-system-rewrite/include/llvm/Bitcode/BitstreamReader.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Bitcode/BitstreamReader.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Bitcode/BitstreamReader.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Bitcode/BitstreamReader.h Sat Jul  2 22:28:07 2011
@@ -375,10 +375,12 @@
 
     // Check that the block wasn't partially defined, and that the offset isn't
     // bogus.
-    if (AtEndOfStream() || NextChar+NumWords*4 > BitStream->getLastChar())
+    const unsigned char *const SkipTo = NextChar + NumWords*4;
+    if (AtEndOfStream() || SkipTo > BitStream->getLastChar() ||
+                           SkipTo < BitStream->getFirstChar())
       return true;
 
-    NextChar += NumWords*4;
+    NextChar = SkipTo;
     return false;
   }
 

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/LinkAllCodegenComponents.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/LinkAllCodegenComponents.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/LinkAllCodegenComponents.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/LinkAllCodegenComponents.h Sat Jul  2 22:28:07 2011
@@ -39,8 +39,6 @@
       (void) llvm::createGreedyRegisterAllocator();
       (void) llvm::createDefaultPBQPRegisterAllocator();
 
-      (void) llvm::createSimpleRegisterCoalescer();
-      
       llvm::linkOcamlGC();
       llvm::linkShadowStackGC();
       

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineFunction.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineFunction.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineFunction.h Sat Jul  2 22:28:07 2011
@@ -345,7 +345,7 @@
   /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
   /// of `new MachineInstr'.
   ///
-  MachineInstr *CreateMachineInstr(const TargetInstrDesc &TID,
+  MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID,
                                    DebugLoc DL,
                                    bool NoImp = false);
 

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstr.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstr.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstr.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstr.h Sat Jul  2 22:28:07 2011
@@ -17,11 +17,12 @@
 #define LLVM_CODEGEN_MACHINEINSTR_H
 
 #include "llvm/CodeGen/MachineOperand.h"
-#include "llvm/Target/TargetInstrDesc.h"
+#include "llvm/MC/MCInstrDesc.h"
 #include "llvm/Target/TargetOpcodes.h"
 #include "llvm/ADT/ilist.h"
 #include "llvm/ADT/ilist_node.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/DenseMapInfo.h"
 #include "llvm/Support/DebugLoc.h"
 #include <vector>
@@ -30,7 +31,6 @@
 
 template <typename T> class SmallVectorImpl;
 class AliasAnalysis;
-class TargetInstrDesc;
 class TargetInstrInfo;
 class TargetRegisterInfo;
 class MachineFunction;
@@ -57,7 +57,7 @@
                                         // function frame setup code.
   };
 private:
-  const TargetInstrDesc *TID;           // Instruction descriptor.
+  const MCInstrDesc *MCID;              // Instruction descriptor.
   uint16_t NumImplicitOps;              // Number of implicit operands (which
                                         // are determined at construction time).
 
@@ -94,7 +94,7 @@
   MachineInstr(MachineFunction &, const MachineInstr &);
 
   /// MachineInstr ctor - This constructor creates a dummy MachineInstr with
-  /// TID NULL and no operands.
+  /// MCID NULL and no operands.
   MachineInstr();
 
   // The next two constructors have DebugLoc and non-DebugLoc versions;
@@ -103,25 +103,25 @@
 
   /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
   /// implicit operands.  It reserves space for the number of operands specified
-  /// by the TargetInstrDesc.  The version with a DebugLoc should be preferred.
-  explicit MachineInstr(const TargetInstrDesc &TID, bool NoImp = false);
+  /// by the MCInstrDesc.  The version with a DebugLoc should be preferred.
+  explicit MachineInstr(const MCInstrDesc &MCID, bool NoImp = false);
 
   /// MachineInstr ctor - Work exactly the same as the ctor above, except that
   /// the MachineInstr is created and added to the end of the specified basic
   /// block.  The version with a DebugLoc should be preferred.
-  MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &TID);
+  MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &MCID);
 
   /// MachineInstr ctor - This constructor create a MachineInstr and add the
   /// implicit operands.  It reserves space for number of operands specified by
-  /// TargetInstrDesc.  An explicit DebugLoc is supplied.
-  explicit MachineInstr(const TargetInstrDesc &TID, const DebugLoc dl,
+  /// MCInstrDesc.  An explicit DebugLoc is supplied.
+  explicit MachineInstr(const MCInstrDesc &MCID, const DebugLoc dl,
                         bool NoImp = false);
 
   /// MachineInstr ctor - Work exactly the same as the ctor above, except that
   /// the MachineInstr is created and added to the end of the specified basic
   /// block.
   MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
-               const TargetInstrDesc &TID);
+               const MCInstrDesc &MCID);
 
   ~MachineInstr();
 
@@ -181,13 +181,22 @@
   ///
   DebugLoc getDebugLoc() const { return debugLoc; }
 
+  /// emitError - Emit an error referring to the source location of this
+  /// instruction. This should only be used for inline assembly that is somehow
+  /// impossible to compile. Other errors should have been handled much
+  /// earlier.
+  ///
+  /// If this method returns, the caller should try to recover from the error.
+  ///
+  void emitError(StringRef Msg) const;
+
   /// getDesc - Returns the target instruction descriptor of this
   /// MachineInstr.
-  const TargetInstrDesc &getDesc() const { return *TID; }
+  const MCInstrDesc &getDesc() const { return *MCID; }
 
   /// getOpcode - Returns the opcode of this MachineInstr.
   ///
-  int getOpcode() const { return TID->Opcode; }
+  int getOpcode() const { return MCID->Opcode; }
 
   /// Access to explicit operands of the instruction.
   ///
@@ -279,6 +288,9 @@
   bool isCopy() const {
     return getOpcode() == TargetOpcode::COPY;
   }
+  bool isFullCopy() const {
+    return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
+  }
 
   /// isCopyLike - Return true if the instruction behaves like a copy.
   /// This does not include native copy instructions.
@@ -464,8 +476,8 @@
 
   /// hasUnmodeledSideEffects - Return true if this instruction has side
   /// effects that are not modeled by mayLoad / mayStore, etc.
-  /// For all instructions, the property is encoded in TargetInstrDesc::Flags
-  /// (see TargetInstrDesc::hasUnmodeledSideEffects(). The only exception is
+  /// For all instructions, the property is encoded in MCInstrDesc::Flags
+  /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is
   /// INLINEASM instruction, in which case the side effect property is encoded
   /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
   ///
@@ -497,7 +509,7 @@
   /// setDesc - Replace the instruction descriptor (thus opcode) of
   /// the current instruction with a new one.
   ///
-  void setDesc(const TargetInstrDesc &tid) { TID = &tid; }
+  void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
 
   /// setDebugLoc - Replace current source information with new such.
   /// Avoid using this, the constructor argument is preferable.

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstrBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstrBuilder.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstrBuilder.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineInstrBuilder.h Sat Jul  2 22:28:07 2011
@@ -22,7 +22,7 @@
 
 namespace llvm {
 
-class TargetInstrDesc;
+class MCInstrDesc;
 class MDNode;
 
 namespace RegState {
@@ -77,6 +77,11 @@
     return *this;
   }
 
+  const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
+    MI->addOperand(MachineOperand::CreateCImm(Val));
+    return *this;
+  }
+
   const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
     MI->addOperand(MachineOperand::CreateFPImm(Val));
     return *this;
@@ -175,8 +180,8 @@
 ///
 inline MachineInstrBuilder BuildMI(MachineFunction &MF,
                                    DebugLoc DL,
-                                   const TargetInstrDesc &TID) {
-  return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL));
+                                   const MCInstrDesc &MCID) {
+  return MachineInstrBuilder(MF.CreateMachineInstr(MCID, DL));
 }
 
 /// BuildMI - This version of the builder sets up the first operand as a
@@ -184,9 +189,9 @@
 ///
 inline MachineInstrBuilder BuildMI(MachineFunction &MF,
                                    DebugLoc DL,
-                                   const TargetInstrDesc &TID,
+                                   const MCInstrDesc &MCID,
                                    unsigned DestReg) {
-  return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL))
+  return MachineInstrBuilder(MF.CreateMachineInstr(MCID, DL))
            .addReg(DestReg, RegState::Define);
 }
 
@@ -197,9 +202,9 @@
 inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                    MachineBasicBlock::iterator I,
                                    DebugLoc DL,
-                                   const TargetInstrDesc &TID,
+                                   const MCInstrDesc &MCID,
                                    unsigned DestReg) {
-  MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL);
+  MachineInstr *MI = BB.getParent()->CreateMachineInstr(MCID, DL);
   BB.insert(I, MI);
   return MachineInstrBuilder(MI).addReg(DestReg, RegState::Define);
 }
@@ -211,8 +216,8 @@
 inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                    MachineBasicBlock::iterator I,
                                    DebugLoc DL,
-                                   const TargetInstrDesc &TID) {
-  MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL);
+                                   const MCInstrDesc &MCID) {
+  MachineInstr *MI = BB.getParent()->CreateMachineInstr(MCID, DL);
   BB.insert(I, MI);
   return MachineInstrBuilder(MI);
 }
@@ -223,8 +228,8 @@
 ///
 inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
                                    DebugLoc DL,
-                                   const TargetInstrDesc &TID) {
-  return BuildMI(*BB, BB->end(), DL, TID);
+                                   const MCInstrDesc &MCID) {
+  return BuildMI(*BB, BB->end(), DL, MCID);
 }
 
 /// BuildMI - This version of the builder inserts the newly-built
@@ -233,9 +238,9 @@
 ///
 inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
                                    DebugLoc DL,
-                                   const TargetInstrDesc &TID,
+                                   const MCInstrDesc &MCID,
                                    unsigned DestReg) {
-  return BuildMI(*BB, BB->end(), DL, TID, DestReg);
+  return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
 }
 
 inline unsigned getDefRegState(bool B) {

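Call sites of BuildMI are largely unaffected by the TargetInstrDesc-to-MCInstrDesc switch, since they typically go through TargetInstrInfo::get(), which now hands back an MCInstrDesc accepted by the overloads above. A hedged sketch with a placeholder opcode and register:

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Target/TargetInstrInfo.h"

    // Insert "DestReg = <MoveImmOpcode> 42" before I. The opcode and register
    // are placeholders supplied by the caller.
    void emitMoveImm(llvm::MachineBasicBlock &MBB,
                     llvm::MachineBasicBlock::iterator I, llvm::DebugLoc DL,
                     const llvm::TargetInstrInfo *TII,
                     unsigned MoveImmOpcode, unsigned DestReg) {
      llvm::BuildMI(MBB, I, DL, TII->get(MoveImmOpcode), DestReg).addImm(42);
    }
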
Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineOperand.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineOperand.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineOperand.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineOperand.h Sat Jul  2 22:28:07 2011
@@ -21,6 +21,7 @@
 
 class BlockAddress;
 class ConstantFP;
+class ConstantInt;
 class GlobalValue;
 class MachineBasicBlock;
 class MachineInstr;
@@ -38,6 +39,7 @@
   enum MachineOperandType {
     MO_Register,               ///< Register operand.
     MO_Immediate,              ///< Immediate operand
+    MO_CImmediate,             ///< Immediate >64bit operand
     MO_FPImmediate,            ///< Floating-point immediate operand
     MO_MachineBasicBlock,      ///< MachineBasicBlock reference
     MO_FrameIndex,             ///< Abstract Stack Frame Index
@@ -111,6 +113,7 @@
   union {
     MachineBasicBlock *MBB;   // For MO_MachineBasicBlock.
     const ConstantFP *CFP;    // For MO_FPImmediate.
+    const ConstantInt *CI;    // For MO_CImmediate. Integers > 64bit.
     int64_t ImmVal;           // For MO_Immediate.
     const MDNode *MD;         // For MO_Metadata.
     MCSymbol *Sym;            // For MO_MCSymbol
@@ -173,6 +176,8 @@
   bool isReg() const { return OpKind == MO_Register; }
   /// isImm - Tests if this is a MO_Immediate operand.
   bool isImm() const { return OpKind == MO_Immediate; }
+  /// isCImm - Tests if this is a MO_CImmediate operand.
+  bool isCImm() const { return OpKind == MO_CImmediate; }
   /// isFPImm - Tests if this is a MO_FPImmediate operand.
   bool isFPImm() const { return OpKind == MO_FPImmediate; }
   /// isMBB - Tests if this is a MO_MachineBasicBlock operand.
@@ -333,6 +338,11 @@
     return Contents.ImmVal;
   }
 
+  const ConstantInt *getCImm() const {
+    assert(isCImm() && "Wrong MachineOperand accessor");
+    return Contents.CI;
+  }
+
   const ConstantFP *getFPImm() const {
     assert(isFPImm() && "Wrong MachineOperand accessor");
     return Contents.CFP;
@@ -440,6 +450,12 @@
     return Op;
   }
 
+  static MachineOperand CreateCImm(const ConstantInt *CI) {
+    MachineOperand Op(MachineOperand::MO_CImmediate);
+    Op.Contents.CI = CI;
+    return Op;
+  }
+
   static MachineOperand CreateFPImm(const ConstantFP *CFP) {
     MachineOperand Op(MachineOperand::MO_FPImmediate);
     Op.Contents.CFP = CFP;

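MO_CImmediate carries a ConstantInt for immediates that do not fit the 64-bit ImmVal field. A sketch of attaching such an operand through the addCImm helper added to MachineInstrBuilder above (the 128-bit value is just an example):

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    // Append a 128-bit immediate operand; it is stored as MO_CImmediate
    // (MachineOperand::CreateCImm) rather than a plain MO_Immediate.
    void addWideImm(llvm::MachineInstrBuilder &MIB, llvm::LLVMContext &Ctx) {
      llvm::ConstantInt *CI =
          llvm::ConstantInt::get(llvm::Type::getIntNTy(Ctx, 128), 1);
      MIB.addCImm(CI);
    }
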
Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineRegisterInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineRegisterInfo.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/MachineRegisterInfo.h Sat Jul  2 22:28:07 2011
@@ -32,11 +32,6 @@
   IndexedMap<std::pair<const TargetRegisterClass*, MachineOperand*>,
              VirtReg2IndexFunctor> VRegInfo;
 
-  /// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to
-  /// virtual registers. For each target register class, it keeps a list of
-  /// virtual registers belonging to the class.
-  std::vector<unsigned> *RegClass2VRegMap;
-
   /// RegAllocHints - This vector records register allocation hints for virtual
   /// registers. For each virtual register, it keeps a register and hint type
   /// pair making up the allocation hint. Hint type is target specific except
@@ -216,13 +211,6 @@
   ///
   unsigned getNumVirtRegs() const { return VRegInfo.size(); }
 
-  /// getRegClassVirtRegs - Return the list of virtual registers of the given
-  /// target register class.
-  const std::vector<unsigned> &
-  getRegClassVirtRegs(const TargetRegisterClass *RC) const {
-    return RegClass2VRegMap[RC->getID()];
-  }
-
   /// setRegAllocationHint - Specify a register allocation hint for the
   /// specified virtual register.
   void setRegAllocationHint(unsigned Reg, unsigned Type, unsigned PrefReg) {

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/Passes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/Passes.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/Passes.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/Passes.h Sat Jul  2 22:28:07 2011
@@ -73,16 +73,9 @@
   ///  This pass is still in development
   extern char &StrongPHIEliminationID;
 
-  extern char &PreAllocSplittingID;
-
   /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
   extern char &LiveStacksID;
 
-  /// SimpleRegisterCoalescing pass.  Aggressively coalesces every register
-  /// copy it can.
-  ///
-  extern char &SimpleRegisterCoalescingID;
-
   /// TwoAddressInstruction pass - This pass reduces two-address instructions to
   /// use two operands. This destroys SSA information but it is desired by
   /// register allocators.
@@ -132,10 +125,10 @@
   ///
   FunctionPass *createDefaultPBQPRegisterAllocator();
 
-  /// SimpleRegisterCoalescing Pass - Coalesce all copies possible.  Can run
+  /// RegisterCoalescer Pass - Coalesce all copies possible.  Can run
   /// independently of the register allocator.
   ///
-  RegisterCoalescer *createSimpleRegisterCoalescer();
+  RegisterCoalescer *createRegisterCoalescer();
 
   /// PrologEpilogCodeInserter Pass - This pass inserts prolog and epilog code,
   /// and eliminates abstract frame references.

Removed: llvm/branches/type-system-rewrite/include/llvm/CodeGen/RegisterCoalescer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/RegisterCoalescer.h?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/RegisterCoalescer.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/RegisterCoalescer.h (removed)
@@ -1,244 +0,0 @@
-//===-- RegisterCoalescer.h - Register Coalescing Interface ------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains the abstract interface for register coalescers, 
-// allowing them to interact with and query register allocators.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/IncludeFile.h"
-#include "llvm/CodeGen/LiveInterval.h"
-#include "llvm/ADT/SmallPtrSet.h"
-
-#ifndef LLVM_CODEGEN_REGISTER_COALESCER_H
-#define LLVM_CODEGEN_REGISTER_COALESCER_H
-
-namespace llvm {
-
-  class MachineFunction;
-  class RegallocQuery;
-  class AnalysisUsage;
-  class MachineInstr;
-  class TargetRegisterInfo;
-  class TargetRegisterClass;
-  class TargetInstrInfo;
-
-  /// An abstract interface for register coalescers.  Coalescers must
-  /// implement this interface to be part of the coalescer analysis
-  /// group.
-  class RegisterCoalescer {
-  public:
-    static char ID; // Class identification, replacement for typeinfo
-    RegisterCoalescer() {}
-    virtual ~RegisterCoalescer();  // We want to be subclassed
-
-    /// Run the coalescer on this function, providing interference
-    /// data to query.  Return whether we removed any copies.
-    virtual bool coalesceFunction(MachineFunction &mf,
-                                  RegallocQuery &ifd) = 0;
-
-    /// Reset state.  Can be used to allow a coalescer run by
-    /// PassManager to be run again by the register allocator.
-    virtual void reset(MachineFunction &mf) {}
-
-    /// Register allocators must call this from their own
-    /// getAnalysisUsage to cover the case where the coalescer is not
-    /// a Pass in the proper sense and isn't managed by PassManager.
-    /// PassManager needs to know which analyses to make available and
-    /// which to invalidate when running the register allocator or any
-    /// pass that might call coalescing.  The long-term solution is to
-    /// allow hierarchies of PassManagers.
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {}
-  }; 
-
-  /// An abstract interface for register allocators to interact with
-  /// coalescers
-  ///
-  /// Example:
-  ///
-  /// This is simply an example of how to use the RegallocQuery
-  /// interface.  It is not meant to be used in production.
-  ///
-  ///   class LinearScanRegallocQuery : public RegallocQuery {
-  ///   private:
-  ///     const LiveIntervals \&li;
-  ///
-  ///   public:
-  ///     LinearScanRegallocQuery(LiveIntervals &intervals) 
-  ///         : li(intervals) {}
-  ///
-  ///     /// This is pretty slow and conservative, but since linear scan
-  ///     /// allocation doesn't pre-compute interference information it's
-  ///     /// the best we can do.  Coalescers are always free to ignore this
-  ///     /// and implement their own discovery strategy.  See
-  ///     /// SimpleRegisterCoalescing for an example.
-  ///     void getInterferences(IntervalSet &interferences,
-  ///                           const LiveInterval &a) const {
-  ///       for(LiveIntervals::const_iterator iv = li.begin(),
-  ///             ivend = li.end();
-  ///           iv != ivend;
-  ///           ++iv) {
-  ///         if (interfere(a, iv->second)) {
-  ///           interferences.insert(&iv->second);
-  ///         }
-  ///       }
-  ///     }
-  ///
-  ///     /// This is *really* slow and stupid.  See above.
-  ///     int getNumberOfInterferences(const LiveInterval &a) const {
-  ///       IntervalSet intervals;
-  ///       getInterferences(intervals, a);
-  ///       return intervals.size();
-  ///     }
-  ///   };  
-  ///
-  ///   In the allocator:
-  ///
-  ///   RegisterCoalescer &coalescer = getAnalysis<RegisterCoalescer>();
-  ///
-  ///   // We don't reset the coalescer so if it's already been run this
-  ///   // takes almost no time.
-  ///   LinearScanRegallocQuery ifd(*li_);
-  ///   coalescer.coalesceFunction(fn, ifd);
-  ///
-  class RegallocQuery {
-  public:
-    typedef SmallPtrSet<const LiveInterval *, 8> IntervalSet;
-
-    virtual ~RegallocQuery() {}
-    
-    /// Return whether two live ranges interfere.
-    virtual bool interfere(const LiveInterval &a,
-                           const LiveInterval &b) const {
-      // A naive test
-      return a.overlaps(b);
-    }
-
-    /// Return the set of intervals that interfere with this one.
-    virtual void getInterferences(IntervalSet &interferences,
-                                  const LiveInterval &a) const = 0;
-
-    /// This can often be cheaper than actually returning the
-    /// interferences.
-    virtual int getNumberOfInterferences(const LiveInterval &a) const = 0;
-
-    /// Make any data structure updates necessary to reflect
-    /// coalescing or other modifications.
-    virtual void updateDataForMerge(const LiveInterval &a,
-                                    const LiveInterval &b,
-                                    const MachineInstr &copy) {}
-
-    /// Allow the register allocator to communicate when it doesn't
-    /// want a copy coalesced.  This may be due to assumptions made by
-    /// the allocator about various invariants and so this question is
-    /// a matter of legality, not performance.  Performance decisions
-    /// about which copies to coalesce should be made by the
-    /// coalescer.
-    virtual bool isLegalToCoalesce(const MachineInstr &inst) const {
-      return true;
-    }
-  };
-
-
-  /// CoalescerPair - A helper class for register coalescers. When deciding if
-  /// two registers can be coalesced, CoalescerPair can determine if a copy
-  /// instruction would become an identity copy after coalescing.
-  class CoalescerPair {
-    const TargetInstrInfo &tii_;
-    const TargetRegisterInfo &tri_;
-
-    /// dstReg_ - The register that will be left after coalescing. It can be a
-    /// virtual or physical register.
-    unsigned dstReg_;
-
-    /// srcReg_ - the virtual register that will be coalesced into dstReg.
-    unsigned srcReg_;
-
-    /// subReg_ - The subregister index of srcReg in dstReg_. It is possible the
-    /// coalesce srcReg_ into a subreg of the larger dstReg_ when dstReg_ is a
-    /// virtual register.
-    unsigned subIdx_;
-
-    /// partial_ - True when the original copy was a partial subregister copy.
-    bool partial_;
-
-    /// crossClass_ - True when both regs are virtual, and newRC is constrained.
-    bool crossClass_;
-
-    /// flipped_ - True when DstReg and SrcReg are reversed from the oriignal copy
-    /// instruction.
-    bool flipped_;
-
-    /// newRC_ - The register class of the coalesced register, or NULL if dstReg_
-    /// is a physreg.
-    const TargetRegisterClass *newRC_;
-
-    /// compose - Compose subreg indices a and b, either may be 0.
-    unsigned compose(unsigned, unsigned) const;
-
-    /// isMoveInstr - Return true if MI is a move or subreg instruction.
-    bool isMoveInstr(const MachineInstr *MI, unsigned &Src, unsigned &Dst,
-                     unsigned &SrcSub, unsigned &DstSub) const;
-
-  public:
-    CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
-      : tii_(tii), tri_(tri), dstReg_(0), srcReg_(0), subIdx_(0),
-        partial_(false), crossClass_(false), flipped_(false), newRC_(0) {}
-
-    /// setRegisters - set registers to match the copy instruction MI. Return
-    /// false if MI is not a coalescable copy instruction.
-    bool setRegisters(const MachineInstr*);
-
-    /// flip - Swap srcReg_ and dstReg_. Return false if swapping is impossible
-    /// because dstReg_ is a physical register, or subIdx_ is set.
-    bool flip();
-
-    /// isCoalescable - Return true if MI is a copy instruction that will become
-    /// an identity copy after coalescing.
-    bool isCoalescable(const MachineInstr*) const;
-
-    /// isPhys - Return true if DstReg is a physical register.
-    bool isPhys() const { return !newRC_; }
-
-    /// isPartial - Return true if the original copy instruction did not copy the
-    /// full register, but was a subreg operation.
-    bool isPartial() const { return partial_; }
-
-    /// isCrossClass - Return true if DstReg is virtual and NewRC is a smaller register class than DstReg's.
-    bool isCrossClass() const { return crossClass_; }
-
-    /// isFlipped - Return true when getSrcReg is the register being defined by
-    /// the original copy instruction.
-    bool isFlipped() const { return flipped_; }
-
-    /// getDstReg - Return the register (virtual or physical) that will remain
-    /// after coalescing.
-    unsigned getDstReg() const { return dstReg_; }
-
-    /// getSrcReg - Return the virtual register that will be coalesced away.
-    unsigned getSrcReg() const { return srcReg_; }
-
-    /// getSubIdx - Return the subregister index in DstReg that SrcReg will be
-    /// coalesced into, or 0.
-    unsigned getSubIdx() const { return subIdx_; }
-
-    /// getNewRC - Return the register class of the coalesced register.
-    const TargetRegisterClass *getNewRC() const { return newRC_; }
-  };
-}
-
-// Because of the way .a files work, we must force the SimpleRC
-// implementation to be pulled in if the RegisterCoalescing header is
-// included.  Otherwise we run the risk of RegisterCoalescing being
-// used, but the default implementation not being linked into the tool
-// that uses it.
-FORCE_DEFINING_FILE_TO_BE_LINKED(RegisterCoalescer)
-FORCE_DEFINING_FILE_TO_BE_LINKED(SimpleRegisterCoalescing)
-
-#endif

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScheduleDAG.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScheduleDAG.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScheduleDAG.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScheduleDAG.h Sat Jul  2 22:28:07 2011
@@ -34,7 +34,7 @@
   class ScheduleDAG;
   class SDNode;
   class TargetInstrInfo;
-  class TargetInstrDesc;
+  class MCInstrDesc;
   class TargetMachine;
   class TargetRegisterClass;
   template<class Graph> class GraphWriter;
@@ -507,9 +507,9 @@
 
     virtual ~ScheduleDAG();
 
-    /// getInstrDesc - Return the TargetInstrDesc of this SUnit.
+    /// getInstrDesc - Return the MCInstrDesc of this SUnit.
     /// Return NULL for SDNodes without a machine opcode.
-    const TargetInstrDesc *getInstrDesc(const SUnit *SU) const {
+    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
       if (SU->isInstr()) return &SU->getInstr()->getDesc();
       return getNodeDesc(SU->getNode());
     }
@@ -579,8 +579,8 @@
     void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap);
 
   private:
-    // Return the TargetInstrDesc of this SDNode or NULL.
-    const TargetInstrDesc *getNodeDesc(const SDNode *Node) const;
+    // Return the MCInstrDesc of this SDNode or NULL.
+    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
   };
 
   class SUnitIterator : public std::iterator<std::forward_iterator_tag,

Modified: llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScoreboardHazardRecognizer.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScoreboardHazardRecognizer.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/CodeGen/ScoreboardHazardRecognizer.h Sat Jul  2 22:28:07 2011
@@ -25,7 +25,6 @@
 namespace llvm {
 
 class InstrItineraryData;
-class TargetInstrDesc;
 class ScheduleDAG;
 class SUnit;
 

Modified: llvm/branches/type-system-rewrite/include/llvm/Config/config.h.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Config/config.h.cmake?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Config/config.h.cmake (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Config/config.h.cmake Sat Jul  2 22:28:07 2011
@@ -686,9 +686,6 @@
    `char[]'. */
 #undef YYTEXT_POINTER
 
-/* Define to a type to use for `mode_t' if it is not otherwise available. */
-#cmakedefine mode_t ${mode_t}
-
 /* Define to a function replacing strtoll */
 #cmakedefine strtoll ${strtoll}
 

Modified: llvm/branches/type-system-rewrite/include/llvm/Constants.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Constants.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Constants.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Constants.h Sat Jul  2 22:28:07 2011
@@ -350,12 +350,7 @@
   ConstantArray(const ArrayType *T, const std::vector<Constant*> &Val);
 public:
   // ConstantArray accessors
-  static Constant *get(const ArrayType *T, const std::vector<Constant*> &V);
-  static Constant *get(const ArrayType *T, Constant *const *Vals, 
-                       unsigned NumVals);
-  static Constant *get(const ArrayType *T, ArrayRef<Constant*> V) {
-    return get(T, V.data(), V.size());
-  }
+  static Constant *get(const ArrayType *T, ArrayRef<Constant*> V);
                              
   /// This method constructs a ConstantArray and initializes it with a text
   /// string. The default behavior (AddNull==true) causes a null terminator to
@@ -392,6 +387,12 @@
   ///
   std::string getAsString() const;
 
+  /// getAsCString - If this array satisfies isCString(), this method converts the
+  /// array (without the trailing null byte) to an std::string and returns it.
+  /// Otherwise, it asserts out.
+  ///
+  std::string getAsCString() const;
+
   /// isNullValue - Return true if this is the value that would be returned by
   /// getNullValue.  This always returns false because zero arrays are always
   /// created as ConstantAggregateZero objects.
@@ -494,8 +495,6 @@
 public:
   // ConstantVector accessors
   static Constant *get(ArrayRef<Constant*> V);
-  // FIXME: Eliminate this constructor form.
-  static Constant *get(const VectorType *T, const std::vector<Constant*> &V);
   
   /// Transparently provide more efficient getOperand methods.
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);

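ConstantArray::get is down to a single ArrayRef-based overload, and SmallVector or std::vector arguments convert to ArrayRef implicitly, so most callers only lose the explicit length argument. A hedged sketch that builds an [N x i32] array constant (the helper name and element type are arbitrary):

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    // Build an [N x i32] constant from a list of raw values using the
    // remaining ArrayRef-based ConstantArray::get.
    llvm::Constant *makeI32Array(llvm::LLVMContext &Ctx,
                                 llvm::ArrayRef<uint64_t> Vals) {
      llvm::SmallVector<llvm::Constant*, 8> Elts;
      for (unsigned i = 0, e = Vals.size(); i != e; ++i)
        Elts.push_back(
            llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Vals[i]));
      return llvm::ConstantArray::get(
          llvm::ArrayType::get(llvm::Type::getInt32Ty(Ctx), Elts.size()), Elts);
    }
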
Modified: llvm/branches/type-system-rewrite/include/llvm/InitializePasses.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/InitializePasses.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/InitializePasses.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/InitializePasses.h Sat Jul  2 22:28:07 2011
@@ -65,6 +65,7 @@
 void initializeBasicAliasAnalysisPass(PassRegistry&);
 void initializeBasicCallGraphPass(PassRegistry&);
 void initializeBlockExtractorPassPass(PassRegistry&);
+void initializeBlockFrequencyPass(PassRegistry&);
 void initializeBlockPlacementPass(PassRegistry&);
 void initializeBranchProbabilityInfoPass(PassRegistry&);
 void initializeBreakCriticalEdgesPass(PassRegistry&);
@@ -175,7 +176,6 @@
 void initializePostDomPrinterPass(PassRegistry&);
 void initializePostDomViewerPass(PassRegistry&);
 void initializePostDominatorTreePass(PassRegistry&);
-void initializePreAllocSplittingPass(PassRegistry&);
 void initializePreVerifierPass(PassRegistry&);
 void initializePrintDbgInfoPass(PassRegistry&);
 void initializePrintFunctionPassPass(PassRegistry&);
@@ -196,7 +196,6 @@
 void initializeRegionOnlyViewerPass(PassRegistry&);
 void initializeRegionPrinterPass(PassRegistry&);
 void initializeRegionViewerPass(PassRegistry&);
-void initializeRegisterCoalescerAnalysisGroup(PassRegistry&);
 void initializeRenderMachineFunctionPass(PassRegistry&);
 void initializeSCCPPass(PassRegistry&);
 void initializeSROA_DTPass(PassRegistry&);
@@ -204,7 +203,7 @@
 void initializeScalarEvolutionAliasAnalysisPass(PassRegistry&);
 void initializeScalarEvolutionPass(PassRegistry&);
 void initializeSimpleInlinerPass(PassRegistry&);
-void initializeSimpleRegisterCoalescingPass(PassRegistry&);
+void initializeRegisterCoalescerPass(PassRegistry&);
 void initializeSimplifyLibCallsPass(PassRegistry&);
 void initializeSingleLoopExtractorPass(PassRegistry&);
 void initializeSinkingPass(PassRegistry&);

Modified: llvm/branches/type-system-rewrite/include/llvm/InlineAsm.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/InlineAsm.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/InlineAsm.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/InlineAsm.h Sat Jul  2 22:28:07 2011
@@ -25,15 +25,16 @@
 class FunctionType;
 class Module;
 struct InlineAsmKeyType;
-template<class ValType, class TypeClass, class ConstantClass, bool HasLargeKey>
+template<class ValType, class ValRefType, class TypeClass, class ConstantClass,
+         bool HasLargeKey>
 class ConstantUniqueMap;
 template<class ConstantClass, class TypeClass, class ValType>
 struct ConstantCreator;
 
 class InlineAsm : public Value {
   friend struct ConstantCreator<InlineAsm, PointerType, InlineAsmKeyType>;
-  friend class ConstantUniqueMap<InlineAsmKeyType, PointerType, InlineAsm,
-                                 false>;
+  friend class ConstantUniqueMap<InlineAsmKeyType, const InlineAsmKeyType&,
+                                 PointerType, InlineAsm, false>;
 
   InlineAsm(const InlineAsm &);             // do not implement
   void operator=(const InlineAsm&);         // do not implement
@@ -187,25 +188,32 @@
   // in the backend.
   
   enum {
+    // Fixed operands on an INLINEASM SDNode.
     Op_InputChain = 0,
     Op_AsmString = 1,
     Op_MDNode = 2,
     Op_ExtraInfo = 3,    // HasSideEffects, IsAlignStack
     Op_FirstOperand = 4,
 
+    // Fixed operands on an INLINEASM MachineInstr.
     MIOp_AsmString = 0,
     MIOp_ExtraInfo = 1,    // HasSideEffects, IsAlignStack
     MIOp_FirstOperand = 2,
 
+    // Interpretation of the MIOp_ExtraInfo bit field.
     Extra_HasSideEffects = 1,
     Extra_IsAlignStack = 2,
-    
-    Kind_RegUse = 1,
-    Kind_RegDef = 2,
-    Kind_Imm = 3,
-    Kind_Mem = 4,
-    Kind_RegDefEarlyClobber = 6,
-    
+
+    // Inline asm operands map to multiple SDNode / MachineInstr operands.
+    // The first operand is an immediate describing the asm operand, the low
+    // bits hold the kind:
+    Kind_RegUse = 1,             // Input register, "r".
+    Kind_RegDef = 2,             // Output register, "=r".
+    Kind_RegDefEarlyClobber = 3, // Early-clobber output register, "=&r".
+    Kind_Clobber = 4,            // Clobbered register, "~r".
+    Kind_Imm = 5,                // Immediate.
+    Kind_Mem = 6,                // Memory operand, "m".
+
     Flag_MatchingOperand = 0x80000000
   };
   
@@ -232,7 +240,10 @@
   static bool isRegDefEarlyClobberKind(unsigned Flag) {
     return getKind(Flag) == Kind_RegDefEarlyClobber;
   }
-  
+  static bool isClobberKind(unsigned Flag) {
+    return getKind(Flag) == Kind_Clobber;
+  }
+
   /// getNumOperandRegisters - Extract the number of registers field from the
   /// inline asm operand flag.
   static unsigned getNumOperandRegisters(unsigned Flag) {

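The kind values above are renumbered and gain Kind_Clobber, so any code that
decodes INLINEASM operand flag words has to switch on the new set. A minimal
illustrative sketch, not part of the patch: it relies only on the getKind()
accessor the predicates above are written in terms of, plus
getNumOperandRegisters(); the describeAsmOperand() helper itself is
hypothetical.

#include "llvm/InlineAsm.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Print what a single INLINEASM operand flag word describes under the
// renumbered kinds (illustration only).
static void describeAsmOperand(unsigned Flag) {
  switch (InlineAsm::getKind(Flag)) {
  case InlineAsm::Kind_RegUse:             outs() << "register use";     break;
  case InlineAsm::Kind_RegDef:             outs() << "register def";     break;
  case InlineAsm::Kind_RegDefEarlyClobber: outs() << "earlyclobber def"; break;
  case InlineAsm::Kind_Clobber:            outs() << "clobber";          break;
  case InlineAsm::Kind_Imm:                outs() << "immediate";        break;
  case InlineAsm::Kind_Mem:                outs() << "memory operand";   break;
  default:                                 outs() << "unknown kind";     break;
  }
  // The register count is still encoded alongside the kind.
  outs() << ", " << InlineAsm::getNumOperandRegisters(Flag) << " register(s)\n";
}
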
Modified: llvm/branches/type-system-rewrite/include/llvm/Instructions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Instructions.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Instructions.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Instructions.h Sat Jul  2 22:28:07 2011
@@ -1814,7 +1814,7 @@
   explicit PHINode(const Type *Ty, unsigned NumReservedValues,
                    const Twine &NameStr = "", Instruction *InsertBefore = 0)
     : Instruction(Ty, Instruction::PHI, 0, 0, InsertBefore),
-      ReservedSpace(NumReservedValues * 2) {
+      ReservedSpace(NumReservedValues) {
     setName(NameStr);
     OperandList = allocHungoffUses(ReservedSpace);
   }
@@ -1822,11 +1822,16 @@
   PHINode(const Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
           BasicBlock *InsertAtEnd)
     : Instruction(Ty, Instruction::PHI, 0, 0, InsertAtEnd),
-      ReservedSpace(NumReservedValues * 2) {
+      ReservedSpace(NumReservedValues) {
     setName(NameStr);
     OperandList = allocHungoffUses(ReservedSpace);
   }
 protected:
+  // allocHungoffUses - this is more complicated than the generic
+  // User::allocHungoffUses, because we have to allocate Uses for the incoming
+  // values and pointers to the incoming blocks, all in one allocation.
+  Use *allocHungoffUses(unsigned) const;
+
   virtual PHINode *clone_impl() const;
 public:
   /// Constructors - NumReservedValues is a hint for the number of incoming
@@ -1845,32 +1850,55 @@
   /// Provide fast operand accessors
   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
 
+  // Block iterator interface. This provides access to the list of incoming
+  // basic blocks, which parallels the list of incoming values.
+
+  typedef BasicBlock **block_iterator;
+  typedef BasicBlock * const *const_block_iterator;
+
+  block_iterator block_begin() {
+    Use::UserRef *ref =
+      reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
+    return reinterpret_cast<block_iterator>(ref + 1);
+  }
+
+  const_block_iterator block_begin() const {
+    const Use::UserRef *ref =
+      reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
+    return reinterpret_cast<const_block_iterator>(ref + 1);
+  }
+
+  block_iterator block_end() {
+    return block_begin() + getNumOperands();
+  }
+
+  const_block_iterator block_end() const {
+    return block_begin() + getNumOperands();
+  }
+
   /// getNumIncomingValues - Return the number of incoming edges
   ///
-  unsigned getNumIncomingValues() const { return getNumOperands()/2; }
+  unsigned getNumIncomingValues() const { return getNumOperands(); }
 
   /// getIncomingValue - Return incoming value number x
   ///
   Value *getIncomingValue(unsigned i) const {
-    assert(i*2 < getNumOperands() && "Invalid value number!");
-    return getOperand(i*2);
+    return getOperand(i);
   }
   void setIncomingValue(unsigned i, Value *V) {
-    assert(i*2 < getNumOperands() && "Invalid value number!");
-    setOperand(i*2, V);
+    setOperand(i, V);
   }
   static unsigned getOperandNumForIncomingValue(unsigned i) {
-    return i*2;
+    return i;
   }
   static unsigned getIncomingValueNumForOperand(unsigned i) {
-    assert(i % 2 == 0 && "Invalid incoming-value operand index!");
-    return i/2;
+    return i;
   }
 
   /// getIncomingBlock - Return incoming basic block number @p i.
   ///
   BasicBlock *getIncomingBlock(unsigned i) const {
-    return cast<BasicBlock>(getOperand(i*2+1));
+    return block_begin()[i];
   }
 
   /// getIncomingBlock - Return incoming basic block corresponding
@@ -1878,7 +1906,7 @@
   ///
   BasicBlock *getIncomingBlock(const Use &U) const {
     assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
-    return cast<BasicBlock>((&U + 1)->get());
+    return getIncomingBlock(unsigned(&U - op_begin()));
   }
 
   /// getIncomingBlock - Return incoming basic block corresponding
@@ -1889,16 +1917,8 @@
     return getIncomingBlock(I.getUse());
   }
 
-
   void setIncomingBlock(unsigned i, BasicBlock *BB) {
-    setOperand(i*2+1, (Value*)BB);
-  }
-  static unsigned getOperandNumForIncomingBlock(unsigned i) {
-    return i*2+1;
-  }
-  static unsigned getIncomingBlockNumForOperand(unsigned i) {
-    assert(i % 2 == 1 && "Invalid incoming-block operand index!");
-    return i/2;
+    block_begin()[i] = BB;
   }
 
   /// addIncoming - Add an incoming value to the end of the PHI list
@@ -1908,13 +1928,12 @@
     assert(BB && "PHI node got a null basic block!");
     assert(getType() == V->getType() &&
            "All operands to PHI node must be the same type as the PHI node!");
-    unsigned OpNo = NumOperands;
-    if (OpNo+2 > ReservedSpace)
+    if (NumOperands == ReservedSpace)
       growOperands();  // Get more space!
     // Initialize some new operands.
-    NumOperands = OpNo+2;
-    OperandList[OpNo] = V;
-    OperandList[OpNo+1] = (Value*)BB;
+    ++NumOperands;
+    setIncomingValue(NumOperands - 1, V);
+    setIncomingBlock(NumOperands - 1, BB);
   }
 
   /// removeIncomingValue - Remove an incoming value.  This is useful if a
@@ -1937,14 +1956,16 @@
   /// block in the value list for this PHI.  Returns -1 if no instance.
   ///
   int getBasicBlockIndex(const BasicBlock *BB) const {
-    Use *OL = OperandList;
-    for (unsigned i = 0, e = getNumOperands(); i != e; i += 2)
-      if (OL[i+1].get() == (const Value*)BB) return i/2;
+    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+      if (block_begin()[i] == BB)
+        return i;
     return -1;
   }
 
   Value *getIncomingValueForBlock(const BasicBlock *BB) const {
-    return getIncomingValue(getBasicBlockIndex(BB));
+    int Idx = getBasicBlockIndex(BB);
+    assert(Idx >= 0 && "Invalid basic block argument!");
+    return getIncomingValue(Idx);
   }
 
   /// hasConstantValue - If the specified PHI node always merges together the

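With incoming blocks moved out of the operand list, a PHI's value i is simply
operand i and its block is block_begin()[i]; the accessors above hide that
layout. A short sketch of the resulting idiom (the helper is purely
illustrative):

#include "llvm/Instructions.h"
using namespace llvm;

// Illustration only: enumerate a PHI's incoming edges under the new layout.
static void visitIncomingEdges(PHINode *PN) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *InVal = PN->getIncomingValue(i);      // was operand 2*i
    BasicBlock *InBB = PN->getIncomingBlock(i);  // was operand 2*i+1
    (void)InVal;
    (void)InBB;
    // ... per-edge work goes here ...
  }
}
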
Modified: llvm/branches/type-system-rewrite/include/llvm/Intrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Intrinsics.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Intrinsics.td (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Intrinsics.td Sat Jul  2 22:28:07 2011
@@ -312,7 +312,7 @@
   def int_eh_sjlj_lsda    : Intrinsic<[llvm_ptr_ty]>;
   def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>;
 }
-def int_eh_sjlj_dispatch_setup : Intrinsic<[], [llvm_i32_ty], [IntrReadMem]>;
+def int_eh_sjlj_dispatch_setup : Intrinsic<[], [llvm_i32_ty]>;
 def int_eh_sjlj_setjmp  : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
 def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;
 

Modified: llvm/branches/type-system-rewrite/include/llvm/MC/MCMachObjectWriter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/MC/MCMachObjectWriter.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/MC/MCMachObjectWriter.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/MC/MCMachObjectWriter.h Sat Jul  2 22:28:07 2011
@@ -10,11 +10,20 @@
 #ifndef LLVM_MC_MCMACHOBJECTWRITER_H
 #define LLVM_MC_MCMACHOBJECTWRITER_H
 
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Object/MachOFormat.h"
 #include "llvm/Support/DataTypes.h"
+#include <vector>
 
 namespace llvm {
 
+class MCSectionData;
+class MachObjectWriter;
+
 class MCMachObjectTargetWriter {
   const unsigned Is64Bit : 1;
   const uint32_t CPUType;
@@ -48,8 +57,191 @@
   }
 
   /// @}
+
+  /// @name API
+  /// @{
+
+  virtual void RecordRelocation(MachObjectWriter *Writer,
+                                const MCAssembler &Asm,
+                                const MCAsmLayout &Layout,
+                                const MCFragment *Fragment,
+                                const MCFixup &Fixup,
+                                MCValue Target,
+                                uint64_t &FixedValue) = 0;
+
+  /// @}
 };
 
+class MachObjectWriter : public MCObjectWriter {
+  /// MachSymbolData - Helper struct for containing some precomputed information
+  /// on symbols.
+  struct MachSymbolData {
+    MCSymbolData *SymbolData;
+    uint64_t StringIndex;
+    uint8_t SectionIndex;
+
+    // Support lexicographic sorting.
+    bool operator<(const MachSymbolData &RHS) const;
+  };
+
+  /// The target specific Mach-O writer instance.
+  llvm::OwningPtr<MCMachObjectTargetWriter> TargetObjectWriter;
+
+  /// @name Relocation Data
+  /// @{
+
+  llvm::DenseMap<const MCSectionData*,
+                 std::vector<object::macho::RelocationEntry> > Relocations;
+  llvm::DenseMap<const MCSectionData*, unsigned> IndirectSymBase;
+
+  /// @}
+  /// @name Symbol Table Data
+  /// @{
+
+  SmallString<256> StringTable;
+  std::vector<MachSymbolData> LocalSymbolData;
+  std::vector<MachSymbolData> ExternalSymbolData;
+  std::vector<MachSymbolData> UndefinedSymbolData;
+
+  /// @}
+
+public:
+  MachObjectWriter(MCMachObjectTargetWriter *MOTW, raw_ostream &_OS,
+                   bool _IsLittleEndian)
+    : MCObjectWriter(_OS, _IsLittleEndian), TargetObjectWriter(MOTW) {
+  }
+
+  /// @name Utility Methods
+  /// @{
+
+  bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);
+
+  SectionAddrMap SectionAddress;
+
+  SectionAddrMap &getSectionAddressMap() { return SectionAddress; }
+
+  uint64_t getSectionAddress(const MCSectionData* SD) const {
+    return SectionAddress.lookup(SD);
+  }
+  uint64_t getSymbolAddress(const MCSymbolData* SD,
+                            const MCAsmLayout &Layout) const;
+
+  uint64_t getFragmentAddress(const MCFragment *Fragment,
+                              const MCAsmLayout &Layout) const;
+
+  uint64_t getPaddingSize(const MCSectionData *SD,
+                          const MCAsmLayout &Layout) const;
+
+  bool doesSymbolRequireExternRelocation(const MCSymbolData *SD);
+
+  /// @}
+
+  /// @name Target Writer Proxy Accessors
+  /// @{
+
+  bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
+  bool isARM() const {
+    uint32_t CPUType = TargetObjectWriter->getCPUType() &
+      ~object::mach::CTFM_ArchMask;
+    return CPUType == object::mach::CTM_ARM;
+  }
+
+  /// @}
+
+  void WriteHeader(unsigned NumLoadCommands, unsigned LoadCommandsSize,
+                   bool SubsectionsViaSymbols);
+
+  /// WriteSegmentLoadCommand - Write a segment load command.
+  ///
+  /// \arg NumSections - The number of sections in this segment.
+  /// \arg SectionDataSize - The total size of the sections.
+  void WriteSegmentLoadCommand(unsigned NumSections,
+                               uint64_t VMSize,
+                               uint64_t SectionDataStartOffset,
+                               uint64_t SectionDataSize);
+
+  void WriteSection(const MCAssembler &Asm, const MCAsmLayout &Layout,
+                    const MCSectionData &SD, uint64_t FileOffset,
+                    uint64_t RelocationsStart, unsigned NumRelocations);
+
+  void WriteSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
+                              uint32_t StringTableOffset,
+                              uint32_t StringTableSize);
+
+  void WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
+                                uint32_t NumLocalSymbols,
+                                uint32_t FirstExternalSymbol,
+                                uint32_t NumExternalSymbols,
+                                uint32_t FirstUndefinedSymbol,
+                                uint32_t NumUndefinedSymbols,
+                                uint32_t IndirectSymbolOffset,
+                                uint32_t NumIndirectSymbols);
+
+  void WriteNlist(MachSymbolData &MSD, const MCAsmLayout &Layout);
+
+  // FIXME: We really need to improve the relocation validation. Basically, we
+  // want to implement a separate computation which evaluates the relocation
+  // entry as the linker would, and verifies that the resultant fixup value is
+  // exactly what the encoder wanted. This will catch several classes of
+  // problems:
+  //
+  //  - Relocation entry bugs, the two algorithms are unlikely to have the same
+  //    exact bug.
+  //
+  //  - Relaxation issues, where we forget to relax something.
+  //
+  //  - Input errors, where something cannot be correctly encoded. 'as' allows
+  //    these through in many cases.
+
+  void addRelocation(const MCSectionData *SD,
+                     object::macho::RelocationEntry &MRE) {
+    Relocations[SD].push_back(MRE);
+  }
+
+  void RecordScatteredRelocation(const MCAssembler &Asm,
+                                 const MCAsmLayout &Layout,
+                                 const MCFragment *Fragment,
+                                 const MCFixup &Fixup, MCValue Target,
+                                 unsigned Log2Size,
+                                 uint64_t &FixedValue);
+
+  void RecordTLVPRelocation(const MCAssembler &Asm,
+                            const MCAsmLayout &Layout,
+                            const MCFragment *Fragment,
+                            const MCFixup &Fixup, MCValue Target,
+                            uint64_t &FixedValue);
+
+  void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+                        const MCFragment *Fragment, const MCFixup &Fixup,
+                        MCValue Target, uint64_t &FixedValue);
+
+  void BindIndirectSymbols(MCAssembler &Asm);
+
+  /// ComputeSymbolTable - Compute the symbol table data
+  ///
+  /// \param StringTable [out] - The string table data.
+  /// \param LocalSymbolData,ExternalSymbolData,UndefinedSymbolData [out] -
+  /// The symbol data, partitioned into local/external/undefined symbols.
+  void ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
+                          std::vector<MachSymbolData> &LocalSymbolData,
+                          std::vector<MachSymbolData> &ExternalSymbolData,
+                          std::vector<MachSymbolData> &UndefinedSymbolData);
+
+  void computeSectionAddresses(const MCAssembler &Asm,
+                               const MCAsmLayout &Layout);
+
+  void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout);
+
+  virtual bool IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+                                                      const MCSymbolData &DataA,
+                                                      const MCFragment &FB,
+                                                      bool InSet,
+                                                      bool IsPCRel) const;
+
+  void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout);
+};
+
+
 /// \brief Construct a new Mach-O writer instance.
 ///
 /// This routine takes ownership of the target writer subclass.

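The generic MachObjectWriter now owns the relocation and symbol tables, and a
target only supplies the pure virtual RecordRelocation() hook declared above.
The sketch below shows the rough shape of such a subclass; the override
signature and addRelocation() come from this header, while MCFragment's
getParent(), the class name, the omitted constructor, and the Word0/Word1
fields of object::macho::RelocationEntry are assumptions for illustration.

#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCValue.h"
using namespace llvm;

namespace {
// Hypothetical target writer; a real target fills in the actual relocation
// computation from Fixup, Target and Layout.
class MyTargetMachObjectWriter : public MCMachObjectTargetWriter {
  // Constructor omitted: it forwards Is64Bit / CPU type information to the
  // MCMachObjectTargetWriter base exactly as before this split.
public:
  virtual void RecordRelocation(MachObjectWriter *Writer,
                                const MCAssembler &Asm,
                                const MCAsmLayout &Layout,
                                const MCFragment *Fragment,
                                const MCFixup &Fixup,
                                MCValue Target,
                                uint64_t &FixedValue) {
    object::macho::RelocationEntry MRE;
    MRE.Word0 = 0; // offset / scattered bits go here (assumed field name)
    MRE.Word1 = 0; // symbol index, pcrel, length, type (assumed field name)
    // Hand the finished entry to the generic writer, which now keeps the
    // per-section relocation tables.
    Writer->addRelocation(Fragment->getParent(), MRE);
  }
};
} // end anonymous namespace
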
Modified: llvm/branches/type-system-rewrite/include/llvm/Object/ObjectFile.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Object/ObjectFile.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Object/ObjectFile.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Object/ObjectFile.h Sat Jul  2 22:28:07 2011
@@ -14,15 +14,14 @@
 #ifndef LLVM_OBJECT_OBJECT_FILE_H
 #define LLVM_OBJECT_OBJECT_FILE_H
 
+#include "llvm/Object/Binary.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
 #include <cstring>
 
 namespace llvm {
-
-class MemoryBuffer;
-class StringRef;
-
 namespace object {
 
 class ObjectFile;
@@ -31,7 +30,7 @@
   struct {
     uint32_t a, b;
   } d;
-  intptr_t p;
+  uintptr_t p;
 };
 
 static bool operator ==(const DataRefImpl &a, const DataRefImpl &b) {
@@ -40,6 +39,19 @@
   return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
 }
 
+class RelocationRef {
+  DataRefImpl RelocationPimpl;
+  const ObjectFile *OwningObject;
+
+public:
+  RelocationRef() : OwningObject(NULL) { std::memset(&RelocationPimpl, 0, sizeof(RelocationPimpl)); }
+  RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
+
+  bool operator==(const RelocationRef &Other) const;
+
+  error_code getNext(RelocationRef &Result);
+};
+
 /// SymbolRef - This is a value type class that represents a single symbol in
 /// the list of symbols in the object file.
 class SymbolRef {
@@ -47,23 +59,24 @@
   const ObjectFile *OwningObject;
 
 public:
+  SymbolRef() : OwningObject(NULL) { std::memset(&SymbolPimpl, 0, sizeof(SymbolPimpl)); }
   SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
 
   bool operator==(const SymbolRef &Other) const;
 
-  SymbolRef getNext() const;
+  error_code getNext(SymbolRef &Result) const;
 
-  StringRef getName() const;
-  uint64_t  getAddress() const;
-  uint64_t  getSize() const;
+  error_code getName(StringRef &Result) const;
+  error_code getAddress(uint64_t &Result) const;
+  error_code getSize(uint64_t &Result) const;
 
   /// Returns the ascii char that should be displayed in a symbol table dump via
   /// nm for this symbol.
-  char      getNMTypeChar() const;
+  error_code getNMTypeChar(char &Result) const;
 
   /// Returns true for symbols that are internal to the object file format such
   /// as section symbols.
-  bool      isInternal() const;
+  error_code isInternal(bool &Result) const;
 };
 
 /// SectionRef - This is a value type class that represents a single section in
@@ -73,19 +86,20 @@
   const ObjectFile *OwningObject;
 
 public:
+  SectionRef() : OwningObject(NULL) { std::memset(&SectionPimpl, 0, sizeof(SectionPimpl)); }
   SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
 
   bool operator==(const SectionRef &Other) const;
 
-  SectionRef getNext() const;
+  error_code getNext(SectionRef &Result) const;
 
-  StringRef getName() const;
-  uint64_t  getAddress() const;
-  uint64_t  getSize() const;
-  StringRef getContents() const;
+  error_code getName(StringRef &Result) const;
+  error_code getAddress(uint64_t &Result) const;
+  error_code getSize(uint64_t &Result) const;
+  error_code getContents(StringRef &Result) const;
 
   // FIXME: Move to the normalization layer when it's created.
-  bool      isText() const;
+  error_code isText(bool &Result) const;
 };
 
 const uint64_t UnknownAddressOrSize = ~0ULL;
@@ -93,38 +107,42 @@
 /// ObjectFile - This class is the base class for all object file types.
 /// Concrete instances of this object are created by createObjectFile, which
 /// figure out which type to create.
-class ObjectFile {
+class ObjectFile : public Binary {
 private:
   ObjectFile(); // = delete
   ObjectFile(const ObjectFile &other); // = delete
 
 protected:
-  MemoryBuffer *MapFile;
-  const uint8_t *base;
+  ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec);
 
-  ObjectFile(MemoryBuffer *Object);
+  const uint8_t *base() const {
+    return reinterpret_cast<const uint8_t *>(Data->getBufferStart());
+  }
 
   // These functions are for SymbolRef to call internally. The main goal of
   // this is to allow SymbolRef::SymbolPimpl to point directly to the symbol
   // entry in the memory mapped object file. SymbolPimpl cannot contain any
   // virtual functions because then it could not point into the memory mapped
   // file.
+  //
+  // Implementations assume that the DataRefImpl is valid and has not been
+  // modified externally. It's UB otherwise.
   friend class SymbolRef;
-  virtual SymbolRef getSymbolNext(DataRefImpl Symb) const = 0;
-  virtual StringRef getSymbolName(DataRefImpl Symb) const = 0;
-  virtual uint64_t  getSymbolAddress(DataRefImpl Symb) const = 0;
-  virtual uint64_t  getSymbolSize(DataRefImpl Symb) const = 0;
-  virtual char      getSymbolNMTypeChar(DataRefImpl Symb) const = 0;
-  virtual bool      isSymbolInternal(DataRefImpl Symb) const = 0;
+  virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const = 0;
+  virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const = 0;
+  virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const =0;
+  virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const = 0;
+  virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const = 0;
+  virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const = 0;
 
   // Same as above for SectionRef.
   friend class SectionRef;
-  virtual SectionRef getSectionNext(DataRefImpl Sec) const = 0;
-  virtual StringRef  getSectionName(DataRefImpl Sec) const = 0;
-  virtual uint64_t   getSectionAddress(DataRefImpl Sec) const = 0;
-  virtual uint64_t   getSectionSize(DataRefImpl Sec) const = 0;
-  virtual StringRef  getSectionContents(DataRefImpl Sec) const = 0;
-  virtual bool       isSectionText(DataRefImpl Sec) const = 0;
+  virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const = 0;
+  virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const = 0;
+  virtual error_code getSectionAddress(DataRefImpl Sec, uint64_t &Res) const =0;
+  virtual error_code getSectionSize(DataRefImpl Sec, uint64_t &Res) const = 0;
+  virtual error_code getSectionContents(DataRefImpl Sec, StringRef &Res)const=0;
+  virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const = 0;
 
 
 public:
@@ -147,8 +165,12 @@
       return !(*this == other);
     }
 
-    content_iterator& operator++() {  // Preincrement
-      Current = Current.getNext();
+    content_iterator& increment(error_code &err) {
+      content_type next;
+      if (error_code ec = Current.getNext(next))
+        err = ec;
+      else
+        Current = next;
       return *this;
     }
   };
@@ -156,8 +178,6 @@
   typedef content_iterator<SymbolRef> symbol_iterator;
   typedef content_iterator<SectionRef> section_iterator;
 
-  virtual ~ObjectFile();
-
   virtual symbol_iterator begin_symbols() const = 0;
   virtual symbol_iterator end_symbols() const = 0;
 
@@ -171,8 +191,6 @@
   virtual StringRef getFileFormatName() const = 0;
   virtual /* Triple::ArchType */ unsigned getArch() const = 0;
 
-  StringRef getFilename() const;
-
   /// @returns Pointer to ObjectFile subclass to handle this type of object.
   /// @param ObjectPath The path to the object file. ObjectPath.isObject must
   ///        return true.
@@ -180,12 +198,16 @@
   static ObjectFile *createObjectFile(StringRef ObjectPath);
   static ObjectFile *createObjectFile(MemoryBuffer *Object);
 
-private:
+  static inline bool classof(const Binary *v) {
+    return v->getType() >= isObject &&
+           v->getType() < lastObject;
+  }
+  static inline bool classof(const ObjectFile *v) { return true; }
+
+public:
   static ObjectFile *createCOFFObjectFile(MemoryBuffer *Object);
   static ObjectFile *createELFObjectFile(MemoryBuffer *Object);
   static ObjectFile *createMachOObjectFile(MemoryBuffer *Object);
-  static ObjectFile *createArchiveObjectFile(MemoryBuffer *Object);
-  static ObjectFile *createLibObjectFile(MemoryBuffer *Object);
 };
 
 // Inline function definitions.
@@ -197,28 +219,28 @@
   return SymbolPimpl == Other.SymbolPimpl;
 }
 
-inline SymbolRef SymbolRef::getNext() const {
-  return OwningObject->getSymbolNext(SymbolPimpl);
+inline error_code SymbolRef::getNext(SymbolRef &Result) const {
+  return OwningObject->getSymbolNext(SymbolPimpl, Result);
 }
 
-inline StringRef SymbolRef::getName() const {
-  return OwningObject->getSymbolName(SymbolPimpl);
+inline error_code SymbolRef::getName(StringRef &Result) const {
+  return OwningObject->getSymbolName(SymbolPimpl, Result);
 }
 
-inline uint64_t SymbolRef::getAddress() const {
-  return OwningObject->getSymbolAddress(SymbolPimpl);
+inline error_code SymbolRef::getAddress(uint64_t &Result) const {
+  return OwningObject->getSymbolAddress(SymbolPimpl, Result);
 }
 
-inline uint64_t SymbolRef::getSize() const {
-  return OwningObject->getSymbolSize(SymbolPimpl);
+inline error_code SymbolRef::getSize(uint64_t &Result) const {
+  return OwningObject->getSymbolSize(SymbolPimpl, Result);
 }
 
-inline char SymbolRef::getNMTypeChar() const {
-  return OwningObject->getSymbolNMTypeChar(SymbolPimpl);
+inline error_code SymbolRef::getNMTypeChar(char &Result) const {
+  return OwningObject->getSymbolNMTypeChar(SymbolPimpl, Result);
 }
 
-inline bool SymbolRef::isInternal() const {
-  return OwningObject->isSymbolInternal(SymbolPimpl);
+inline error_code SymbolRef::isInternal(bool &Result) const {
+  return OwningObject->isSymbolInternal(SymbolPimpl, Result);
 }
 
 
@@ -232,28 +254,28 @@
   return SectionPimpl == Other.SectionPimpl;
 }
 
-inline SectionRef SectionRef::getNext() const {
-  return OwningObject->getSectionNext(SectionPimpl);
+inline error_code SectionRef::getNext(SectionRef &Result) const {
+  return OwningObject->getSectionNext(SectionPimpl, Result);
 }
 
-inline StringRef SectionRef::getName() const {
-  return OwningObject->getSectionName(SectionPimpl);
+inline error_code SectionRef::getName(StringRef &Result) const {
+  return OwningObject->getSectionName(SectionPimpl, Result);
 }
 
-inline uint64_t SectionRef::getAddress() const {
-  return OwningObject->getSectionAddress(SectionPimpl);
+inline error_code SectionRef::getAddress(uint64_t &Result) const {
+  return OwningObject->getSectionAddress(SectionPimpl, Result);
 }
 
-inline uint64_t SectionRef::getSize() const {
-  return OwningObject->getSectionSize(SectionPimpl);
+inline error_code SectionRef::getSize(uint64_t &Result) const {
+  return OwningObject->getSectionSize(SectionPimpl, Result);
 }
 
-inline StringRef SectionRef::getContents() const {
-  return OwningObject->getSectionContents(SectionPimpl);
+inline error_code SectionRef::getContents(StringRef &Result) const {
+  return OwningObject->getSectionContents(SectionPimpl, Result);
 }
 
-inline bool SectionRef::isText() const {
-  return OwningObject->isSectionText(SectionPimpl);
+inline error_code SectionRef::isText(bool &Result) const {
+  return OwningObject->isSectionText(SectionPimpl, Result);
 }
 
 } // end namespace object

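Client code now threads an error_code through both iteration and each
accessor. A minimal sketch of the resulting pattern; printSymbolNames() is
only an illustration and assumes a valid ObjectFile obtained from
createObjectFile().

#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::object;

// Illustration only: list symbol names with the error_code-returning API.
static void printSymbolNames(const ObjectFile *Obj) {
  error_code ec;
  for (ObjectFile::symbol_iterator I = Obj->begin_symbols(),
                                   E = Obj->end_symbols();
       I != E; I.increment(ec)) {
    if (ec)
      break;                  // advancing the iterator can itself fail now
    StringRef Name;
    if (I->getName(Name))
      break;                  // and so can every accessor
    outs() << Name << "\n";
  }
}
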
Modified: llvm/branches/type-system-rewrite/include/llvm/Support/BranchProbability.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Support/BranchProbability.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Support/BranchProbability.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Support/BranchProbability.h Sat Jul  2 22:28:07 2011
@@ -18,13 +18,17 @@
 
 namespace llvm {
 
-class raw_ostream;
+template<class BlockT, class FunctionT, class BranchProbInfoT>
+class BlockFrequencyImpl;
 class BranchProbabilityInfo;
 class MachineBranchProbabilityInfo;
 class MachineBasicBlock;
+class raw_ostream;
 
 // This class represents Branch Probability as a non-negative fraction.
 class BranchProbability {
+  template<class BlockT, class FunctionT, class BranchProbInfoT>
+  friend class BlockFrequencyImpl;
   friend class BranchProbabilityInfo;
   friend class MachineBranchProbabilityInfo;
   friend class MachineBasicBlock;
@@ -38,6 +42,10 @@
   BranchProbability(uint32_t n, uint32_t d);
 
 public:
+
+  uint32_t getNumerator() const { return N; }
+  uint32_t getDenominator() const { return D; }
+
   raw_ostream &print(raw_ostream &OS) const;
 
   void dump() const;

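Exposing the numerator and denominator lets non-friend code, such as the new
BlockFrequencyImpl, scale counts by a probability. A trivial, purely
illustrative helper (not an LLVM API; it ignores overflow of the 64-bit
product):

#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/DataTypes.h"
using namespace llvm;

// Illustration only: scale an execution count by a branch probability.
static uint64_t scaleByProbability(uint64_t Count, BranchProbability Prob) {
  return Count * Prob.getNumerator() / Prob.getDenominator();
}
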
Modified: llvm/branches/type-system-rewrite/include/llvm/Support/CFG.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Support/CFG.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Support/CFG.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Support/CFG.h Sat Jul  2 22:28:07 2011
@@ -33,7 +33,7 @@
   USE_iterator It;
 
   inline void advancePastNonTerminators() {
-    // Loop to ignore non terminator uses (for example PHI nodes).
+    // Loop to ignore non terminator uses (for example BlockAddresses).
     while (!It.atEnd() && !isa<TerminatorInst>(*It))
       ++It;
   }

Modified: llvm/branches/type-system-rewrite/include/llvm/Support/Endian.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Support/Endian.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Support/Endian.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Support/Endian.h Sat Jul  2 22:28:07 2011
@@ -14,7 +14,6 @@
 #ifndef LLVM_SUPPORT_ENDIAN_H
 #define LLVM_SUPPORT_ENDIAN_H
 
-#include "llvm/Config/config.h"
 #include "llvm/Support/Host.h"
 #include "llvm/Support/SwapByteOrder.h"
 #include "llvm/Support/type_traits.h"

Modified: llvm/branches/type-system-rewrite/include/llvm/Support/IRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Support/IRBuilder.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Support/IRBuilder.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Support/IRBuilder.h Sat Jul  2 22:28:07 2011
@@ -82,7 +82,7 @@
     InsertPt = I;
     SetCurrentDebugLocation(I->getDebugLoc());
   }
-  
+
   /// SetInsertPoint - This specifies that created instructions should be
   /// inserted at the specified point.
   void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
@@ -90,6 +90,19 @@
     InsertPt = IP;
   }
 
+  /// SetInsertPoint(Use) - Find the nearest point that dominates this use, and
+  /// specify that created instructions should be inserted at this point.
+  void SetInsertPoint(Use &U) {
+    Instruction *UseInst = cast<Instruction>(U.getUser());
+    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
+      BasicBlock *PredBB = Phi->getIncomingBlock(U);
+      assert(U != PredBB->getTerminator() && "critical edge not split");
+      SetInsertPoint(PredBB, PredBB->getTerminator());
+      return;
+    }
+    SetInsertPoint(UseInst);
+  }
+
   /// SetCurrentDebugLocation - Set location information used by debugging
   /// information.
   void SetCurrentDebugLocation(const DebugLoc &L) {
@@ -110,7 +123,7 @@
   /// getCurrentFunctionReturnType - Get the return type of the current function
   /// that we're emitting into.
   const Type *getCurrentFunctionReturnType() const;
-  
+
   /// InsertPoint - A saved insertion point.
   class InsertPoint {
     BasicBlock *Block;
@@ -198,7 +211,7 @@
   ConstantInt *getInt64(uint64_t C) {
     return ConstantInt::get(getInt64Ty(), C);
   }
-  
+
   /// getInt - Get a constant integer value.
   ConstantInt *getInt(const APInt &AI) {
     return ConstantInt::get(Context, AI);
@@ -263,7 +276,7 @@
                          bool isVolatile = false, MDNode *TBAATag = 0) {
     return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, TBAATag);
   }
-  
+
   CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
                          bool isVolatile = false, MDNode *TBAATag = 0);
 
@@ -274,7 +287,7 @@
                          bool isVolatile = false, MDNode *TBAATag = 0) {
     return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
   }
-  
+
   CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
                          bool isVolatile = false, MDNode *TBAATag = 0);
 
@@ -285,9 +298,9 @@
                           bool isVolatile = false, MDNode *TBAATag = 0) {
     return CreateMemMove(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag);
   }
-  
+
   CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
-                          bool isVolatile = false, MDNode *TBAATag = 0);  
+                          bool isVolatile = false, MDNode *TBAATag = 0);
 
   /// CreateLifetimeStart - Create a lifetime.start intrinsic.  If the pointer
   /// isn't i8* it will be converted.
@@ -341,7 +354,13 @@
     SetInsertPoint(IP);
     SetCurrentDebugLocation(IP->getDebugLoc());
   }
-  
+
+  explicit IRBuilder(Use &U)
+    : IRBuilderBase(U->getContext()), Folder() {
+    SetInsertPoint(U);
+    SetCurrentDebugLocation(cast<Instruction>(U.getUser())->getDebugLoc());
+  }
+
   IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F)
     : IRBuilderBase(TheBB->getContext()), Folder(F) {
     SetInsertPoint(TheBB, IP);

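The Use-based insertion point turns "build a replacement right where this
operand is consumed" into a one-liner, with PHI uses redirected into the
incoming block as the assert above requires. A small sketch; the i32 operand
type and the +1 rewrite are arbitrary assumptions for illustration.

#include "llvm/Support/IRBuilder.h"
using namespace llvm;

// Illustration only: replace the value consumed by U with (U + 1), built at
// the nearest point that dominates the use. Assumes the operand is i32; for
// a PHI use the builder lands in the incoming block, so that edge must not
// be critical.
static void replaceUseWithIncrement(Use &U) {
  IRBuilder<> Builder(U);                  // new Use-based constructor
  Value *Old = U.get();
  Value *Inc = Builder.CreateAdd(Old, Builder.getInt32(1), "inc");
  U.set(Inc);                              // rewrite just this one use
}
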
Modified: llvm/branches/type-system-rewrite/include/llvm/Support/system_error.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Support/system_error.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Support/system_error.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Support/system_error.h Sat Jul  2 22:28:07 2011
@@ -222,7 +222,7 @@
 
 */
 
-#include "llvm/Config/config.h"
+#include "llvm/Config/llvm-config.h"
 #include "llvm/Support/type_traits.h"
 #include <cerrno>
 #include <string>

Removed: llvm/branches/type-system-rewrite/include/llvm/Target/SubtargetFeature.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/SubtargetFeature.h?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/SubtargetFeature.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/SubtargetFeature.h (removed)
@@ -1,119 +0,0 @@
-//===-- llvm/Target/SubtargetFeature.h - CPU characteristics ----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines and manages user or tool specified CPU characteristics.
-// The intent is to be able to package specific features that should or should
-// not be used on a specific target processor.  A tool, such as llc, could, as
-// an example, gather chip info from the command line, along with features
-// that should be used on that chip.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_SUBTARGETFEATURE_H
-#define LLVM_TARGET_SUBTARGETFEATURE_H
-
-#include <string>
-#include <vector>
-#include <cstring>
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-  class raw_ostream;
-  
-//===----------------------------------------------------------------------===//
-///
-/// SubtargetFeatureKV - Used to provide key value pairs for feature and
-/// CPU bit flags.
-//
-struct SubtargetFeatureKV {
-  const char *Key;                      // K-V key string
-  const char *Desc;                     // Help descriptor
-  uint64_t Value;                       // K-V integer value
-  uint64_t Implies;                     // K-V bit mask
-  
-  // Compare routine for std binary search
-  bool operator<(const SubtargetFeatureKV &S) const {
-    return strcmp(Key, S.Key) < 0;
-  }
-};
-  
-//===----------------------------------------------------------------------===//
-///
-/// SubtargetInfoKV - Used to provide key value pairs for CPU and arbitrary
-/// pointers.
-//
-struct SubtargetInfoKV {
-  const char *Key;                      // K-V key string
-  void *Value;                          // K-V pointer value
-  
-  // Compare routine for std binary search
-  bool operator<(const SubtargetInfoKV &S) const {
-    return strcmp(Key, S.Key) < 0;
-  }
-};
-  
-//===----------------------------------------------------------------------===//
-///
-/// SubtargetFeatures - Manages the enabling and disabling of subtarget 
-/// specific features.  Features are encoded as a string of the form
-///   "cpu,+attr1,+attr2,-attr3,...,+attrN"
-/// A comma separates each feature from the next (all lowercase.)
-/// The first feature is always the CPU subtype (eg. pentiumm).  If the CPU
-/// value is "generic" then the CPU subtype should be generic for the target.
-/// Each of the remaining features is prefixed with + or - indicating whether
-/// that feature should be enabled or disabled contrary to the cpu
-/// specification.
-///
-
-class SubtargetFeatures {
-  std::vector<std::string> Features;    // Subtarget features as a vector
-public:
-  explicit SubtargetFeatures(const std::string &Initial = std::string());
-
-  /// Features string accessors.
-  std::string getString() const;
-  void setString(const std::string &Initial);
-
-  /// Set the CPU string.  Replaces previous setting.  Setting to "" clears CPU.
-  void setCPU(const std::string &String);
-
-  /// Setting CPU string only if no string is set.
-  void setCPUIfNone(const std::string &String);
-
-  /// Returns current CPU string.
-  const std::string & getCPU() const;
-
-  /// Adding Features.
-  void AddFeature(const std::string &String, bool IsEnabled = true);
-           
-  /// Get feature bits.
-  uint64_t getBits(const SubtargetFeatureKV *CPUTable,
-                         size_t CPUTableSize,
-                   const SubtargetFeatureKV *FeatureTable,
-                         size_t FeatureTableSize);
-                         
-  /// Get info pointer
-  void *getInfo(const SubtargetInfoKV *Table, size_t TableSize);
-  
-  /// Print feature string.
-  void print(raw_ostream &OS) const;
-  
-  // Dump feature info.
-  void dump() const;
-
-  /// Retrieve a formatted string of the default features for the specified
-  /// target triple.
-  void getDefaultSubtargetFeatures(const std::string &CPU,
-                                   const Triple& Triple);
-};
-
-} // End namespace llvm
-
-#endif

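The header leaves include/llvm/Target because the class moves to the MC layer
as llvm/MC/SubtargetFeature.h. Below is a small sketch of the feature-string
interface documented above, assuming the move keeps that interface intact;
the CPU and feature names are only examples.

#include "llvm/MC/SubtargetFeature.h"
#include <string>
using namespace llvm;

// Illustration only: build a "cpu,+attr,-attr" string with the interface
// shown in the removed header, now expected under llvm/MC.
static std::string describeChip() {
  SubtargetFeatures Features("generic");   // CPU subtype always comes first
  Features.AddFeature("sse2");             // enabled feature  -> "+sse2"
  Features.AddFeature("3dnow", false);     // disabled feature -> "-3dnow"
  return Features.getString();             // roughly "generic,+sse2,-3dnow"
}
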
Modified: llvm/branches/type-system-rewrite/include/llvm/Target/Target.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/Target.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/Target.td (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/Target.td Sat Jul  2 22:28:07 2011
@@ -26,11 +26,19 @@
   string Namespace = "";
 }
 
+// RegAltNameIndex - The alternate name set to use for register operands of
+// this register class when printing.
+class RegAltNameIndex {
+  string Namespace = "";
+}
+def NoRegAltName : RegAltNameIndex;
+
 // Register - You should define one instance of this class for each register
 // in the target machine.  String n will become the "name" of the register.
-class Register<string n> {
+class Register<string n, list<string> altNames = []> {
   string Namespace = "";
   string AsmName = n;
+  list<string> AltNames = altNames;
 
   // Aliases - A list of registers that this register overlaps with.  A read or
   // modification of this register can potentially read or modify the aliased
@@ -48,6 +56,10 @@
   // SubRegs.
   list<SubRegIndex> SubRegIndices = [];
 
+  // RegAltNameIndices - The alternate name indices which are valid for this
+  // register.
+  list<RegAltNameIndex> RegAltNameIndices = [];
+
   // CompositeIndices - Specify subreg indices that don't correspond directly to
   // a register in SubRegs and are not inherited. The following formats are
   // supported:
@@ -92,7 +104,7 @@
 // registers by register allocators.
 //
 class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
-                    dag regList> {
+                    dag regList, RegAltNameIndex idx = NoRegAltName> {
   string Namespace = namespace;
 
   // RegType - Specify the list ValueType of the registers in this register
@@ -124,6 +136,11 @@
   //
   dag MemberList = regList;
 
+  // AltNameIndex - The alternate register name to use when printing operands
+  // of this register class. Every register in the register class must have
+  // a valid alternate name for the given index.
+  RegAltNameIndex altNameIndex = idx;
+
   // SubRegClasses - Specify the register class of subregisters as a list of
   // dags: (RegClass SubRegIndex, SubRegindex, ...)
   list<dag> SubRegClasses = [];
@@ -466,6 +483,24 @@
   AsmOperandClass ParserMatchClass = ImmAsmOperand;
 }
 
+class RegisterOperand<RegisterClass regclass, string pm = "printOperand"> {
+  // RegClass - The register class of the operand.
+  RegisterClass RegClass = regclass;
+  // PrintMethod - The target method to call to print register operands of
+  // this type. The method normally will just use an alt-name index to look
+  // up the name to print. Default to the generic printOperand().
+  string PrintMethod = pm;
+  // ParserMatchClass - The "match class" that operands of this type fit
+  // in. Match classes are used to define the order in which instructions are
+  // match, to ensure that which instructions gets matched is deterministic.
+  //
+  // The target specific parser must be able to classify an parsed operand into
+  // a unique class, which does not partially overlap with any other classes. It
+  // can match a subset of some other class, in which case the AsmOperandClass
+  // should declare the other operand as one of its super classes.
+  AsmOperandClass ParserMatchClass;
+}
+
 def i1imm  : Operand<i1>;
 def i8imm  : Operand<i8>;
 def i16imm : Operand<i16>;

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetAsmInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetAsmInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetAsmInfo.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetAsmInfo.h Sat Jul  2 22:28:07 2011
@@ -59,6 +59,10 @@
     return TLOF->getEHFrameSection();
   }
 
+  const MCSection *getCompactUnwindSection() const {
+    return TLOF->getCompactUnwindSection();
+  }
+
   const MCSection *getDwarfFrameSection() const {
     return TLOF->getDwarfFrameSection();
   }
@@ -102,6 +106,10 @@
   int getSEHRegNum(unsigned RegNum) const {
     return TRI->getSEHRegNum(RegNum);
   }
+
+  int getCompactUnwindRegNum(unsigned RegNum) const {
+    return TRI->getCompactUnwindRegNum(RegNum);
+  }
 };
 
 }

Removed: llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrDesc.h?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrDesc.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrDesc.h (removed)
@@ -1,520 +0,0 @@
-//===-- llvm/Target/TargetInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the TargetOperandInfo and TargetInstrDesc classes, which
-// are used to describe target instructions and their operands. 
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETINSTRDESC_H
-#define LLVM_TARGET_TARGETINSTRDESC_H
-
-#include "llvm/Support/DataTypes.h"
-
-namespace llvm {
-
-class TargetRegisterClass;
-class TargetRegisterInfo;
-  
-//===----------------------------------------------------------------------===//
-// Machine Operand Flags and Description
-//===----------------------------------------------------------------------===//
-  
-namespace TOI {
-  // Operand constraints
-  enum OperandConstraint {
-    TIED_TO = 0,    // Must be allocated the same register as.
-    EARLY_CLOBBER   // Operand is an early clobber register operand
-  };
-  
-  /// OperandFlags - These are flags set on operands, but should be considered
-  /// private, all access should go through the TargetOperandInfo accessors.
-  /// See the accessors for a description of what these are.
-  enum OperandFlags {
-    LookupPtrRegClass = 0,
-    Predicate,
-    OptionalDef
-  };
-}
-
-/// TargetOperandInfo - This holds information about one operand of a machine
-/// instruction, indicating the register class for register operands, etc.
-///
-class TargetOperandInfo {
-public:
-  /// RegClass - This specifies the register class enumeration of the operand 
-  /// if the operand is a register.  If isLookupPtrRegClass is set, then this is
-  /// an index that is passed to TargetRegisterInfo::getPointerRegClass(x) to
-  /// get a dynamic register class.
-  ///
-  /// NOTE: This member should be considered to be private, all access should go
-  /// through "getRegClass(TRI)" below.
-  short RegClass;
-  
-  /// Flags - These are flags from the TOI::OperandFlags enum.
-  unsigned short Flags;
-  
-  /// Lower 16 bits are used to specify which constraints are set. The higher 16
-  /// bits are used to specify the value of constraints (4 bits each).
-  unsigned Constraints;
-  /// Currently no other information.
-  
-  /// getRegClass - Get the register class for the operand, handling resolution
-  /// of "symbolic" pointer register classes etc.  If this is not a register
-  /// operand, this returns null.
-  const TargetRegisterClass *getRegClass(const TargetRegisterInfo *TRI) const;
-  
-  
-  /// isLookupPtrRegClass - Set if this operand is a pointer value and it
-  /// requires a callback to look up its register class.
-  bool isLookupPtrRegClass() const { return Flags&(1 <<TOI::LookupPtrRegClass);}
-  
-  /// isPredicate - Set if this is one of the operands that made up of
-  /// the predicate operand that controls an isPredicable() instruction.
-  bool isPredicate() const { return Flags & (1 << TOI::Predicate); }
-  
-  /// isOptionalDef - Set if this operand is a optional def.
-  ///
-  bool isOptionalDef() const { return Flags & (1 << TOI::OptionalDef); }
-};
-
-  
-//===----------------------------------------------------------------------===//
-// Machine Instruction Flags and Description
-//===----------------------------------------------------------------------===//
-
-/// TargetInstrDesc flags - These should be considered private to the
-/// implementation of the TargetInstrDesc class.  Clients should use the
-/// predicate methods on TargetInstrDesc, not use these directly.  These
-/// all correspond to bitfields in the TargetInstrDesc::Flags field.
-namespace TID {
-  enum {
-    Variadic = 0,
-    HasOptionalDef,
-    Return,
-    Call,
-    Barrier,
-    Terminator,
-    Branch,
-    IndirectBranch,
-    Compare,
-    MoveImm,
-    Bitcast,
-    DelaySlot,
-    FoldableAsLoad,
-    MayLoad,
-    MayStore,
-    Predicable,
-    NotDuplicable,
-    UnmodeledSideEffects,
-    Commutable,
-    ConvertibleTo3Addr,
-    UsesCustomInserter,
-    Rematerializable,
-    CheapAsAMove,
-    ExtraSrcRegAllocReq,
-    ExtraDefRegAllocReq
-  };
-}
-
-/// TargetInstrDesc - Describe properties that are true of each
-/// instruction in the target description file.  This captures information about
-/// side effects, register use and many other things.  There is one instance of
-/// this struct for each target instruction class, and the MachineInstr class
-/// points to this struct directly to describe itself.
-class TargetInstrDesc {
-public:
-  unsigned short  Opcode;        // The opcode number
-  unsigned short  NumOperands;   // Num of args (may be more if variable_ops)
-  unsigned short  NumDefs;       // Num of args that are definitions
-  unsigned short  SchedClass;    // enum identifying instr sched class
-  const char *    Name;          // Name of the instruction record in td file
-  unsigned        Flags;         // Flags identifying machine instr class
-  uint64_t        TSFlags;       // Target Specific Flag values
-  const unsigned *ImplicitUses;  // Registers implicitly read by this instr
-  const unsigned *ImplicitDefs;  // Registers implicitly defined by this instr
-  const TargetRegisterClass **RCBarriers; // Reg classes completely "clobbered"
-  const TargetOperandInfo *OpInfo; // 'NumOperands' entries about operands
-
-  /// getOperandConstraint - Returns the value of the specific constraint if
-  /// it is set. Returns -1 if it is not set.
-  int getOperandConstraint(unsigned OpNum,
-                           TOI::OperandConstraint Constraint) const {
-    if (OpNum < NumOperands &&
-        (OpInfo[OpNum].Constraints & (1 << Constraint))) {
-      unsigned Pos = 16 + Constraint * 4;
-      return (int)(OpInfo[OpNum].Constraints >> Pos) & 0xf;
-    }
-    return -1;
-  }
-
-  /// getRegClass - Returns the register class constraint for OpNum, or NULL.
-  const TargetRegisterClass *getRegClass(unsigned OpNum,
-                                         const TargetRegisterInfo *TRI) const {
-    return OpNum < NumOperands ? OpInfo[OpNum].getRegClass(TRI) : 0;
-  }
-
-  /// getOpcode - Return the opcode number for this descriptor.
-  unsigned getOpcode() const {
-    return Opcode;
-  }
-  
-  /// getName - Return the name of the record in the .td file for this
-  /// instruction, for example "ADD8ri".
-  const char *getName() const {
-    return Name;
-  }
-  
-  /// getNumOperands - Return the number of declared MachineOperands for this
-  /// MachineInstruction.  Note that variadic (isVariadic() returns true)
-  /// instructions may have additional operands at the end of the list, and note
-  /// that the machine instruction may include implicit register def/uses as
-  /// well.
-  unsigned getNumOperands() const {
-    return NumOperands;
-  }
-  
-  /// getNumDefs - Return the number of MachineOperands that are register
-  /// definitions.  Register definitions always occur at the start of the 
-  /// machine operand list.  This is the number of "outs" in the .td file,
-  /// and does not include implicit defs.
-  unsigned getNumDefs() const {
-    return NumDefs;
-  }
-  
-  /// isVariadic - Return true if this instruction can have a variable number of
-  /// operands.  In this case, the variable operands will be after the normal
-  /// operands but before the implicit definitions and uses (if any are
-  /// present).
-  bool isVariadic() const {
-    return Flags & (1 << TID::Variadic);
-  }
-  
-  /// hasOptionalDef - Set if this instruction has an optional definition, e.g.
-  /// ARM instructions which can set condition code if 's' bit is set.
-  bool hasOptionalDef() const {
-    return Flags & (1 << TID::HasOptionalDef);
-  }
-  
-  /// getImplicitUses - Return a list of registers that are potentially
-  /// read by any instance of this machine instruction.  For example, on X86,
-  /// the "adc" instruction adds two register operands and adds the carry bit in
-  /// from the flags register.  In this case, the instruction is marked as
-  /// implicitly reading the flags.  Likewise, the variable shift instruction on
-  /// X86 is marked as implicitly reading the 'CL' register, which it always
-  /// does.
-  ///
-  /// This method returns null if the instruction has no implicit uses.
-  const unsigned *getImplicitUses() const {
-    return ImplicitUses;
-  }
-  
-  /// getNumImplicitUses - Return the number of implicit uses this instruction
-  /// has.
-  unsigned getNumImplicitUses() const {
-    if (ImplicitUses == 0) return 0;
-    unsigned i = 0;
-    for (; ImplicitUses[i]; ++i) /*empty*/;
-    return i;
-  }
-  
-  
-  /// getImplicitDefs - Return a list of registers that are potentially
-  /// written by any instance of this machine instruction.  For example, on X86,
-  /// many instructions implicitly set the flags register.  In this case, they
-  /// are marked as setting the FLAGS.  Likewise, many instructions always
-  /// deposit their result in a physical register.  For example, the X86 divide
-  /// instruction always deposits the quotient and remainder in the EAX/EDX
-  /// registers.  For that instruction, this will return a list containing the
-  /// EAX/EDX/EFLAGS registers.
-  ///
-  /// This method returns null if the instruction has no implicit defs.
-  const unsigned *getImplicitDefs() const {
-    return ImplicitDefs;
-  }
-  
-  /// getNumImplicitDefs - Return the number of implicit defs this instruction
-  /// has.
-  unsigned getNumImplicitDefs() const {
-    if (ImplicitDefs == 0) return 0;
-    unsigned i = 0;
-    for (; ImplicitDefs[i]; ++i) /*empty*/;
-    return i;
-  }
-  
-  /// hasImplicitUseOfPhysReg - Return true if this instruction implicitly
-  /// uses the specified physical register.
-  bool hasImplicitUseOfPhysReg(unsigned Reg) const {
-    if (const unsigned *ImpUses = ImplicitUses)
-      for (; *ImpUses; ++ImpUses)
-        if (*ImpUses == Reg) return true;
-    return false;
-  }
-  
-  /// hasImplicitDefOfPhysReg - Return true if this instruction implicitly
-  /// defines the specified physical register.
-  bool hasImplicitDefOfPhysReg(unsigned Reg) const {
-    if (const unsigned *ImpDefs = ImplicitDefs)
-      for (; *ImpDefs; ++ImpDefs)
-        if (*ImpDefs == Reg) return true;
-    return false;
-  }
-
-  /// getRegClassBarriers - Return a list of register classes that are
-  /// completely clobbered by this machine instruction. For example, on X86
-  /// the call instructions will completely clobber all the registers in the
-  /// fp stack and XMM classes.
-  ///
-  /// This method returns null if the instruction doesn't completely clobber
-  /// any register class.
-  const TargetRegisterClass **getRegClassBarriers() const {
-    return RCBarriers;
-  }
-
-  /// getSchedClass - Return the scheduling class for this instruction.  The
-  /// scheduling class is an index into the InstrItineraryData table.  This
-  /// returns zero if there is no known scheduling information for the
-  /// instruction.
-  ///
-  unsigned getSchedClass() const {
-    return SchedClass;
-  }
-  
-  bool isReturn() const {
-    return Flags & (1 << TID::Return);
-  }
-  
-  bool isCall() const {
-    return Flags & (1 << TID::Call);
-  }
-  
-  /// isBarrier - Returns true if the specified instruction stops control flow
-  /// from executing the instruction immediately following it.  Examples include
-  /// unconditional branches and return instructions.
-  bool isBarrier() const {
-    return Flags & (1 << TID::Barrier);
-  }
-  
-  /// isTerminator - Returns true if this instruction part of the terminator for
-  /// a basic block.  Typically this is things like return and branch
-  /// instructions.
-  ///
-  /// Various passes use this to insert code into the bottom of a basic block,
-  /// but before control flow occurs.
-  bool isTerminator() const {
-    return Flags & (1 << TID::Terminator);
-  }
-  
-  /// isBranch - Returns true if this is a conditional, unconditional, or
-  /// indirect branch.  Predicates below can be used to discriminate between
-  /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
-  /// get more information.
-  bool isBranch() const {
-    return Flags & (1 << TID::Branch);
-  }
-
-  /// isIndirectBranch - Return true if this is an indirect branch, such as a
-  /// branch through a register.
-  bool isIndirectBranch() const {
-    return Flags & (1 << TID::IndirectBranch);
-  }
-
-  /// isConditionalBranch - Return true if this is a branch which may fall
-  /// through to the next instruction or may transfer control flow to some other
-  /// block.  The TargetInstrInfo::AnalyzeBranch method can be used to get more
-  /// information about this branch.
-  bool isConditionalBranch() const {
-    return isBranch() & !isBarrier() & !isIndirectBranch();
-  }
-  
-  /// isUnconditionalBranch - Return true if this is a branch which always
-  /// transfers control flow to some other block.  The
-  /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
-  /// about this branch.
-  bool isUnconditionalBranch() const {
-    return isBranch() && isBarrier() && !isIndirectBranch();
-  }
-  
-  /// isPredicable - Return true if this instruction has a predicate operand that
-  /// controls execution.  It may be set to 'always', or may be set to other
-  /// values.  There are various methods in TargetInstrInfo that can be used to
-  /// control and modify the predicate in this instruction.
-  bool isPredicable() const {
-    return Flags & (1 << TID::Predicable);
-  }
-  
-  /// isCompare - Return true if this instruction is a comparison.
-  bool isCompare() const {
-    return Flags & (1 << TID::Compare);
-  }
-  
-  /// isMoveImmediate - Return true if this instruction is a move immediate
-  /// (including conditional moves) instruction. 
-  bool isMoveImmediate() const {
-    return Flags & (1 << TID::MoveImm);
-  }
-
-  /// isBitcast - Return true if this instruction is a bitcast instruction.
-  ///
-  bool isBitcast() const {
-    return Flags & (1 << TID::Bitcast);
-  }
-  
-  /// isNotDuplicable - Return true if this instruction cannot be safely
-  /// duplicated.  For example, if the instruction has unique labels attached
-  /// to it, duplicating it would cause multiple definition errors.
-  bool isNotDuplicable() const {
-    return Flags & (1 << TID::NotDuplicable);
-  }
-  
-  /// hasDelaySlot - Returns true if the specified instruction has a delay slot
-  /// which must be filled by the code generator.
-  bool hasDelaySlot() const {
-    return Flags & (1 << TID::DelaySlot);
-  }
-  
-  /// canFoldAsLoad - Return true for instructions that can be folded as
-  /// memory operands in other instructions. The most common use for this
-  /// is instructions that are simple loads from memory that don't modify
-  /// the loaded value in any way, but it can also be used for instructions
-  /// that can be expressed as constant-pool loads, such as V_SETALLONES
-  /// on x86, to allow them to be folded when it is beneficial.
-  /// This should only be set on instructions that return a value in their
-  /// only virtual register definition.
-  bool canFoldAsLoad() const {
-    return Flags & (1 << TID::FoldableAsLoad);
-  }
-  
-  //===--------------------------------------------------------------------===//
-  // Side Effect Analysis
-  //===--------------------------------------------------------------------===//
-
-  /// mayLoad - Return true if this instruction could possibly read memory.
-  /// Instructions with this flag set are not necessarily simple load
-  /// instructions, they may load a value and modify it, for example.
-  bool mayLoad() const {
-    return Flags & (1 << TID::MayLoad);
-  }
-  
-  
-  /// mayStore - Return true if this instruction could possibly modify memory.
-  /// Instructions with this flag set are not necessarily simple store
-  /// instructions, they may store a modified value based on their operands, or
-  /// may not actually modify anything, for example.
-  bool mayStore() const {
-    return Flags & (1 << TID::MayStore);
-  }
-  
-  /// hasUnmodeledSideEffects - Return true if this instruction has side
-  /// effects that are not modeled by other flags.  This does not return true
-  /// for instructions whose effects are captured by:
-  ///
-  ///  1. Their operand list and implicit definition/use list.  Register use/def
-  ///     info is explicit for instructions.
-  ///  2. Memory accesses.  Use mayLoad/mayStore.
-  ///  3. Calling, branching, returning: use isCall/isReturn/isBranch.
-  ///
-  /// Examples of side effects would be modifying 'invisible' machine state like
-  /// a control register, flushing a cache, modifying a register invisible to
-  /// LLVM, etc.
-  ///
-  bool hasUnmodeledSideEffects() const {
-    return Flags & (1 << TID::UnmodeledSideEffects);
-  }
-  
-  //===--------------------------------------------------------------------===//
-  // Flags that indicate whether an instruction can be modified by a method.
-  //===--------------------------------------------------------------------===//
-  
-  /// isCommutable - Return true if this may be a 2- or 3-address
-  /// instruction (of the form "X = op Y, Z, ..."), which produces the same
-  /// result if Y and Z are exchanged.  If this flag is set, then the 
-  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
-  /// instruction.
-  ///
-  /// Note that this flag may be set on instructions that are only commutable
-  /// sometimes.  In these cases, the call to commuteInstruction will fail.
-  /// Also note that some instructions require non-trivial modification to
-  /// commute them.
-  bool isCommutable() const {
-    return Flags & (1 << TID::Commutable);
-  }
-  
-  /// isConvertibleTo3Addr - Return true if this is a 2-address instruction
-  /// which can be changed into a 3-address instruction if needed.  Doing this
-  /// transformation can be profitable in the register allocator, because it
-  /// means that the instruction can use a 2-address form if possible, but
-  /// degrade into a less efficient form if the source and dest register cannot
-  /// be assigned to the same register.  For example, this allows the x86
-  /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
-  /// is the same speed as the shift but has bigger code size.
-  ///
-  /// If this returns true, then the target must implement the
-  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
-  /// is allowed to fail if the transformation isn't valid for this specific
-  /// instruction (e.g. shl reg, 4 on x86).
-  ///
-  bool isConvertibleTo3Addr() const {
-    return Flags & (1 << TID::ConvertibleTo3Addr);
-  }
-  
-  /// usesCustomInsertionHook - Return true if this instruction requires
-  /// custom insertion support when the DAG scheduler is inserting it into a
-  /// machine basic block.  If this is true for the instruction, it basically
-  /// means that it is a pseudo instruction used at SelectionDAG time that is 
-  /// expanded out into magic code by the target when MachineInstrs are formed.
-  ///
-  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
-  /// is used to insert this into the MachineBasicBlock.
-  bool usesCustomInsertionHook() const {
-    return Flags & (1 << TID::UsesCustomInserter);
-  }
-  
-  /// isRematerializable - Returns true if this instruction is a candidate for
-  /// remat.  This flag is deprecated; please don't use it anymore.  If this
-  /// flag is set, the isReallyTriviallyReMaterializable() method is called to
-  /// verify the instruction is really rematable.
-  bool isRematerializable() const {
-    return Flags & (1 << TID::Rematerializable);
-  }
-
-  /// isAsCheapAsAMove - Returns true if this instruction has the same cost (or
-  /// less) than a move instruction. This is useful during certain types of
-  /// optimizations (e.g., remat during two-address conversion or machine licm)
-  /// where we would like to remat or hoist the instruction, but not if it costs
-  /// more than moving the instruction into the appropriate register. Note, we
-  /// are not marking copies from and to the same register class with this flag.
-  bool isAsCheapAsAMove() const {
-    return Flags & (1 << TID::CheapAsAMove);
-  }
-
-  /// hasExtraSrcRegAllocReq - Returns true if this instruction's source operands
-  /// have special register allocation requirements that are not captured by the
-  /// operand register classes. e.g. ARM::STRD's two source registers must be an
-  /// even / odd pair, ARM::STM registers have to be in ascending order.
-  /// Post-register allocation passes should not attempt to change allocations
-  /// for sources of instructions with this flag.
-  bool hasExtraSrcRegAllocReq() const {
-    return Flags & (1 << TID::ExtraSrcRegAllocReq);
-  }
-
-  /// hasExtraDefRegAllocReq - Returns true if this instruction's def operands
-  /// have special register allocation requirements that are not captured by the
-  /// operand register classes. e.g. ARM::LDRD's two def registers must be an
-  /// even / odd pair, ARM::LDM registers have to be in ascending order.
-  /// Post-register allocation passes should not attempt to change allocations
-  /// for definitions of instructions with this flag.
-  bool hasExtraDefRegAllocReq() const {
-    return Flags & (1 << TID::ExtraDefRegAllocReq);
-  }
-};
-
-} // end namespace llvm
-
-#endif
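
A minimal sketch of how the flag accessors above are typically queried. It
assumes the same accessors are available on MCInstrDesc after this merge (the
class they move to from TargetInstrDesc); the helper names are illustrative
only, not part of LLVM.

  #include "llvm/MC/MCInstrDesc.h"

  // Returns true for branches that may fall through to the next instruction;
  // isConditionalBranch() already excludes barriers and indirect branches.
  static bool isFallthroughBranch(const llvm::MCInstrDesc &Desc) {
    return Desc.isConditionalBranch() && !Desc.hasUnmodeledSideEffects();
  }

  // Conservative check for memory access: mayLoad()/mayStore() only say the
  // instruction *could* read or write memory.
  static bool touchesMemory(const llvm::MCInstrDesc &Desc) {
    return Desc.mayLoad() || Desc.mayStore();
  }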

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrInfo.h Sat Jul  2 22:28:07 2011
@@ -14,7 +14,7 @@
 #ifndef LLVM_TARGET_TARGETINSTRINFO_H
 #define LLVM_TARGET_TARGETINSTRINFO_H
 
-#include "llvm/Target/TargetInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 
 namespace llvm {
@@ -40,25 +40,22 @@
 ///
 /// TargetInstrInfo - Interface to description of machine instruction set
 ///
-class TargetInstrInfo {
-  const TargetInstrDesc *Descriptors; // Raw array to allow static init'n
-  unsigned NumOpcodes;                // Number of entries in the desc array
-
+class TargetInstrInfo : public MCInstrInfo {
   TargetInstrInfo(const TargetInstrInfo &);  // DO NOT IMPLEMENT
   void operator=(const TargetInstrInfo &);   // DO NOT IMPLEMENT
 public:
-  TargetInstrInfo(const TargetInstrDesc *desc, unsigned NumOpcodes);
+  TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1)
+    : CallFrameSetupOpcode(CFSetupOpcode),
+      CallFrameDestroyOpcode(CFDestroyOpcode) {
+  }
+    
   virtual ~TargetInstrInfo();
 
-  unsigned getNumOpcodes() const { return NumOpcodes; }
-
-  /// get - Return the machine instruction descriptor that corresponds to the
-  /// specified instruction opcode.
-  ///
-  const TargetInstrDesc &get(unsigned Opcode) const {
-    assert(Opcode < NumOpcodes && "Invalid opcode!");
-    return Descriptors[Opcode];
-  }
+  /// getRegClass - Given a machine instruction descriptor, returns the register
+  /// class constraint for OpNum, or NULL.
+  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
+                                         unsigned OpNum,
+                                         const TargetRegisterInfo *TRI) const;
 
   /// isTriviallyReMaterializable - Return true if the instruction is trivially
   /// rematerializable, meaning it has no side effects and requires no operands
@@ -93,6 +90,15 @@
                                                 AliasAnalysis *AA) const;
 
 public:
+  /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
+  /// frame setup/destroy instructions if they exist (-1 otherwise).  Some
+  /// targets use pseudo instructions in order to abstract away the difference
+  /// between operating with a frame pointer and operating without, through the
+  /// use of these two instructions.
+  ///
+  int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
+  int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
+
   /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
   /// extension instruction. That is, it's like a copy where it's legal for the
   /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
@@ -663,6 +669,9 @@
   virtual
   bool hasLowDefLatency(const InstrItineraryData *ItinData,
                         const MachineInstr *DefMI, unsigned DefIdx) const;
+
+private:
+  int CallFrameSetupOpcode, CallFrameDestroyOpcode;
 };
 
 /// TargetInstrInfoImpl - This is the default implementation of
@@ -671,8 +680,9 @@
 /// libcodegen, not in libtarget.
 class TargetInstrInfoImpl : public TargetInstrInfo {
 protected:
-  TargetInstrInfoImpl(const TargetInstrDesc *desc, unsigned NumOpcodes)
-  : TargetInstrInfo(desc, NumOpcodes) {}
+  TargetInstrInfoImpl(int CallFrameSetupOpcode = -1,
+                      int CallFrameDestroyOpcode = -1)
+    : TargetInstrInfo(CallFrameSetupOpcode, CallFrameDestroyOpcode) {}
 public:
   virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
                                        MachineBasicBlock *NewDest) const;
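
With this merge the call frame pseudo opcodes are owned by TargetInstrInfo
rather than TargetRegisterInfo. A minimal sketch of a query against the new
getters, using only the accessors declared above; the helper itself is
hypothetical:

  #include "llvm/Target/TargetInstrInfo.h"

  // Both getters return -1 on targets without call frame pseudo instructions,
  // so a real opcode can never match in that case.
  static bool isCallFramePseudo(const llvm::TargetInstrInfo &TII, int Opcode) {
    return Opcode == TII.getCallFrameSetupOpcode() ||
           Opcode == TII.getCallFrameDestroyOpcode();
  }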

Removed: llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrItineraries.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrItineraries.h?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrItineraries.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetInstrItineraries.h (removed)
@@ -1,253 +0,0 @@
-//===-- llvm/Target/TargetInstrItineraries.h - Scheduling -------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the structures used for instruction
-// itineraries, stages, and operand reads/writes.  This is used by
-// schedulers to determine instruction stages and latencies.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETINSTRITINERARIES_H
-#define LLVM_TARGET_TARGETINSTRITINERARIES_H
-
-#include <algorithm>
-
-namespace llvm {
-
-//===----------------------------------------------------------------------===//
-/// Instruction stage - These values represent a non-pipelined step in
-/// the execution of an instruction.  Cycles represents the number of
-/// discrete time slots needed to complete the stage.  Units represent
-/// the choice of functional units that can be used to complete the
-/// stage.  Eg. IntUnit1, IntUnit2. NextCycles indicates how many
-/// cycles should elapse from the start of this stage to the start of
-/// the next stage in the itinerary. A value of -1 indicates that the
-/// next stage should start immediately after the current one.
-/// For example:
-///
-///   { 1, x, -1 }
-///      indicates that the stage occupies FU x for 1 cycle and that
-///      the next stage starts immediately after this one.
-///
-///   { 2, x|y, 1 }
-///      indicates that the stage occupies either FU x or FU y for 2
-///      consecutive cycles and that the next stage starts one cycle
-///      after this stage starts. That is, the stage requirements
-///      overlap in time.
-///
-///   { 1, x, 0 }
-///      indicates that the stage occupies FU x for 1 cycle and that
-///      the next stage starts in this same cycle. This can be used to
-///      indicate that the instruction requires multiple stages at the
-///      same time.
-///
-/// FU reservation can be of two different kinds:
-///  - FUs which the instruction actually requires
-///  - FUs which the instruction just reserves. A reserved unit is not available
-///    for execution of other instructions, but several instructions can reserve
-///    the same unit several times.
-/// These two kinds of unit reservation are used to model instruction domain
-/// change stalls, FUs sharing the same resource (e.g. the same register file), etc.
-
-struct InstrStage {
-  enum ReservationKinds {
-    Required = 0,
-    Reserved = 1
-  };
-
-  unsigned Cycles_;  ///< Length of stage in machine cycles
-  unsigned Units_;   ///< Choice of functional units
-  int NextCycles_;   ///< Number of machine cycles to next stage
-  ReservationKinds Kind_; ///< Kind of the FU reservation
-
-  /// getCycles - returns the number of cycles the stage is occupied
-  unsigned getCycles() const {
-    return Cycles_;
-  }
-
-  /// getUnits - returns the choice of FUs
-  unsigned getUnits() const {
-    return Units_;
-  }
-
-  ReservationKinds getReservationKind() const {
-    return Kind_;
-  }
-
-  /// getNextCycles - returns the number of cycles from the start of
-  /// this stage to the start of the next stage in the itinerary
-  unsigned getNextCycles() const {
-    return (NextCycles_ >= 0) ? (unsigned)NextCycles_ : Cycles_;
-  }
-};
-
-
-//===----------------------------------------------------------------------===//
-/// Instruction itinerary - An itinerary represents the scheduling
-/// information for an instruction. This includes a set of stages
-/// occupies by the instruction, and the pipeline cycle in which
-/// operands are read and written.
-///
-struct InstrItinerary {
-  unsigned NumMicroOps;        ///< # of micro-ops, 0 means it's variable
-  unsigned FirstStage;         ///< Index of first stage in itinerary
-  unsigned LastStage;          ///< Index of last + 1 stage in itinerary
-  unsigned FirstOperandCycle;  ///< Index of first operand rd/wr
-  unsigned LastOperandCycle;   ///< Index of last + 1 operand rd/wr
-};
-
-
-//===----------------------------------------------------------------------===//
-/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
-/// used by a target.
-///
-class InstrItineraryData {
-public:
-  const InstrStage     *Stages;         ///< Array of stages selected
-  const unsigned       *OperandCycles;  ///< Array of operand cycles selected
-  const unsigned       *Forwardings;    ///< Array of pipeline forwarding paths
-  const InstrItinerary *Itineraries;    ///< Array of itineraries selected
-  unsigned              IssueWidth;     ///< Max issue per cycle. 0=Unknown.
-
-  /// Ctors.
-  ///
-  InstrItineraryData() : Stages(0), OperandCycles(0), Forwardings(0),
-                         Itineraries(0), IssueWidth(0) {}
-
-  InstrItineraryData(const InstrStage *S, const unsigned *OS,
-                     const unsigned *F, const InstrItinerary *I)
-    : Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I),
-      IssueWidth(0) {}
-
-  /// isEmpty - Returns true if there are no itineraries.
-  ///
-  bool isEmpty() const { return Itineraries == 0; }
-
-  /// isEndMarker - Returns true if the index is for the end marker
-  /// itinerary.
-  ///
-  bool isEndMarker(unsigned ItinClassIndx) const {
-    return ((Itineraries[ItinClassIndx].FirstStage == ~0U) &&
-            (Itineraries[ItinClassIndx].LastStage == ~0U));
-  }
-
-  /// beginStage - Return the first stage of the itinerary.
-  ///
-  const InstrStage *beginStage(unsigned ItinClassIndx) const {
-    unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
-    return Stages + StageIdx;
-  }
-
-  /// endStage - Return the last+1 stage of the itinerary.
-  ///
-  const InstrStage *endStage(unsigned ItinClassIndx) const {
-    unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
-    return Stages + StageIdx;
-  }
-
-  /// getStageLatency - Return the total stage latency of the given
-  /// class.  The latency is the maximum completion time for any stage
-  /// in the itinerary.
-  ///
-  unsigned getStageLatency(unsigned ItinClassIndx) const {
-    // If the target doesn't provide itinerary information, use a simple
-    // non-zero default value for all instructions.  Some targets provide a
-    // dummy (Generic) itinerary which should be handled as if its itinerary is
-    // empty. We identify this by looking for a reference to stage zero (invalid
-    // stage). This is different from beginStage == endStage != 0, which could
-    // be used for zero-latency pseudo ops.
-    if (isEmpty() || Itineraries[ItinClassIndx].FirstStage == 0)
-      return 1;
-
-    // Calculate the maximum completion time for any stage.
-    unsigned Latency = 0, StartCycle = 0;
-    for (const InstrStage *IS = beginStage(ItinClassIndx),
-           *E = endStage(ItinClassIndx); IS != E; ++IS) {
-      Latency = std::max(Latency, StartCycle + IS->getCycles());
-      StartCycle += IS->getNextCycles();
-    }
-
-    return Latency;
-  }
-
-  /// getOperandCycle - Return the cycle for the given class and
-  /// operand. Return -1 if no cycle is specified for the operand.
-  ///
-  int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const {
-    if (isEmpty())
-      return -1;
-
-    unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
-    unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
-    if ((FirstIdx + OperandIdx) >= LastIdx)
-      return -1;
-
-    return (int)OperandCycles[FirstIdx + OperandIdx];
-  }
-
-  /// hasPipelineForwarding - Return true if there is a pipeline forwarding
-  /// between instructions of itinerary classes DefClass and UseClass so that a
-  /// value produced by an instruction of itinerary class DefClass, operand
-  /// index DefIdx can be bypassed when it's read by an instruction of
-  /// itinerary class UseClass, operand index UseIdx.
-  bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
-                             unsigned UseClass, unsigned UseIdx) const {
-    unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
-    unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
-    if ((FirstDefIdx + DefIdx) >= LastDefIdx)
-      return false;
-    if (Forwardings[FirstDefIdx + DefIdx] == 0)
-      return false;
-
-    unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
-    unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
-    if ((FirstUseIdx + UseIdx) >= LastUseIdx)
-      return false;
-
-    return Forwardings[FirstDefIdx + DefIdx] ==
-      Forwardings[FirstUseIdx + UseIdx];
-  }
-
-  /// getOperandLatency - Compute and return the use operand latency of a given
-  /// itinerary class and operand index if the value is produced by an
-  /// instruction of the specified itinerary class and def operand index.
-  int getOperandLatency(unsigned DefClass, unsigned DefIdx,
-                        unsigned UseClass, unsigned UseIdx) const {
-    if (isEmpty())
-      return -1;
-
-    int DefCycle = getOperandCycle(DefClass, DefIdx);
-    if (DefCycle == -1)
-      return -1;
-
-    int UseCycle = getOperandCycle(UseClass, UseIdx);
-    if (UseCycle == -1)
-      return -1;
-
-    UseCycle = DefCycle - UseCycle + 1;
-    if (UseCycle > 0 &&
-        hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
-      // FIXME: This assumes one cycle benefit for every pipeline forwarding.
-      --UseCycle;
-    return UseCycle;
-  }
-
-  /// isMicroCoded - Return true if the instructions in the given class decode
-  /// to more than one micro-op.
-  bool isMicroCoded(unsigned ItinClassIndx) const {
-    if (isEmpty())
-      return false;
-    return Itineraries[ItinClassIndx].NumMicroOps != 1;
-  }
-};
-
-
-} // End llvm namespace
-
-#endif
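
The removed InstrItineraryData above computes the latency of an itinerary class
as the maximum completion time over its stages. A self-contained sketch of that
computation, with an illustrative Stage struct standing in for InstrStage:

  #include <algorithm>
  #include <cstdio>

  struct Stage {
    unsigned Cycles;  // length of the stage in machine cycles
    int NextCycles;   // cycles until the next stage starts; -1 = right after
  };

  static unsigned stageLatency(const Stage *Begin, const Stage *End) {
    unsigned Latency = 0, StartCycle = 0;
    for (const Stage *S = Begin; S != End; ++S) {
      Latency = std::max(Latency, StartCycle + S->Cycles);
      StartCycle += S->NextCycles >= 0 ? (unsigned)S->NextCycles : S->Cycles;
    }
    return Latency;
  }

  int main() {
    // A 2-cycle stage whose successor starts one cycle later, followed by a
    // 1-cycle stage: latency = max(0 + 2, 1 + 1) = 2 cycles.
    const Stage Stages[] = { { 2, 1 }, { 1, -1 } };
    std::printf("latency = %u\n", stageLatency(Stages, Stages + 2));
    return 0;
  }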

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetLowering.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetLowering.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetLowering.h Sat Jul  2 22:28:07 2011
@@ -1421,13 +1421,6 @@
   /// is for this target.
   virtual ConstraintType getConstraintType(const std::string &Constraint) const;
 
-  /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
-  /// return a list of registers that can be used to satisfy the constraint.
-  /// This should only be used for C_RegisterClass constraints.
-  virtual std::vector<unsigned>
-  getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                    EVT VT) const;
-
   /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
   /// {edx}), return the register number and the register class for the
   /// register.

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetLoweringObjectFile.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetLoweringObjectFile.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetLoweringObjectFile.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetLoweringObjectFile.h Sat Jul  2 22:28:07 2011
@@ -68,6 +68,11 @@
   /// LSDASection - If exception handling is supported by the target, this is
   /// the section the Language Specific Data Area information is emitted to.
   const MCSection *LSDASection;
+
+  /// CompactUnwindSection - If exception handling is supported by the target
+  /// and the target can support a compact representation of the CIE and FDE,
+  /// this is the section to emit them into.
+  const MCSection *CompactUnwindSection;
   
   // Dwarf sections for debug info.  If a target supports debug info, these must
   // be set.
@@ -102,8 +107,8 @@
   /// private linkage, aka an L or .L label) or false if it should be a normal
   /// non-.globl label.  This defaults to true.
   bool IsFunctionEHFrameSymbolPrivate;
+
 public:
-  
   MCContext &getContext() const { return *Ctx; }
   
   virtual ~TargetLoweringObjectFile();
@@ -121,7 +126,6 @@
   bool getSupportsWeakOmittedEHFrame() const {
     return SupportsWeakOmittedEHFrame;
   }
-  
   bool getCommDirectiveSupportsAlignment() const {
     return CommDirectiveSupportsAlignment;
   }
@@ -132,6 +136,7 @@
   const MCSection *getStaticCtorSection() const { return StaticCtorSection; }
   const MCSection *getStaticDtorSection() const { return StaticDtorSection; }
   const MCSection *getLSDASection() const { return LSDASection; }
+  const MCSection *getCompactUnwindSection() const{return CompactUnwindSection;}
   virtual const MCSection *getEHFrameSection() const = 0;
   virtual void emitPersonalityValue(MCStreamer &Streamer,
                                     const TargetMachine &TM,

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetMachine.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetMachine.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetMachine.h Sat Jul  2 22:28:07 2011
@@ -14,29 +14,29 @@
 #ifndef LLVM_TARGET_TARGETMACHINE_H
 #define LLVM_TARGET_TARGETMACHINE_H
 
-#include "llvm/Target/TargetInstrItineraries.h"
 #include <cassert>
 #include <string>
 
 namespace llvm {
 
-class Target;
+class InstrItineraryData;
+class JITCodeEmitter;
 class MCAsmInfo;
+class MCContext;
+class Pass;
+class PassManager;
+class PassManagerBase;
+class Target;
 class TargetData;
-class TargetSubtarget;
+class TargetELFWriterInfo;
+class TargetFrameLowering;
 class TargetInstrInfo;
 class TargetIntrinsicInfo;
 class TargetJITInfo;
 class TargetLowering;
-class TargetSelectionDAGInfo;
-class TargetFrameLowering;
-class JITCodeEmitter;
-class MCContext;
 class TargetRegisterInfo;
-class PassManagerBase;
-class PassManager;
-class Pass;
-class TargetELFWriterInfo;
+class TargetSelectionDAGInfo;
+class TargetSubtargetInfo;
 class formatted_raw_ostream;
 class raw_ostream;
 
@@ -94,8 +94,8 @@
   TargetMachine(const Target &);
 
   /// getSubtargetImpl - virtual method implemented by subclasses that returns
-  /// a reference to that target's TargetSubtarget-derived member variable.
-  virtual const TargetSubtarget *getSubtargetImpl() const { return 0; }
+  /// a reference to that target's TargetSubtargetInfo-derived member variable.
+  virtual const TargetSubtargetInfo *getSubtargetImpl() const { return 0; }
 
   /// TheTarget - The Target that this machine was created for.
   const Target &TheTarget;
@@ -132,7 +132,7 @@
   const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
 
   /// getSubtarget - This method returns a pointer to the specified type of
-  /// TargetSubtarget.  In debug builds, it verifies that the object being
+  /// TargetSubtargetInfo.  In debug builds, it verifies that the object being
   /// returned is of the correct type.
   template<typename STC> const STC &getSubtarget() const {
     return *static_cast<const STC*>(getSubtargetImpl());

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetOptions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetOptions.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetOptions.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetOptions.h Sat Jul  2 22:28:07 2011
@@ -133,8 +133,8 @@
   /// as their parent function, etc.), using an alternate ABI if necessary.
   extern bool GuaranteedTailCallOpt;
 
-  /// StackAlignment - Override default stack alignment for target.
-  extern unsigned StackAlignment;
+  /// StackAlignmentOverride - Override default stack alignment for target.
+  extern unsigned StackAlignmentOverride;
 
   /// RealignStack - This flag indicates whether the stack should be
   /// automatically realigned, if needed.

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegisterInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegisterInfo.h Sat Jul  2 22:28:07 2011
@@ -16,6 +16,7 @@
 #ifndef LLVM_TARGET_TARGETREGISTERINFO_H
 #define LLVM_TARGET_TARGETREGISTERINFO_H
 
+#include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/ADT/ArrayRef.h"
@@ -27,30 +28,10 @@
 
 class BitVector;
 class MachineFunction;
-class MachineMove;
 class RegScavenger;
 template<class T> class SmallVectorImpl;
 class raw_ostream;
 
-/// TargetRegisterDesc - This record contains all of the information known about
-/// a particular register.  The Overlaps field contains a pointer to a zero
-/// terminated array of registers that this register aliases, starting with
-/// itself. This is needed for architectures like X86 which have AL alias AX
-/// alias EAX. The SubRegs field is a zero terminated array of registers that
-/// are sub-registers of the specific register, e.g. AL, AH are sub-registers of
-/// AX. The SuperRegs field is a zero terminated array of registers that are
-/// super-registers of the specific register, e.g. RAX, EAX, are super-registers
-/// of AX.
-///
-struct TargetRegisterDesc {
-  const char     *Name;         // Printable name for the reg (for debugging)
-  const unsigned *Overlaps;     // Overlapping registers, described above
-  const unsigned *SubRegs;      // Sub-register set, described above
-  const unsigned *SuperRegs;    // Super-register set, described above
-  unsigned CostPerUse;          // Extra cost of instructions using register.
-  bool inAllocatableClass;      // Register belongs to an allocatable regclass.
-};
-
 class TargetRegisterClass {
 public:
   typedef const unsigned* iterator;
@@ -274,6 +255,12 @@
   bool isAllocatable() const { return Allocatable; }
 };
 
+/// TargetRegisterInfoDesc - Extra information, not in MCRegisterDesc, about
+/// registers. These are used by codegen, not by MC.
+struct TargetRegisterInfoDesc {
+  unsigned CostPerUse;          // Extra cost of instructions using register.
+  bool inAllocatableClass;      // Register belongs to an allocatable regclass.
+};
 
 /// TargetRegisterInfo base class - We assume that the target defines a static
 /// array of TargetRegisterDesc objects that represent all of the machine
@@ -281,25 +268,19 @@
 /// to this array so that we can turn register number into a register
 /// descriptor.
 ///
-class TargetRegisterInfo {
+class TargetRegisterInfo : public MCRegisterInfo {
 public:
   typedef const TargetRegisterClass * const * regclass_iterator;
 private:
-  const TargetRegisterDesc *Desc;             // Pointer to the descriptor array
+  const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
   const char *const *SubRegIndexNames;        // Names of subreg indexes.
-  unsigned NumRegs;                           // Number of entries in the array
-
   regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
 
-  int CallFrameSetupOpcode, CallFrameDestroyOpcode;
-
 protected:
-  TargetRegisterInfo(const TargetRegisterDesc *D, unsigned NR,
+  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                      regclass_iterator RegClassBegin,
                      regclass_iterator RegClassEnd,
-                     const char *const *subregindexnames,
-                     int CallFrameSetupOpcode = -1,
-                     int CallFrameDestroyOpcode = -1);
+                     const char *const *subregindexnames);
   virtual ~TargetRegisterInfo();
 public:
 
@@ -379,71 +360,16 @@
   BitVector getAllocatableSet(const MachineFunction &MF,
                               const TargetRegisterClass *RC = NULL) const;
 
-  const TargetRegisterDesc &operator[](unsigned RegNo) const {
-    assert(RegNo < NumRegs &&
-           "Attempting to access record for invalid register number!");
-    return Desc[RegNo];
-  }
-
-  /// Provide a get method, equivalent to [], but more useful if we have a
-  /// pointer to this object.
-  ///
-  const TargetRegisterDesc &get(unsigned RegNo) const {
-    return operator[](RegNo);
-  }
-
-  /// getAliasSet - Return the set of registers aliased by the specified
-  /// register, or a null list if there are none.  The list returned is zero
-  /// terminated.
-  ///
-  const unsigned *getAliasSet(unsigned RegNo) const {
-    // The Overlaps set always begins with Reg itself.
-    return get(RegNo).Overlaps + 1;
-  }
-
-  /// getOverlaps - Return a list of registers that overlap Reg, including
-  /// itself. This is the same as the alias set except Reg is included in the
-  /// list.
-  /// These are exactly the registers in { x | regsOverlap(x, Reg) }.
-  ///
-  const unsigned *getOverlaps(unsigned RegNo) const {
-    return get(RegNo).Overlaps;
-  }
-
-  /// getSubRegisters - Return the list of registers that are sub-registers of
-  /// the specified register, or a null list if there are none. The list
-  /// returned is zero terminated and sorted according to super-sub register
-  /// relations. e.g. X86::RAX's sub-register list is EAX, AX, AL, AH.
-  ///
-  const unsigned *getSubRegisters(unsigned RegNo) const {
-    return get(RegNo).SubRegs;
-  }
-
-  /// getSuperRegisters - Return the list of registers that are super-registers
-  /// of the specified register, or a null list if there are none. The list
-  /// returned is zero terminated and sorted according to super-sub register
-  /// relations. e.g. X86::AL's super-register list is AX, EAX, RAX.
-  ///
-  const unsigned *getSuperRegisters(unsigned RegNo) const {
-    return get(RegNo).SuperRegs;
-  }
-
-  /// getName - Return the human-readable symbolic target-specific name for the
-  /// specified physical register.
-  const char *getName(unsigned RegNo) const {
-    return get(RegNo).Name;
-  }
-
   /// getCostPerUse - Return the additional cost of using this register instead
   /// of other registers in its class.
   unsigned getCostPerUse(unsigned RegNo) const {
-    return get(RegNo).CostPerUse;
+    return InfoDesc[RegNo].CostPerUse;
   }
 
-  /// getNumRegs - Return the number of registers this target has (useful for
-  /// sizing arrays holding per register information)
-  unsigned getNumRegs() const {
-    return NumRegs;
+  /// isInAllocatableClass - Return true if the register belongs to an
+  /// allocatable register class.
+  bool isInAllocatableClass(unsigned RegNo) const {
+    return InfoDesc[RegNo].inAllocatableClass;
   }
 
   /// getSubRegIndexName - Return the human-readable symbolic target-specific
@@ -566,7 +492,7 @@
   }
 
   /// getRegClass - Returns the register class associated with the enumeration
-  /// value.  See class TargetOperandInfo.
+  /// value.  See class MCOperandInfo.
   const TargetRegisterClass *getRegClass(unsigned i) const {
     assert(i < getNumRegClasses() && "Register Class ID out of range");
     return RegClassBegin[i];
@@ -732,15 +658,6 @@
     return false; // Must return a value in order to compile with VS 2005
   }
 
-  /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
-  /// frame setup/destroy instructions if they exist (-1 otherwise).  Some
-  /// targets use pseudo instructions in order to abstract away the difference
-  /// between operating with a frame pointer and operating without, through the
-  /// use of these two instructions.
-  ///
-  int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
-  int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
-
   /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
   /// code insertion to eliminate call frame setup and destroy pseudo
   /// instructions (but only if the Target is using them).  It is responsible
@@ -752,9 +669,6 @@
   eliminateCallFramePseudoInstr(MachineFunction &MF,
                                 MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
-    assert(getCallFrameSetupOpcode()== -1 && getCallFrameDestroyOpcode()== -1 &&
-           "eliminateCallFramePseudoInstr must be implemented if using"
-           " call frame setup/destroy pseudo instructions!");
     assert(0 && "Call Frame Pseudo Instructions do not exist on this target!");
   }
 
@@ -806,6 +720,12 @@
   virtual int getSEHRegNum(unsigned i) const {
     return i;
   }
+
+  /// getCompactUnwindRegNum - This function maps the register to the number for
+  /// compact unwind encoding. Return -1 if the register isn't valid.
+  virtual int getCompactUnwindRegNum(unsigned) const {
+    return -1;
+  }
 };
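
A brief sketch using the two accessors kept on TargetRegisterInfo above, which
now read the codegen-only TargetRegisterInfoDesc table; the helper itself is
hypothetical:

  #include "llvm/Target/TargetRegisterInfo.h"

  // Prefer an allocatable register; break ties on the extra CostPerUse.
  static unsigned pickCheaperReg(const llvm::TargetRegisterInfo &TRI,
                                 unsigned RegA, unsigned RegB) {
    if (TRI.isInAllocatableClass(RegA) != TRI.isInAllocatableClass(RegB))
      return TRI.isInAllocatableClass(RegA) ? RegA : RegB;
    return TRI.getCostPerUse(RegA) <= TRI.getCostPerUse(RegB) ? RegA : RegB;
  }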
 
 

Modified: llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegistry.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegistry.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegistry.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetRegistry.h Sat Jul  2 22:28:07 2011
@@ -33,6 +33,9 @@
   class MCContext;
   class MCDisassembler;
   class MCInstPrinter;
+  class MCInstrInfo;
+  class MCRegisterInfo;
+  class MCSubtargetInfo;
   class MCStreamer;
   class TargetAsmBackend;
   class TargetAsmLexer;
@@ -64,9 +67,13 @@
     typedef unsigned (*TripleMatchQualityFnTy)(const std::string &TT);
 
     typedef MCAsmInfo *(*AsmInfoCtorFnTy)(const Target &T,
-                                                StringRef TT);
+                                          StringRef TT);
+    typedef MCInstrInfo *(*MCInstrInfoCtorFnTy)(void);
+    typedef MCRegisterInfo *(*MCRegInfoCtorFnTy)(void);
+    typedef MCSubtargetInfo *(*MCSubtargetInfoCtorFnTy)(void);
     typedef TargetMachine *(*TargetMachineCtorTy)(const Target &T,
                                                   const std::string &TT,
+                                                  const std::string &CPU,
                                                   const std::string &Features);
     typedef AsmPrinter *(*AsmPrinterCtorTy)(TargetMachine &TM,
                                             MCStreamer &Streamer);
@@ -120,8 +127,22 @@
     /// HasJIT - Whether this target supports the JIT.
     bool HasJIT;
 
+    /// AsmInfoCtorFn - Constructor function for this target's MCAsmInfo, if
+    /// registered.
     AsmInfoCtorFnTy AsmInfoCtorFn;
 
+    /// MCInstrInfoCtorFn - Constructor function for this target's MCInstrInfo,
+    /// if registered.
+    MCInstrInfoCtorFnTy MCInstrInfoCtorFn;
+
+    /// MCRegInfoCtorFn - Constructor function for this target's MCRegisterInfo,
+    /// if registered.
+    MCRegInfoCtorFnTy MCRegInfoCtorFn;
+
+    /// MCSubtargetInfoCtorFn - Constructor function for this target's
+    /// MCSubtargetInfo, if registered.
+    MCSubtargetInfoCtorFnTy MCSubtargetInfoCtorFn;
+
     /// TargetMachineCtorFn - Construction function for this target's
     /// TargetMachine, if registered.
     TargetMachineCtorTy TargetMachineCtorFn;
@@ -231,6 +252,30 @@
       return AsmInfoCtorFn(*this, Triple);
     }
 
+    /// createMCInstrInfo - Create a MCInstrInfo implementation.
+    ///
+    MCInstrInfo *createMCInstrInfo() const {
+      if (!MCInstrInfoCtorFn)
+        return 0;
+      return MCInstrInfoCtorFn();
+    }
+
+    /// createMCRegInfo - Create a MCRegisterInfo implementation.
+    ///
+    MCRegisterInfo *createMCRegInfo() const {
+      if (!MCRegInfoCtorFn)
+        return 0;
+      return MCRegInfoCtorFn();
+    }
+
+    /// createMCSubtargetInfo - Create a MCSubtargetInfo implementation.
+    ///
+    MCSubtargetInfo *createMCSubtargetInfo() const {
+      if (!MCSubtargetInfoCtorFn)
+        return 0;
+      return MCSubtargetInfoCtorFn();
+    }
+
     /// createTargetMachine - Create a target specific machine implementation
     /// for the specified \arg Triple.
     ///
@@ -239,10 +284,11 @@
     /// either the target triple from the module, or the target triple of the
     /// host if that does not exist.
     TargetMachine *createTargetMachine(const std::string &Triple,
+                                       const std::string &CPU,
                                        const std::string &Features) const {
       if (!TargetMachineCtorFn)
         return 0;
-      return TargetMachineCtorFn(*this, Triple, Features);
+      return TargetMachineCtorFn(*this, Triple, CPU, Features);
     }
 
     /// createAsmBackend - Create a target specific assembly parser.
@@ -444,6 +490,52 @@
         T.AsmInfoCtorFn = Fn;
     }
 
+    /// RegisterMCInstrInfo - Register a MCInstrInfo implementation for the
+    /// given target.
+    ///
+    /// Clients are responsible for ensuring that registration doesn't occur
+    /// while another thread is attempting to access the registry. Typically
+    /// this is done by initializing all targets at program startup.
+    ///
+    /// @param T - The target being registered.
+    /// @param Fn - A function to construct a MCInstrInfo for the target.
+    static void RegisterMCInstrInfo(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
+      // Ignore duplicate registration.
+      if (!T.MCInstrInfoCtorFn)
+        T.MCInstrInfoCtorFn = Fn;
+    }
+
+    /// RegisterMCRegInfo - Register a MCRegisterInfo implementation for the
+    /// given target.
+    ///
+    /// Clients are responsible for ensuring that registration doesn't occur
+    /// while another thread is attempting to access the registry. Typically
+    /// this is done by initializing all targets at program startup.
+    ///
+    /// @param T - The target being registered.
+    /// @param Fn - A function to construct a MCRegisterInfo for the target.
+    static void RegisterMCRegInfo(Target &T, Target::MCRegInfoCtorFnTy Fn) {
+      // Ignore duplicate registration.
+      if (!T.MCRegInfoCtorFn)
+        T.MCRegInfoCtorFn = Fn;
+    }
+
+    /// RegisterMCSubtargetInfo - Register a MCSubtargetInfo implementation for
+    /// the given target.
+    ///
+    /// Clients are responsible for ensuring that registration doesn't occur
+    /// while another thread is attempting to access the registry. Typically
+    /// this is done by initializing all targets at program startup.
+    ///
+    /// @param T - The target being registered.
+    /// @param Fn - A function to construct a MCSubtargetInfo for the target.
+    static void RegisterMCSubtargetInfo(Target &T,
+                                        Target::MCSubtargetInfoCtorFnTy Fn) {
+      // Ignore duplicate registration.
+      if (!T.MCSubtargetInfoCtorFn)
+        T.MCSubtargetInfoCtorFn = Fn;
+    }
+
     /// RegisterTargetMachine - Register a TargetMachine implementation for the
     /// given target.
     ///
@@ -654,6 +746,104 @@
     }
   };
 
+  /// RegisterMCInstrInfo - Helper template for registering a target instruction
+  /// info implementation.  This invokes the static "Create" method on the class
+  /// to actually do the construction.  Usage:
+  ///
+  /// extern "C" void LLVMInitializeFooTarget() {
+  ///   extern Target TheFooTarget;
+  ///   RegisterMCInstrInfo<FooMCInstrInfo> X(TheFooTarget);
+  /// }
+  template<class MCInstrInfoImpl>
+  struct RegisterMCInstrInfo {
+    RegisterMCInstrInfo(Target &T) {
+      TargetRegistry::RegisterMCInstrInfo(T, &Allocator);
+    }
+  private:
+    static MCInstrInfo *Allocator() {
+      return new MCInstrInfoImpl();
+    }
+  };
+
+  /// RegisterMCInstrInfoFn - Helper template for registering a target
+  /// instruction info implementation.  This invokes the specified function to
+  /// do the construction.  Usage:
+  ///
+  /// extern "C" void LLVMInitializeFooTarget() {
+  ///   extern Target TheFooTarget;
+  ///   RegisterMCInstrInfoFn X(TheFooTarget, TheFunction);
+  /// }
+  struct RegisterMCInstrInfoFn {
+    RegisterMCInstrInfoFn(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
+      TargetRegistry::RegisterMCInstrInfo(T, Fn);
+    }
+  };
+
+  /// RegisterMCRegInfo - Helper template for registering a target register info
+  /// implementation.  This invokes the static "Create" method on the class to
+  /// actually do the construction.  Usage:
+  ///
+  /// extern "C" void LLVMInitializeFooTarget() {
+  ///   extern Target TheFooTarget;
+  ///   RegisterMCRegInfo<FooMCRegInfo> X(TheFooTarget);
+  /// }
+  template<class MCRegisterInfoImpl>
+  struct RegisterMCRegInfo {
+    RegisterMCRegInfo(Target &T) {
+      TargetRegistry::RegisterMCRegInfo(T, &Allocator);
+    }
+  private:
+    static MCRegisterInfo *Allocator() {
+      return new MCRegisterInfoImpl();
+    }
+  };
+
+  /// RegisterMCRegInfoFn - Helper template for registering a target register
+  /// info implementation.  This invokes the specified function to do the
+  /// construction.  Usage:
+  ///
+  /// extern "C" void LLVMInitializeFooTarget() {
+  ///   extern Target TheFooTarget;
+  ///   RegisterMCRegInfoFn X(TheFooTarget, TheFunction);
+  /// }
+  struct RegisterMCRegInfoFn {
+    RegisterMCRegInfoFn(Target &T, Target::MCRegInfoCtorFnTy Fn) {
+      TargetRegistry::RegisterMCRegInfo(T, Fn);
+    }
+  };
+
+  /// RegisterMCSubtargetInfo - Helper template for registering a target
+  /// subtarget info implementation.  This invokes the static "Create" method
+  /// on the class to actually do the construction.  Usage:
+  ///
+  /// extern "C" void LLVMInitializeFooTarget() {
+  ///   extern Target TheFooTarget;
+  ///   RegisterMCSubtargetInfo<FooMCSubtargetInfo> X(TheFooTarget);
+  /// }
+  template<class MCSubtargetInfoImpl>
+  struct RegisterMCSubtargetInfo {
+    RegisterMCSubtargetInfo(Target &T) {
+      TargetRegistry::RegisterMCSubtargetInfo(T, &Allocator);
+    }
+  private:
+    static MCSubtargetInfo *Allocator() {
+      return new MCSubtargetInfoImpl();
+    }
+  };
+
+  /// RegisterMCSubtargetInfoFn - Helper template for registering a target
+  /// subtarget info implementation.  This invokes the specified function to
+  /// do the construction.  Usage:
+  ///
+  /// extern "C" void LLVMInitializeFooTarget() {
+  ///   extern Target TheFooTarget;
+  ///   RegisterMCSubtargetInfoFn X(TheFooTarget, TheFunction);
+  /// }
+  struct RegisterMCSubtargetInfoFn {
+    RegisterMCSubtargetInfoFn(Target &T, Target::MCSubtargetInfoCtorFnTy Fn) {
+      TargetRegistry::RegisterMCSubtargetInfo(T, Fn);
+    }
+  };
 
   /// RegisterTargetMachine - Helper template for registering a target machine
   /// implementation, for use in the target machine initialization
@@ -671,8 +861,9 @@
 
   private:
     static TargetMachine *Allocator(const Target &T, const std::string &TT,
+                                    const std::string &CPU,
                                     const std::string &FS) {
-      return new TargetMachineImpl(T, TT, FS);
+      return new TargetMachineImpl(T, TT, CPU, FS);
     }
   };
 

Removed: llvm/branches/type-system-rewrite/include/llvm/Target/TargetSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Target/TargetSubtarget.h?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Target/TargetSubtarget.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Target/TargetSubtarget.h (removed)
@@ -1,67 +0,0 @@
-//==-- llvm/Target/TargetSubtarget.h - Target Information --------*- C++ -*-==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file describes the subtarget options of a Target machine.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TARGET_TARGETSUBTARGET_H
-#define LLVM_TARGET_TARGETSUBTARGET_H
-
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-class SDep;
-class SUnit;
-class TargetRegisterClass;
-template <typename T> class SmallVectorImpl;
-
-//===----------------------------------------------------------------------===//
-///
-/// TargetSubtarget - Generic base class for all target subtargets.  All
-/// Target-specific options that control code generation and printing should
-/// be exposed through a TargetSubtarget-derived class.
-///
-class TargetSubtarget {
-  TargetSubtarget(const TargetSubtarget&);   // DO NOT IMPLEMENT
-  void operator=(const TargetSubtarget&);  // DO NOT IMPLEMENT
-protected: // Can only create subclasses...
-  TargetSubtarget();
-public:
-  // AntiDepBreakMode - Type of anti-dependence breaking that should
-  // be performed before post-RA scheduling.
-  typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
-  typedef SmallVectorImpl<TargetRegisterClass*> RegClassVector;
-
-  virtual ~TargetSubtarget();
-
-  /// getSpecialAddressLatency - For targets where it is beneficial to
-  /// backschedule instructions that compute addresses, return a value
-  /// indicating the number of scheduling cycles of backscheduling that
-  /// should be attempted.
-  virtual unsigned getSpecialAddressLatency() const { return 0; }
-
-  // enablePostRAScheduler - If the target can benefit from post-regalloc
-  // scheduling and the specified optimization level meets the requirement,
-  // return true to enable post-register-allocation scheduling. In
-  // CriticalPathRCs, return any register classes that should only be broken
-  // if on the critical path. 
-  virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
-                                     AntiDepBreakMode& Mode,
-                                     RegClassVector& CriticalPathRCs) const;
-  // adjustSchedDependency - Perform target specific adjustments to
-  // the latency of a schedule dependency.
-  virtual void adjustSchedDependency(SUnit *def, SUnit *use, 
-                                     SDep& dep) const { }
-};
-
-} // End llvm namespace
-
-#endif

Modified: llvm/branches/type-system-rewrite/include/llvm/Use.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/include/llvm/Use.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/include/llvm/Use.h (original)
+++ llvm/branches/type-system-rewrite/include/llvm/Use.h Sat Jul  2 22:28:07 2011
@@ -60,6 +60,10 @@
   /// that also works with less standard-compliant compilers
   void swap(Use &RHS);
 
+  // A type for the word following an array of hung-off Uses in memory, which is
+  // a pointer back to their User with the bottom bit set.
+  typedef PointerIntPair<User*, 1, unsigned> UserRef;
+
 private:
   /// Copy ctor - do not implement
   Use(const Use &U);
@@ -108,13 +112,16 @@
   Use *getNext() const { return Next; }
 
   
+  /// initTags - initialize the waymarking tags on an array of Uses, so that
+  /// getUser() can find the User from any of those Uses.
+  static Use *initTags(Use *Start, Use *Stop);
+
   /// zap - This is used to destroy Use operands when the number of operands of
   /// a User changes.
   static void zap(Use *Start, const Use *Stop, bool del = false);
 
 private:
   const Use* getImpliedUser() const;
-  static Use *initTags(Use *Start, Use *Stop);
   
   Value *Val;
   Use *Next;
@@ -136,7 +143,6 @@
   }
 
   friend class Value;
-  friend class User;
 };
 
 // simplify_type - Allow clients to treat uses just like values when using
@@ -208,15 +214,6 @@
   unsigned getOperandNo() const;
 };
 
-//===----------------------------------------------------------------------===//
-//                         AugmentedUse layout struct
-//===----------------------------------------------------------------------===//
-
-struct AugmentedUse : public Use {
-  PointerIntPair<User*, 1, unsigned> ref;
-  AugmentedUse(); // not implemented
-};
-
 } // End llvm namespace
 
 #endif

Modified: llvm/branches/type-system-rewrite/lib/Analysis/Analysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/Analysis.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/Analysis.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/Analysis.cpp Sat Jul  2 22:28:07 2011
@@ -23,6 +23,7 @@
   initializeAliasSetPrinterPass(Registry);
   initializeNoAAPass(Registry);
   initializeBasicAliasAnalysisPass(Registry);
+  initializeBlockFrequencyPass(Registry);
   initializeBranchProbabilityInfoPass(Registry);
   initializeCFGViewerPass(Registry);
   initializeCFGPrinterPass(Registry);

Modified: llvm/branches/type-system-rewrite/lib/Analysis/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/CMakeLists.txt?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/CMakeLists.txt (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/CMakeLists.txt Sat Jul  2 22:28:07 2011
@@ -6,6 +6,7 @@
   AliasSetTracker.cpp
   Analysis.cpp
   BasicAliasAnalysis.cpp
+  BlockFrequency.cpp
   BranchProbabilityInfo.cpp
   CFGPrinter.cpp
   CaptureTracking.cpp

Modified: llvm/branches/type-system-rewrite/lib/Analysis/DIBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/DIBuilder.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/DIBuilder.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/DIBuilder.cpp Sat Jul  2 22:28:07 2011
@@ -219,7 +219,7 @@
 }
 
 /// createMemberType - Create debugging information entry for a member.
-DIType DIBuilder::createMemberType(StringRef Name, 
+DIType DIBuilder::createMemberType(DIDescriptor Scope, StringRef Name, 
                                    DIFile File, unsigned LineNumber, 
                                    uint64_t SizeInBits, uint64_t AlignInBits,
                                    uint64_t OffsetInBits, unsigned Flags, 
@@ -227,7 +227,7 @@
   // TAG_member is encoded in DIDerivedType format.
   Value *Elts[] = {
     GetTagConstant(VMContext, dwarf::DW_TAG_member),
-    File, // Or TheCU ? Ty ?
+    Scope,
     MDString::get(VMContext, Name),
     File,
     ConstantInt::get(Type::getInt32Ty(VMContext), LineNumber),
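
A minimal sketch of a caller updated for the new signature, in which the member
is parented by an explicit scope instead of the file; the surrounding values
(builder, scope, file, element type) are assumed to have been created earlier:

  #include "llvm/Analysis/DebugInfo.h"
  #include "llvm/Analysis/DIBuilder.h"

  static llvm::DIType buildCountMember(llvm::DIBuilder &DIB,
                                       llvm::DIDescriptor Scope,
                                       llvm::DIFile File, llvm::DIType Int32Ty) {
    // Scope (e.g. the enclosing composite type) is now the first argument.
    return DIB.createMemberType(Scope, "count", File, /*LineNumber=*/42,
                                /*SizeInBits=*/32, /*AlignInBits=*/32,
                                /*OffsetInBits=*/0, /*Flags=*/0, Int32Ty);
  }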

Modified: llvm/branches/type-system-rewrite/lib/Analysis/IVUsers.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/IVUsers.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/IVUsers.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/IVUsers.cpp Sat Jul  2 22:28:07 2011
@@ -21,7 +21,6 @@
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/LoopPass.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
-#include "llvm/Support/CommandLine.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Assembly/Writer.h"
 #include "llvm/ADT/STLExtras.h"
@@ -39,15 +38,6 @@
 INITIALIZE_PASS_END(IVUsers, "iv-users",
                       "Induction Variable Users", false, true)
 
-// IVUsers behavior currently depends on this temporary indvars mode. The
-// option must be defined upstream from its uses.
-namespace llvm {
-  bool DisableIVRewrite = false;
-}
-cl::opt<bool, true> DisableIVRewriteOpt(
-  "disable-iv-rewrite", cl::Hidden, cl::location(llvm::DisableIVRewrite),
-  cl::desc("Disable canonical induction variable rewriting"));
-
 Pass *llvm::createIVUsersPass() {
   return new IVUsers();
 }
@@ -56,17 +46,20 @@
 /// used by the given expression, within the context of analyzing the
 /// given loop.
 static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
-                          ScalarEvolution *SE) {
+                          ScalarEvolution *SE, LoopInfo *LI) {
   // An addrec is interesting if it's affine or if it has an interesting start.
   if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
-    // Keep things simple. Don't touch loop-variant strides.
+    // Keep things simple. Don't touch loop-variant strides unless they're
+    // only used outside the loop and we can simplify them.
     if (AR->getLoop() == L)
-      return AR->isAffine() || !L->contains(I);
+      return AR->isAffine() ||
+             (!L->contains(I) &&
+              SE->getSCEVAtScope(AR, LI->getLoopFor(I->getParent())) != AR);
     // Otherwise recurse to see if the start value is interesting, and that
     // the step value is not interesting, since we don't yet know how to
     // do effective SCEV expansions for addrecs with interesting steps.
-    return isInteresting(AR->getStart(), I, L, SE) &&
-          !isInteresting(AR->getStepRecurrence(*SE), I, L, SE);
+    return isInteresting(AR->getStart(), I, L, SE, LI) &&
+          !isInteresting(AR->getStepRecurrence(*SE), I, L, SE, LI);
   }
 
   // An add is interesting if exactly one of its operands is interesting.
@@ -74,7 +67,7 @@
     bool AnyInterestingYet = false;
     for (SCEVAddExpr::op_iterator OI = Add->op_begin(), OE = Add->op_end();
          OI != OE; ++OI)
-      if (isInteresting(*OI, I, L, SE)) {
+      if (isInteresting(*OI, I, L, SE, LI)) {
         if (AnyInterestingYet)
           return false;
         AnyInterestingYet = true;
@@ -89,7 +82,7 @@
 /// AddUsersIfInteresting - Inspect the specified instruction.  If it is a
 /// reducible SCEV, recursively add its users to the IVUsesByStride set and
 /// return true.  Otherwise, return false.
-bool IVUsers::AddUsersIfInteresting(Instruction *I, PHINode *Phi) {
+bool IVUsers::AddUsersIfInteresting(Instruction *I) {
   if (!SE->isSCEVable(I->getType()))
     return false;   // Void and FP expressions cannot be reduced.
 
@@ -100,11 +93,6 @@
   if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
     return false;
 
-  // We expect Sign/Zero extension to be eliminated from the IR before analyzing
-  // any downstream uses.
-  if (DisableIVRewrite && (isa<SExtInst>(I) || isa<ZExtInst>(I)))
-    return false;
-
   if (!Processed.insert(I))
     return true;    // Instruction already handled.
 
@@ -113,7 +101,7 @@
 
   // If we've come to an uninteresting expression, stop the traversal and
   // call this a user.
-  if (!isInteresting(ISE, I, L, SE))
+  if (!isInteresting(ISE, I, L, SE, LI))
     return false;
 
   SmallPtrSet<Instruction *, 4> UniqueUsers;
@@ -136,13 +124,12 @@
     bool AddUserToIVUsers = false;
     if (LI->getLoopFor(User->getParent()) != L) {
       if (isa<PHINode>(User) || Processed.count(User) ||
-          !AddUsersIfInteresting(User, Phi)) {
+          !AddUsersIfInteresting(User)) {
         DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n'
                      << "   OF SCEV: " << *ISE << '\n');
         AddUserToIVUsers = true;
       }
-    } else if (Processed.count(User) ||
-               !AddUsersIfInteresting(User, Phi)) {
+    } else if (Processed.count(User) || !AddUsersIfInteresting(User)) {
       DEBUG(dbgs() << "FOUND USER: " << *User << '\n'
                    << "   OF SCEV: " << *ISE << '\n');
       AddUserToIVUsers = true;
@@ -150,7 +137,7 @@
 
     if (AddUserToIVUsers) {
       // Okay, we found a user that we cannot reduce.
-      IVUses.push_back(new IVStrideUse(this, User, I, Phi));
+      IVUses.push_back(new IVStrideUse(this, User, I));
       IVStrideUse &NewUse = IVUses.back();
       // Autodetect the post-inc loop set, populating NewUse.PostIncLoops.
       // The regular return value here is discarded; instead of recording
@@ -165,8 +152,8 @@
   return true;
 }
 
-IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand, PHINode *Phi) {
-  IVUses.push_back(new IVStrideUse(this, User, Operand, Phi));
+IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
+  IVUses.push_back(new IVStrideUse(this, User, Operand));
   return IVUses.back();
 }
 
@@ -194,7 +181,7 @@
   // them by stride.  Start by finding all of the PHI nodes in the header for
   // this loop.  If they are induction variables, inspect their uses.
   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
-    (void)AddUsersIfInteresting(I, cast<PHINode>(I));
+    (void)AddUsersIfInteresting(I);
 
   return false;
 }

Modified: llvm/branches/type-system-rewrite/lib/Analysis/InstructionSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/InstructionSimplify.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/InstructionSimplify.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/InstructionSimplify.cpp Sat Jul  2 22:28:07 2011
@@ -2204,15 +2204,15 @@
   if (TrueVal == FalseVal)
     return TrueVal;
 
-  if (isa<UndefValue>(TrueVal))   // select C, undef, X -> X
-    return FalseVal;
-  if (isa<UndefValue>(FalseVal))   // select C, X, undef -> X
-    return TrueVal;
   if (isa<UndefValue>(CondVal)) {  // select undef, X, Y -> X or Y
     if (isa<Constant>(TrueVal))
       return TrueVal;
     return FalseVal;
   }
+  if (isa<UndefValue>(TrueVal))   // select C, undef, X -> X
+    return FalseVal;
+  if (isa<UndefValue>(FalseVal))   // select C, X, undef -> X
+    return TrueVal;
 
   return 0;
 }
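
The only inputs where the old and new check orders above disagree are selects whose condition and one arm are both undef. Below is a toy model, plain C++ rather than LLVM code, that mirrors the two orderings for that case; the Kind enum and both functions are invented for illustration (in LLVM itself, UndefValue is a Constant, which the model folds into the TrueV != NonConstant test).

  #include <cstdio>

  // Stand-ins for the three kinds of operand the checks above distinguish.
  enum Kind { Undef, OtherConstant, NonConstant };

  static const char *oldOrder(Kind Cond, Kind TrueV, Kind FalseV) {
    if (TrueV == Undef)  return "FalseVal";               // select C, undef, X -> X
    if (FalseV == Undef) return "TrueVal";                // select C, X, undef -> X
    if (Cond == Undef)   return TrueV != NonConstant ? "TrueVal" : "FalseVal";
    return "no fold";
  }

  static const char *newOrder(Kind Cond, Kind TrueV, Kind FalseV) {
    if (Cond == Undef)   return TrueV != NonConstant ? "TrueVal" : "FalseVal";
    if (TrueV == Undef)  return "FalseVal";
    if (FalseV == Undef) return "TrueVal";
    return "no fold";
  }

  int main() {
    // select undef, undef, %x: the old order returned %x, the new order
    // returns the (undef) true arm because the condition is checked first.
    std::printf("old: %s  new: %s\n",
                oldOrder(Undef, Undef, NonConstant),
                newOrder(Undef, Undef, NonConstant));
    return 0;
  }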

Modified: llvm/branches/type-system-rewrite/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/ScalarEvolutionExpander.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/ScalarEvolutionExpander.cpp Sat Jul  2 22:28:07 2011
@@ -159,7 +159,8 @@
   }
 
   // If we haven't found this binop, insert it.
-  Value *BO = Builder.CreateBinOp(Opcode, LHS, RHS, "tmp");
+  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS, "tmp"));
+  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
   rememberInstruction(BO);
 
   // Restore the original insert point.
@@ -935,7 +936,8 @@
   BasicBlock *Header = L->getHeader();
   Builder.SetInsertPoint(Header, Header->begin());
   pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
-  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE), "lsr.iv");
+  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
+                                  Twine(IVName) + ".iv");
   rememberInstruction(PN);
 
   // Create the step instructions and populate the PHI.
@@ -971,8 +973,8 @@
       }
     } else {
       IncV = isNegative ?
-        Builder.CreateSub(PN, StepV, "lsr.iv.next") :
-        Builder.CreateAdd(PN, StepV, "lsr.iv.next");
+        Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
+        Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
       rememberInstruction(IncV);
     }
     PN->addIncoming(IncV, Pred);
@@ -1155,6 +1157,7 @@
         Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                      "indvar.next",
                                                      HP->getTerminator());
+        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
         rememberInstruction(Add);
         CanonicalIV->addIncoming(Add, HP);
       } else {

Modified: llvm/branches/type-system-rewrite/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Analysis/ValueTracking.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Analysis/ValueTracking.cpp Sat Jul  2 22:28:07 2011
@@ -1783,3 +1783,19 @@
   }
   return V;
 }
+
+/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
+/// are lifetime markers.
+///
+bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
+  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
+       UI != UE; ++UI) {
+    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
+    if (!II) return false;
+
+    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+        II->getIntrinsicID() != Intrinsic::lifetime_end)
+      return false;
+  }
+  return true;
+}
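
The new onlyUsedByLifetimeMarkers walks a pointer's use list and accepts only llvm.lifetime.start/end intrinsic users. A minimal caller sketch follows; isLifetimeOnlyAlloca is hypothetical, and it assumes the declaration is exposed from llvm/Analysis/ValueTracking.h alongside the definition added here.

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/Instructions.h"
  using namespace llvm;

  // An alloca whose only users are lifetime markers is never actually read or
  // written, so a cleanup pass could treat it (and the markers) as dead.
  static bool isLifetimeOnlyAlloca(const AllocaInst *AI) {
    return onlyUsedByLifetimeMarkers(AI);
  }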

Modified: llvm/branches/type-system-rewrite/lib/AsmParser/LLParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/AsmParser/LLParser.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/AsmParser/LLParser.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/AsmParser/LLParser.cpp Sat Jul  2 22:28:07 2011
@@ -1987,7 +1987,7 @@
                      " is not of type '" + getTypeString(Elts[0]->getType()));
     }
 
-    ID.ConstantVal = ConstantArray::get(ATy, Elts.data(), Elts.size());
+    ID.ConstantVal = ConstantArray::get(ATy, Elts);
     ID.Kind = ValID::t_Constant;
     return false;
   }

Modified: llvm/branches/type-system-rewrite/lib/Bitcode/Reader/BitcodeReader.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Bitcode/Reader/BitcodeReader.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Bitcode/Reader/BitcodeReader.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Bitcode/Reader/BitcodeReader.cpp Sat Jul  2 22:28:07 2011
@@ -292,7 +292,7 @@
       // Make the new constant.
       Constant *NewC;
       if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) {
-        NewC = ConstantArray::get(UserCA->getType(), &NewOps[0], NewOps.size());
+        NewC = ConstantArray::get(UserCA->getType(), NewOps);
       } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) {
         NewC = ConstantStruct::get(UserCS->getType(), NewOps);
       } else if (isa<ConstantVector>(UserC)) {

Modified: llvm/branches/type-system-rewrite/lib/Bitcode/Writer/BitcodeWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Bitcode/Writer/BitcodeWriter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Bitcode/Writer/BitcodeWriter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Bitcode/Writer/BitcodeWriter.cpp Sat Jul  2 22:28:07 2011
@@ -1113,12 +1113,16 @@
     AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
     break;
 
-  case Instruction::PHI:
+  case Instruction::PHI: {
+    const PHINode &PN = cast<PHINode>(I);
     Code = bitc::FUNC_CODE_INST_PHI;
-    Vals.push_back(VE.getTypeID(I.getType()));
-    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
-      Vals.push_back(VE.getValueID(I.getOperand(i)));
+    Vals.push_back(VE.getTypeID(PN.getType()));
+    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
+      Vals.push_back(VE.getValueID(PN.getIncomingValue(i)));
+      Vals.push_back(VE.getValueID(PN.getIncomingBlock(i)));
+    }
     break;
+  }
 
   case Instruction::Alloca:
     Code = bitc::FUNC_CODE_INST_ALLOCA;

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.cpp Sat Jul  2 22:28:07 2011
@@ -116,7 +116,7 @@
 AggressiveAntiDepBreaker::
 AggressiveAntiDepBreaker(MachineFunction& MFi,
                          const RegisterClassInfo &RCI,
-                         TargetSubtarget::RegClassVector& CriticalPathRCs) :
+                         TargetSubtargetInfo::RegClassVector& CriticalPathRCs) :
   AntiDepBreaker(), MF(MFi),
   MRI(MF.getRegInfo()),
   TII(MF.getTarget().getInstrInfo()),
@@ -404,7 +404,7 @@
     // Note register reference...
     const TargetRegisterClass *RC = NULL;
     if (i < MI->getDesc().getNumOperands())
-      RC = MI->getDesc().OpInfo[i].getRegClass(TRI);
+      RC = TII->getRegClass(MI->getDesc(), i, TRI);
     AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
     RegRefs.insert(std::make_pair(Reg, RR));
   }
@@ -479,7 +479,7 @@
     // Note register reference...
     const TargetRegisterClass *RC = NULL;
     if (i < MI->getDesc().getNumOperands())
-      RC = MI->getDesc().OpInfo[i].getRegClass(TRI);
+      RC = TII->getRegClass(MI->getDesc(), i, TRI);
     AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
     RegRefs.insert(std::make_pair(Reg, RR));
   }

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AggressiveAntiDepBreaker.h Sat Jul  2 22:28:07 2011
@@ -23,7 +23,7 @@
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/SmallSet.h"
@@ -131,8 +131,8 @@
 
   public:
     AggressiveAntiDepBreaker(MachineFunction& MFi,
-                             const RegisterClassInfo &RCI,
-                             TargetSubtarget::RegClassVector& CriticalPathRCs);
+                          const RegisterClassInfo &RCI,
+                          TargetSubtargetInfo::RegClassVector& CriticalPathRCs);
     ~AggressiveAntiDepBreaker();
 
     /// Start - Initialize anti-dep breaking for a new basic block.

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Sat Jul  2 22:28:07 2011
@@ -575,6 +575,8 @@
     }
   } else if (MI->getOperand(0).isImm()) {
     OS << MI->getOperand(0).getImm();
+  } else if (MI->getOperand(0).isCImm()) {
+    MI->getOperand(0).getCImm()->getValue().print(OS, false /*isSigned*/);
   } else {
     assert(MI->getOperand(0).isReg() && "Unknown operand type");
     if (MI->getOperand(0).getReg() == 0) {
@@ -1516,6 +1518,13 @@
                                      unsigned AddrSpace, AsmPrinter &AP) {
   for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
     EmitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
+
+  const TargetData &TD = *AP.TM.getTargetData();
+  unsigned Size = TD.getTypeAllocSize(CV->getType());
+  unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) *
+                         CV->getType()->getNumElements();
+  if (unsigned Padding = Size - EmittedSize)
+    AP.OutStreamer.EmitZeros(Padding, AddrSpace);
 }
 
 static void EmitGlobalConstantStruct(const ConstantStruct *CS,
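
The padding added above covers vector types whose alloc size is larger than element size times element count. Worked numbers under an assumed datalayout, for a hypothetical <3 x float> global whose alloc size rounds up to 16 bytes; nothing here is taken from the patch.

  #include <cstdio>

  int main() {
    unsigned NumElements = 3;                          // <3 x float>
    unsigned ElementSize = 4;                          // alloc size of float
    unsigned EmittedSize = ElementSize * NumElements;  // 12 bytes written per element
    unsigned AllocSize   = 16;                         // assumed getTypeAllocSize(<3 x float>)
    unsigned Padding     = AllocSize - EmittedSize;    // 4 zero bytes via EmitZeros
    std::printf("padding = %u bytes\n", Padding);
    return 0;
  }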

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp Sat Jul  2 22:28:07 2011
@@ -491,7 +491,7 @@
 }
 
 /// addConstantValue - Add constant value entry in variable DIE.
-bool CompileUnit::addConstantValue(DIE *Die, ConstantInt *CI,
+bool CompileUnit::addConstantValue(DIE *Die, const ConstantInt *CI,
                                    bool Unsigned) {
   unsigned CIBitWidth = CI->getBitWidth();
   if (CIBitWidth <= 64) {

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h Sat Jul  2 22:28:07 2011
@@ -181,7 +181,7 @@
 
   /// addConstantValue - Add constant value entry in variable DIE.
   bool addConstantValue(DIE *Die, const MachineOperand &MO, DIType Ty);
-  bool addConstantValue(DIE *Die, ConstantInt *CI, bool Unsigned);
+  bool addConstantValue(DIE *Die, const ConstantInt *CI, bool Unsigned);
 
   /// addConstantFPValue - Add constant value entry in variable DIE.
   bool addConstantFPValue(DIE *Die, const MachineOperand &MO);

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Sat Jul  2 22:28:07 2011
@@ -618,6 +618,21 @@
   return ScopeDIE;
 }
 
+/// isUnsignedDIType - Return true if type encoding is unsigned.
+static bool isUnsignedDIType(DIType Ty) {
+  DIDerivedType DTy(Ty);
+  if (DTy.Verify())
+    return isUnsignedDIType(DTy.getTypeDerivedFrom());
+
+  DIBasicType BTy(Ty);
+  if (BTy.Verify()) {
+    unsigned Encoding = BTy.getEncoding();
+    if (Encoding == dwarf::DW_ATE_unsigned ||
+        Encoding == dwarf::DW_ATE_unsigned_char)
+      return true;
+  }
+  return false;
+}
 
 /// constructVariableDIE - Construct a DIE for the given DbgVariable.
 DIE *DwarfDebug::constructVariableDIE(DbgVariable *DV, DbgScope *Scope) {
@@ -718,6 +733,11 @@
       else if (DVInsn->getOperand(0).isFPImm())
         updated =
           VariableCU->addConstantFPValue(VariableDie, DVInsn->getOperand(0));
+      else if (DVInsn->getOperand(0).isCImm())
+        updated =
+          VariableCU->addConstantValue(VariableDie, 
+                                       DVInsn->getOperand(0).getCImm(),
+                                       isUnsignedDIType(DV->getType()));
     } else {
       VariableCU->addVariableAddress(DV, VariableDie, 
                                      Asm->getDebugValueLocation(DVInsn));
@@ -913,22 +933,6 @@
   return I->second;
 }
 
-/// isUnsignedDIType - Return true if type encoding is unsigned.
-static bool isUnsignedDIType(DIType Ty) {
-  DIDerivedType DTy(Ty);
-  if (DTy.Verify())
-    return isUnsignedDIType(DTy.getTypeDerivedFrom());
-
-  DIBasicType BTy(Ty);
-  if (BTy.Verify()) {
-    unsigned Encoding = BTy.getEncoding();
-    if (Encoding == dwarf::DW_ATE_unsigned ||
-        Encoding == dwarf::DW_ATE_unsigned_char)
-      return true;
-  }
-  return false;
-}
-
 // Return const exprssion if value is a GEP to access merged global
 // constant. e.g.
 // i8* getelementptr ({ i8, i8, i8, i8 }* @_MergedGlobals, i32 0, i32 0)
@@ -1017,7 +1021,7 @@
     } else {
       TheCU->addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
     } 
-  } else if (ConstantInt *CI = 
+  } else if (const ConstantInt *CI = 
              dyn_cast_or_null<ConstantInt>(GV.getConstant()))
     TheCU->addConstantValue(VariableDIE, CI, isUnsignedDIType(GTy));
   else if (const ConstantExpr *CE = getMergedGlobalExpr(N->getOperand(11))) {

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfException.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfException.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfException.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/AsmPrinter/DwarfException.cpp Sat Jul  2 22:28:07 2011
@@ -512,6 +512,8 @@
     SizeAlign = 0;
   }
 
+  bool VerboseAsm = Asm->OutStreamer.isVerboseAsm();
+
   // SjLj Exception handling
   if (IsSJLJ) {
     Asm->EmitEncodingByte(dwarf::DW_EH_PE_udata4, "Call site");
@@ -525,14 +527,30 @@
          I = CallSites.begin(), E = CallSites.end(); I != E; ++I, ++idx) {
       const CallSiteEntry &S = *I;
 
+      if (VerboseAsm) {
+        // Emit comments that decode the call site.
+        Asm->OutStreamer.AddComment(Twine(">> Call Site ") +
+                                    llvm::utostr(idx) + " <<");
+        Asm->OutStreamer.AddComment(Twine("  On exception at call site ") +
+                                    llvm::utostr(idx));
+
+        if (S.Action == 0)
+          Asm->OutStreamer.AddComment("  Action: cleanup");
+        else
+          Asm->OutStreamer.AddComment(Twine("  Action: ") +
+                                      llvm::utostr((S.Action - 1) / 2 + 1));
+
+        Asm->OutStreamer.AddBlankLine();
+      }
+
       // Offset of the landing pad, counted in 16-byte bundles relative to the
       // @LPStart address.
-      Asm->EmitULEB128(idx, "Landing pad");
+      Asm->EmitULEB128(idx);
 
       // Offset of the first associated action record, relative to the start of
       // the action table. This value is biased by 1 (1 indicates the start of
       // the action table), and 0 indicates that there are no actions.
-      Asm->EmitULEB128(S.Action, "Action");
+      Asm->EmitULEB128(S.Action);
     }
   } else {
     // DWARF Exception handling
@@ -562,6 +580,7 @@
     // Add extra padding if it wasn't added to the TType base offset.
     Asm->EmitULEB128(CallSiteTableLength, "Call site table length", SizeAlign);
 
+    unsigned Entry = 0;
     for (SmallVectorImpl<CallSiteEntry>::const_iterator
          I = CallSites.begin(), E = CallSites.end(); I != E; ++I) {
       const CallSiteEntry &S = *I;
@@ -576,19 +595,38 @@
       if (EndLabel == 0)
         EndLabel = Asm->GetTempSymbol("eh_func_end", Asm->getFunctionNumber());
 
+      if (VerboseAsm) {
+        // Emit comments that decode the call site.
+        Asm->OutStreamer.AddComment(Twine(">> Call Site ") +
+                                    llvm::utostr(++Entry) + " <<");
+        Asm->OutStreamer.AddComment(Twine("  Call between ") +
+                                    BeginLabel->getName() + " and " +
+                                    EndLabel->getName());
+
+        if (!S.PadLabel) {
+          Asm->OutStreamer.AddComment("    has no landing pad");
+        } else {
+          Asm->OutStreamer.AddComment(Twine("    jumps to ") +
+                                      S.PadLabel->getName());
+
+          if (S.Action == 0)
+            Asm->OutStreamer.AddComment("  On action: cleanup");
+          else
+            Asm->OutStreamer.AddComment(Twine("  On action: ") +
+                                        llvm::utostr((S.Action - 1) / 2 + 1));
+        }
+
+        Asm->OutStreamer.AddBlankLine();
+      }
+
       // Offset of the call site relative to the previous call site, counted in
       // number of 16-byte bundles. The first call site is counted relative to
       // the start of the procedure fragment.
-      Asm->OutStreamer.AddComment("Region start");
       Asm->EmitLabelDifference(BeginLabel, EHFuncBeginSym, 4);
-
-      Asm->OutStreamer.AddComment("Region length");
       Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
 
-
       // Offset of the landing pad, counted in 16-byte bundles relative to the
       // @LPStart address.
-      Asm->OutStreamer.AddComment("Landing pad");
       if (!S.PadLabel)
         Asm->OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
       else
@@ -597,45 +635,63 @@
       // Offset of the first associated action record, relative to the start of
       // the action table. This value is biased by 1 (1 indicates the start of
       // the action table), and 0 indicates that there are no actions.
-      Asm->EmitULEB128(S.Action, "Action");
+      Asm->EmitULEB128(S.Action);
     }
   }
 
   // Emit the Action Table.
-  if (Actions.size() != 0) {
-    Asm->OutStreamer.AddComment("-- Action Record Table --");
-    Asm->OutStreamer.AddBlankLine();
-  }
-
+  int Entry = 0;
   for (SmallVectorImpl<ActionEntry>::const_iterator
          I = Actions.begin(), E = Actions.end(); I != E; ++I) {
     const ActionEntry &Action = *I;
-    Asm->OutStreamer.AddComment("Action Record");
-    Asm->OutStreamer.AddBlankLine();
+
+    if (VerboseAsm) {
+      // Emit comments that decode the action table.
+      Asm->OutStreamer.AddComment(Twine(">> Action Record ") +
+                                  llvm::utostr(++Entry) + " <<");
+      if (Action.ValueForTypeID >= 0)
+        Asm->OutStreamer.AddComment(Twine("  Catch TypeInfo ") +
+                                    llvm::itostr(Action.ValueForTypeID));
+      else 
+        Asm->OutStreamer.AddComment(Twine("  Filter TypeInfo ") +
+                                    llvm::itostr(Action.ValueForTypeID));
+
+      if (Action.NextAction == 0) {
+        Asm->OutStreamer.AddComment("  No further actions");
+      } else {
+        unsigned NextAction = Entry + (Action.NextAction + 1) / 2;
+        Asm->OutStreamer.AddComment(Twine("  Continue to action ") +
+                                    llvm::utostr(NextAction));
+      }
+
+      Asm->OutStreamer.AddBlankLine();
+    }
 
     // Type Filter
     //
     //   Used by the runtime to match the type of the thrown exception to the
     //   type of the catch clauses or the types in the exception specification.
-    Asm->EmitSLEB128(Action.ValueForTypeID, "  TypeInfo index");
+    Asm->EmitSLEB128(Action.ValueForTypeID);
 
     // Action Record
     //
     //   Self-relative signed displacement in bytes of the next action record,
     //   or 0 if there is no next action record.
-    Asm->EmitSLEB128(Action.NextAction, "  Next action");
+    Asm->EmitSLEB128(Action.NextAction);
   }
 
   // Emit the Catch TypeInfos.
-  if (!TypeInfos.empty()) {
-    Asm->OutStreamer.AddComment("-- Catch TypeInfos --");
+  if (VerboseAsm && !TypeInfos.empty()) {
+    Asm->OutStreamer.AddComment(">> Catch TypeInfos <<");
     Asm->OutStreamer.AddBlankLine();
+    Entry = TypeInfos.size();
   }
+
   for (std::vector<const GlobalVariable *>::const_reverse_iterator
          I = TypeInfos.rbegin(), E = TypeInfos.rend(); I != E; ++I) {
     const GlobalVariable *GV = *I;
-
-    Asm->OutStreamer.AddComment("TypeInfo");
+    if (VerboseAsm)
+      Asm->OutStreamer.AddComment(Twine("TypeInfo ") + llvm::utostr(Entry--));
     if (GV)
       Asm->EmitReference(GV, TTypeEncoding);
     else
@@ -644,14 +700,21 @@
   }
 
   // Emit the Exception Specifications.
-  if (!FilterIds.empty()) {
-    Asm->OutStreamer.AddComment("-- Filter IDs --");
+  if (VerboseAsm && !FilterIds.empty()) {
+    Asm->OutStreamer.AddComment(">> Filter TypeInfos <<");
     Asm->OutStreamer.AddBlankLine();
+    Entry = 0;
   }
   for (std::vector<unsigned>::const_iterator
          I = FilterIds.begin(), E = FilterIds.end(); I < E; ++I) {
     unsigned TypeID = *I;
-    Asm->EmitULEB128(TypeID, TypeID != 0 ? "Exception specification" : 0);
+    if (VerboseAsm) {
+      --Entry;
+      if (TypeID != 0)
+        Asm->OutStreamer.AddComment(Twine("FilterInfo ") + llvm::itostr(Entry));
+    }
+
+    Asm->EmitULEB128(TypeID);
   }
 
   Asm->EmitAlignment(2);
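
One reading of the (S.Action - 1) / 2 + 1 arithmetic in the new call-site comments: the biased action offset is turned into a 1-based record number, assuming each action record encodes as two one-byte SLEB128 values. A standalone check of that mapping with made-up offsets:

  #include <cstdio>

  int main() {
    // Hypothetical biased action offsets as they appear in the call-site
    // table; 0 means "cleanup only", 1 means the first action record.
    unsigned Offsets[] = {0, 1, 3, 5};
    for (unsigned i = 0; i != 4; ++i) {
      unsigned S = Offsets[i];
      if (S == 0)
        std::printf("offset %u -> cleanup\n", S);
      else
        std::printf("offset %u -> action record %u\n", S, (S - 1) / 2 + 1);
    }
    return 0;
  }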

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/BranchFolding.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/BranchFolding.cpp Sat Jul  2 22:28:07 2011
@@ -421,10 +421,10 @@
   for (; I != E; ++I) {
     if (I->isDebugValue())
       continue;
-    const TargetInstrDesc &TID = I->getDesc();
-    if (TID.isCall())
+    const MCInstrDesc &MCID = I->getDesc();
+    if (MCID.isCall())
       Time += 10;
-    else if (TID.mayLoad() || TID.mayStore())
+    else if (MCID.mayLoad() || MCID.mayStore())
       Time += 2;
     else
       ++Time;

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/CMakeLists.txt?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/CMakeLists.txt (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/CMakeLists.txt Sat Jul  2 22:28:07 2011
@@ -59,7 +59,6 @@
   Passes.cpp
   PeepholeOptimizer.cpp
   PostRASchedulerList.cpp
-  PreAllocSplitting.cpp
   ProcessImplicitDefs.cpp
   PrologEpilogInserter.cpp
   PseudoSourceValue.cpp
@@ -79,7 +78,6 @@
   ScoreboardHazardRecognizer.cpp
   ShadowStackGC.cpp
   ShrinkWrapping.cpp
-  SimpleRegisterCoalescing.cpp
   SjLjEHPrepare.cpp
   SlotIndexes.cpp
   Spiller.cpp

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/CalcSpillWeights.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/CalcSpillWeights.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/CalcSpillWeights.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/CalcSpillWeights.cpp Sat Jul  2 22:28:07 2011
@@ -188,6 +188,7 @@
 
 void VirtRegAuxInfo::CalculateRegClass(unsigned reg) {
   MachineRegisterInfo &MRI = MF.getRegInfo();
+  const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
   const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
   const TargetRegisterClass *OldRC = MRI.getRegClass(reg);
   const TargetRegisterClass *NewRC = TRI->getLargestLegalSuperClass(OldRC);
@@ -202,8 +203,11 @@
     // TRI doesn't have accurate enough information to model this yet.
     if (I.getOperand().getSubReg())
       return;
+    // Inline asm instructions don't remember their constraints.
+    if (I->isInlineAsm())
+      return;
     const TargetRegisterClass *OpRC =
-      I->getDesc().getRegClass(I.getOperandNo(), TRI);
+      TII->getRegClass(I->getDesc(), I.getOperandNo(), TRI);
     if (OpRC)
       NewRC = getCommonSubClass(NewRC, OpRC);
     if (!NewRC || NewRC == OldRC)

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/CodeGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/CodeGen.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/CodeGen.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/CodeGen.cpp Sat Jul  2 22:28:07 2011
@@ -37,13 +37,11 @@
   initializeOptimizePHIsPass(Registry);
   initializePHIEliminationPass(Registry);
   initializePeepholeOptimizerPass(Registry);
-  initializePreAllocSplittingPass(Registry);
   initializeProcessImplicitDefsPass(Registry);
   initializePEIPass(Registry);
   initializeRALinScanPass(Registry);
-  initializeRegisterCoalescerAnalysisGroup(Registry);
+  initializeRegisterCoalescerPass(Registry);
   initializeRenderMachineFunctionPass(Registry);
-  initializeSimpleRegisterCoalescingPass(Registry);
   initializeSlotIndexesPass(Registry);
   initializeLoopSplitterPass(Registry);
   initializeStackProtectorPass(Registry);

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/CriticalAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/CriticalAntiDepBreaker.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/CriticalAntiDepBreaker.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/CriticalAntiDepBreaker.cpp Sat Jul  2 22:28:07 2011
@@ -207,7 +207,7 @@
     const TargetRegisterClass *NewRC = 0;
 
     if (i < MI->getDesc().getNumOperands())
-      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);
+      NewRC = TII->getRegClass(MI->getDesc(), i, TRI);
 
     // For now, only allow the register to be changed if its register
     // class is consistent across all uses.
@@ -295,7 +295,7 @@
 
     const TargetRegisterClass *NewRC = 0;
     if (i < MI->getDesc().getNumOperands())
-      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);
+      NewRC = TII->getRegClass(MI->getDesc(), i, TRI);
 
     // For now, only allow the register to be changed if its register
     // class is consistent across all uses.

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/DeadMachineInstructionElim.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/DeadMachineInstructionElim.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/DeadMachineInstructionElim.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/DeadMachineInstructionElim.cpp Sat Jul  2 22:28:07 2011
@@ -110,9 +110,14 @@
           LivePhysRegs.set(Reg);
       }
 
-    // FIXME: Add live-ins from sucessors to LivePhysRegs. Normally, physregs
-    // are not live across blocks, but some targets (x86) can have flags live
-    // out of a block.
+    // Add live-ins from successors to LivePhysRegs. Normally, physregs are not
+    // live across blocks, but some targets (x86) can have flags live out of a
+    // block.
+    for (MachineBasicBlock::succ_iterator S = MBB->succ_begin(),
+           E = MBB->succ_end(); S != E; S++)
+      for (MachineBasicBlock::livein_iterator LI = (*S)->livein_begin();
+           LI != (*S)->livein_end(); LI++)
+        LivePhysRegs.set(*LI);
 
     // Now scan the instructions and delete dead ones, tracking physreg
     // liveness as we go.

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/ExpandISelPseudos.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/ExpandISelPseudos.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/ExpandISelPseudos.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/ExpandISelPseudos.cpp Sat Jul  2 22:28:07 2011
@@ -62,8 +62,8 @@
       MachineInstr *MI = MBBI++;
 
       // If MI is a pseudo, expand it.
-      const TargetInstrDesc &TID = MI->getDesc();
-      if (TID.usesCustomInsertionHook()) {
+      const MCInstrDesc &MCID = MI->getDesc();
+      if (MCID.usesCustomInsertionHook()) {
         Changed = true;
         MachineBasicBlock *NewMBB =
           TLI->EmitInstrWithCustomInserter(MI, MBB);

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/IfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/IfConversion.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/IfConversion.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/IfConversion.cpp Sat Jul  2 22:28:07 2011
@@ -18,8 +18,8 @@
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrItineraries.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -651,12 +651,12 @@
     if (I->isDebugValue())
       continue;
 
-    const TargetInstrDesc &TID = I->getDesc();
-    if (TID.isNotDuplicable())
+    const MCInstrDesc &MCID = I->getDesc();
+    if (MCID.isNotDuplicable())
       BBI.CannotBeCopied = true;
 
     bool isPredicated = TII->isPredicated(I);
-    bool isCondBr = BBI.IsBrAnalyzable && TID.isConditionalBranch();
+    bool isCondBr = BBI.IsBrAnalyzable && MCID.isConditionalBranch();
 
     if (!isCondBr) {
       if (!isPredicated) {
@@ -1414,9 +1414,9 @@
 
   for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
          E = FromBBI.BB->end(); I != E; ++I) {
-    const TargetInstrDesc &TID = I->getDesc();
+    const MCInstrDesc &MCID = I->getDesc();
     // Do not copy the end of the block branches.
-    if (IgnoreBr && TID.isBranch())
+    if (IgnoreBr && MCID.isBranch())
       break;
 
     MachineInstr *MI = MF.CloneMachineInstr(I);

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/InlineSpiller.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/InlineSpiller.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/InlineSpiller.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/InlineSpiller.cpp Sat Jul  2 22:28:07 2011
@@ -180,11 +180,7 @@
 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
 /// otherwise return 0.
 static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
-  if (!MI->isCopy())
-    return 0;
-  if (MI->getOperand(0).getSubReg() != 0)
-    return 0;
-  if (MI->getOperand(1).getSubReg() != 0)
+  if (!MI->isFullCopy())
     return 0;
   if (MI->getOperand(0).getReg() == Reg)
       return MI->getOperand(1).getReg();

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineBasicBlock.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineBasicBlock.cpp Sat Jul  2 22:28:07 2011
@@ -22,7 +22,6 @@
 #include "llvm/MC/MCContext.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetInstrDesc.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Assembly/Writer.h"

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineCSE.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineCSE.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineCSE.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineCSE.cpp Sat Jul  2 22:28:07 2011
@@ -260,12 +260,12 @@
     return false;
 
   // Ignore stuff that we obviously can't move.
-  const TargetInstrDesc &TID = MI->getDesc();  
-  if (TID.mayStore() || TID.isCall() || TID.isTerminator() ||
+  const MCInstrDesc &MCID = MI->getDesc();  
+  if (MCID.mayStore() || MCID.isCall() || MCID.isTerminator() ||
       MI->hasUnmodeledSideEffects())
     return false;
 
-  if (TID.mayLoad()) {
+  if (MCID.mayLoad()) {
     // Okay, this instruction does a load. As a refinement, we allow the target
     // to decide whether the loaded value is actually a constant. If so, we can
     // actually use it as a load.

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineFunction.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineFunction.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineFunction.cpp Sat Jul  2 22:28:07 2011
@@ -152,10 +152,10 @@
 /// of `new MachineInstr'.
 ///
 MachineInstr *
-MachineFunction::CreateMachineInstr(const TargetInstrDesc &TID,
+MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                     DebugLoc DL, bool NoImp) {
   return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
-    MachineInstr(TID, DL, NoImp);
+    MachineInstr(MCID, DL, NoImp);
 }
 
 /// CloneMachineInstr - Create a new MachineInstr which is a copy of the

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineInstr.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineInstr.cpp Sat Jul  2 22:28:07 2011
@@ -15,19 +15,22 @@
 #include "llvm/Constants.h"
 #include "llvm/Function.h"
 #include "llvm/InlineAsm.h"
+#include "llvm/LLVMContext.h"
 #include "llvm/Metadata.h"
+#include "llvm/Module.h"
 #include "llvm/Type.h"
 #include "llvm/Value.h"
 #include "llvm/Assembly/Writer.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrDesc.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/DebugInfo.h"
@@ -194,6 +197,8 @@
            getSubReg() == Other.getSubReg();
   case MachineOperand::MO_Immediate:
     return getImm() == Other.getImm();
+  case MachineOperand::MO_CImmediate:
+    return getCImm() == Other.getCImm();
   case MachineOperand::MO_FPImmediate:
     return getFPImm() == Other.getFPImm();
   case MachineOperand::MO_MachineBasicBlock:
@@ -267,6 +272,9 @@
   case MachineOperand::MO_Immediate:
     OS << getImm();
     break;
+  case MachineOperand::MO_CImmediate:
+    getCImm()->getValue().print(OS, false);
+    break;
   case MachineOperand::MO_FPImmediate:
     if (getFPImm()->getType()->isFloatTy())
       OS << getFPImm()->getValueAPF().convertToFloat();
@@ -454,9 +462,9 @@
 //===----------------------------------------------------------------------===//
 
 /// MachineInstr ctor - This constructor creates a dummy MachineInstr with
-/// TID NULL and no operands.
+/// MCID NULL and no operands.
 MachineInstr::MachineInstr()
-  : TID(0), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
+  : MCID(0), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
     MemRefs(0), MemRefsEnd(0),
     Parent(0) {
   // Make sure that we get added to a machine basicblock
@@ -464,23 +472,23 @@
 }
 
 void MachineInstr::addImplicitDefUseOperands() {
-  if (TID->ImplicitDefs)
-    for (const unsigned *ImpDefs = TID->ImplicitDefs; *ImpDefs; ++ImpDefs)
+  if (MCID->ImplicitDefs)
+    for (const unsigned *ImpDefs = MCID->ImplicitDefs; *ImpDefs; ++ImpDefs)
       addOperand(MachineOperand::CreateReg(*ImpDefs, true, true));
-  if (TID->ImplicitUses)
-    for (const unsigned *ImpUses = TID->ImplicitUses; *ImpUses; ++ImpUses)
+  if (MCID->ImplicitUses)
+    for (const unsigned *ImpUses = MCID->ImplicitUses; *ImpUses; ++ImpUses)
       addOperand(MachineOperand::CreateReg(*ImpUses, false, true));
 }
 
 /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
 /// implicit operands. It reserves space for the number of operands specified by
-/// the TargetInstrDesc.
-MachineInstr::MachineInstr(const TargetInstrDesc &tid, bool NoImp)
-  : TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
+/// the MCInstrDesc.
+MachineInstr::MachineInstr(const MCInstrDesc &tid, bool NoImp)
+  : MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
     MemRefs(0), MemRefsEnd(0), Parent(0) {
   if (!NoImp)
-    NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
-  Operands.reserve(NumImplicitOps + TID->getNumOperands());
+    NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
+  Operands.reserve(NumImplicitOps + MCID->getNumOperands());
   if (!NoImp)
     addImplicitDefUseOperands();
   // Make sure that we get added to a machine basicblock
@@ -488,13 +496,13 @@
 }
 
 /// MachineInstr ctor - As above, but with a DebugLoc.
-MachineInstr::MachineInstr(const TargetInstrDesc &tid, const DebugLoc dl,
+MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl,
                            bool NoImp)
-  : TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
+  : MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
     MemRefs(0), MemRefsEnd(0), Parent(0), debugLoc(dl) {
   if (!NoImp)
-    NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
-  Operands.reserve(NumImplicitOps + TID->getNumOperands());
+    NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
+  Operands.reserve(NumImplicitOps + MCID->getNumOperands());
   if (!NoImp)
     addImplicitDefUseOperands();
   // Make sure that we get added to a machine basicblock
@@ -504,12 +512,12 @@
 /// MachineInstr ctor - Work exactly the same as the ctor two above, except
 /// that the MachineInstr is created and added to the end of the specified 
 /// basic block.
-MachineInstr::MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &tid)
-  : TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
+MachineInstr::MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &tid)
+  : MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
     MemRefs(0), MemRefsEnd(0), Parent(0) {
   assert(MBB && "Cannot use inserting ctor with null basic block!");
-  NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
-  Operands.reserve(NumImplicitOps + TID->getNumOperands());
+  NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
+  Operands.reserve(NumImplicitOps + MCID->getNumOperands());
   addImplicitDefUseOperands();
   // Make sure that we get added to a machine basicblock
   LeakDetector::addGarbageObject(this);
@@ -519,12 +527,12 @@
 /// MachineInstr ctor - As above, but with a DebugLoc.
 ///
 MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
-                           const TargetInstrDesc &tid)
-  : TID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
+                           const MCInstrDesc &tid)
+  : MCID(&tid), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
     MemRefs(0), MemRefsEnd(0), Parent(0), debugLoc(dl) {
   assert(MBB && "Cannot use inserting ctor with null basic block!");
-  NumImplicitOps = TID->getNumImplicitDefs() + TID->getNumImplicitUses();
-  Operands.reserve(NumImplicitOps + TID->getNumOperands());
+  NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses();
+  Operands.reserve(NumImplicitOps + MCID->getNumOperands());
   addImplicitDefUseOperands();
   // Make sure that we get added to a machine basicblock
   LeakDetector::addGarbageObject(this);
@@ -534,7 +542,7 @@
 /// MachineInstr ctor - Copies MachineInstr arg exactly
 ///
 MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
-  : TID(&MI.getDesc()), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
+  : MCID(&MI.getDesc()), NumImplicitOps(0), Flags(0), AsmPrinterFlags(0),
     MemRefs(MI.MemRefs), MemRefsEnd(MI.MemRefsEnd),
     Parent(0), debugLoc(MI.getDebugLoc()) {
   Operands.reserve(MI.getNumOperands());
@@ -621,7 +629,7 @@
         Operands.back().AddRegOperandToRegInfo(RegInfo);
         // If the register operand is flagged as early, mark the operand as such
         unsigned OpNo = Operands.size() - 1;
-        if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
+        if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
           Operands[OpNo].setIsEarlyClobber(true);
       }
       return;
@@ -643,7 +651,7 @@
     if (Operands[OpNo].isReg()) {
       Operands[OpNo].AddRegOperandToRegInfo(0);
       // If the register operand is flagged as early, mark the operand as such
-      if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
+      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
         Operands[OpNo].setIsEarlyClobber(true);
     }
 
@@ -668,7 +676,7 @@
     if (Operands[OpNo].isReg()) {
       Operands[OpNo].AddRegOperandToRegInfo(RegInfo);
       // If the register operand is flagged as early, mark the operand as such
-      if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
+      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
         Operands[OpNo].setIsEarlyClobber(true);
     }
     
@@ -691,7 +699,7 @@
 
       // If the register operand is flagged as early, mark the operand as such
     if (Operands[OpNo].isReg()
-        && TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1)
+        && MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
       Operands[OpNo].setIsEarlyClobber(true);
   }
 }
@@ -817,8 +825,8 @@
 /// OperandComplete - Return true if it's illegal to add a new operand
 ///
 bool MachineInstr::OperandsComplete() const {
-  unsigned short NumOperands = TID->getNumOperands();
-  if (!TID->isVariadic() && getNumOperands()-NumImplicitOps >= NumOperands)
+  unsigned short NumOperands = MCID->getNumOperands();
+  if (!MCID->isVariadic() && getNumOperands()-NumImplicitOps >= NumOperands)
     return true;  // Broken: we have all the operands of this instruction!
   return false;
 }
@@ -826,8 +834,8 @@
 /// getNumExplicitOperands - Returns the number of non-implicit operands.
 ///
 unsigned MachineInstr::getNumExplicitOperands() const {
-  unsigned NumOperands = TID->getNumOperands();
-  if (!TID->isVariadic())
+  unsigned NumOperands = MCID->getNumOperands();
+  if (!MCID->isVariadic())
     return NumOperands;
 
   for (unsigned i = NumOperands, e = getNumOperands(); i != e; ++i) {
@@ -928,10 +936,10 @@
 /// operand list that is used to represent the predicate. It returns -1 if
 /// none is found.
 int MachineInstr::findFirstPredOperandIdx() const {
-  const TargetInstrDesc &TID = getDesc();
-  if (TID.isPredicable()) {
+  const MCInstrDesc &MCID = getDesc();
+  if (MCID.isPredicable()) {
     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
-      if (TID.OpInfo[i].isPredicate())
+      if (MCID.OpInfo[i].isPredicate())
         return i;
   }
 
@@ -987,11 +995,11 @@
   }
 
   assert(getOperand(DefOpIdx).isDef() && "DefOpIdx is not a def!");
-  const TargetInstrDesc &TID = getDesc();
-  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
+  const MCInstrDesc &MCID = getDesc();
+  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = getOperand(i);
     if (MO.isReg() && MO.isUse() &&
-        TID.getOperandConstraint(i, TOI::TIED_TO) == (int)DefOpIdx) {
+        MCID.getOperandConstraint(i, MCOI::TIED_TO) == (int)DefOpIdx) {
       if (UseOpIdx)
         *UseOpIdx = (unsigned)i;
       return true;
@@ -1047,13 +1055,13 @@
     return false;
   }
 
-  const TargetInstrDesc &TID = getDesc();
-  if (UseOpIdx >= TID.getNumOperands())
+  const MCInstrDesc &MCID = getDesc();
+  if (UseOpIdx >= MCID.getNumOperands())
     return false;
   const MachineOperand &MO = getOperand(UseOpIdx);
   if (!MO.isReg() || !MO.isUse())
     return false;
-  int DefIdx = TID.getOperandConstraint(UseOpIdx, TOI::TIED_TO);
+  int DefIdx = MCID.getOperandConstraint(UseOpIdx, MCOI::TIED_TO);
   if (DefIdx == -1)
     return false;
   if (DefOpIdx)
@@ -1093,11 +1101,11 @@
 
 /// copyPredicates - Copies predicate operand(s) from MI.
 void MachineInstr::copyPredicates(const MachineInstr *MI) {
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.isPredicable())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.isPredicable())
     return;
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    if (TID.OpInfo[i].isPredicate()) {
+    if (MCID.OpInfo[i].isPredicate()) {
       // Predicated operands must be last operands.
       addOperand(MI->getOperand(i));
     }
@@ -1134,13 +1142,13 @@
                                 AliasAnalysis *AA,
                                 bool &SawStore) const {
   // Ignore stuff that we obviously can't move.
-  if (TID->mayStore() || TID->isCall()) {
+  if (MCID->mayStore() || MCID->isCall()) {
     SawStore = true;
     return false;
   }
 
   if (isLabel() || isDebugValue() ||
-      TID->isTerminator() || hasUnmodeledSideEffects())
+      MCID->isTerminator() || hasUnmodeledSideEffects())
     return false;
 
   // See if this instruction does a load.  If so, we have to guarantee that the
@@ -1148,7 +1156,7 @@
   // destination. The check for isInvariantLoad gives the targe the chance to
   // classify the load as always returning a constant, e.g. a constant pool
   // load.
-  if (TID->mayLoad() && !isInvariantLoad(AA))
+  if (MCID->mayLoad() && !isInvariantLoad(AA))
     // Otherwise, this is a real load.  If there is a store between the load and
     // end of block, or if the load is volatile, we can't move it.
     return !SawStore && !hasVolatileMemoryRef();
@@ -1188,9 +1196,9 @@
 /// have no volatile memory references.
 bool MachineInstr::hasVolatileMemoryRef() const {
   // An instruction known never to access memory won't have a volatile access.
-  if (!TID->mayStore() &&
-      !TID->mayLoad() &&
-      !TID->isCall() &&
+  if (!MCID->mayStore() &&
+      !MCID->mayLoad() &&
+      !MCID->isCall() &&
       !hasUnmodeledSideEffects())
     return false;
 
@@ -1214,7 +1222,7 @@
 /// *all* loads the instruction does are invariant (if it does multiple loads).
 bool MachineInstr::isInvariantLoad(AliasAnalysis *AA) const {
   // If the instruction doesn't load at all, it isn't an invariant load.
-  if (!TID->mayLoad())
+  if (!MCID->mayLoad())
     return false;
 
   // If the instruction has lost its memoperands, conservatively assume that
@@ -1364,6 +1372,8 @@
   // Print the rest of the operands.
   bool OmittedAnyCallClobbers = false;
   bool FirstOp = true;
+  unsigned AsmDescOp = ~0u;
+  unsigned AsmOpCount = 0;
 
   if (isInlineAsm()) {
     // Print asm string.
@@ -1377,7 +1387,7 @@
     if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
       OS << " [alignstack]";
 
-    StartOp = InlineAsm::MIOp_FirstOperand;
+    StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
     FirstOp = false;
   }
 
@@ -1416,10 +1426,10 @@
     if (FirstOp) FirstOp = false; else OS << ",";
     OS << " ";
     if (i < getDesc().NumOperands) {
-      const TargetOperandInfo &TOI = getDesc().OpInfo[i];
-      if (TOI.isPredicate())
+      const MCOperandInfo &MCOI = getDesc().OpInfo[i];
+      if (MCOI.isPredicate())
         OS << "pred:";
-      if (TOI.isOptionalDef())
+      if (MCOI.isOptionalDef())
         OS << "opt:";
     }
     if (isDebugValue() && MO.isMetadata()) {
@@ -1431,6 +1441,26 @@
         MO.print(OS, TM);
     } else if (TM && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
       OS << TM->getRegisterInfo()->getSubRegIndexName(MO.getImm());
+    } else if (i == AsmDescOp && MO.isImm()) {
+      // Pretty print the inline asm operand descriptor.
+      OS << '$' << AsmOpCount++;
+      unsigned Flag = MO.getImm();
+      switch (InlineAsm::getKind(Flag)) {
+      case InlineAsm::Kind_RegUse:             OS << ":[reguse]"; break;
+      case InlineAsm::Kind_RegDef:             OS << ":[regdef]"; break;
+      case InlineAsm::Kind_RegDefEarlyClobber: OS << ":[regdef-ec]"; break;
+      case InlineAsm::Kind_Clobber:            OS << ":[clobber]"; break;
+      case InlineAsm::Kind_Imm:                OS << ":[imm]"; break;
+      case InlineAsm::Kind_Mem:                OS << ":[mem]"; break;
+      default: OS << ":[??" << InlineAsm::getKind(Flag) << ']'; break;
+      }
+
+      unsigned TiedTo = 0;
+      if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
+        OS << " [tiedto:$" << TiedTo << ']';
+
+      // Compute the index of the next operand descriptor.
+      AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
     } else
       MO.print(OS, TM);
   }
@@ -1685,3 +1715,24 @@
   }
   return Hash;
 }
+
+void MachineInstr::emitError(StringRef Msg) const {
+  // Find the source location cookie.
+  unsigned LocCookie = 0;
+  const MDNode *LocMD = 0;
+  for (unsigned i = getNumOperands(); i != 0; --i) {
+    if (getOperand(i-1).isMetadata() &&
+        (LocMD = getOperand(i-1).getMetadata()) &&
+        LocMD->getNumOperands() != 0) {
+      if (const ConstantInt *CI = dyn_cast<ConstantInt>(LocMD->getOperand(0))) {
+        LocCookie = CI->getZExtValue();
+        break;
+      }
+    }
+  }
+
+  if (const MachineBasicBlock *MBB = getParent())
+    if (const MachineFunction *MF = MBB->getParent())
+      return MF->getMMI().getModule()->getContext().emitError(LocCookie, Msg);
+  report_fatal_error(Msg);
+}

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineLICM.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineLICM.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineLICM.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineLICM.cpp Sat Jul  2 22:28:07 2011
@@ -28,10 +28,10 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/ADT/DenseMap.h"
@@ -1018,9 +1018,9 @@
                                     /*UnfoldStore=*/false,
                                     &LoadRegIndex);
   if (NewOpc == 0) return 0;
-  const TargetInstrDesc &TID = TII->get(NewOpc);
-  if (TID.getNumDefs() != 1) return 0;
-  const TargetRegisterClass *RC = TID.OpInfo[LoadRegIndex].getRegClass(TRI);
+  const MCInstrDesc &MID = TII->get(NewOpc);
+  if (MID.getNumDefs() != 1) return 0;
+  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
   // Ok, we're unfolding. Create a temporary register and do the unfold.
   unsigned Reg = MRI->createVirtualRegister(RC);
 

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineRegisterInfo.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineRegisterInfo.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineRegisterInfo.cpp Sat Jul  2 22:28:07 2011
@@ -20,7 +20,6 @@
 MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
   VRegInfo.reserve(256);
   RegAllocHints.reserve(256);
-  RegClass2VRegMap = new std::vector<unsigned>[TRI.getNumRegClasses()];
   UsedPhysRegs.resize(TRI.getNumRegs());
   
   // Create the physreg use/def lists.
@@ -38,25 +37,13 @@
            "PhysRegUseDefLists has entries after all instructions are deleted");
 #endif
   delete [] PhysRegUseDefLists;
-  delete [] RegClass2VRegMap;
 }
 
 /// setRegClass - Set the register class of the specified virtual register.
 ///
 void
 MachineRegisterInfo::setRegClass(unsigned Reg, const TargetRegisterClass *RC) {
-  const TargetRegisterClass *OldRC = VRegInfo[Reg].first;
   VRegInfo[Reg].first = RC;
-
-  // Remove from old register class's vregs list. This may be slow but
-  // fortunately this operation is rarely needed.
-  std::vector<unsigned> &VRegs = RegClass2VRegMap[OldRC->getID()];
-  std::vector<unsigned>::iterator I =
-    std::find(VRegs.begin(), VRegs.end(), Reg);
-  VRegs.erase(I);
-
-  // Add to new register class's vregs list.
-  RegClass2VRegMap[RC->getID()].push_back(Reg);
 }
 
 const TargetRegisterClass *
@@ -95,7 +82,6 @@
   if (ArrayBase && &VRegInfo[FirstVirtReg] != ArrayBase)
     // The vector reallocated, handle this now.
     HandleVRegListReallocation();
-  RegClass2VRegMap[RegClass->getID()].push_back(Reg);
   return Reg;
 }
 

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/MachineVerifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/MachineVerifier.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/MachineVerifier.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/MachineVerifier.cpp Sat Jul  2 22:28:07 2011
@@ -62,6 +62,7 @@
     raw_ostream *OS;
     const MachineFunction *MF;
     const TargetMachine *TM;
+    const TargetInstrInfo *TII;
     const TargetRegisterInfo *TRI;
     const MachineRegisterInfo *MRI;
 
@@ -255,6 +256,7 @@
 
   this->MF = &MF;
   TM = &MF.getTarget();
+  TII = TM->getInstrInfo();
   TRI = TM->getRegisterInfo();
   MRI = &MF.getRegInfo();
 
@@ -387,8 +389,6 @@
 
 void
 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
-  const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
-
   // Count the number of landing pad successors.
   SmallPtrSet<MachineBasicBlock*, 4> LandingPadSuccs;
   for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
@@ -541,19 +541,19 @@
 }
 
 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
-  const TargetInstrDesc &TI = MI->getDesc();
-  if (MI->getNumOperands() < TI.getNumOperands()) {
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (MI->getNumOperands() < MCID.getNumOperands()) {
     report("Too few operands", MI);
-    *OS << TI.getNumOperands() << " operands expected, but "
+    *OS << MCID.getNumOperands() << " operands expected, but "
         << MI->getNumExplicitOperands() << " given.\n";
   }
 
   // Check the MachineMemOperands for basic consistency.
   for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
        E = MI->memoperands_end(); I != E; ++I) {
-    if ((*I)->isLoad() && !TI.mayLoad())
+    if ((*I)->isLoad() && !MCID.mayLoad())
       report("Missing mayLoad flag", MI);
-    if ((*I)->isStore() && !TI.mayStore())
+    if ((*I)->isStore() && !MCID.mayStore())
       report("Missing mayStore flag", MI);
   }
 
@@ -575,29 +575,30 @@
 void
 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
   const MachineInstr *MI = MO->getParent();
-  const TargetInstrDesc &TI = MI->getDesc();
-  const TargetOperandInfo &TOI = TI.OpInfo[MONum];
+  const MCInstrDesc &MCID = MI->getDesc();
+  const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
 
-  // The first TI.NumDefs operands must be explicit register defines
-  if (MONum < TI.getNumDefs()) {
+  // The first MCID.NumDefs operands must be explicit register defines
+  if (MONum < MCID.getNumDefs()) {
     if (!MO->isReg())
       report("Explicit definition must be a register", MO, MONum);
     else if (!MO->isDef())
       report("Explicit definition marked as use", MO, MONum);
     else if (MO->isImplicit())
       report("Explicit definition marked as implicit", MO, MONum);
-  } else if (MONum < TI.getNumOperands()) {
+  } else if (MONum < MCID.getNumOperands()) {
     // Don't check if it's the last operand in a variadic instruction. See,
     // e.g., LDM_RET in the arm back end.
-    if (MO->isReg() && !(TI.isVariadic() && MONum == TI.getNumOperands()-1)) {
-      if (MO->isDef() && !TOI.isOptionalDef())
+    if (MO->isReg() &&
+        !(MCID.isVariadic() && MONum == MCID.getNumOperands()-1)) {
+      if (MO->isDef() && !MCOI.isOptionalDef())
           report("Explicit operand marked as def", MO, MONum);
       if (MO->isImplicit())
         report("Explicit operand marked as implicit", MO, MONum);
     }
   } else {
     // ARM adds %reg0 operands to indicate predicates. We'll allow that.
-    if (MO->isReg() && !MO->isImplicit() && !TI.isVariadic() && MO->getReg())
+    if (MO->isReg() && !MO->isImplicit() && !MCID.isVariadic() && MO->getReg())
       report("Extra explicit operand on non-variadic instruction", MO, MONum);
   }
 
@@ -709,7 +710,7 @@
     }
 
     // Check register classes.
-    if (MONum < TI.getNumOperands() && !MO->isImplicit()) {
+    if (MONum < MCID.getNumOperands() && !MO->isImplicit()) {
       unsigned SubIdx = MO->getSubReg();
 
       if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
@@ -723,7 +724,7 @@
           }
           sr = s;
         }
-        if (const TargetRegisterClass *DRC = TOI.getRegClass(TRI)) {
+        if (const TargetRegisterClass *DRC = TII->getRegClass(MCID,MONum,TRI)) {
           if (!DRC->contains(sr)) {
             report("Illegal physical register for instruction", MO, MONum);
             *OS << TRI->getName(sr) << " is not a "
@@ -743,7 +744,7 @@
           }
           RC = SRC;
         }
-        if (const TargetRegisterClass *DRC = TOI.getRegClass(TRI)) {
+        if (const TargetRegisterClass *DRC = TII->getRegClass(MCID,MONum,TRI)) {
           if (!RC->hasSuperClassEq(DRC)) {
             report("Illegal virtual register for instruction", MO, MONum);
             *OS << "Expected a " << DRC->getName() << " register, but got a "
@@ -765,11 +766,11 @@
         LiveInts && !LiveInts->isNotInMIMap(MI)) {
       LiveInterval &LI = LiveStks->getInterval(MO->getIndex());
       SlotIndex Idx = LiveInts->getInstructionIndex(MI);
-      if (TI.mayLoad() && !LI.liveAt(Idx.getUseIndex())) {
+      if (MCID.mayLoad() && !LI.liveAt(Idx.getUseIndex())) {
         report("Instruction loads from dead spill slot", MO, MONum);
         *OS << "Live stack: " << LI << '\n';
       }
-      if (TI.mayStore() && !LI.liveAt(Idx.getDefIndex())) {
+      if (MCID.mayStore() && !LI.liveAt(Idx.getDefIndex())) {
         report("Instruction stores to dead spill slot", MO, MONum);
         *OS << "Live stack: " << LI << '\n';
       }

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/PeepholeOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/PeepholeOptimizer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/PeepholeOptimizer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/PeepholeOptimizer.cpp Sat Jul  2 22:28:07 2011
@@ -353,10 +353,10 @@
 bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                         SmallSet<unsigned, 4> &ImmDefRegs,
                                  DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.isMoveImmediate())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.isMoveImmediate())
     return false;
-  if (TID.getNumDefs() != 1)
+  if (MCID.getNumDefs() != 1)
     return false;
   unsigned Reg = MI->getOperand(0).getReg();
   if (TargetRegisterInfo::isVirtualRegister(Reg)) {
@@ -429,16 +429,16 @@
         continue;
       }
 
-      const TargetInstrDesc &TID = MI->getDesc();
+      const MCInstrDesc &MCID = MI->getDesc();
 
-      if (TID.isBitcast()) {
+      if (MCID.isBitcast()) {
         if (OptimizeBitcastInstr(MI, MBB)) {
           // MI is deleted.
           Changed = true;
           MII = First ? I->begin() : llvm::next(PMII);
           continue;
         }        
-      } else if (TID.isCompare()) {
+      } else if (MCID.isCompare()) {
         if (OptimizeCmpInstr(MI, MBB)) {
           // MI is deleted.
           Changed = true;

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/PostRASchedulerList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/PostRASchedulerList.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/PostRASchedulerList.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/PostRASchedulerList.cpp Sat Jul  2 22:28:07 2011
@@ -38,7 +38,7 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -53,7 +53,7 @@
 STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
 
 // Post-RA scheduling is enabled with
-// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
+// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
 // override the target.
 static cl::opt<bool>
 EnablePostRAScheduler("post-RA-scheduler",
@@ -138,7 +138,7 @@
     SchedulePostRATDList(
       MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
       AliasAnalysis *AA, const RegisterClassInfo&,
-      TargetSubtarget::AntiDepBreakMode AntiDepMode,
+      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
       SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);
 
     ~SchedulePostRATDList();
@@ -183,7 +183,7 @@
 SchedulePostRATDList::SchedulePostRATDList(
   MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
   AliasAnalysis *AA, const RegisterClassInfo &RCI,
-  TargetSubtarget::AntiDepBreakMode AntiDepMode,
+  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
   SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
   : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), AA(AA),
     KillIndices(TRI->getNumRegs())
@@ -193,9 +193,9 @@
   HazardRec =
     TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
   AntiDepBreak =
-    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
+    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
      (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
-     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
+     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
       (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
 }
 
@@ -212,7 +212,7 @@
   RegClassInfo.runOnMachineFunction(Fn);
 
   // Check for explicit enable/disable of post-ra scheduling.
-  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
+  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode = TargetSubtargetInfo::ANTIDEP_NONE;
   SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
   if (EnablePostRAScheduler.getPosition() > 0) {
     if (!EnablePostRAScheduler)
@@ -220,17 +220,18 @@
   } else {
     // Check that post-RA scheduling is enabled for this target.
     // This may upgrade the AntiDepMode.
-    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
+    const TargetSubtargetInfo &ST = Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
     if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
       return false;
   }
 
   // Check for antidep breaking override...
   if (EnableAntiDepBreaking.getPosition() > 0) {
-    AntiDepMode = (EnableAntiDepBreaking == "all") ?
-      TargetSubtarget::ANTIDEP_ALL :
-        (EnableAntiDepBreaking == "critical")
-           ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
+    AntiDepMode = (EnableAntiDepBreaking == "all")
+      ? TargetSubtargetInfo::ANTIDEP_ALL
+      : ((EnableAntiDepBreaking == "critical")
+         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
+         : TargetSubtargetInfo::ANTIDEP_NONE);
   }
 
   DEBUG(dbgs() << "PostRAScheduler\n");

Removed: llvm/branches/type-system-rewrite/lib/CodeGen/PreAllocSplitting.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/PreAllocSplitting.cpp?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/PreAllocSplitting.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/PreAllocSplitting.cpp (removed)
@@ -1,1430 +0,0 @@
-//===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass ----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the machine instruction level pre-register allocation
-// live interval splitting pass. It finds live interval barriers, i.e.
-// instructions which will kill all physical registers in certain register
-// classes, and split all live intervals which cross the barrier.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "pre-alloc-split"
-#include "VirtRegMap.h"
-#include "llvm/CodeGen/CalcSpillWeights.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveStackAnalysis.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/Statistic.h"
-using namespace llvm;
-
-static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
-static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1),
-                                   cl::Hidden);
-static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1),
-                                     cl::Hidden);
-
-STATISTIC(NumSplits, "Number of intervals split");
-STATISTIC(NumRemats, "Number of intervals split by rematerialization");
-STATISTIC(NumFolds, "Number of intervals split with spill folding");
-STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
-STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
-STATISTIC(NumDeadSpills, "Number of dead spills removed");
-
-namespace {
-  class PreAllocSplitting : public MachineFunctionPass {
-    MachineFunction       *CurrMF;
-    const TargetMachine   *TM;
-    const TargetInstrInfo *TII;
-    const TargetRegisterInfo* TRI;
-    MachineFrameInfo      *MFI;
-    MachineRegisterInfo   *MRI;
-    SlotIndexes           *SIs;
-    LiveIntervals         *LIs;
-    LiveStacks            *LSs;
-    VirtRegMap            *VRM;
-
-    // Barrier - Current barrier being processed.
-    MachineInstr          *Barrier;
-
-    // BarrierMBB - Basic block where the barrier resides in.
-    MachineBasicBlock     *BarrierMBB;
-
-    // Barrier - Current barrier index.
-    SlotIndex     BarrierIdx;
-
-    // CurrLI - Current live interval being split.
-    LiveInterval          *CurrLI;
-
-    // CurrSLI - Current stack slot live interval.
-    LiveInterval          *CurrSLI;
-
-    // CurrSValNo - Current val# for the stack slot live interval.
-    VNInfo                *CurrSValNo;
-
-    // IntervalSSMap - A map from live interval to spill slots.
-    DenseMap<unsigned, int> IntervalSSMap;
-
-    // Def2SpillMap - A map from a def instruction index to spill index.
-    DenseMap<SlotIndex, SlotIndex> Def2SpillMap;
-
-  public:
-    static char ID;
-    PreAllocSplitting() : MachineFunctionPass(ID) {
-      initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
-    }
-
-    virtual bool runOnMachineFunction(MachineFunction &MF);
-
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      AU.setPreservesCFG();
-      AU.addRequired<SlotIndexes>();
-      AU.addPreserved<SlotIndexes>();
-      AU.addRequired<LiveIntervals>();
-      AU.addPreserved<LiveIntervals>();
-      AU.addRequired<LiveStacks>();
-      AU.addPreserved<LiveStacks>();
-      AU.addPreserved<RegisterCoalescer>();
-      AU.addPreserved<CalculateSpillWeights>();
-      AU.addPreservedID(StrongPHIEliminationID);
-      AU.addPreservedID(PHIEliminationID);
-      AU.addRequired<MachineDominatorTree>();
-      AU.addRequired<MachineLoopInfo>();
-      AU.addRequired<VirtRegMap>();
-      AU.addPreserved<MachineDominatorTree>();
-      AU.addPreserved<MachineLoopInfo>();
-      AU.addPreserved<VirtRegMap>();
-      MachineFunctionPass::getAnalysisUsage(AU);
-    }
-    
-    virtual void releaseMemory() {
-      IntervalSSMap.clear();
-      Def2SpillMap.clear();
-    }
-
-    virtual const char *getPassName() const {
-      return "Pre-Register Allocation Live Interval Splitting";
-    }
-
-    /// print - Implement the dump method.
-    virtual void print(raw_ostream &O, const Module* M = 0) const {
-      LIs->print(O, M);
-    }
-
-
-  private:
-
-    MachineBasicBlock::iterator
-      findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
-                     SmallPtrSet<MachineInstr*, 4>&);
-
-    MachineBasicBlock::iterator
-      findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
-                     SmallPtrSet<MachineInstr*, 4>&);
-
-    int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);
-
-    bool IsAvailableInStack(MachineBasicBlock*, unsigned,
-                            SlotIndex, SlotIndex,
-                            SlotIndex&, int&) const;
-
-    void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);
-
-    bool SplitRegLiveInterval(LiveInterval*);
-
-    bool SplitRegLiveIntervals(const TargetRegisterClass **,
-                               SmallPtrSet<LiveInterval*, 8>&);
-    
-    bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
-                        MachineBasicBlock* BarrierMBB);
-    bool Rematerialize(unsigned vreg, VNInfo* ValNo,
-                       MachineInstr* DefMI,
-                       MachineBasicBlock::iterator RestorePt,
-                       SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
-    MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
-                            MachineInstr* DefMI,
-                            MachineInstr* Barrier,
-                            MachineBasicBlock* MBB,
-                            int& SS,
-                            SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
-    MachineInstr* FoldRestore(unsigned vreg, 
-                              const TargetRegisterClass* RC,
-                              MachineInstr* Barrier,
-                              MachineBasicBlock* MBB,
-                              int SS,
-                              SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
-    void RenumberValno(VNInfo* VN);
-    void ReconstructLiveInterval(LiveInterval* LI);
-    bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
-    unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
-                               unsigned Reg, int FrameIndex, bool& TwoAddr);
-    VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator Use,
-                                   MachineBasicBlock* MBB, LiveInterval* LI,
-                                   SmallPtrSet<MachineInstr*, 4>& Visited,
-            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
-            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
-                                      DenseMap<MachineInstr*, VNInfo*>& NewVNs,
-                                DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
-                                DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
-                                        bool IsTopLevel, bool IsIntraBlock);
-    VNInfo* PerformPHIConstructionFallBack(MachineBasicBlock::iterator Use,
-                                   MachineBasicBlock* MBB, LiveInterval* LI,
-                                   SmallPtrSet<MachineInstr*, 4>& Visited,
-            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
-            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
-                                      DenseMap<MachineInstr*, VNInfo*>& NewVNs,
-                                DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
-                                DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
-                                        bool IsTopLevel, bool IsIntraBlock);
-};
-} // end anonymous namespace
-
-char PreAllocSplitting::ID = 0;
-
-INITIALIZE_PASS_BEGIN(PreAllocSplitting, "pre-alloc-splitting",
-                "Pre-Register Allocation Live Interval Splitting",
-                false, false)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(LiveStacks)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
-INITIALIZE_PASS_END(PreAllocSplitting, "pre-alloc-splitting",
-                "Pre-Register Allocation Live Interval Splitting",
-                false, false)
-
-char &llvm::PreAllocSplittingID = PreAllocSplitting::ID;
-
-/// findSpillPoint - Find a gap, as far from the given MI as possible, that is
-/// suitable for spilling the current live interval. The index must be before
-/// any defs and uses of the live interval register in the mbb. Return begin()
-/// if none is found.
-MachineBasicBlock::iterator
-PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
-                                  MachineInstr *DefMI,
-                                  SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
-  MachineBasicBlock::iterator Pt = MBB->begin();
-
-  MachineBasicBlock::iterator MII = MI;
-  MachineBasicBlock::iterator EndPt = DefMI
-    ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
-    
-  while (MII != EndPt && !RefsInMBB.count(MII) &&
-         MII->getOpcode() != TRI->getCallFrameSetupOpcode())
-    --MII;
-  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
-    
-  while (MII != EndPt && !RefsInMBB.count(MII)) {
-    // We can't insert the spill between the barrier (a call), and its
-    // corresponding call frame setup.
-    if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
-      while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
-        --MII;
-        if (MII == EndPt) {
-          return Pt;
-        }
-      }
-      continue;
-    } else {
-      Pt = MII;
-    }
-    
-    if (RefsInMBB.count(MII))
-      return Pt;
-    
-    
-    --MII;
-  }
-
-  return Pt;
-}
-
-/// findRestorePoint - Find a gap in the instruction index map that's suitable
-/// for restoring the current live interval value. The index must be before any
-/// uses of the live interval register in the mbb. Return end() if none is
-/// found.
-MachineBasicBlock::iterator
-PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
-                                    SlotIndex LastIdx,
-                                    SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
-  // FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
-  // begin index accordingly.
-  MachineBasicBlock::iterator Pt = MBB->end();
-  MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();
-
-  // We start at the call, so walk forward until we find the call frame teardown
-  // since we can't insert restores before that.  Bail if we encounter a use
-  // during this time.
-  MachineBasicBlock::iterator MII = MI;
-  if (MII == EndPt) return Pt;
-  
-  while (MII != EndPt && !RefsInMBB.count(MII) &&
-         MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
-    ++MII;
-  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
-  ++MII;
-  
-  // FIXME: Limit the number of instructions to examine to reduce
-  // compile time?
-  while (MII != EndPt) {
-    SlotIndex Index = LIs->getInstructionIndex(MII);
-    if (Index > LastIdx)
-      break;
-      
-    // We can't insert a restore between the barrier (a call) and its 
-    // corresponding call frame teardown.
-    if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
-      do {
-        if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
-        ++MII;
-      } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
-    } else {
-      Pt = MII;
-    }
-    
-    if (RefsInMBB.count(MII))
-      return Pt;
-    
-    ++MII;
-  }
-
-  return Pt;
-}
-
-/// CreateSpillStackSlot - Create a stack slot for the live interval being
-/// split. If the live interval was previously split, just reuse the same
-/// slot.
-int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
-                                            const TargetRegisterClass *RC) {
-  int SS;
-  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
-  if (I != IntervalSSMap.end()) {
-    SS = I->second;
-  } else {
-    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
-    IntervalSSMap[Reg] = SS;
-  }
-
-  // Create live interval for stack slot.
-  CurrSLI = &LSs->getOrCreateInterval(SS, RC);
-  if (CurrSLI->hasAtLeastOneValue())
-    CurrSValNo = CurrSLI->getValNumInfo(0);
-  else
-    CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
-                                       LSs->getVNInfoAllocator());
-  return SS;
-}
-
-/// IsAvailableInStack - Return true if register is available in a split stack
-/// slot at the specified index.
-bool
-PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
-                                    unsigned Reg, SlotIndex DefIndex,
-                                    SlotIndex RestoreIndex,
-                                    SlotIndex &SpillIndex,
-                                    int& SS) const {
-  if (!DefMBB)
-    return false;
-
-  DenseMap<unsigned, int>::const_iterator I = IntervalSSMap.find(Reg);
-  if (I == IntervalSSMap.end())
-    return false;
-  DenseMap<SlotIndex, SlotIndex>::const_iterator
-    II = Def2SpillMap.find(DefIndex);
-  if (II == Def2SpillMap.end())
-    return false;
-
-  // If last spill of def is in the same mbb as barrier mbb (where restore will
-  // be), make sure it's not below the intended restore index.
-  // FIXME: Undo the previous spill?
-  assert(LIs->getMBBFromIndex(II->second) == DefMBB);
-  if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
-    return false;
-
-  SS = I->second;
-  SpillIndex = II->second;
-  return true;
-}
-
-/// UpdateSpillSlotInterval - Given the specified val# of the register live
-/// interval being split, and the spill and restore indices, update the live
-/// interval of the spill stack slot.
-void
-PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
-                                           SlotIndex RestoreIndex) {
-  assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
-         "Expect restore in the barrier mbb");
-
-  MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
-  if (MBB == BarrierMBB) {
-    // Intra-block spill + restore. We are done.
-    LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
-    CurrSLI->addRange(SLR);
-    return;
-  }
-
-  SmallPtrSet<MachineBasicBlock*, 4> Processed;
-  SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
-  LiveRange SLR(SpillIndex, EndIdx, CurrSValNo);
-  CurrSLI->addRange(SLR);
-  Processed.insert(MBB);
-
-  // Start from the spill mbb, figure out the extent of the spill slot's
-  // live interval.
-  SmallVector<MachineBasicBlock*, 4> WorkList;
-  const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
-  if (LR->end > EndIdx)
-    // If live range extends beyond end of mbb, add successors to work list.
-    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
-           SE = MBB->succ_end(); SI != SE; ++SI)
-      WorkList.push_back(*SI);
-
-  while (!WorkList.empty()) {
-    MachineBasicBlock *MBB = WorkList.back();
-    WorkList.pop_back();
-    if (Processed.count(MBB))
-      continue;
-    SlotIndex Idx = LIs->getMBBStartIdx(MBB);
-    LR = CurrLI->getLiveRangeContaining(Idx);
-    if (LR && LR->valno == ValNo) {
-      EndIdx = LIs->getMBBEndIdx(MBB);
-      if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
-        // Spill slot live interval stops at the restore.
-        LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
-        CurrSLI->addRange(SLR);
-      } else if (LR->end > EndIdx) {
-        // Live range extends beyond end of mbb, process successors.
-        LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
-        CurrSLI->addRange(SLR);
-        for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
-               SE = MBB->succ_end(); SI != SE; ++SI)
-          WorkList.push_back(*SI);
-      } else {
-        LiveRange SLR(Idx, LR->end, CurrSValNo);
-        CurrSLI->addRange(SLR);
-      }
-      Processed.insert(MBB);
-    }
-  }
-}
-
-/// PerformPHIConstruction - From properly set up use and def lists, use a PHI
-/// construction algorithm to compute the ranges and valnos for an interval.
-VNInfo*
-PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
-                                       MachineBasicBlock* MBB, LiveInterval* LI,
-                                       SmallPtrSet<MachineInstr*, 4>& Visited,
-             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
-             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
-                                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
-                                 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
-                                 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
-                                           bool IsTopLevel, bool IsIntraBlock) {
-  // Return memoized result if it's available.
-  if (IsTopLevel && Visited.count(UseI) && NewVNs.count(UseI))
-    return NewVNs[UseI];
-  else if (!IsTopLevel && IsIntraBlock && NewVNs.count(UseI))
-    return NewVNs[UseI];
-  else if (!IsIntraBlock && LiveOut.count(MBB))
-    return LiveOut[MBB];
-  
-  // Check if our block contains any uses or defs.
-  bool ContainsDefs = Defs.count(MBB);
-  bool ContainsUses = Uses.count(MBB);
-  
-  VNInfo* RetVNI = 0;
-  
-  // Enumerate the cases of use/def containing blocks.
-  if (!ContainsDefs && !ContainsUses) {
-    return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
-                                          NewVNs, LiveOut, Phis,
-                                          IsTopLevel, IsIntraBlock);
-  } else if (ContainsDefs && !ContainsUses) {
-    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
-
-    // Search for the def in this block.  If we don't find it before the
-    // instruction we care about, go to the fallback case.  Note that this
-    // should never happen: this cannot be intrablock, so use should
-    // always be an end() iterator.
-    assert(UseI == MBB->end() && "No use marked in intrablock");
-    
-    MachineBasicBlock::iterator Walker = UseI;
-    --Walker;
-    while (Walker != MBB->begin()) {
-      if (BlockDefs.count(Walker))
-        break;
-      --Walker;
-    }
-    
-    // Once we've found it, extend its VNInfo to our instruction.
-    SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
-    DefIndex = DefIndex.getDefIndex();
-    SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);
-    
-    RetVNI = NewVNs[Walker];
-    LI->addRange(LiveRange(DefIndex, EndIndex, RetVNI));
-  } else if (!ContainsDefs && ContainsUses) {
-    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
-    
-    // Search for the use in this block that precedes the instruction we care 
-    // about, going to the fallback case if we don't find it.    
-    MachineBasicBlock::iterator Walker = UseI;
-    bool found = false;
-    while (Walker != MBB->begin()) {
-      --Walker;
-      if (BlockUses.count(Walker)) {
-        found = true;
-        break;
-      }
-    }
-
-    if (!found)
-      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
-                                            Uses, NewVNs, LiveOut, Phis,
-                                            IsTopLevel, IsIntraBlock);
-
-    SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
-    UseIndex = UseIndex.getUseIndex();
-    SlotIndex EndIndex;
-    if (IsIntraBlock) {
-      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
-    } else
-      EndIndex = LIs->getMBBEndIdx(MBB);
-
-    // Now, recursively phi construct the VNInfo for the use we found,
-    // and then extend it to include the instruction we care about
-    RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
-                                    NewVNs, LiveOut, Phis, false, true);
-    
-    LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));
-    
-    // FIXME: Need to set kills properly for inter-block stuff.
-  } else if (ContainsDefs && ContainsUses) {
-    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
-    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
-    
-    // This case is basically a merging of the two preceding cases, with the
-    // special note that checking for defs must take precedence over checking
-    // for uses, because of two-address instructions.
-    MachineBasicBlock::iterator Walker = UseI;
-    bool foundDef = false;
-    bool foundUse = false;
-    while (Walker != MBB->begin()) {
-      --Walker;
-      if (BlockDefs.count(Walker)) {
-        foundDef = true;
-        break;
-      } else if (BlockUses.count(Walker)) {
-        foundUse = true;
-        break;
-      }
-    }
-
-    if (!foundDef && !foundUse)
-      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
-                                            Uses, NewVNs, LiveOut, Phis,
-                                            IsTopLevel, IsIntraBlock);
-
-    SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
-    StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
-    SlotIndex EndIndex;
-    if (IsIntraBlock) {
-      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
-    } else
-      EndIndex = LIs->getMBBEndIdx(MBB);
-
-    if (foundDef)
-      RetVNI = NewVNs[Walker];
-    else
-      RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
-                                      NewVNs, LiveOut, Phis, false, true);
-
-    LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
-  }
-  
-  // Memoize results so we don't have to recompute them.
-  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
-  else {
-    if (!NewVNs.count(UseI))
-      NewVNs[UseI] = RetVNI;
-    Visited.insert(UseI);
-  }
-
-  return RetVNI;
-}
-
-/// PerformPHIConstructionFallBack - PerformPHIConstruction fall back path.
-///
-VNInfo*
-PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator UseI,
-                                       MachineBasicBlock* MBB, LiveInterval* LI,
-                                       SmallPtrSet<MachineInstr*, 4>& Visited,
-             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
-             DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
-                                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
-                                 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
-                                 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
-                                           bool IsTopLevel, bool IsIntraBlock) {
-  // NOTE: Because this is the fallback case from other cases, we do NOT
-  // assume that we are not intrablock here.
-  if (Phis.count(MBB)) return Phis[MBB]; 
-
-  SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
-  VNInfo *RetVNI = Phis[MBB] =
-    LI->getNextValue(SlotIndex(), /*FIXME*/ 0,
-                     LIs->getVNInfoAllocator());
-
-  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
-    
-  // If there are no uses or defs between our starting point and the
-  // beginning of the block, then recursively perform phi construction
-  // on our predecessors.
-  DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
-  for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
-         PE = MBB->pred_end(); PI != PE; ++PI) {
-    VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI, 
-                                              Visited, Defs, Uses, NewVNs,
-                                              LiveOut, Phis, false, false);
-    if (Incoming != 0)
-      IncomingVNs[*PI] = Incoming;
-  }
-    
-  if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
-    VNInfo* OldVN = RetVNI;
-    VNInfo* NewVN = IncomingVNs.begin()->second;
-    VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
-    if (MergedVN == OldVN) std::swap(OldVN, NewVN);
-    
-    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator LOI = LiveOut.begin(),
-         LOE = LiveOut.end(); LOI != LOE; ++LOI)
-      if (LOI->second == OldVN)
-        LOI->second = MergedVN;
-    for (DenseMap<MachineInstr*, VNInfo*>::iterator NVI = NewVNs.begin(),
-         NVE = NewVNs.end(); NVI != NVE; ++NVI)
-      if (NVI->second == OldVN)
-        NVI->second = MergedVN;
-    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator PI = Phis.begin(),
-         PE = Phis.end(); PI != PE; ++PI)
-      if (PI->second == OldVN)
-        PI->second = MergedVN;
-    RetVNI = MergedVN;
-  } else {
-    // Otherwise, merge the incoming VNInfos with a phi join.  Create a new
-    // VNInfo to represent the joined value.
-    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
-           IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
-      I->second->setHasPHIKill(true);
-    }
-  }
-      
-  SlotIndex EndIndex;
-  if (IsIntraBlock) {
-    EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
-  } else
-    EndIndex = LIs->getMBBEndIdx(MBB);
-  LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
-
-  // Memoize results so we don't have to recompute them.
-  if (!IsIntraBlock)
-    LiveOut[MBB] = RetVNI;
-  else {
-    if (!NewVNs.count(UseI))
-      NewVNs[UseI] = RetVNI;
-    Visited.insert(UseI);
-  }
-
-  return RetVNI;
-}
-
-/// ReconstructLiveInterval - Recompute a live interval from scratch.
-void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
-  VNInfo::Allocator& Alloc = LIs->getVNInfoAllocator();
-  
-  // Clear the old ranges and valnos;
-  LI->clear();
-  
-  // Cache the uses and defs of the register
-  typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
-  RegMap Defs, Uses;
-  
-  // Keep track of the new VNs we're creating.
-  DenseMap<MachineInstr*, VNInfo*> NewVNs;
-  SmallPtrSet<VNInfo*, 2> PhiVNs;
-  
-  // Cache defs, and create a new VNInfo for each def.
-  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
-       DE = MRI->def_end(); DI != DE; ++DI) {
-    Defs[(*DI).getParent()].insert(&*DI);
-    
-    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
-    DefIdx = DefIdx.getDefIndex();
-    
-    assert(!DI->isPHI() && "PHI instr in code during pre-alloc splitting.");
-    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);
-    
-    // If the def is a move, set the copy field.
-    if (DI->isCopyLike() && DI->getOperand(0).getReg() == LI->reg)
-      NewVN->setCopy(&*DI);
-
-    NewVNs[&*DI] = NewVN;
-  }
-  
-  // Cache uses as a separate pass from actually processing them.
-  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
-       UE = MRI->use_end(); UI != UE; ++UI)
-    Uses[(*UI).getParent()].insert(&*UI);
-    
-  // Now, actually process every use and use a phi construction algorithm
-  // to walk from it to its reaching definitions, building VNInfos along
-  // the way.
-  DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
-  DenseMap<MachineBasicBlock*, VNInfo*> Phis;
-  SmallPtrSet<MachineInstr*, 4> Visited;
-  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
-       UE = MRI->use_end(); UI != UE; ++UI) {
-    PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
-                           Uses, NewVNs, LiveOut, Phis, true, true); 
-  }
-  
-  // Add ranges for dead defs
-  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
-       DE = MRI->def_end(); DI != DE; ++DI) {
-    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
-    DefIdx = DefIdx.getDefIndex();
-    
-    if (LI->liveAt(DefIdx)) continue;
-    
-    VNInfo* DeadVN = NewVNs[&*DI];
-    LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
-  }
-}
-
-/// RenumberValno - Split the given valno out into a new vreg, allowing it to
-/// be allocated to a different register.  This function creates a new vreg,
-/// copies the valno and its live ranges over to the new vreg's interval,
-/// removes them from the old interval, and rewrites all uses and defs of
-/// the original reg to the new vreg within those ranges.
-void PreAllocSplitting::RenumberValno(VNInfo* VN) {
-  SmallVector<VNInfo*, 4> Stack;
-  SmallVector<VNInfo*, 4> VNsToCopy;
-  Stack.push_back(VN);
-
-  // Walk through and copy the valno we care about, and any other valnos
-  // that are two-address redefinitions of the one we care about.  These
-  // will need to be rewritten as well.  We also check for safety of the 
-  // renumbering here, by making sure that none of the valnos involved has
-  // phi kills.
-  while (!Stack.empty()) {
-    VNInfo* OldVN = Stack.back();
-    Stack.pop_back();
-    
-    // Bail out if we ever encounter a valno that has a PHI kill.  We can't
-    // renumber these.
-    if (OldVN->hasPHIKill()) return;
-    
-    VNsToCopy.push_back(OldVN);
-    
-    // Locate two-address redefinitions
-    for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(CurrLI->reg),
-         DE = MRI->def_end(); DI != DE; ++DI) {
-      if (!DI->isRegTiedToUseOperand(DI.getOperandNo())) continue;
-      SlotIndex DefIdx = LIs->getInstructionIndex(&*DI).getDefIndex();
-      VNInfo* NextVN = CurrLI->findDefinedVNInfoForRegInt(DefIdx);
-      if (std::find(VNsToCopy.begin(), VNsToCopy.end(), NextVN) !=
-          VNsToCopy.end())
-        Stack.push_back(NextVN);
-    }
-  }
-  
-  // Create the new vreg
-  unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));
-  
-  // Create the new live interval
-  LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);
-  
-  for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE = 
-       VNsToCopy.end(); OI != OE; ++OI) {
-    VNInfo* OldVN = *OI;
-    
-    // Copy the valno over
-    VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
-    NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);
-
-    // Remove the valno from the old interval
-    CurrLI->removeValNo(OldVN);
-  }
-  
-  // Rewrite defs and uses.  This is done in two stages to avoid invalidating
-  // the reg_iterator.
-  SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;
-  
-  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
-         E = MRI->reg_end(); I != E; ++I) {
-    MachineOperand& MO = I.getOperand();
-    SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);
-    
-    if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
-        (MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
-      OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
-  }
-  
-  for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
-       OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
-    MachineInstr* Inst = I->first;
-    unsigned OpIdx = I->second;
-    MachineOperand& MO = Inst->getOperand(OpIdx);
-    MO.setReg(NewVReg);
-  }
-  
-  // Grow the VirtRegMap, since we've created a new vreg.
-  VRM->grow();
-  
-  // The renumbered vreg shares a stack slot with the old register.
-  if (IntervalSSMap.count(CurrLI->reg))
-    IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
-  
-  ++NumRenumbers;
-}
-
-bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
-                                      MachineInstr* DefMI,
-                                      MachineBasicBlock::iterator RestorePt,
-                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
-  MachineBasicBlock& MBB = *RestorePt->getParent();
-  
-  MachineBasicBlock::iterator KillPt = BarrierMBB->end();
-  if (!DefMI || DefMI->getParent() == BarrierMBB)
-    KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
-  else
-    KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
-  
-  if (KillPt == DefMI->getParent()->end())
-    return false;
-  
-  TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, *TRI);
-  SlotIndex RematIdx = LIs->InsertMachineInstrInMaps(prior(RestorePt));
-  
-  ReconstructLiveInterval(CurrLI);
-  RematIdx = RematIdx.getDefIndex();
-  RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));
-  
-  ++NumSplits;
-  ++NumRemats;
-  return true;  
-}
-
-MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg, 
-                                           const TargetRegisterClass* RC,
-                                           MachineInstr* DefMI,
-                                           MachineInstr* Barrier,
-                                           MachineBasicBlock* MBB,
-                                           int& SS,
-                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
-  // Go top down if RefsInMBB is empty.
-  if (RefsInMBB.empty())
-    return 0;
-  
-  MachineBasicBlock::iterator FoldPt = Barrier;
-  while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
-         !RefsInMBB.count(FoldPt))
-    --FoldPt;
-  
-  int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg);
-  if (OpIdx == -1)
-    return 0;
-  
-  SmallVector<unsigned, 1> Ops;
-  Ops.push_back(OpIdx);
-  
-  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
-    return 0;
-  
-  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
-  if (I != IntervalSSMap.end()) {
-    SS = I->second;
-  } else {
-    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
-  }
-  
-  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
-  
-  if (FMI) {
-    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
-    FoldPt->eraseFromParent();
-    ++NumFolds;
-    
-    IntervalSSMap[vreg] = SS;
-    CurrSLI = &LSs->getOrCreateInterval(SS, RC);
-    if (CurrSLI->hasAtLeastOneValue())
-      CurrSValNo = CurrSLI->getValNumInfo(0);
-    else
-      CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0,
-                                         LSs->getVNInfoAllocator());
-  }
-  
-  return FMI;
-}
-
-MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg, 
-                                             const TargetRegisterClass* RC,
-                                             MachineInstr* Barrier,
-                                             MachineBasicBlock* MBB,
-                                             int SS,
-                                     SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
-  if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
-    return 0;
-                                       
-  // Go top down if RefsInMBB is empty.
-  if (RefsInMBB.empty())
-    return 0;
-  
-  // Can't fold a restore between a call stack setup and teardown.
-  MachineBasicBlock::iterator FoldPt = Barrier;
-  
-  // Advance from barrier to call frame teardown.
-  while (FoldPt != MBB->getFirstTerminator() &&
-         FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
-    if (RefsInMBB.count(FoldPt))
-      return 0;
-    
-    ++FoldPt;
-  }
-  
-  if (FoldPt == MBB->getFirstTerminator())
-    return 0;
-  else
-    ++FoldPt;
-  
-  // Now find the restore point.
-  while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
-    if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
-      while (FoldPt != MBB->getFirstTerminator() &&
-             FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
-        if (RefsInMBB.count(FoldPt))
-          return 0;
-        
-        ++FoldPt;
-      }
-      
-      if (FoldPt == MBB->getFirstTerminator())
-        return 0;
-    } 
-    
-    ++FoldPt;
-  }
-  
-  if (FoldPt == MBB->getFirstTerminator())
-    return 0;
-  
-  int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
-  if (OpIdx == -1)
-    return 0;
-  
-  SmallVector<unsigned, 1> Ops;
-  Ops.push_back(OpIdx);
-  
-  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
-    return 0;
-  
-  MachineInstr* FMI = TII->foldMemoryOperand(FoldPt, Ops, SS);
-  
-  if (FMI) {
-    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
-    FoldPt->eraseFromParent();
-    ++NumRestoreFolds;
-  }
-  
-  return FMI;
-}
-
-/// SplitRegLiveInterval - Split (spill and restore) the given live interval
-/// so it would not cross the barrier that's being processed. Shrink wrap
-/// (minimize) the live interval to the last uses.
-bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
-  DEBUG(dbgs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
-               << "  result: ");
-
-  CurrLI = LI;
-
-  // Find live range where current interval crosses the barrier.
-  LiveInterval::iterator LR =
-    CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
-  VNInfo *ValNo = LR->valno;
-
-  assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
-
-  MachineInstr *DefMI = LIs->getInstructionFromIndex(ValNo->def);
-
-  // If this would create a new join point, do not split.
-  if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
-    DEBUG(dbgs() << "FAILED (would create a new join point).\n");
-    return false;
-  }
-
-  // Find all references in the barrier mbb.
-  SmallPtrSet<MachineInstr*, 4> RefsInMBB;
-  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
-         E = MRI->reg_end(); I != E; ++I) {
-    MachineInstr *RefMI = &*I;
-    if (RefMI->getParent() == BarrierMBB)
-      RefsInMBB.insert(RefMI);
-  }
-
-  // Find a point to restore the value after the barrier.
-  MachineBasicBlock::iterator RestorePt =
-    findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB);
-  if (RestorePt == BarrierMBB->end()) {
-    DEBUG(dbgs() << "FAILED (could not find a suitable restore point).\n");
-    return false;
-  }
-
-  if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
-    if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt, RefsInMBB)) {
-      DEBUG(dbgs() << "success (remat).\n");
-      return true;
-    }
-
-  // Add a spill either before the barrier or after the definition.
-  MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
-  const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
-  SlotIndex SpillIndex;
-  MachineInstr *SpillMI = NULL;
-  int SS = -1;
-  if (!DefMI) {
-    // If we don't know where the def is we must split just before the barrier.
-    if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
-                            BarrierMBB, SS, RefsInMBB))) {
-      SpillIndex = LIs->getInstructionIndex(SpillMI);
-    } else {
-      MachineBasicBlock::iterator SpillPt = 
-        findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
-      if (SpillPt == BarrierMBB->begin()) {
-        DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
-        return false; // No gap to insert spill.
-      }
-      // Add spill.
-    
-      SS = CreateSpillStackSlot(CurrLI->reg, RC);
-      TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC,
-                               TRI);
-      SpillMI = prior(SpillPt);
-      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
-    }
-  } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
-                                 LIs->getZeroIndex(), SpillIndex, SS)) {
-    // If it's already split, just restore the value. There is no need to spill
-    // the def again.
-    if (!DefMI) {
-      DEBUG(dbgs() << "FAILED (def is dead).\n");
-      return false; // Def is dead. Do nothing.
-    }
-    
-    if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
-                             BarrierMBB, SS, RefsInMBB))) {
-      SpillIndex = LIs->getInstructionIndex(SpillMI);
-    } else {
-      // Check if it's possible to insert a spill after the def MI.
-      MachineBasicBlock::iterator SpillPt;
-      if (DefMBB == BarrierMBB) {
-        // Add spill after the def and the last use before the barrier.
-        SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
-                                 RefsInMBB);
-        if (SpillPt == DefMBB->begin()) {
-          DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
-          return false; // No gap to insert spill.
-        }
-      } else {
-        SpillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
-        if (SpillPt == DefMBB->end()) {
-          DEBUG(dbgs() << "FAILED (could not find a suitable spill point).\n");
-          return false; // No gap to insert spill.
-        }
-      }
-      // Add spill. 
-      SS = CreateSpillStackSlot(CurrLI->reg, RC);
-      TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC,
-                               TRI);
-      SpillMI = prior(SpillPt);
-      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
-    }
-  }
-
-  // Remember def instruction index to spill index mapping.
-  if (DefMI && SpillMI)
-    Def2SpillMap[ValNo->def] = SpillIndex;
-
-  // Add restore.
-  bool FoldedRestore = false;
-  SlotIndex RestoreIndex;
-  if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
-                                      BarrierMBB, SS, RefsInMBB)) {
-    RestorePt = LMI;
-    RestoreIndex = LIs->getInstructionIndex(RestorePt);
-    FoldedRestore = true;
-  } else {
-    TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC, TRI);
-    MachineInstr *LoadMI = prior(RestorePt);
-    RestoreIndex = LIs->InsertMachineInstrInMaps(LoadMI);
-  }
-
-  // Update spill stack slot live interval.
-  UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
-                          RestoreIndex.getDefIndex());
-
-  ReconstructLiveInterval(CurrLI);
-
-  if (!FoldedRestore) {
-    SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
-    RestoreIdx = RestoreIdx.getDefIndex();
-    RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
-  }
-  
-  ++NumSplits;
-  DEBUG(dbgs() << "success.\n");
-  return true;
-}
-
-/// SplitRegLiveIntervals - Split all register live intervals that cross the
-/// barrier that's being processed.
-bool
-PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
-                                         SmallPtrSet<LiveInterval*, 8>& Split) {
-  // First find all the virtual registers whose live intervals are intercepted
-  // by the current barrier.
-  SmallVector<LiveInterval*, 8> Intervals;
-  for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
-    // FIXME: If it's not safe to move any instruction that defines the barrier
-    // register class, then it means there are some special dependencies which
-    // codegen is not modelling. Ignore these barriers for now.
-    if (!TII->isSafeToMoveRegClassDefs(*RC))
-      continue;
-    const std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
-    for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
-      unsigned Reg = VRs[i];
-      if (!LIs->hasInterval(Reg))
-        continue;
-      LiveInterval *LI = &LIs->getInterval(Reg);
-      if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
-        // Virtual register live interval is intercepted by the barrier. We
-        // should split and shrink wrap its interval if possible.
-        Intervals.push_back(LI);
-    }
-  }
-
-  // Process the affected live intervals.
-  bool Change = false;
-  while (!Intervals.empty()) {
-    if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
-      break;
-    LiveInterval *LI = Intervals.back();
-    Intervals.pop_back();
-    bool result = SplitRegLiveInterval(LI);
-    if (result) Split.insert(LI);
-    Change |= result;
-  }
-
-  return Change;
-}
-
-unsigned PreAllocSplitting::getNumberOfNonSpills(
-                                  SmallPtrSet<MachineInstr*, 4>& MIs,
-                                  unsigned Reg, int FrameIndex,
-                                  bool& FeedsTwoAddr) {
-  unsigned NonSpills = 0;
-  for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
-       UI != UE; ++UI) {
-    int StoreFrameIndex;
-    unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
-    if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
-      ++NonSpills;
-    
-    int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
-    if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
-      FeedsTwoAddr = true;
-  }
-  
-  return NonSpills;
-}
-
-/// removeDeadSpills - After doing splitting, filter through all intervals we've
-/// split, and see if any of the spills are unnecessary.  If so, remove them.
-bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
-  bool changed = false;
-  
-  // Walk over all of the live intervals that were touched by the splitter,
-  // and see if we can do any DCE and/or folding.
-  for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
-       LE = split.end(); LI != LE; ++LI) {
-    DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;
-    
-    // First, collect all the uses of the vreg, and sort them by their
-    // reaching definition (VNInfo).
-    for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
-         UE = MRI->use_end(); UI != UE; ++UI) {
-      SlotIndex index = LIs->getInstructionIndex(&*UI);
-      index = index.getUseIndex();
-      
-      const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
-      VNUseCount[LR->valno].insert(&*UI);
-    }
-    
-    // Now, take the definitions (VNInfo's) one at a time and try to DCE 
-    // and/or fold them away.
-    for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
-         VE = (*LI)->vni_end(); VI != VE; ++VI) {
-      
-      if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit) 
-        return changed;
-      
-      VNInfo* CurrVN = *VI;
-      
-      // We don't currently try to handle definitions with PHI kills, because
-      // it would involve processing more than one VNInfo at once.
-      if (CurrVN->hasPHIKill()) continue;
-      
-      // We also don't try to handle the results of PHI joins, since there's
-      // no defining instruction to analyze.
-      MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
-      if (!DefMI || CurrVN->isUnused()) continue;
-    
-      // We're only interested in eliminating cruft introduced by the splitter,
-      // which is of the form load-use or load-use-store.  First, check that the
-      // definition is a load, and remember what stack slot we loaded it from.
-      int FrameIndex;
-      if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
-      
-      // If the definition has no uses at all, just DCE it.
-      if (VNUseCount[CurrVN].size() == 0) {
-        LIs->RemoveMachineInstrFromMaps(DefMI);
-        (*LI)->removeValNo(CurrVN);
-        DefMI->eraseFromParent();
-        VNUseCount.erase(CurrVN);
-        ++NumDeadSpills;
-        changed = true;
-        continue;
-      }
-      
-      // Second, get the number of non-store uses of the definition, as well as
-      // a flag indicating whether it feeds into a later two-address definition.
-      bool FeedsTwoAddr = false;
-      unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
-                                                    (*LI)->reg, FrameIndex,
-                                                    FeedsTwoAddr);
-      
-      // If there's one non-store use and it doesn't feed a two-addr, then
-      // this is a load-use-store case that we can try to fold.
-      if (NonSpillCount == 1 && !FeedsTwoAddr) {
-        // Start by finding the non-store use MachineInstr.
-        SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
-        int StoreFrameIndex;
-        unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
-        while (UI != VNUseCount[CurrVN].end() &&
-               (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
-          ++UI;
-          if (UI != VNUseCount[CurrVN].end())
-            StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
-        }
-        if (UI == VNUseCount[CurrVN].end()) continue;
-        
-        MachineInstr* use = *UI;
-        
-        // Attempt to fold it away!
-        int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
-        if (OpIdx == -1) continue;
-        SmallVector<unsigned, 1> Ops;
-        Ops.push_back(OpIdx);
-        if (!TII->canFoldMemoryOperand(use, Ops)) continue;
-
-        MachineInstr* NewMI = TII->foldMemoryOperand(use, Ops, FrameIndex);
-
-        if (!NewMI) continue;
-
-        // Update relevant analyses.
-        LIs->RemoveMachineInstrFromMaps(DefMI);
-        LIs->ReplaceMachineInstrInMaps(use, NewMI);
-        (*LI)->removeValNo(CurrVN);
-
-        DefMI->eraseFromParent();
-        use->eraseFromParent();
-        VNUseCount[CurrVN].erase(use);
-
-        // Remove deleted instructions.  Note that we need to remove them from 
-        // the VNInfo->use map as well, just to be safe.
-        for (SmallPtrSet<MachineInstr*, 4>::iterator II = 
-             VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
-             II != IE; ++II) {
-          for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
-               VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE; 
-               ++VNI)
-            if (VNI->first != CurrVN)
-              VNI->second.erase(*II);
-          LIs->RemoveMachineInstrFromMaps(*II);
-          (*II)->eraseFromParent();
-        }
-        
-        VNUseCount.erase(CurrVN);
-
-        for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
-             VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
-          if (VI->second.erase(use))
-            VI->second.insert(NewMI);
-
-        ++NumDeadSpills;
-        changed = true;
-        continue;
-      }
-      
-      // If there's more than one non-store instruction, we can't profitably
-      // fold it, so bail.
-      if (NonSpillCount) continue;
-        
-      // Otherwise, this is a load-store case, so DCE them.
-      for (SmallPtrSet<MachineInstr*, 4>::iterator UI = 
-           VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
-           UI != UE; ++UI) {
-        LIs->RemoveMachineInstrFromMaps(*UI);
-        (*UI)->eraseFromParent();
-      }
-        
-      VNUseCount.erase(CurrVN);
-        
-      LIs->RemoveMachineInstrFromMaps(DefMI);
-      (*LI)->removeValNo(CurrVN);
-      DefMI->eraseFromParent();
-      ++NumDeadSpills;
-      changed = true;
-    }
-  }
-  
-  return changed;
-}
-
-bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
-                                       MachineBasicBlock* DefMBB,
-                                       MachineBasicBlock* BarrierMBB) {
-  if (DefMBB == BarrierMBB)
-    return false;
-  
-  if (LR->valno->hasPHIKill())
-    return false;
-  
-  SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
-  if (LR->end < MBBEnd)
-    return false;
-  
-  MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
-  if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
-    return true;
-  
-  MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
-  SmallPtrSet<MachineBasicBlock*, 4> Visited;
-  typedef std::pair<MachineBasicBlock*,
-                    MachineBasicBlock::succ_iterator> ItPair;
-  SmallVector<ItPair, 4> Stack;
-  Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));
-  
-  while (!Stack.empty()) {
-    ItPair P = Stack.back();
-    Stack.pop_back();
-    
-    MachineBasicBlock* PredMBB = P.first;
-    MachineBasicBlock::succ_iterator S = P.second;
-    
-    if (S == PredMBB->succ_end())
-      continue;
-    else if (Visited.count(*S)) {
-      Stack.push_back(std::make_pair(PredMBB, ++S));
-      continue;
-    } else
-      Stack.push_back(std::make_pair(PredMBB, S+1));
-    
-    MachineBasicBlock* MBB = *S;
-    Visited.insert(MBB);
-    
-    if (MBB == BarrierMBB)
-      return true;
-    
-    MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
-    MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
-    MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
-    while (MDTN) {
-      if (MDTN == DefMDTN)
-        return true;
-      else if (MDTN == BarrierMDTN)
-        break;
-      MDTN = MDTN->getIDom();
-    }
-    
-    MBBEnd = LIs->getMBBEndIdx(MBB);
-    if (LR->end > MBBEnd)
-      Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
-  }
-  
-  return false;
-} 
-  
-
-bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
-  CurrMF = &MF;
-  TM     = &MF.getTarget();
-  TRI    = TM->getRegisterInfo();
-  TII    = TM->getInstrInfo();
-  MFI    = MF.getFrameInfo();
-  MRI    = &MF.getRegInfo();
-  SIs    = &getAnalysis<SlotIndexes>();
-  LIs    = &getAnalysis<LiveIntervals>();
-  LSs    = &getAnalysis<LiveStacks>();
-  VRM    = &getAnalysis<VirtRegMap>();
-
-  bool MadeChange = false;
-
-  // Make sure blocks are numbered in order.
-  MF.RenumberBlocks();
-
-  MachineBasicBlock *Entry = MF.begin();
-  SmallPtrSet<MachineBasicBlock*,16> Visited;
-
-  SmallPtrSet<LiveInterval*, 8> Split;
-
-  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
-         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
-       DFI != E; ++DFI) {
-    BarrierMBB = *DFI;
-    for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
-           E = BarrierMBB->end(); I != E; ++I) {
-      Barrier = &*I;
-      const TargetRegisterClass **BarrierRCs =
-        Barrier->getDesc().getRegClassBarriers();
-      if (!BarrierRCs)
-        continue;
-      BarrierIdx = LIs->getInstructionIndex(Barrier);
-      MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
-    }
-  }
-
-  MadeChange |= removeDeadSpills(Split);
-
-  return MadeChange;
-}

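As a side note for readers of the removed code above: the createsNewJoin() helper walks CFG successors with an explicit stack of (block, successor-iterator) pairs instead of recursion. Below is a minimal standalone sketch of that worklist style of DFS. It uses only the standard library; the graph representation and all names are invented for illustration and are not LLVM types.

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Iterative DFS over a successor graph, in the spirit of the explicit
    // (block, successor-iterator) stack used by createsNewJoin() above.
    // Returns true if Target is reachable from Start.
    static bool reachable(const std::vector<std::vector<int> > &Succs,
                          int Start, int Target) {
      std::vector<bool> Visited(Succs.size(), false);
      Visited[Start] = true;
      // Each entry is a block plus the index of its next unvisited successor.
      std::vector<std::pair<int, unsigned> > Stack;
      Stack.push_back(std::make_pair(Start, 0u));

      while (!Stack.empty()) {
        std::pair<int, unsigned> P = Stack.back();
        Stack.pop_back();
        int Block = P.first;
        unsigned SuccIdx = P.second;

        if (SuccIdx == Succs[Block].size())
          continue;                      // All successors of Block handled.

        // Re-push the parent so its remaining successors are visited later.
        Stack.push_back(std::make_pair(Block, SuccIdx + 1));

        int Next = Succs[Block][SuccIdx];
        if (Visited[Next])
          continue;
        Visited[Next] = true;

        if (Next == Target)
          return true;
        Stack.push_back(std::make_pair(Next, 0u));
      }
      return false;
    }

    int main() {
      // A tiny CFG: 0 -> 1, 0 -> 2, 1 -> 2.
      std::vector<std::vector<int> > Succs(3);
      Succs[0].push_back(1);
      Succs[0].push_back(2);
      Succs[1].push_back(2);
      std::printf("reachable: %d\n", reachable(Succs, 0, 2)); // prints 1
      return 0;
    }
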
Modified: llvm/branches/type-system-rewrite/lib/CodeGen/PrologEpilogInserter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/PrologEpilogInserter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/PrologEpilogInserter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/PrologEpilogInserter.cpp Sat Jul  2 22:28:07 2011
@@ -145,6 +145,7 @@
 /// pseudo instructions.
 void PEI::calculateCallsInformation(MachineFunction &Fn) {
   const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
+  const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
   const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
   MachineFrameInfo *MFI = Fn.getFrameInfo();
 
@@ -152,8 +153,8 @@
   bool AdjustsStack = MFI->adjustsStack();
 
   // Get the function call frame set-up and tear-down instruction opcode
-  int FrameSetupOpcode   = RegInfo->getCallFrameSetupOpcode();
-  int FrameDestroyOpcode = RegInfo->getCallFrameDestroyOpcode();
+  int FrameSetupOpcode   = TII.getCallFrameSetupOpcode();
+  int FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
 
   // Early exit for targets which have no call frame setup/destroy pseudo
   // instructions.
@@ -705,12 +706,13 @@
 
   const TargetMachine &TM = Fn.getTarget();
   assert(TM.getRegisterInfo() && "TM::getRegisterInfo() must be implemented!");
+  const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
   const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
   const TargetFrameLowering *TFI = TM.getFrameLowering();
   bool StackGrowsDown =
     TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
-  int FrameSetupOpcode   = TRI.getCallFrameSetupOpcode();
-  int FrameDestroyOpcode = TRI.getCallFrameDestroyOpcode();
+  int FrameSetupOpcode   = TII.getCallFrameSetupOpcode();
+  int FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
 
   for (MachineFunction::iterator BB = Fn.begin(),
          E = Fn.end(); BB != E; ++BB) {

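The PrologEpilogInserter hunks above only change where the call frame pseudo opcodes come from (TargetInstrInfo instead of TargetRegisterInfo); what the pass does with them is unchanged. As a rough standalone illustration of that use, the sketch below scans a block for setup/destroy pseudos and tracks the largest bracketed argument area, in the spirit of what calculateCallsInformation() derives. The Opcode enum and Instr struct are invented stand-ins, not LLVM types.

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Invented stand-ins for a target's call frame pseudo opcodes.
    enum Opcode { CallFrameSetup, Call, CallFrameDestroy, Other };

    struct Instr {
      Opcode Op;
      unsigned Imm;   // For CallFrameSetup: bytes of outgoing argument space.
    };

    // Walk a block and record the largest argument area bracketed by the
    // setup/destroy pseudos.
    static unsigned maxCallFrameSize(const std::vector<Instr> &Block) {
      unsigned Max = 0;
      bool InCallSequence = false;
      for (size_t i = 0; i != Block.size(); ++i) {
        if (Block[i].Op == CallFrameSetup) {
          assert(!InCallSequence && "nested call frame sequence");
          InCallSequence = true;
          if (Block[i].Imm > Max)
            Max = Block[i].Imm;
        } else if (Block[i].Op == CallFrameDestroy) {
          assert(InCallSequence && "destroy without matching setup");
          InCallSequence = false;
        }
      }
      return Max;
    }

    int main() {
      Instr Prog[] = { {Other, 0},
                       {CallFrameSetup, 16}, {Call, 0}, {CallFrameDestroy, 0},
                       {CallFrameSetup, 32}, {Call, 0}, {CallFrameDestroy, 0} };
      std::vector<Instr> Block(Prog, Prog + sizeof(Prog) / sizeof(Prog[0]));
      std::printf("max call frame: %u\n", maxCallFrameSize(Block)); // 32
      return 0;
    }
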
Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocBasic.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocBasic.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocBasic.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocBasic.cpp Sat Jul  2 22:28:07 2011
@@ -20,6 +20,7 @@
 #include "RenderMachineFunction.h"
 #include "Spiller.h"
 #include "VirtRegMap.h"
+#include "RegisterCoalescer.h"
 #include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
@@ -34,7 +35,6 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -141,7 +141,7 @@
   initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
   initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
   initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
-  initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
   initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
   initializeLiveStacksPass(*PassRegistry::getPassRegistry());
   initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
@@ -324,19 +324,21 @@
 
     if (AvailablePhysReg == ~0u) {
       // selectOrSplit failed to find a register!
-      std::string msg;
-      raw_string_ostream Msg(msg);
-      Msg << "Ran out of registers during register allocation!"
-             "\nCannot allocate: " << *VirtReg;
+      const char *Msg = "ran out of registers during register allocation";
+      // Probably caused by an inline asm.
+      MachineInstr *MI;
       for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(VirtReg->reg);
-      MachineInstr *MI = I.skipInstruction();) {
-        if (!MI->isInlineAsm())
-          continue;
-        Msg << "\nPlease check your inline asm statement for "
-          "invalid constraints:\n";
-        MI->print(Msg, &VRM->getMachineFunction().getTarget());
-      }
-      report_fatal_error(Msg.str());
+           (MI = I.skipInstruction());)
+        if (MI->isInlineAsm())
+          break;
+      if (MI)
+        MI->emitError(Msg);
+      else
+        report_fatal_error(Msg);
+      // Keep going after reporting the error.
+      VRM->assignVirt2Phys(VirtReg->reg,
+                 RegClassInfo.getOrder(MRI->getRegClass(VirtReg->reg)).front());
+      continue;
     }
 
     if (AvailablePhysReg)

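The RegAllocBasic change above replaces a fatal error with a per-instruction diagnostic followed by a forced fallback assignment so allocation can continue. A toy model of that recover-and-continue pattern, with all names invented and no LLVM types, might look like this:

    #include <cstdio>
    #include <map>
    #include <vector>

    // Toy allocator state: virtual register number -> physical register number.
    typedef std::map<unsigned, unsigned> AssignmentMap;

    // Mirror the recover-and-continue idea: on failure, diagnose and fall back
    // to the first register of the (assumed non-empty) allocation order so the
    // rest of the function can still be processed.
    static void assignOrRecover(unsigned VReg, unsigned PhysReg, bool Failed,
                                const std::vector<unsigned> &AllocOrder,
                                AssignmentMap &VRM) {
      if (!Failed) {
        VRM[VReg] = PhysReg;
        return;
      }
      std::fprintf(stderr,
                   "error: ran out of registers during register allocation\n");
      VRM[VReg] = AllocOrder.front();
    }

    int main() {
      std::vector<unsigned> AllocOrder;
      AllocOrder.push_back(1);
      AllocOrder.push_back(2);

      AssignmentMap VRM;
      assignOrRecover(100, 2, /*Failed=*/false, AllocOrder, VRM);
      assignOrRecover(101, 0, /*Failed=*/true, AllocOrder, VRM);
      std::printf("vreg100 -> %u, vreg101 -> %u\n", VRM[100], VRM[101]);
      return 0;
    }
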
Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocFast.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocFast.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocFast.cpp Sat Jul  2 22:28:07 2011
@@ -86,7 +86,7 @@
     // that is currently available in a physical register.
     LiveRegMap LiveVirtRegs;
 
-    DenseMap<unsigned, MachineInstr *> LiveDbgValueMap;
+    DenseMap<unsigned, SmallVector<MachineInstr *, 4> > LiveDbgValueMap;
 
     // RegState - Track the state of a physical register.
     enum RegState {
@@ -118,7 +118,7 @@
     // SkippedInstrs - Descriptors of instructions whose clobber list was
     // ignored because all registers were spilled. It is still necessary to
     // mark all the clobbered registers as used by the function.
-    SmallPtrSet<const TargetInstrDesc*, 4> SkippedInstrs;
+    SmallPtrSet<const MCInstrDesc*, 4> SkippedInstrs;
 
     // isBulkSpilling - This flag is set when LiveRegMap will be cleared
     // completely after spilling all live registers. LiveRegMap entries should
@@ -272,7 +272,9 @@
     // If this register is used by DBG_VALUE then insert new DBG_VALUE to
     // identify spilled location as the place to find corresponding variable's
     // value.
-    if (MachineInstr *DBG = LiveDbgValueMap.lookup(LRI->first)) {
+    SmallVector<MachineInstr *, 4> &LRIDbgValues = LiveDbgValueMap[LRI->first];
+    for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) {
+      MachineInstr *DBG = LRIDbgValues[li];
       const MDNode *MDPtr =
         DBG->getOperand(DBG->getNumOperands()-1).getMetadata();
       int64_t Offset = 0;
@@ -291,9 +293,11 @@
         MachineBasicBlock *MBB = DBG->getParent();
         MBB->insert(MI, NewDV);
         DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV);
-        LiveDbgValueMap[LRI->first] = NewDV;
       }
     }
+    // Now that this register is spilled, there should not be any DBG_VALUE
+    // pointing to this register, as they all refer to the spilled value now.
+    LRIDbgValues.clear();
     if (SpillKill)
       LR.LastUse = 0; // Don't kill register again
   }
@@ -419,7 +423,7 @@
 // Returns spillImpossible when PhysReg or an alias can't be spilled.
 unsigned RAFast::calcSpillCost(unsigned PhysReg) const {
   if (UsedInInstr.test(PhysReg)) {
-    DEBUG(dbgs() << "PhysReg: " << PhysReg << " is already used in instr.\n");
+    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is already used in instr.\n");
     return spillImpossible;
   }
   switch (unsigned VirtReg = PhysRegState[PhysReg]) {
@@ -428,15 +432,15 @@
   case regFree:
     return 0;
   case regReserved:
-    DEBUG(dbgs() << "VirtReg: " << VirtReg << " corresponding to PhysReg: "
-          << PhysReg << " is reserved already.\n");
+    DEBUG(dbgs() << PrintReg(VirtReg, TRI) << " corresponding "
+                 << PrintReg(PhysReg, TRI) << " is reserved already.\n");
     return spillImpossible;
   default:
     return LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean;
   }
 
   // This is a disabled register, add up cost of aliases.
-  DEBUG(dbgs() << "\tRegister: " << PhysReg << " is disabled.\n");
+  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is disabled.\n");
   unsigned Cost = 0;
   for (const unsigned *AS = TRI->getAliasSet(PhysReg);
        unsigned Alias = *AS; ++AS) {
@@ -511,7 +515,7 @@
   unsigned BestReg = 0, BestCost = spillImpossible;
   for (ArrayRef<unsigned>::iterator I = AO.begin(), E = AO.end(); I != E; ++I) {
     unsigned Cost = calcSpillCost(*I);
-    DEBUG(dbgs() << "\tRegister: " << *I << "\n");
+    DEBUG(dbgs() << "\tRegister: " << PrintReg(*I, TRI) << "\n");
     DEBUG(dbgs() << "\tCost: " << Cost << "\n");
     DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n");
     // Cost is 0 when all aliases are already disabled.
@@ -526,16 +530,10 @@
     return assignVirtToPhysReg(LRE, BestReg);
   }
 
-  // Nothing we can do.
-  std::string msg;
-  raw_string_ostream Msg(msg);
-  Msg << "Ran out of registers during register allocation!";
-  if (MI->isInlineAsm()) {
-    Msg << "\nPlease check your inline asm statement for "
-        << "invalid constraints:\n";
-    MI->print(Msg, TM);
-  }
-  report_fatal_error(Msg.str());
+  // Nothing we can do. Report an error and keep going with a bad allocation.
+  MI->emitError("ran out of registers during register allocation");
+  definePhysReg(MI, *AO.begin(), regFree);
+  assignVirtToPhysReg(LRE, *AO.begin());
 }
 
 /// defineVirtReg - Allocate a register for VirtReg and mark it as dirty.
@@ -722,7 +720,8 @@
     if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
     unsigned Reg = MO.getReg();
     if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
-    DEBUG(dbgs() << "\tSetting reg " << Reg << " as used in instr\n");
+    DEBUG(dbgs() << "\tSetting " << PrintReg(Reg, TRI)
+                 << " as used in instr\n");
     UsedInInstr.set(Reg);
   }
 
@@ -772,7 +771,7 @@
   // Otherwise, sequentially allocate each instruction in the MBB.
   while (MII != MBB->end()) {
     MachineInstr *MI = MII++;
-    const TargetInstrDesc &TID = MI->getDesc();
+    const MCInstrDesc &MCID = MI->getDesc();
     DEBUG({
         dbgs() << "\n>> " << *MI << "Regs:";
         for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
@@ -816,7 +815,7 @@
           if (!MO.isReg()) continue;
           unsigned Reg = MO.getReg();
           if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue;
-          LiveDbgValueMap[Reg] = MI;
+          LiveDbgValueMap[Reg].push_back(MI);
           LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg);
           if (LRI != LiveVirtRegs.end())
             setPhysReg(MI, i, LRI->second.PhysReg);
@@ -885,7 +884,7 @@
         VirtOpEnd = i+1;
         if (MO.isUse()) {
           hasTiedOps = hasTiedOps ||
-                                TID.getOperandConstraint(i, TOI::TIED_TO) != -1;
+                              MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
         } else {
           if (MO.isEarlyClobber())
             hasEarlyClobbers = true;
@@ -915,7 +914,7 @@
     // We didn't detect inline asm tied operands above, so just make this extra
     // pass for all inline asm.
     if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
-        (hasTiedOps && (hasPhysDefs || TID.getNumDefs() > 1))) {
+        (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
       handleThroughOperands(MI, VirtDead);
       // Don't attempt coalescing when we have funny stuff going on.
       CopyDst = 0;
@@ -960,7 +959,7 @@
     }
 
     unsigned DefOpEnd = MI->getNumOperands();
-    if (TID.isCall()) {
+    if (MCID.isCall()) {
       // Spill all virtregs before a call. This serves two purposes: 1. If an
       // exception is thrown, the landing pad is going to expect to find
       // registers in their spill slots, and 2. we don't have to wade through
@@ -971,7 +970,7 @@
 
       // The imp-defs are skipped below, but we still need to mark those
       // registers as used by the function.
-      SkippedInstrs.insert(&TID);
+      SkippedInstrs.insert(&MCID);
     }
 
     // Third scan.
@@ -1057,7 +1056,7 @@
   MRI->closePhysRegsUsed(*TRI);
 
   // Add the clobber lists for all the instructions we skipped earlier.
-  for (SmallPtrSet<const TargetInstrDesc*, 4>::const_iterator
+  for (SmallPtrSet<const MCInstrDesc*, 4>::const_iterator
        I = SkippedInstrs.begin(), E = SkippedInstrs.end(); I != E; ++I)
     if (const unsigned *Defs = (*I)->getImplicitDefs())
       while (*Defs)

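The RegAllocFast hunks above turn LiveDbgValueMap into a one-to-many map so that every DBG_VALUE naming a spilled virtual register gets updated, not just the last one recorded. A small standalone model of that bookkeeping (names and types invented for illustration, not LLVM code) could be:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // One virtual register can be named by several DBG_VALUE instructions, so
    // map each register to a list of them rather than to a single entry.
    typedef std::map<unsigned, std::vector<std::string> > DbgValueMap;

    // On spill, rewrite every recorded DBG_VALUE to the stack location and then
    // drop the now-stale entries, mirroring the clear() in the patch.
    static void spillRegister(unsigned VReg, int FrameIndex, DbgValueMap &Map) {
      std::vector<std::string> &DbgUsers = Map[VReg];
      for (size_t i = 0; i != DbgUsers.size(); ++i)
        std::printf("rewriting %s to frame index %d\n",
                    DbgUsers[i].c_str(), FrameIndex);
      DbgUsers.clear();
    }

    int main() {
      DbgValueMap Map;
      Map[5].push_back("DBG_VALUE for variable x");
      Map[5].push_back("DBG_VALUE for variable y");   // Two users of vreg 5.
      spillRegister(5, -1, Map);
      std::printf("entries left for vreg 5: %u\n", (unsigned)Map[5].size());
      return 0;
    }
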
Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocGreedy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocGreedy.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocGreedy.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocGreedy.cpp Sat Jul  2 22:28:07 2011
@@ -22,6 +22,7 @@
 #include "SpillPlacement.h"
 #include "SplitKit.h"
 #include "VirtRegMap.h"
+#include "RegisterCoalescer.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Function.h"
@@ -37,7 +38,6 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -76,6 +76,7 @@
   // state
   std::auto_ptr<Spiller> SpillerInstance;
   std::priority_queue<std::pair<unsigned, unsigned> > Queue;
+  unsigned NextCascade;
 
   // Live ranges pass through a number of stages as we try to allocate them.
   // Some of the stages may also create new live ranges:
@@ -101,31 +102,37 @@
 
   static const char *const StageName[];
 
-  IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
+  // RegInfo - Keep additional information about each live range.
+  struct RegInfo {
+    LiveRangeStage Stage;
+
+    // Cascade - Eviction loop prevention. See canEvictInterference().
+    unsigned Cascade;
+
+    RegInfo() : Stage(RS_New), Cascade(0) {}
+  };
+
+  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
 
   LiveRangeStage getStage(const LiveInterval &VirtReg) const {
-    return LiveRangeStage(LRStage[VirtReg.reg]);
+    return ExtraRegInfo[VirtReg.reg].Stage;
+  }
+
+  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
+    ExtraRegInfo.resize(MRI->getNumVirtRegs());
+    ExtraRegInfo[VirtReg.reg].Stage = Stage;
   }
 
   template<typename Iterator>
   void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
-    LRStage.resize(MRI->getNumVirtRegs());
+    ExtraRegInfo.resize(MRI->getNumVirtRegs());
     for (;Begin != End; ++Begin) {
       unsigned Reg = (*Begin)->reg;
-      if (LRStage[Reg] == RS_New)
-        LRStage[Reg] = NewStage;
+      if (ExtraRegInfo[Reg].Stage == RS_New)
+        ExtraRegInfo[Reg].Stage = NewStage;
     }
   }
 
-  // Eviction. Sometimes an assigned live range can be evicted without
-  // conditions, but other times it must be split after being evicted to avoid
-  // infinite loops.
-  enum CanEvict {
-    CE_Never,    ///< Can never evict.
-    CE_Always,   ///< Can always evict.
-    CE_WithSplit ///< Can evict only if range is also split or spilled.
-  };
-
   // splitting state.
   std::auto_ptr<SplitAnalysis> SA;
   std::auto_ptr<SplitEditor> SE;
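
The ExtraRegInfo table introduced above bundles the old per-register stage with the new cascade number and is grown on demand to cover all virtual registers. A minimal standalone sketch of such a grow-on-demand side table follows; it uses only the standard library, and the class name and growth policy are illustrative, not the actual IndexedMap implementation.

    #include <cstdio>
    #include <vector>

    // Stage names as they appear in the patch; RS_New is the default.
    enum LiveRangeStage { RS_New, RS_First, RS_Second, RS_Global, RS_Local };

    struct RegInfo {
      LiveRangeStage Stage;
      unsigned Cascade;           // 0 means "never involved in an eviction".
      RegInfo() : Stage(RS_New), Cascade(0) {}
    };

    // Grow-on-demand side table keyed by virtual register index.
    class ExtraRegInfoTable {
      std::vector<RegInfo> Data;
    public:
      RegInfo &operator[](unsigned RegIdx) {
        if (RegIdx >= Data.size())
          Data.resize(RegIdx + 1);  // New slots default to RS_New / cascade 0.
        return Data[RegIdx];
      }
    };

    int main() {
      ExtraRegInfoTable Info;
      Info[7].Stage = RS_Global;    // Table grows transparently.
      std::printf("reg7 stage=%d, reg3 stage=%d\n",
                  (int)Info[7].Stage, (int)Info[3].Stage);  // 3, 0
      return 0;
    }
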
@@ -190,7 +197,7 @@
   void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                          SmallVectorImpl<LiveInterval*>&);
   void calcGapWeights(unsigned, SmallVectorImpl<float>&);
-  CanEvict canEvict(LiveInterval &A, LiveInterval &B);
+  bool canEvict(LiveInterval &A, LiveInterval &B);
   bool canEvictInterference(LiveInterval&, unsigned, float&);
 
   unsigned tryAssign(LiveInterval&, AllocationOrder&,
@@ -228,13 +235,13 @@
   return new RAGreedy();
 }
 
-RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
+RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
   initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
   initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
   initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
   initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
   initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
-  initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
   initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
   initializeLiveStacksPass(*PassRegistry::getPassRegistry());
   initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
@@ -308,13 +315,13 @@
   // LRE may clone a virtual register because dead code elimination causes it to
   // be split into connected components. Ensure that the new register gets the
   // same stage as the parent.
-  LRStage.grow(New);
-  LRStage[New] = LRStage[Old];
+  ExtraRegInfo.grow(New);
+  ExtraRegInfo[New] = ExtraRegInfo[Old];
 }
 
 void RAGreedy::releaseMemory() {
   SpillerInstance.reset(0);
-  LRStage.clear();
+  ExtraRegInfo.clear();
   GlobalCand.clear();
   RegAllocBase::releaseMemory();
 }
@@ -328,11 +335,11 @@
          "Can only enqueue virtual registers");
   unsigned Prio;
 
-  LRStage.grow(Reg);
-  if (LRStage[Reg] == RS_New)
-    LRStage[Reg] = RS_First;
+  ExtraRegInfo.grow(Reg);
+  if (ExtraRegInfo[Reg].Stage == RS_New)
+    ExtraRegInfo[Reg].Stage = RS_First;
 
-  if (LRStage[Reg] == RS_Second)
+  if (ExtraRegInfo[Reg].Stage == RS_Second)
     // Unsplit ranges that couldn't be allocated immediately are deferred until
     // everything else has been allocated. Long ranges are allocated last so
     // they are split against realistic interference.
@@ -398,13 +405,10 @@
 /// by enqueue() decides which registers ultimately end up being split and
 /// spilled.
 ///
-/// This function must define a non-circular relation when it returns CE_Always,
-/// otherwise infinite eviction loops are possible. When evicting a <= RS_Second
-/// range, it is possible to return CE_WithSplit which forces the evicted
-/// register to be split or spilled before it can evict anything again. That
-/// guarantees progress.
-RAGreedy::CanEvict RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) {
-  return A.weight > B.weight ? CE_Always : CE_Never;
+/// Cascade numbers are used to prevent infinite loops in case this function
+/// defines a cyclic relation.
+bool RAGreedy::canEvict(LiveInterval &A, LiveInterval &B) {
+  return A.weight > B.weight;
 }
 
 /// canEvict - Return true if all interferences between VirtReg and PhysReg can
@@ -413,6 +417,17 @@
 /// On return, set MaxWeight to the maximal spill weight of an interference.
 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                     float &MaxWeight) {
+  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
+  // involved in an eviction before. If a cascade number was assigned, deny
+  // evicting anything with the same or a newer cascade number. This prevents
+  // infinite eviction loops.
+  //
+  // This works out so a register without a cascade number is allowed to evict
+  // anything, and it can be evicted by anything.
+  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
+  if (!Cascade)
+    Cascade = NextCascade;
+
   float Weight = 0;
   for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
     LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
@@ -425,18 +440,12 @@
       LiveInterval *Intf = Q.interferingVRegs()[i - 1];
       if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
         return false;
+      if (Cascade <= ExtraRegInfo[Intf->reg].Cascade)
+        return false;
       if (Intf->weight >= MaxWeight)
         return false;
-      switch (canEvict(VirtReg, *Intf)) {
-      case CE_Always:
-        break;
-      case CE_Never:
+      if (!canEvict(VirtReg, *Intf))
         return false;
-      case CE_WithSplit:
-        if (getStage(*Intf) > RS_Second)
-          return false;
-        break;
-      }
       Weight = std::max(Weight, Intf->weight);
     }
   }
@@ -487,20 +496,27 @@
   if (!BestPhys)
     return 0;
 
-  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
+  // We will evict interference. Make sure that VirtReg has a cascade number,
+  // and assign that cascade number to every evicted register. These live
+  // ranges can then only be evicted by a newer cascade, preventing infinite
+  // loops.
+  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
+  if (!Cascade)
+    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
+
+  DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI)
+               << " interference: Cascade " << Cascade << '\n');
   for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
     LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
     assert(Q.seenAllInterferences() && "Didn't check all interfererences.");
     for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
       LiveInterval *Intf = Q.interferingVRegs()[i];
       unassign(*Intf, VRM->getPhys(Intf->reg));
+      assert(ExtraRegInfo[Intf->reg].Cascade < Cascade &&
+             "Cannot decrease cascade number, illegal eviction");
+      ExtraRegInfo[Intf->reg].Cascade = Cascade;
       ++NumEvicted;
       NewVRegs.push_back(Intf);
-      // Prevent looping by forcing the evicted ranges to be split before they
-      // can evict anything else.
-      if (getStage(*Intf) < RS_Second &&
-          canEvict(VirtReg, *Intf) == CE_WithSplit)
-        LRStage[Intf->reg] = RS_Second;
     }
   }
   return BestPhys;
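
The cascade numbers assigned in the hunk above are what prevent eviction loops: an evicting range stamps its victims with its own cascade number, and nothing may evict a range with an equal or newer cascade number. The toy model below illustrates that rule; the struct and helper names are simplified stand-ins, not the actual RAGreedy code.

    #include <cstdio>

    // A live range carries a weight and a cascade number (0 = unassigned).
    struct Range {
      float Weight;
      unsigned Cascade;
    };

    static unsigned NextCascade = 1;

    // A range may evict another only if its (effective) cascade number is
    // strictly greater; an unassigned evictor acts as if it had the next,
    // largest number, so it may evict anything and be evicted by anything.
    static bool canEvict(const Range &Evictor, const Range &Victim) {
      unsigned Cascade = Evictor.Cascade ? Evictor.Cascade : NextCascade;
      if (Cascade <= Victim.Cascade)
        return false;               // Same or newer cascade: deny, no loops.
      return Evictor.Weight > Victim.Weight;
    }

    // Stamp the victim with the evictor's cascade number.
    static void evict(Range &Evictor, Range &Victim) {
      if (!Evictor.Cascade)
        Evictor.Cascade = NextCascade++;
      Victim.Cascade = Evictor.Cascade;
    }

    int main() {
      Range A = { 2.0f, 0 };
      Range B = { 1.0f, 0 };

      if (canEvict(A, B))
        evict(A, B);                // A evicts B; both end up with cascade 1.

      // Even if B later looks heavier, it cannot evict A back: their cascade
      // numbers are equal, so the ping-pong is cut off.
      B.Weight = 3.0f;
      std::printf("B can evict A: %d\n", (int)canEvict(B, A)); // prints 0
      return 0;
    }
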
@@ -763,32 +779,46 @@
   // Create the main cross-block interval.
   const unsigned MainIntv = SE->openIntv();
 
-  // First add all defs that are live out of a block.
+  // First handle all the blocks with uses.
   ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
   for (unsigned i = 0; i != UseBlocks.size(); ++i) {
     const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
-    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
-    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
+    bool RegIn  = BI.LiveIn &&
+                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
+    bool RegOut = BI.LiveOut &&
+                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
 
     // Create separate intervals for isolated blocks with multiple uses.
-    if (!RegIn && !RegOut && BI.FirstUse != BI.LastUse) {
+    //
+    //     |---o---o---|    Enter and leave on the stack.
+    //     ____-----____    Create local interval for uses.
+    //
+    //     |   o---o---|    Defined in block, leave on stack.
+    //         -----____    Create local interval for uses.
+    //
+    //     |---o---x   |    Enter on stack, killed in block.
+    //     ____-----        Create local interval for uses.
+    //
+    if (!RegIn && !RegOut) {
       DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
-      SE->splitSingleBlock(BI);
-      SE->selectIntv(MainIntv);
+      if (!BI.isOneInstr()) {
+        SE->splitSingleBlock(BI);
+        SE->selectIntv(MainIntv);
+      }
       continue;
     }
 
-    // Should the register be live out?
-    if (!BI.LiveOut || !RegOut)
-      continue;
-
     SlotIndex Start, Stop;
     tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
     Intf.moveToBlock(BI.MBB->getNumber());
-    DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
-                 << Bundles->getBundle(BI.MBB->getNumber(), 1)
+    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
+                 << (RegIn ? " => " : " -- ")
+                 << "BB#" << BI.MBB->getNumber()
+                 << (RegOut ? " => " : " -- ")
+                 << " EB#" << Bundles->getBundle(BI.MBB->getNumber(), 1)
                  << " [" << Start << ';'
                  << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
+                 << ") uses [" << BI.FirstUse << ';' << BI.LastUse
                  << ") intf [" << Intf.first() << ';' << Intf.last() << ')');
 
     // The interference interval should either be invalid or overlap MBB.
@@ -797,150 +827,266 @@
     assert((!Intf.hasInterference() || Intf.last() > Start)
            && "Bad interference");
 
-    // Check interference leaving the block.
+    // We are now ready to decide where to split in the current block.  There
+    // are many variables guiding the decision:
+    //
+    // - RegIn / RegOut: The global splitting algorithm's decisions for our
+    //   ingoing and outgoing bundles.
+    //
+    // - BI.LiveIn / BI.LiveOut: Is the live range live-in and/or live-out
+    //   of this block.
+    //
+    // - Intf.hasInterference(): Is there interference in this block.
+    //
+    // - Intf.first() / Intf.last(): The range of interference.
+    //
+    // The live range should be split such that MainIntv is live-in when RegIn
+    // is set, and live-out when RegOut is set.  MainIntv should never overlap
+    // the interference, and the stack interval should never have more than one
+    // use per block.
+
+    // No splits can be inserted after LastSplitPoint, overlap instead.
+    SlotIndex LastSplitPoint = Stop;
+    if (BI.LiveOut)
+      LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
+
+    // At this point, we know that either RegIn or RegOut is set. We dealt with
+    // the all-stack case above.
+
+    // Blocks without interference are relatively easy.
     if (!Intf.hasInterference()) {
-      // Block is interference-free.
-      DEBUG(dbgs() << ", no interference");
-      if (!BI.LiveThrough) {
-        DEBUG(dbgs() << ", not live-through.\n");
-        SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
-        continue;
-      }
-      if (!RegIn) {
-        // Block is live-through, but entry bundle is on the stack.
-        // Reload just before the first use.
-        DEBUG(dbgs() << ", not live-in, enter before first use.\n");
-        SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
-        continue;
-      }
-      DEBUG(dbgs() << ", live-through.\n");
-      continue;
-    }
+      DEBUG(dbgs() << ", no interference.\n");
+      SE->selectIntv(MainIntv);
+      // The easiest case has MainIntv live through.
+      //
+      //     |---o---o---|    Live-in, live-out.
+      //     =============    Use MainIntv everywhere.
+      //
+      SlotIndex From = Start, To = Stop;
+
+      // Block entry. Reload before the first use if MainIntv is not live-in.
+      //
+      //     |---o--    Enter on stack.
+      //     ____===    Reload before first use.
+      //
+      //     |   o--    Defined in block.
+      //         ===    Use MainIntv from def.
+      //
+      if (!RegIn)
+        From = SE->enterIntvBefore(BI.FirstUse);
 
-    // Block has interference.
-    DEBUG(dbgs() << ", interference to " << Intf.last());
+      // Block exit. Handle cases where MainIntv is not live-out.
+      if (!BI.LiveOut)
+        //
+        //     --x   |    Killed in block.
+        //     ===        Use MainIntv up to kill.
+        //
+        To = SE->leaveIntvAfter(BI.LastUse);
+      else if (!RegOut) {
+        //
+        //     --o---|    Live-out on stack.
+        //     ===____    Use MainIntv up to last use, switch to stack.
+        //
+        //     -----o|    Live-out on stack, last use after last split point.
+        //     ======     Extend MainIntv to last use, overlapping.
+        //       \____    Copy to stack interval before last split point.
+        //
+        if (BI.LastUse < LastSplitPoint)
+          To = SE->leaveIntvAfter(BI.LastUse);
+        else {
+          // The last use is after the last split point, it is probably an
+          // indirect branch.
+          To = SE->leaveIntvBefore(LastSplitPoint);
+          // Run a double interval from the split to the last use.  This makes
+          // it possible to spill the complement without affecting the indirect
+          // branch.
+          SE->overlapIntv(To, BI.LastUse);
+        }
+      }
 
-    if (!BI.LiveThrough && Intf.last() <= BI.FirstUse) {
-      // The interference doesn't reach the outgoing segment.
-      DEBUG(dbgs() << " doesn't affect def from " << BI.FirstUse << '\n');
-      SE->useIntv(BI.FirstUse, Stop);
+      // Paint in MainIntv liveness for this block.
+      SE->useIntv(From, To);
       continue;
     }
 
-    SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
-    if (Intf.last().getBoundaryIndex() < BI.LastUse) {
-      // There are interference-free uses at the end of the block.
-      // Find the first use that can get the live-out register.
-      SmallVectorImpl<SlotIndex>::const_iterator UI =
-        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
-                         Intf.last().getBoundaryIndex());
-      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
-      SlotIndex Use = *UI;
-      assert(Use <= BI.LastUse && "Couldn't find last use");
-      // Only attempt a split before the last split point.
-      if (Use.getBaseIndex() <= LastSplitPoint) {
-        DEBUG(dbgs() << ", free use at " << Use << ".\n");
-        SlotIndex SegStart = SE->enterIntvBefore(Use);
-        assert(SegStart >= Intf.last() && "Couldn't avoid interference");
-        assert(SegStart < LastSplitPoint && "Impossible split point");
-        SE->useIntv(SegStart, Stop);
-        continue;
-      }
-    }
+    // We are now looking at a block with interference, and we know that either
+    // RegIn or RegOut is set.
+    assert(Intf.hasInterference() && (RegIn || RegOut) && "Bad invariant");
 
-    // Interference is after the last use.
-    DEBUG(dbgs() << " after last use.\n");
-    SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
-    assert(SegStart >= Intf.last() && "Couldn't avoid interference");
-  }
+    // If the live range is not live through the block, it is possible that the
+    // interference doesn't even overlap.  Deal with those cases first.  Since
+    // no copy instructions are required, we can tolerate interference starting
+    // or ending at the same instruction that kills or defines our live range.
 
-  // Now all defs leading to live bundles are handled, do everything else.
-  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
-    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
-    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
-    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
+    // Live-in, killed before interference.
+    //
+    //               ~~~    Interference after kill.
+    //     |---o---x   |    Killed in block.
+    //     =========        Use MainIntv everywhere.
+    //
+    if (RegIn && !BI.LiveOut && BI.LastUse <= Intf.first()) {
+      DEBUG(dbgs() << ", live-in, killed before interference.\n");
+      SE->selectIntv(MainIntv);
+      SlotIndex To = SE->leaveIntvAfter(BI.LastUse);
+      SE->useIntv(Start, To);
+      continue;
+    }
 
-    // Is the register live-in?
-    if (!BI.LiveIn || !RegIn)
+    // Live-out, defined after interference.
+    //
+    //     ~~~              Interference before def.
+    //     |   o---o---|    Defined in block.
+    //         =========    Use MainIntv everywhere.
+    //
+    if (RegOut && !BI.LiveIn && BI.FirstUse >= Intf.last()) {
+      DEBUG(dbgs() << ", live-out, defined after interference.\n");
+      SE->selectIntv(MainIntv);
+      SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
+      SE->useIntv(From, Stop);
       continue;
+    }
 
-    // We have an incoming register. Check for interference.
-    SlotIndex Start, Stop;
-    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
-    Intf.moveToBlock(BI.MBB->getNumber());
-    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
-                 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
-                 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
-                 << ')');
+    // The interference is now known to overlap the live range, but it may
+    // still be easy to avoid if all the interference is on one side of the
+    // uses, and we enter or leave on the stack.
 
-    // Check interference entering the block.
-    if (!Intf.hasInterference()) {
-      // Block is interference-free.
-      DEBUG(dbgs() << ", no interference");
-      if (!BI.LiveThrough) {
-        DEBUG(dbgs() << ", killed in block.\n");
-        SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
-        continue;
-      }
-      if (!RegOut) {
-        SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
-        // Block is live-through, but exit bundle is on the stack.
-        // Spill immediately after the last use.
-        if (BI.LastUse < LastSplitPoint) {
-          DEBUG(dbgs() << ", uses, stack-out.\n");
-          SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
-          continue;
-        }
-        // The last use is after the last split point, it is probably an
-        // indirect jump.
-        DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
-                     << LastSplitPoint << ", stack-out.\n");
-        SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
-        SE->useIntv(Start, SegEnd);
-        // Run a double interval from the split to the last use.
-        // This makes it possible to spill the complement without affecting the
-        // indirect branch.
-        SE->overlapIntv(SegEnd, BI.LastUse);
-        continue;
+    // Live-out on stack, interference after last use.
+    //
+    //               ~~~    Interference after last use.
+    //     |---o---o---|    Live-out on stack.
+    //     =========____    Leave MainIntv after last use.
+    //
+    //                 ~    Interference after last use.
+    //     |---o---o--o|    Live-out on stack, late last use.
+    //     =========____    Copy to stack after LSP, overlap MainIntv.
+    //
+    if (!RegOut && Intf.first() > BI.LastUse.getBoundaryIndex()) {
+      assert(RegIn && "Stack-in, stack-out should already be handled");
+      if (BI.LastUse < LastSplitPoint) {
+        DEBUG(dbgs() << ", live-in, stack-out, interference after last use.\n");
+        SE->selectIntv(MainIntv);
+        SlotIndex To = SE->leaveIntvAfter(BI.LastUse);
+        assert(To <= Intf.first() && "Expected to avoid interference");
+        SE->useIntv(Start, To);
+      } else {
+        DEBUG(dbgs() << ", live-in, stack-out, avoid last split point\n");
+        SE->selectIntv(MainIntv);
+        SlotIndex To = SE->leaveIntvBefore(LastSplitPoint);
+        assert(To <= Intf.first() && "Expected to avoid interference");
+        SE->overlapIntv(To, BI.LastUse);
+        SE->useIntv(Start, To);
       }
-      // Register is live-through.
-      DEBUG(dbgs() << ", uses, live-through.\n");
-      SE->useIntv(Start, Stop);
       continue;
     }
 
-    // Block has interference.
-    DEBUG(dbgs() << ", interference from " << Intf.first());
-
-    if (!BI.LiveThrough && Intf.first() >= BI.LastUse) {
-      // The interference doesn't reach the outgoing segment.
-      DEBUG(dbgs() << " doesn't affect kill at " << BI.LastUse << '\n');
-      SE->useIntv(Start, BI.LastUse);
+    // Live-in on stack, interference before first use.
+    //
+    //     ~~~              Interference before first use.
+    //     |---o---o---|    Live-in on stack.
+    //     ____=========    Enter MainIntv before first use.
+    //
+    if (!RegIn && Intf.last() < BI.FirstUse.getBaseIndex()) {
+      assert(RegOut && "Stack-in, stack-out should already be handled");
+      DEBUG(dbgs() << ", stack-in, interference before first use.\n");
+      SE->selectIntv(MainIntv);
+      SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
+      assert(From >= Intf.last() && "Expected to avoid interference");
+      SE->useIntv(From, Stop);
       continue;
     }
 
-    if (Intf.first().getBaseIndex() > BI.FirstUse) {
-      // There are interference-free uses at the beginning of the block.
-      // Find the last use that can get the register.
-      SmallVectorImpl<SlotIndex>::const_iterator UI =
-        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
-                         Intf.first().getBaseIndex());
-      assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
-      SlotIndex Use = (--UI)->getBoundaryIndex();
-      DEBUG(dbgs() << ", free use at " << *UI << ".\n");
-      SlotIndex SegEnd = SE->leaveIntvAfter(Use);
-      assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
-      SE->useIntv(Start, SegEnd);
-      continue;
+    // The interference overlaps a place where we wanted to use MainIntv. That
+    // means we need to create a local interval that can be allocated a
+    // different register.
+    DEBUG(dbgs() << ", creating local interval.\n");
+    unsigned LocalIntv = SE->openIntv();
+
+    // We may be creating copies directly between MainIntv and LocalIntv,
+    // bypassing the stack interval. When we do that, we should never use the
+    // leaveIntv* methods as they define values in the stack interval. By
+    // starting from the end of the block and working our way backwards, we can
+    // get by with only enterIntv* methods.
+    //
+    // When selecting split points, we generally try to maximize the stack
+    // interval as long as it contains no uses, maximize the main interval as
+    // long as it doesn't overlap interference, and minimize the local interval
+    // that we don't know how to allocate yet.
+
+    // Handle the block exit, set Pos to the first handled slot.
+    SlotIndex Pos = BI.LastUse;
+    if (RegOut) {
+      assert(Intf.last() < LastSplitPoint && "Cannot be live-out in register");
+      // Create a snippet of MainIntv that is live-out.
+      //
+      //     ~~~        Interference overlapping uses.
+      //     --o---|    Live-out in MainIntv.
+      //     ----===    Switch from LocalIntv to MainIntv after interference.
+      //
+      SE->selectIntv(MainIntv);
+      Pos = SE->enterIntvAfter(Intf.last());
+      assert(Pos >= Intf.last() && "Expected to avoid interference");
+      SE->useIntv(Pos, Stop);
+      SE->selectIntv(LocalIntv);
+    } else if (BI.LiveOut) {
+      if (BI.LastUse < LastSplitPoint) {
+        // Live-out on the stack.
+        //
+        //     ~~~        Interference overlapping uses.
+        //     --o---|    Live-out on stack.
+        //     ---____    Switch from LocalIntv to stack after last use.
+        //
+        Pos = SE->leaveIntvAfter(BI.LastUse);
+      } else {
+        // Live-out on the stack, last use after last split point.
+        //
+        //     ~~~        Interference overlapping uses.
+        //     --o--o|    Live-out on stack, late use.
+        //     ------     Copy to stack before LSP, overlap LocalIntv.
+        //         \__
+        //
+        Pos = SE->leaveIntvBefore(LastSplitPoint);
+        // We need to overlap LocalIntv so it can reach LastUse.
+        SE->overlapIntv(Pos, BI.LastUse);
+      }
     }
 
-    // Interference is before the first use.
-    DEBUG(dbgs() << " before first use.\n");
-    SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
-    assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
+    // When not live-out, leave Pos at LastUse. We have handled everything from
+    // Pos to Stop. Find the starting point for LocalIntv.
+    assert(SE->currentIntv() == LocalIntv && "Expecting local interval");
+
+    if (RegIn) {
+      assert(Start < Intf.first() && "Cannot be live-in with interference");
+      // Live-in in MainIntv, only use LocalIntv for interference.
+      //
+      //         ~~~    Interference overlapping uses.
+      //     |---o--    Live-in in MainIntv.
+      //     ====---    Switch to LocalIntv before interference.
+      //
+      SlotIndex Switch = SE->enterIntvBefore(Intf.first());
+      assert(Switch <= Intf.first() && "Expected to avoid interference");
+      SE->useIntv(Switch, Pos);
+      SE->selectIntv(MainIntv);
+      SE->useIntv(Start, Switch);
+    } else {
+      // Live-in on stack, enter LocalIntv before first use.
+      //
+      //         ~~~    Interference overlapping uses.
+      //     |---o--    Live-in in MainIntv.
+      //     ____---    Reload to LocalIntv before interference.
+      //
+      // Defined in block.
+      //
+      //         ~~~    Interference overlapping uses.
+      //     |   o--    Defined in block.
+      //         ---    Begin LocalIntv at first use.
+      //
+      SlotIndex Switch = SE->enterIntvBefore(BI.FirstUse);
+      SE->useIntv(Switch, Pos);
+    }
   }
 
   // Handle live-through blocks.
+  SE->selectIntv(MainIntv);
   for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
     unsigned Number = Cand.ActiveBlocks[i];
     bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
@@ -967,7 +1113,7 @@
   SE->finish(&IntvMap);
   DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
 
-  LRStage.resize(MRI->getNumVirtRegs());
+  ExtraRegInfo.resize(MRI->getNumVirtRegs());
   unsigned OrigBlocks = SA->getNumLiveBlocks();
 
   // Sort out the new intervals created by splitting. We get four kinds:
@@ -976,27 +1122,27 @@
   // - Block-local splits are candidates for local splitting.
   // - DCE leftovers should go back on the queue.
   for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
-    unsigned Reg = LREdit.get(i)->reg;
+    LiveInterval &Reg = *LREdit.get(i);
 
     // Ignore old intervals from DCE.
-    if (LRStage[Reg] != RS_New)
+    if (getStage(Reg) != RS_New)
       continue;
 
     // Remainder interval. Don't try splitting again, spill if it doesn't
     // allocate.
     if (IntvMap[i] == 0) {
-      LRStage[Reg] = RS_Global;
+      setStage(Reg, RS_Global);
       continue;
     }
 
     // Main interval. Allow repeated splitting as long as the number of live
     // blocks is strictly decreasing.
     if (IntvMap[i] == MainIntv) {
-      if (SA->countLiveBlocks(LREdit.get(i)) >= OrigBlocks) {
+      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
         DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                      << " blocks as original.\n");
         // Don't allow repeated splitting as a safeguard against looping.
-        LRStage[Reg] = RS_Global;
+        setStage(Reg, RS_Global);
       }
       continue;
     }
@@ -1302,10 +1448,9 @@
   if (NewGaps >= NumGaps) {
     DEBUG(dbgs() << "Tagging non-progress ranges: ");
     assert(!ProgressRequired && "Didn't make progress when it was required.");
-    LRStage.resize(MRI->getNumVirtRegs());
     for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
       if (IntvMap[i] == 1) {
-        LRStage[LREdit.get(i)->reg] = RS_Local;
+        setStage(*LREdit.get(i), RS_Local);
         DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
       }
     DEBUG(dbgs() << '\n');
@@ -1384,7 +1529,8 @@
     return PhysReg;
 
   LiveRangeStage Stage = getStage(VirtReg);
-  DEBUG(dbgs() << StageName[Stage] << '\n');
+  DEBUG(dbgs() << StageName[Stage]
+               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
 
   // Try to evict a less worthy live range, but only for ranges from the primary
   // queue. The RS_Second ranges already failed to do this, and they should not
@@ -1399,7 +1545,7 @@
   // Wait until the second time, when all smaller ranges have been allocated.
   // This gives a better picture of the interference to split around.
   if (Stage == RS_First) {
-    LRStage[VirtReg.reg] = RS_Second;
+    setStage(VirtReg, RS_Second);
     DEBUG(dbgs() << "wait for second round\n");
     NewVRegs.push_back(&VirtReg);
     return 0;
@@ -1450,8 +1596,9 @@
 
   SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
   SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
-  LRStage.clear();
-  LRStage.resize(MRI->getNumVirtRegs());
+  ExtraRegInfo.clear();
+  ExtraRegInfo.resize(MRI->getNumVirtRegs());
+  NextCascade = 1;
   IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
 
   allocatePhysRegs();

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocLinearScan.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocLinearScan.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocLinearScan.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocLinearScan.cpp Sat Jul  2 22:28:07 2011
@@ -18,6 +18,7 @@
 #include "VirtRegRewriter.h"
 #include "RegisterClassInfo.h"
 #include "Spiller.h"
+#include "RegisterCoalescer.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Function.h"
 #include "llvm/CodeGen/CalcSpillWeights.h"
@@ -28,7 +29,6 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
@@ -58,11 +58,6 @@
              cl::init(false), cl::Hidden);
 
 static cl::opt<bool>
-PreSplitIntervals("pre-alloc-split",
-                  cl::desc("Pre-register allocation live interval splitting"),
-                  cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
 TrivCoalesceEnds("trivial-coalesce-ends",
                   cl::desc("Attempt trivial coalescing of interval ends"),
                   cl::init(false), cl::Hidden);
@@ -101,10 +96,9 @@
       initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
       initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
       initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
-      initializeRegisterCoalescerAnalysisGroup(
+      initializeRegisterCoalescerPass(
         *PassRegistry::getPassRegistry());
       initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
-      initializePreAllocSplittingPass(*PassRegistry::getPassRegistry());
       initializeLiveStacksPass(*PassRegistry::getPassRegistry());
       initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
       initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
@@ -217,8 +211,6 @@
       // to coalescing and which analyses coalescing invalidates.
       AU.addRequiredTransitive<RegisterCoalescer>();
       AU.addRequired<CalculateSpillWeights>();
-      if (PreSplitIntervals)
-        AU.addRequiredID(PreAllocSplittingID);
       AU.addRequiredID(LiveStacksID);
       AU.addPreservedID(LiveStacksID);
       AU.addRequired<MachineLoopInfo>();
@@ -401,11 +393,10 @@
 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
 INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
 INITIALIZE_PASS_DEPENDENCY(CalculateSpillWeights)
-INITIALIZE_PASS_DEPENDENCY(PreAllocSplitting)
 INITIALIZE_PASS_DEPENDENCY(LiveStacks)
 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
 INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
-INITIALIZE_AG_DEPENDENCY(RegisterCoalescer)
+INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
 INITIALIZE_PASS_END(RALinScan, "linearscan-regalloc",
                     "Linear Scan Register Allocator", false, false)

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocPBQP.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocPBQP.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocPBQP.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegAllocPBQP.cpp Sat Jul  2 22:28:07 2011
@@ -35,6 +35,7 @@
 #include "Splitter.h"
 #include "VirtRegMap.h"
 #include "VirtRegRewriter.h"
+#include "RegisterCoalescer.h"
 #include "llvm/CodeGen/CalcSpillWeights.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveStackAnalysis.h"
@@ -46,7 +47,6 @@
 #include "llvm/CodeGen/PBQP/Graph.h"
 #include "llvm/CodeGen/PBQP/Heuristics/Briggs.h"
 #include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetInstrInfo.h"
@@ -88,7 +88,7 @@
       : MachineFunctionPass(ID), builder(b), customPassID(cPassID) {
     initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
     initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
-    initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
+    initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
     initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
     initializeLiveStacksPass(*PassRegistry::getPassRegistry());
     initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegisterClassInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegisterClassInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegisterClassInfo.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegisterClassInfo.h Sat Jul  2 22:28:07 2011
@@ -112,7 +112,7 @@
   /// register, so a register allocator needs to track its liveness and
   /// availability.
   bool isAllocatable(unsigned PhysReg) const {
-    return TRI->get(PhysReg).inAllocatableClass && !isReserved(PhysReg);
+    return TRI->isInAllocatableClass(PhysReg) && !isReserved(PhysReg);
   }
 };
 } // end namespace llvm

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/RegisterCoalescer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/RegisterCoalescer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/RegisterCoalescer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/RegisterCoalescer.cpp Sat Jul  2 22:28:07 2011
@@ -13,38 +13,92 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include "llvm/CodeGen/RegisterCoalescer.h"
+#define DEBUG_TYPE "regcoalescing"
+#include "RegisterCoalescer.h"
+#include "VirtRegMap.h"
+#include "LiveDebugVariables.h"
+
+#include "llvm/Pass.h"
+#include "llvm/Value.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Pass.h"
-
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+#include <cmath>
 using namespace llvm;
 
-// Register the RegisterCoalescer interface, providing a nice name to refer to.
-INITIALIZE_ANALYSIS_GROUP(RegisterCoalescer, "Register Coalescer", 
-                          SimpleRegisterCoalescing)
-char RegisterCoalescer::ID = 0;
+STATISTIC(numJoins    , "Number of interval joins performed");
+STATISTIC(numCrossRCs , "Number of cross class joins performed");
+STATISTIC(numCommutes , "Number of instruction commuting performed");
+STATISTIC(numExtends  , "Number of copies extended");
+STATISTIC(NumReMats   , "Number of instructions re-materialized");
+STATISTIC(numPeep     , "Number of identity moves eliminated after coalescing");
+STATISTIC(numAborts   , "Number of times interval joining aborted");
 
-// RegisterCoalescer destructor: DO NOT move this to the header file
-// for RegisterCoalescer or else clients of the RegisterCoalescer
-// class may not depend on the RegisterCoalescer.o file in the current
-// .a file, causing alias analysis support to not be included in the
-// tool correctly!
-//
-RegisterCoalescer::~RegisterCoalescer() {}
+static cl::opt<bool>
+EnableJoining("join-liveintervals",
+              cl::desc("Coalesce copies (default=true)"),
+              cl::init(true));
+
+static cl::opt<bool>
+DisableCrossClassJoin("disable-cross-class-join",
+               cl::desc("Avoid coalescing cross register class copies"),
+               cl::init(false), cl::Hidden);
 
-unsigned CoalescerPair::compose(unsigned a, unsigned b) const {
+static cl::opt<bool>
+EnablePhysicalJoin("join-physregs",
+                   cl::desc("Join physical register copies"),
+                   cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+VerifyCoalescing("verify-coalescing",
+         cl::desc("Verify machine instrs before and after register coalescing"),
+         cl::Hidden);
+
+INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
+                      "Simple Register Coalescing", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
+INITIALIZE_PASS_DEPENDENCY(PHIElimination)
+INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
+                    "Simple Register Coalescing", false, false)
+
+char RegisterCoalescer::ID = 0;
+
+static unsigned compose(const TargetRegisterInfo &tri, unsigned a, unsigned b) {
   if (!a) return b;
   if (!b) return a;
-  return tri_.composeSubRegIndices(a, b);
+  return tri.composeSubRegIndices(a, b);
 }
 
-bool CoalescerPair::isMoveInstr(const MachineInstr *MI,
-                                unsigned &Src, unsigned &Dst,
-                                unsigned &SrcSub, unsigned &DstSub) const {
+static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
+                        unsigned &Src, unsigned &Dst,
+                        unsigned &SrcSub, unsigned &DstSub) {
   if (MI->isCopy()) {
     Dst = MI->getOperand(0).getReg();
     DstSub = MI->getOperand(0).getSubReg();
@@ -52,7 +106,8 @@
     SrcSub = MI->getOperand(1).getSubReg();
   } else if (MI->isSubregToReg()) {
     Dst = MI->getOperand(0).getReg();
-    DstSub = compose(MI->getOperand(0).getSubReg(), MI->getOperand(3).getImm());
+    DstSub = compose(tri, MI->getOperand(0).getSubReg(),
+                     MI->getOperand(3).getImm());
     Src = MI->getOperand(2).getReg();
     SrcSub = MI->getOperand(2).getSubReg();
   } else
@@ -66,7 +121,7 @@
   flipped_ = crossClass_ = false;
 
   unsigned Src, Dst, SrcSub, DstSub;
-  if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+  if (!isMoveInstr(tri_, MI, Src, Dst, SrcSub, DstSub))
     return false;
   partial_ = SrcSub || DstSub;
 
@@ -156,7 +211,7 @@
   if (!MI)
     return false;
   unsigned Src, Dst, SrcSub, DstSub;
-  if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+  if (!isMoveInstr(tri_, MI, Src, Dst, SrcSub, DstSub))
     return false;
 
   // Find the virtual register that is srcReg_.
@@ -185,13 +240,1554 @@
     if (dstReg_ != Dst)
       return false;
     // Registers match, do the subregisters line up?
-    return compose(subIdx_, SrcSub) == DstSub;
+    return compose(tri_, subIdx_, SrcSub) == DstSub;
+  }
+}
+
+void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
+  AU.setPreservesCFG();
+  AU.addRequired<AliasAnalysis>();
+  AU.addRequired<LiveIntervals>();
+  AU.addPreserved<LiveIntervals>();
+  AU.addRequired<LiveDebugVariables>();
+  AU.addPreserved<LiveDebugVariables>();
+  AU.addPreserved<SlotIndexes>();
+  AU.addRequired<MachineLoopInfo>();
+  AU.addPreserved<MachineLoopInfo>();
+  AU.addPreservedID(MachineDominatorsID);
+  AU.addPreservedID(StrongPHIEliminationID);
+  AU.addPreservedID(PHIEliminationID);
+  AU.addPreservedID(TwoAddressInstructionPassID);
+  MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+void RegisterCoalescer::markAsJoined(MachineInstr *CopyMI) {
+  /// Joined copies are not deleted immediately, but kept in JoinedCopies.
+  JoinedCopies.insert(CopyMI);
+
+  /// Mark all register operands of CopyMI as <undef> so they won't affect dead
+  /// code elimination.
+  for (MachineInstr::mop_iterator I = CopyMI->operands_begin(),
+       E = CopyMI->operands_end(); I != E; ++I)
+    if (I->isReg())
+      I->setIsUndef(true);
+}
+
+/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
+/// being the source and IntB being the dest, thus this defines a value number
+/// in IntB.  If the source value number (in IntA) is defined by a copy from B,
+/// see if we can merge these two pieces of B into a single value number,
+/// eliminating a copy.  For example:
+///
+///  A3 = B0
+///    ...
+///  B1 = A3      <- this copy
+///
+/// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
+/// value number to be replaced with B0 (which simplifies the B liveinterval).
+///
+/// This returns true if an interval was modified.
+///
+bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
+                                                    MachineInstr *CopyMI) {
+  // Bail if there is no dst interval - can happen when merging physical subreg
+  // operations.
+  if (!li_->hasInterval(CP.getDstReg()))
+    return false;
+
+  LiveInterval &IntA =
+    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+  LiveInterval &IntB =
+    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
+  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
+
+  // BValNo is a value number in B that is defined by a copy from A.  'B1' in
+  // the example above.
+  LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
+  if (BLR == IntB.end()) return false;
+  VNInfo *BValNo = BLR->valno;
+
+  // Get the location that B is defined at.  Two options: either this value has
+  // an unknown definition point or it is defined at CopyIdx.  If unknown, we
+  // can't process it.
+  if (!BValNo->isDefByCopy()) return false;
+  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
+
+  // AValNo is the value number in A that defines the copy, A3 in the example.
+  SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
+  LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
+  // The live range might not exist after fun with physreg coalescing.
+  if (ALR == IntA.end()) return false;
+  VNInfo *AValNo = ALR->valno;
+  // If it's re-defined by an early clobber somewhere in the live range, then
+  // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
+  // See PR3149:
+  // 172     %ECX<def> = MOV32rr %reg1039<kill>
+  // 180     INLINEASM <es:subl $5,$1
+  //         sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9,
+  //         %EAX<kill>,
+  // 36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
+  // 188     %EAX<def> = MOV32rr %EAX<kill>
+  // 196     %ECX<def> = MOV32rr %ECX<kill>
+  // 204     %ECX<def> = MOV32rr %ECX<kill>
+  // 212     %EAX<def> = MOV32rr %EAX<kill>
+  // 220     %EAX<def> = MOV32rr %EAX
+  // 228     %reg1039<def> = MOV32rr %ECX<kill>
+  // The early clobber operand ties ECX input to the ECX def.
+  //
+  // The live interval of ECX is represented as this:
+  // %reg20,inf = [46,47:1)[174,230:0)  0 at 174-(230) 1 at 46-(47)
+  // The coalescer has no idea there was a def in the middle of [174,230].
+  if (AValNo->hasRedefByEC())
+    return false;
+
+  // If AValNo is defined as a copy from IntB, we can potentially process this.
+  // Get the instruction that defines this value number.
+  if (!CP.isCoalescable(AValNo->getCopy()))
+    return false;
+
+  // Get the LiveRange in IntB that this value number starts with.
+  LiveInterval::iterator ValLR =
+    IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
+  if (ValLR == IntB.end())
+    return false;
+
+  // Make sure that the end of the live range is inside the same block as
+  // CopyMI.
+  MachineInstr *ValLREndInst =
+    li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
+  if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
+    return false;
+
+  // Okay, we now know that ValLR ends in the same block that the CopyMI
+  // live-range starts.  If there are no intervening live ranges between them in
+  // IntB, we can merge them.
+  if (ValLR+1 != BLR) return false;
+
+  // If a live interval is a physical register, conservatively check if any
+  // of its aliases is overlapping the live interval of the virtual register.
+  // If so, do not coalesce.
+  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
+    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
+      if (li_->hasInterval(*AS) && IntA.overlaps(li_->getInterval(*AS))) {
+        DEBUG({
+            dbgs() << "\t\tInterfere with alias ";
+            li_->getInterval(*AS).print(dbgs(), tri_);
+          });
+        return false;
+      }
+  }
+
+  DEBUG({
+      dbgs() << "Extending: ";
+      IntB.print(dbgs(), tri_);
+    });
+
+  SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
+  // We are about to delete CopyMI, so we need to remove it as the 'instruction
+  // that defines this value #'. Update the valnum with the new defining
+  // instruction #.
+  BValNo->def  = FillerStart;
+  BValNo->setCopy(0);
+
+  // Okay, we can merge them.  We need to insert a new live range:
+  // [ValLR.end, BLR.begin) of either value number, then we merge the
+  // two value numbers.
+  IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));
+
+  // If the IntB live range is assigned to a physical register, and if that
+  // physreg has sub-registers, update their live intervals as well.
+  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
+    for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
+      if (!li_->hasInterval(*SR))
+        continue;
+      LiveInterval &SRLI = li_->getInterval(*SR);
+      SRLI.addRange(LiveRange(FillerStart, FillerEnd,
+                              SRLI.getNextValue(FillerStart, 0,
+                                                li_->getVNInfoAllocator())));
+    }
+  }
+
+  // Okay, merge "B1" into the same value number as "B0".
+  if (BValNo != ValLR->valno) {
+    // If B1 is killed by a PHI, then the merged live range must also be killed
+    // by the same PHI, as B0 and B1 can not overlap.
+    bool HasPHIKill = BValNo->hasPHIKill();
+    IntB.MergeValueNumberInto(BValNo, ValLR->valno);
+    if (HasPHIKill)
+      ValLR->valno->setHasPHIKill(true);
+  }
+  DEBUG({
+      dbgs() << "   result = ";
+      IntB.print(dbgs(), tri_);
+      dbgs() << "\n";
+    });
+
+  // If the source instruction was killing the source register before the
+  // merge, unset the isKill marker given the live range has been extended.
+  int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
+  if (UIdx != -1) {
+    ValLREndInst->getOperand(UIdx).setIsKill(false);
+  }
+
+  // If the copy instruction was killing the destination register before the
+  // merge, find the last use and trim the live range. That will also add the
+  // isKill marker.
+  if (ALR->end == CopyIdx)
+    li_->shrinkToUses(&IntA);
+
+  ++numExtends;
+  return true;
+}
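
As a made-up numeric illustration of the transformation above: suppose IntB has
two ranges, [0;100) holding value B0 (ending just after the 'A3 = B0' copy) and
[200;250) holding B1, which is defined by the 'B1 = A3' copy at index 200.
Since the two ranges are adjacent in IntB's range list with nothing in between,
the code inserts the filler range [100;200), then merges B1 into B0, leaving
IntB live as a single value over [0;250) and making the 'B1 = A3' copy at 200
redundant; the caller can then mark it as joined.
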
+
+/// HasOtherReachingDefs - Return true if there are definitions of IntB
+/// other than BValNo val# that can reach uses of AValNo val# of IntA.
+bool RegisterCoalescer::HasOtherReachingDefs(LiveInterval &IntA,
+                                                    LiveInterval &IntB,
+                                                    VNInfo *AValNo,
+                                                    VNInfo *BValNo) {
+  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
+       AI != AE; ++AI) {
+    if (AI->valno != AValNo) continue;
+    LiveInterval::Ranges::iterator BI =
+      std::upper_bound(IntB.ranges.begin(), IntB.ranges.end(), AI->start);
+    if (BI != IntB.ranges.begin())
+      --BI;
+    for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
+      if (BI->valno == BValNo)
+        continue;
+      if (BI->start <= AI->start && BI->end > AI->start)
+        return true;
+      if (BI->start > AI->start && BI->start < AI->end)
+        return true;
+    }
   }
+  return false;
 }
 
-// Because of the way .a files work, we must force the SimpleRC
-// implementation to be pulled in if the RegisterCoalescer classes are
-// pulled in.  Otherwise we run the risk of RegisterCoalescer being
-// used, but the default implementation not being linked into the tool
-// that uses it.
-DEFINING_FILE_FOR(RegisterCoalescer)
+/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
+/// IntA being the source and IntB being the dest, thus this defines a value
+/// number in IntB.  If the source value number (in IntA) is defined by a
+/// commutable instruction and its other operand is coalesced to the copy dest
+/// register, see if we can transform the copy into a noop by commuting the
+/// definition. For example,
+///
+///  A3 = op A2 B0<kill>
+///    ...
+///  B1 = A3      <- this copy
+///    ...
+///     = op A3   <- more uses
+///
+/// ==>
+///
+///  B2 = op B0 A2<kill>
+///    ...
+///  B1 = B2      <- now an identity copy
+///    ...
+///     = op B2   <- more uses
+///
+/// This returns true if an interval was modified.
+///
+bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
+                                                        MachineInstr *CopyMI) {
+  // FIXME: For now, only eliminate the copy by commuting its def when the
+  // source register is a virtual register. We want to guard against cases
+  // where the copy is a back edge copy and commuting the def lengthens the
+  // live interval of the source register to the entire loop.
+  if (CP.isPhys() && CP.isFlipped())
+    return false;
+
+  // Bail if there is no dst interval.
+  if (!li_->hasInterval(CP.getDstReg()))
+    return false;
+
+  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
+
+  LiveInterval &IntA =
+    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
+  LiveInterval &IntB =
+    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
+
+  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
+  // the example above.
+  VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
+  if (!BValNo || !BValNo->isDefByCopy())
+    return false;
+
+  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
+
+  // AValNo is the value number in A that defines the copy, A3 in the example.
+  VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex());
+  assert(AValNo && "COPY source not live");
+
+  // If other defs can reach uses of this def, then it's not safe to perform
+  // the optimization.
+  if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill())
+    return false;
+  MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
+  if (!DefMI)
+    return false;
+  const MCInstrDesc &MCID = DefMI->getDesc();
+  if (!MCID.isCommutable())
+    return false;
+  // If DefMI is a two-address instruction then commuting it will change the
+  // destination register.
+  int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
+  assert(DefIdx != -1);
+  unsigned UseOpIdx;
+  if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
+    return false;
+  unsigned Op1, Op2, NewDstIdx;
+  if (!tii_->findCommutedOpIndices(DefMI, Op1, Op2))
+    return false;
+  if (Op1 == UseOpIdx)
+    NewDstIdx = Op2;
+  else if (Op2 == UseOpIdx)
+    NewDstIdx = Op1;
+  else
+    return false;
+
+  MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
+  unsigned NewReg = NewDstMO.getReg();
+  if (NewReg != IntB.reg || !NewDstMO.isKill())
+    return false;
+
+  // Make sure there are no other definitions of IntB that would reach the
+  // uses which the new definition can reach.
+  if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
+    return false;
+
+  // Abort if the aliases of IntB.reg have values that are not simply the
+  // clobbers from the superreg.
+  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
+    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
+      if (li_->hasInterval(*AS) &&
+          HasOtherReachingDefs(IntA, li_->getInterval(*AS), AValNo, 0))
+        return false;
+
+  // If some of the uses of IntA.reg are already coalesced away, return false.
+  // It's not possible to determine whether it's safe to perform the coalescing.
+  for (MachineRegisterInfo::use_nodbg_iterator UI = 
+         mri_->use_nodbg_begin(IntA.reg), 
+       UE = mri_->use_nodbg_end(); UI != UE; ++UI) {
+    MachineInstr *UseMI = &*UI;
+    SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
+    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
+    if (ULR == IntA.end())
+      continue;
+    if (ULR->valno == AValNo && JoinedCopies.count(UseMI))
+      return false;
+  }
+
+  DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t'
+               << *DefMI);
+
+  // At this point we have decided that it is legal to do this
+  // transformation.  Start by commuting the instruction.
+  MachineBasicBlock *MBB = DefMI->getParent();
+  MachineInstr *NewMI = tii_->commuteInstruction(DefMI);
+  if (!NewMI)
+    return false;
+  if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
+      TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
+      !mri_->constrainRegClass(IntB.reg, mri_->getRegClass(IntA.reg)))
+    return false;
+  if (NewMI != DefMI) {
+    li_->ReplaceMachineInstrInMaps(DefMI, NewMI);
+    MBB->insert(DefMI, NewMI);
+    MBB->erase(DefMI);
+  }
+  unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
+  NewMI->getOperand(OpIdx).setIsKill();
+
+  // If ALR and BLR overlap and the end of BLR extends beyond end of ALR, e.g.
+  // A = or A, B
+  // ...
+  // B = A
+  // ...
+  // C = A<kill>
+  // ...
+  //   = B
+
+  // Update uses of IntA of the specific Val# with IntB.
+  for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
+         UE = mri_->use_end(); UI != UE;) {
+    MachineOperand &UseMO = UI.getOperand();
+    MachineInstr *UseMI = &*UI;
+    ++UI;
+    if (JoinedCopies.count(UseMI))
+      continue;
+    if (UseMI->isDebugValue()) {
+      // FIXME These don't have an instruction index.  Not clear we have enough
+      // info to decide whether to do this replacement or not.  For now do it.
+      UseMO.setReg(NewReg);
+      continue;
+    }
+    SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
+    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
+    if (ULR == IntA.end() || ULR->valno != AValNo)
+      continue;
+    if (TargetRegisterInfo::isPhysicalRegister(NewReg))
+      UseMO.substPhysReg(NewReg, *tri_);
+    else
+      UseMO.setReg(NewReg);
+    if (UseMI == CopyMI)
+      continue;
+    if (!UseMI->isCopy())
+      continue;
+    if (UseMI->getOperand(0).getReg() != IntB.reg ||
+        UseMI->getOperand(0).getSubReg())
+      continue;
+
+    // This copy will become a noop. If it's defining a new val#, merge it into
+    // BValNo.
+    SlotIndex DefIdx = UseIdx.getDefIndex();
+    VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
+    if (!DVNI)
+      continue;
+    DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
+    assert(DVNI->def == DefIdx);
+    BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
+    markAsJoined(UseMI);
+  }
+
+  // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
+  // is updated.
+  VNInfo *ValNo = BValNo;
+  ValNo->def = AValNo->def;
+  ValNo->setCopy(0);
+  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
+       AI != AE; ++AI) {
+    if (AI->valno != AValNo) continue;
+    IntB.addRange(LiveRange(AI->start, AI->end, ValNo));
+  }
+  DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
+
+  IntA.removeValNo(AValNo);
+  DEBUG(dbgs() << "\t\ttrimmed:  " << IntA << '\n');
+  ++numCommutes;
+  return true;
+}
+
+/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
+/// computation, replace the copy by rematerializing the definition.
+bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
+                                                       bool preserveSrcInt,
+                                                       unsigned DstReg,
+                                                       unsigned DstSubIdx,
+                                                       MachineInstr *CopyMI) {
+  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
+  LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
+  assert(SrcLR != SrcInt.end() && "Live range not found!");
+  VNInfo *ValNo = SrcLR->valno;
+  // If other defs can reach uses of this def, then it's not safe to perform
+  // the optimization.
+  if (ValNo->isPHIDef() || ValNo->isUnused() || ValNo->hasPHIKill())
+    return false;
+  MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
+  if (!DefMI)
+    return false;
+  assert(DefMI && "Defining instruction disappeared");
+  const MCInstrDesc &MCID = DefMI->getDesc();
+  if (!MCID.isAsCheapAsAMove())
+    return false;
+  if (!tii_->isTriviallyReMaterializable(DefMI, AA))
+    return false;
+  bool SawStore = false;
+  if (!DefMI->isSafeToMove(tii_, AA, SawStore))
+    return false;
+  if (MCID.getNumDefs() != 1)
+    return false;
+  if (!DefMI->isImplicitDef()) {
+    // Make sure the copy destination register class fits the instruction
+    // definition register class. The mismatch can happen as a result of earlier
+    // extract_subreg, insert_subreg, subreg_to_reg coalescing.
+    const TargetRegisterClass *RC = tii_->getRegClass(MCID, 0, tri_);
+    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+      if (mri_->getRegClass(DstReg) != RC)
+        return false;
+    } else if (!RC->contains(DstReg))
+      return false;
+  }
+
+  // If destination register has a sub-register index on it, make sure it
+  // matches the instruction register class.
+  if (DstSubIdx) {
+    const MCInstrDesc &MCID = DefMI->getDesc();
+    if (MCID.getNumDefs() != 1)
+      return false;
+    const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
+    const TargetRegisterClass *DstSubRC =
+      DstRC->getSubRegisterRegClass(DstSubIdx);
+    const TargetRegisterClass *DefRC = tii_->getRegClass(MCID, 0, tri_);
+    if (DefRC == DstRC)
+      DstSubIdx = 0;
+    else if (DefRC != DstSubRC)
+      return false;
+  }
+
+  RemoveCopyFlag(DstReg, CopyMI);
+
+  MachineBasicBlock *MBB = CopyMI->getParent();
+  MachineBasicBlock::iterator MII =
+    llvm::next(MachineBasicBlock::iterator(CopyMI));
+  tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
+  MachineInstr *NewMI = prior(MII);
+
+  // CopyMI may have implicit operands, transfer them over to the newly
+  // rematerialized instruction. And update implicit def interval valnos.
+  for (unsigned i = CopyMI->getDesc().getNumOperands(),
+         e = CopyMI->getNumOperands(); i != e; ++i) {
+    MachineOperand &MO = CopyMI->getOperand(i);
+    if (MO.isReg() && MO.isImplicit())
+      NewMI->addOperand(MO);
+    if (MO.isDef())
+      RemoveCopyFlag(MO.getReg(), CopyMI);
+  }
+
+  NewMI->copyImplicitOps(CopyMI);
+  li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
+  CopyMI->eraseFromParent();
+  ReMatCopies.insert(CopyMI);
+  ReMatDefs.insert(DefMI);
+  DEBUG(dbgs() << "Remat: " << *NewMI);
+  ++NumReMats;
+
+  // The source interval can become smaller because we removed a use.
+  if (preserveSrcInt)
+    li_->shrinkToUses(&SrcInt);
+
+  return true;
+}
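
A purely illustrative case (the opcode and register names are examples, not
taken from the patch): if the source interval's only definition is an
as-cheap-as-a-move immediate load such as '%vreg1 = MOV32ri 42' and CopyMI is
'%EAX = COPY %vreg1', the code above re-emits the MOV32ri directly into %EAX at
the copy's position, erases the copy, and, when preserveSrcInt is set, shrinks
%vreg1's interval to whatever uses remain.
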
+
+/// UpdateRegDefsUses - Replace all defs and uses of SrcReg with DstReg and
+/// update the subregister number if it is not zero. If DstReg is a
+/// physical register and the existing subregister number of the def / use
+/// being updated is not zero, make sure to set it to the correct physical
+/// subregister.
+void
+RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
+  bool DstIsPhys = CP.isPhys();
+  unsigned SrcReg = CP.getSrcReg();
+  unsigned DstReg = CP.getDstReg();
+  unsigned SubIdx = CP.getSubIdx();
+
+  // Update LiveDebugVariables.
+  ldv_->renameRegister(SrcReg, DstReg, SubIdx);
+
+  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
+       MachineInstr *UseMI = I.skipInstruction();) {
+    // A PhysReg copy that won't be coalesced can perhaps be rematerialized
+    // instead.
+    if (DstIsPhys) {
+      if (UseMI->isCopy() &&
+          !UseMI->getOperand(1).getSubReg() &&
+          !UseMI->getOperand(0).getSubReg() &&
+          UseMI->getOperand(1).getReg() == SrcReg &&
+          UseMI->getOperand(0).getReg() != SrcReg &&
+          UseMI->getOperand(0).getReg() != DstReg &&
+          !JoinedCopies.count(UseMI) &&
+          ReMaterializeTrivialDef(li_->getInterval(SrcReg), false,
+                                  UseMI->getOperand(0).getReg(), 0, UseMI))
+        continue;
+    }
+
+    SmallVector<unsigned,8> Ops;
+    bool Reads, Writes;
+    tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
+    bool Kills = false, Deads = false;
+
+    // Replace SrcReg with DstReg in all UseMI operands.
+    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+      MachineOperand &MO = UseMI->getOperand(Ops[i]);
+      Kills |= MO.isKill();
+      Deads |= MO.isDead();
+
+      if (DstIsPhys)
+        MO.substPhysReg(DstReg, *tri_);
+      else
+        MO.substVirtReg(DstReg, SubIdx, *tri_);
+    }
+
+    // This instruction is a copy that will be removed.
+    if (JoinedCopies.count(UseMI))
+      continue;
+
+    if (SubIdx) {
+      // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
+      // read-modify-write of DstReg.
+      if (Deads)
+        UseMI->addRegisterDead(DstReg, tri_);
+      else if (!Reads && Writes)
+        UseMI->addRegisterDefined(DstReg, tri_);
+
+      // Kill flags apply to the whole physical register.
+      if (DstIsPhys && Kills)
+        UseMI->addRegisterKilled(DstReg, tri_);
+    }
+
+    DEBUG({
+        dbgs() << "\t\tupdated: ";
+        if (!UseMI->isDebugValue())
+          dbgs() << li_->getInstructionIndex(UseMI) << "\t";
+        dbgs() << *UseMI;
+      });
+  }
+}
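
A hedged illustration with invented register numbers and subregister index: if
CP coalesces %vreg5 into %vreg7 under subregister index sub_32bit, every
operand that mentioned %vreg5 is rewritten to %vreg7:sub_32bit (or through
substPhysReg when the destination is physical); when a subregister index is
involved, dead, kill, and def flags are re-applied to the wider register as
needed, and copies already in JoinedCopies only get their operands rewritten
since they are about to be deleted anyway.
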
+
+/// removeIntervalIfEmpty - Check if the live interval of a physical register
+/// is empty; if so, remove it and also remove the empty intervals of its
+/// sub-registers. Return true if the live interval is removed.
+static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
+                                  const TargetRegisterInfo *tri_) {
+  if (li.empty()) {
+    if (TargetRegisterInfo::isPhysicalRegister(li.reg))
+      for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) {
+        if (!li_->hasInterval(*SR))
+          continue;
+        LiveInterval &sli = li_->getInterval(*SR);
+        if (sli.empty())
+          li_->removeInterval(*SR);
+      }
+    li_->removeInterval(li.reg);
+    return true;
+  }
+  return false;
+}
+
+/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
+/// the val# it defines. If the live interval becomes empty, remove it as well.
+bool RegisterCoalescer::RemoveDeadDef(LiveInterval &li,
+                                             MachineInstr *DefMI) {
+  SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
+  LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
+  if (DefIdx != MLR->valno->def)
+    return false;
+  li.removeValNo(MLR->valno);
+  return removeIntervalIfEmpty(li, li_, tri_);
+}
+
+void RegisterCoalescer::RemoveCopyFlag(unsigned DstReg,
+                                              const MachineInstr *CopyMI) {
+  SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
+  if (li_->hasInterval(DstReg)) {
+    LiveInterval &LI = li_->getInterval(DstReg);
+    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+      if (LR->valno->def == DefIdx)
+        LR->valno->setCopy(0);
+  }
+  if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
+    return;
+  for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
+    if (!li_->hasInterval(*AS))
+      continue;
+    LiveInterval &LI = li_->getInterval(*AS);
+    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
+      if (LR->valno->def == DefIdx)
+        LR->valno->setCopy(0);
+  }
+}
+
+/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
+/// We need to be careful about coalescing a source physical register with a
+/// virtual register. Once the coalescing is done, it cannot be broken and these
+/// are not spillable! If the destination interval uses are far away, think
+/// twice about coalescing them!
+bool RegisterCoalescer::shouldJoinPhys(CoalescerPair &CP) {
+  bool Allocatable = li_->isAllocatable(CP.getDstReg());
+  LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
+
+  /// Always join simple intervals that are defined by a single copy from a
+  /// reserved register. This doesn't increase register pressure, so it is
+  /// always beneficial.
+  if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue())
+    return true;
+
+  if (!EnablePhysicalJoin) {
+    DEBUG(dbgs() << "\tPhysreg joins disabled.\n");
+    return false;
+  }
+
+  // Only coalesce to an allocatable physreg; we don't want to risk modifying
+  // reserved registers.
+  if (!Allocatable) {
+    DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
+    return false;  // Not coalescable.
+  }
+
+  // Don't join with physregs that have a ridiculous number of live
+  // ranges. The data structure performance is really bad when that
+  // happens.
+  if (li_->hasInterval(CP.getDstReg()) &&
+      li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
+    ++numAborts;
+    DEBUG(dbgs()
+          << "\tPhysical register live interval too complicated, abort!\n");
+    return false;
+  }
+
+  // FIXME: Why are we skipping this test for partial copies?
+  //        CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
+  if (!CP.isPartial()) {
+    const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
+    unsigned Threshold = RegClassInfo.getNumAllocatableRegs(RC) * 2;
+    unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
+    if (Length > Threshold) {
+      ++numAborts;
+      DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
+      return false;
+    }
+  }
+  return true;
+}
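
As a rough worked example of that last cutoff (numbers invented): with 16
allocatable registers in the source register class the threshold is 32, so a
virtual interval whose approximate instruction count is around 50 would not be
coalesced into the physreg, while one spanning 20 instructions would still be
considered.
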
+
+/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
+/// two virtual registers from different register classes.
+bool
+RegisterCoalescer::isWinToJoinCrossClass(unsigned SrcReg,
+                                             unsigned DstReg,
+                                             const TargetRegisterClass *SrcRC,
+                                             const TargetRegisterClass *DstRC,
+                                             const TargetRegisterClass *NewRC) {
+  unsigned NewRCCount = RegClassInfo.getNumAllocatableRegs(NewRC);
+  // This heuristic is good enough in practice, but it's obviously not *right*.
+  // 4 is a magic number that works well enough for x86, ARM, etc. It filters
+  // out all but the most restrictive register classes.
+  if (NewRCCount > 4 ||
+      // Early exit if the function is fairly small, coalesce aggressively if
+      // that's the case. For really special register classes with 3 or
+      // fewer registers, be a bit more careful.
+      (li_->getFuncInstructionCount() / NewRCCount) < 8)
+    return true;
+  LiveInterval &SrcInt = li_->getInterval(SrcReg);
+  LiveInterval &DstInt = li_->getInterval(DstReg);
+  unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
+  unsigned DstSize = li_->getApproximateInstructionCount(DstInt);
+
+  // Coalesce aggressively if the intervals are small compared to the number of
+  // registers in the new class. The number 4 is fairly arbitrary, chosen to be
+  // less aggressive than the 8 used for the whole function size.
+  const unsigned ThresSize = 4 * NewRCCount;
+  if (SrcSize <= ThresSize && DstSize <= ThresSize)
+    return true;
+
+  // Estimate *register use density*. If it doubles or more, abort.
+  unsigned SrcUses = std::distance(mri_->use_nodbg_begin(SrcReg),
+                                   mri_->use_nodbg_end());
+  unsigned DstUses = std::distance(mri_->use_nodbg_begin(DstReg),
+                                   mri_->use_nodbg_end());
+  unsigned NewUses = SrcUses + DstUses;
+  unsigned NewSize = SrcSize + DstSize;
+  if (SrcRC != NewRC && SrcSize > ThresSize) {
+    unsigned SrcRCCount = RegClassInfo.getNumAllocatableRegs(SrcRC);
+    if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount)
+      return false;
+  }
+  if (DstRC != NewRC && DstSize > ThresSize) {
+    unsigned DstRCCount = RegClassInfo.getNumAllocatableRegs(DstRC);
+    if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount)
+      return false;
+  }
+  return true;
+}
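
The profitability test above ultimately reduces to a use-density comparison
that is easy to check by hand. Below is a minimal, self-contained C++ sketch of
just that arithmetic; the function name and all of the counts are invented for
illustration and only mirror the NewUses*SrcSize*SrcRCCount >
2*SrcUses*NewSize*NewRCCount comparison used above.

#include <cstdio>

// Standalone sketch of the use-density check in isWinToJoinCrossClass; all
// names and numbers here are invented examples, not data from the patch.
static bool densityMoreThanDoubles(unsigned OldUses, unsigned OldSize,
                                   unsigned OldRCCount, unsigned NewUses,
                                   unsigned NewSize, unsigned NewRCCount) {
  // Density is uses / (instructions * allocatable registers); cross-multiplying
  // the comparison "new density > 2 * old density" avoids any division.
  return NewUses * OldSize * OldRCCount > 2 * OldUses * NewSize * NewRCCount;
}

int main() {
  // A 40-instruction interval with 10 uses in a 16-register class, joined into
  // an 80-instruction interval with 30 uses in a 4-register class:
  // 30*40*16 = 19200 > 2*10*80*4 = 6400, so the join would be rejected.
  bool Reject = densityMoreThanDoubles(10, 40, 16, 30, 80, 4);
  std::printf("reject cross-class join: %s\n", Reject ? "yes" : "no");
  return 0;
}
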
+
+
+/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
+/// which are the src/dst of the copy instruction CopyMI.  This returns true
+/// if the copy was successfully coalesced away. If it is not currently
+/// possible to coalesce this interval, but it may be possible if other
+/// things get coalesced, then it returns true by reference in 'Again'.
+bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {
+
+  Again = false;
+  if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
+    return false; // Already done.
+
+  DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
+
+  CoalescerPair CP(*tii_, *tri_);
+  if (!CP.setRegisters(CopyMI)) {
+    DEBUG(dbgs() << "\tNot coalescable.\n");
+    return false;
+  }
+
+  // If they are already joined we continue.
+  if (CP.getSrcReg() == CP.getDstReg()) {
+    markAsJoined(CopyMI);
+    DEBUG(dbgs() << "\tCopy already coalesced.\n");
+    return false;  // Not coalescable.
+  }
+
+  DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_)
+               << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
+               << "\n");
+
+  // Enforce policies.
+  if (CP.isPhys()) {
+    if (!shouldJoinPhys(CP)) {
+      // Before giving up coalescing, if definition of source is defined by
+      // trivial computation, try rematerializing it.
+      if (!CP.isFlipped() &&
+          ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
+                                  CP.getDstReg(), 0, CopyMI))
+        return true;
+      return false;
+    }
+  } else {
+    // Avoid constraining virtual register regclass too much.
+    if (CP.isCrossClass()) {
+      DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n");
+      if (DisableCrossClassJoin) {
+        DEBUG(dbgs() << "\tCross-class joins disabled.\n");
+        return false;
+      }
+      if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
+                                 mri_->getRegClass(CP.getSrcReg()),
+                                 mri_->getRegClass(CP.getDstReg()),
+                                 CP.getNewRC())) {
+        DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n");
+        Again = true;  // May be possible to coalesce later.
+        return false;
+      }
+    }
+
+    // When possible, let DstReg be the larger interval.
+    if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
+                           li_->getInterval(CP.getDstReg()).ranges.size())
+      CP.flip();
+  }
+
+  // Okay, attempt to join these two intervals.  On failure, this returns false.
+  // Otherwise, if one of the intervals being joined is a physreg, this method
+  // always canonicalizes DstInt to be it.  The output "SrcInt" will not have
+  // been modified, so we can use this information below to update aliases.
+  if (!JoinIntervals(CP)) {
+    // Coalescing failed.
+
+    // If definition of source is defined by trivial computation, try
+    // rematerializing it.
+    if (!CP.isFlipped() &&
+        ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
+                                CP.getDstReg(), 0, CopyMI))
+      return true;
+
+    // If we can eliminate the copy without merging the live ranges, do so now.
+    if (!CP.isPartial()) {
+      if (AdjustCopiesBackFrom(CP, CopyMI) ||
+          RemoveCopyByCommutingDef(CP, CopyMI)) {
+        markAsJoined(CopyMI);
+        DEBUG(dbgs() << "\tTrivial!\n");
+        return true;
+      }
+    }
+
+    // Otherwise, we are unable to join the intervals.
+    DEBUG(dbgs() << "\tInterference!\n");
+    Again = true;  // May be possible to coalesce later.
+    return false;
+  }
+
+  // Coalescing to a virtual register that is of a sub-register class of the
+  // other. Make sure the resulting register is set to the right register class.
+  if (CP.isCrossClass()) {
+    ++numCrossRCs;
+    mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
+  }
+
+  // Remember to delete the copy instruction.
+  markAsJoined(CopyMI);
+
+  UpdateRegDefsUses(CP);
+
+  // If we have extended the live range of a physical register, make sure we
+  // update live-in lists as well.
+  if (CP.isPhys()) {
+    SmallVector<MachineBasicBlock*, 16> BlockSeq;
+    // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
+    // ranges for this, and they are preserved.
+    LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
+    for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
+         I != E; ++I ) {
+      li_->findLiveInMBBs(I->start, I->end, BlockSeq);
+      for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
+        MachineBasicBlock &block = *BlockSeq[idx];
+        if (!block.isLiveIn(CP.getDstReg()))
+          block.addLiveIn(CP.getDstReg());
+      }
+      BlockSeq.clear();
+    }
+  }
+
+  // SrcReg is guaranteed to be the register whose live interval is being
+  // merged.
+  li_->removeInterval(CP.getSrcReg());
+
+  // Update regalloc hint.
+  tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);
+
+  DEBUG({
+    LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
+    dbgs() << "\tJoined. Result = ";
+    DstInt.print(dbgs(), tri_);
+    dbgs() << "\n";
+  });
+
+  ++numJoins;
+  return true;
+}
+
+/// ComputeUltimateVN - Assuming we are going to join two live intervals,
+/// compute what the resultant value numbers for each value in the two input
+/// ranges will be.  This is complicated by copies between the two which can
+/// and will commonly cause multiple value numbers to be merged into one.
+///
+/// VN is the value number that we're trying to resolve.  NewVNInfo keeps
+/// track of the new value number assignments for the result LiveInterval.
+/// ThisFromOther/OtherFromThis are maps that record whether a value in this
+/// interval or the other one is a copy from the opposite interval.
+/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
+/// already been assigned.
+///
+/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
+/// contains the value number the copy is from.
+///
+static unsigned ComputeUltimateVN(VNInfo *VNI,
+                                  SmallVector<VNInfo*, 16> &NewVNInfo,
+                                  DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
+                                  DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
+                                  SmallVector<int, 16> &ThisValNoAssignments,
+                                  SmallVector<int, 16> &OtherValNoAssignments) {
+  unsigned VN = VNI->id;
+
+  // If the VN has already been computed, just return it.
+  if (ThisValNoAssignments[VN] >= 0)
+    return ThisValNoAssignments[VN];
+  assert(ThisValNoAssignments[VN] != -2 && "Cyclic value numbers");
+
+  // If this val is not a copy from the other val, then it must be a new value
+  // number in the destination.
+  DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
+  if (I == ThisFromOther.end()) {
+    NewVNInfo.push_back(VNI);
+    return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
+  }
+  VNInfo *OtherValNo = I->second;
+
+  // Otherwise, this *is* a copy from the RHS.  If the other side has already
+  // been computed, return it.
+  if (OtherValNoAssignments[OtherValNo->id] >= 0)
+    return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];
+
+  // Mark this value number as currently being computed, then ask what the
+  // ultimate value # of the other value is.
+  ThisValNoAssignments[VN] = -2;
+  unsigned UltimateVN =
+    ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
+                      OtherValNoAssignments, ThisValNoAssignments);
+  return ThisValNoAssignments[VN] = UltimateVN;
+}
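
The subtle part of ComputeUltimateVN is the mutual recursion between the two
assignment arrays, so here is a small standalone C++ analogue using plain ints
for value numbers. The names and the example data are invented; only the
resolution scheme (fresh number when a value is not a copy, otherwise follow
the map into the other interval, with -2 marking an in-progress value) mirrors
the function above.

#include <cassert>
#include <cstdio>
#include <map>
#include <vector>

// Resolve value number VN of one interval to its index in the joined
// numbering, recursing into the other interval when VN is a copy of one of
// its values.  Mirrors the structure of ComputeUltimateVN above.
static int resolveVN(int VN, std::vector<int> &NewVNs,
                     const std::map<int, int> &ThisFromOther,
                     const std::map<int, int> &OtherFromThis,
                     std::vector<int> &ThisAssign,
                     std::vector<int> &OtherAssign) {
  if (ThisAssign[VN] >= 0)
    return ThisAssign[VN];
  assert(ThisAssign[VN] != -2 && "Cyclic value numbers");

  std::map<int, int>::const_iterator I = ThisFromOther.find(VN);
  if (I == ThisFromOther.end()) {
    NewVNs.push_back(VN);                         // brand-new value number
    return ThisAssign[VN] = (int)NewVNs.size() - 1;
  }

  int OtherVN = I->second;
  if (OtherAssign[OtherVN] >= 0)                  // already resolved on the
    return ThisAssign[VN] = OtherAssign[OtherVN]; // other side
  ThisAssign[VN] = -2;                            // mark as in progress
  return ThisAssign[VN] = resolveVN(OtherVN, NewVNs, OtherFromThis,
                                    ThisFromOther, OtherAssign, ThisAssign);
}

int main() {
  // Hypothetical join: LHS has value numbers {0,1}, RHS has {0}, and LHS:1 is
  // a copy of RHS:0, so the joined interval should end up with two values.
  std::map<int, int> LHSFromRHS, RHSFromLHS;
  LHSFromRHS[1] = 0;
  std::vector<int> LHSAssign(2, -1), RHSAssign(1, -1), NewVNs;

  for (int VN = 0; VN != 2; ++VN)
    resolveVN(VN, NewVNs, LHSFromRHS, RHSFromLHS, LHSAssign, RHSAssign);
  for (int VN = 0; VN != 1; ++VN)
    resolveVN(VN, NewVNs, RHSFromLHS, LHSFromRHS, RHSAssign, LHSAssign);

  std::printf("values after join: %u\n", (unsigned)NewVNs.size()); // prints 2
  return 0;
}
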
+
+
+// Find out if we have something like
+// A = X
+// B = X
+// if so, we can pretend this is actually
+// A = X
+// B = A
+// which allows us to coalesce A and B.
+// VNI is the definition of B. LR is the live range of A that includes
+// the slot just before B. If we return true, we add "B = X" to DupCopies.
+static bool RegistersDefinedFromSameValue(LiveIntervals &li,
+                                          const TargetRegisterInfo &tri,
+                                          CoalescerPair &CP,
+                                          VNInfo *VNI,
+                                          LiveRange *LR,
+                                     SmallVector<MachineInstr*, 8> &DupCopies) {
+  // FIXME: This is very conservative. For example, we don't handle
+  // physical registers.
+
+  MachineInstr *MI = VNI->getCopy();
+
+  if (!MI->isFullCopy() || CP.isPartial() || CP.isPhys())
+    return false;
+
+  unsigned Dst = MI->getOperand(0).getReg();
+  unsigned Src = MI->getOperand(1).getReg();
+
+  // FIXME: If "B = X" kills X, we have to move the kill back to its
+  // previous use. For now we just avoid the optimization in that case.
+  LiveInterval &SrcInt = li.getInterval(Src);
+  if (SrcInt.killedAt(VNI->def))
+    return false;
+
+  if (!TargetRegisterInfo::isVirtualRegister(Src) ||
+      !TargetRegisterInfo::isVirtualRegister(Dst))
+    return false;
+
+  unsigned A = CP.getDstReg();
+  unsigned B = CP.getSrcReg();
+
+  if (B == Dst)
+    std::swap(A, B);
+  assert(Dst == A);
+
+  VNInfo *Other = LR->valno;
+  if (!Other->isDefByCopy())
+    return false;
+  const MachineInstr *OtherMI = Other->getCopy();
+
+  if (!OtherMI->isFullCopy())
+    return false;
+
+  unsigned OtherDst = OtherMI->getOperand(0).getReg();
+  unsigned OtherSrc = OtherMI->getOperand(1).getReg();
+
+  if (!TargetRegisterInfo::isVirtualRegister(OtherSrc) ||
+      !TargetRegisterInfo::isVirtualRegister(OtherDst))
+    return false;
+
+  assert(OtherDst == B);
+
+  if (Src != OtherSrc)
+    return false;
+
+  // If the copies use two different value numbers of X, we cannot merge
+  // A and B.
+  if (SrcInt.getVNInfoAt(Other->def) != SrcInt.getVNInfoAt(VNI->def))
+    return false;
+
+  DupCopies.push_back(MI);
+
+  return true;
+}
+
+/// JoinIntervals - Attempt to join these two intervals.  On failure, this
+/// returns false.
+bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
+  LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
+  DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });
+
+  // If a live interval is a physical register, check for interference with any
+  // aliases. The interference check implemented here is a bit more conservative
+  // than the full interference check below. We allow overlapping live ranges
+  // only when one is a copy of the other.
+  if (CP.isPhys()) {
+    for (const unsigned *AS = tri_->getAliasSet(CP.getDstReg()); *AS; ++AS){
+      if (!li_->hasInterval(*AS))
+        continue;
+      const LiveInterval &LHS = li_->getInterval(*AS);
+      LiveInterval::const_iterator LI = LHS.begin();
+      for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
+           RI != RE; ++RI) {
+        LI = std::lower_bound(LI, LHS.end(), RI->start);
+        // Does LHS have an overlapping live range starting before RI?
+        if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
+            (RI->start != RI->valno->def ||
+             !CP.isCoalescable(li_->getInstructionFromIndex(RI->start)))) {
+          DEBUG({
+            dbgs() << "\t\tInterference from alias: ";
+            LHS.print(dbgs(), tri_);
+            dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
+          });
+          return false;
+        }
+
+        // Check that LHS ranges beginning in this range are copies.
+        for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
+          if (LI->start != LI->valno->def ||
+              !CP.isCoalescable(li_->getInstructionFromIndex(LI->start))) {
+            DEBUG({
+              dbgs() << "\t\tInterference from alias: ";
+              LHS.print(dbgs(), tri_);
+              dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
+            });
+            return false;
+          }
+        }
+      }
+    }
+  }
+
+  // Compute the final value assignment, assuming that the live ranges can be
+  // coalesced.
+  SmallVector<int, 16> LHSValNoAssignments;
+  SmallVector<int, 16> RHSValNoAssignments;
+  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
+  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
+  SmallVector<VNInfo*, 16> NewVNInfo;
+
+  SmallVector<MachineInstr*, 8> DupCopies;
+
+  LiveInterval &LHS = li_->getOrCreateInterval(CP.getDstReg());
+  DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });
+
+  // Loop over the value numbers of the LHS, seeing if any are defined from
+  // the RHS.
+  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    if (VNI->isUnused() || !VNI->isDefByCopy())  // Src not defined by a copy?
+      continue;
+
+    // Never join with a register that has EarlyClobber redefs.
+    if (VNI->hasRedefByEC())
+      return false;
+
+    // Figure out the value # from the RHS.
+    LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+    // The copy could be to an aliased physreg.
+    if (!lr) continue;
+
+    // DstReg is known to be a register in the LHS interval.  If the src is
+    // from the RHS interval, we can use its value #.
+    MachineInstr *MI = VNI->getCopy();
+    if (!CP.isCoalescable(MI) &&
+        !RegistersDefinedFromSameValue(*li_, *tri_, CP, VNI, lr, DupCopies))
+      continue;
+
+    LHSValsDefinedFromRHS[VNI] = lr->valno;
+  }
+
+  // Loop over the value numbers of the RHS, seeing if any are defined from
+  // the LHS.
+  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    if (VNI->isUnused() || !VNI->isDefByCopy())  // Src not defined by a copy?
+      continue;
+
+    // Never join with a register that has EarlyClobber redefs.
+    if (VNI->hasRedefByEC())
+      return false;
+
+    // Figure out the value # from the LHS.
+    LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
+    // The copy could be to an aliased physreg.
+    if (!lr) continue;
+
+    // DstReg is known to be a register in the RHS interval.  If the src is
+    // from the LHS interval, we can use its value #.
+    MachineInstr *MI = VNI->getCopy();
+    if (!CP.isCoalescable(MI) &&
+        !RegistersDefinedFromSameValue(*li_, *tri_, CP, VNI, lr, DupCopies))
+      continue;
+
+    RHSValsDefinedFromLHS[VNI] = lr->valno;
+  }
+
+  LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
+  RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
+  NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
+
+  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    unsigned VN = VNI->id;
+    if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+      continue;
+    ComputeUltimateVN(VNI, NewVNInfo,
+                      LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
+                      LHSValNoAssignments, RHSValNoAssignments);
+  }
+  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
+       i != e; ++i) {
+    VNInfo *VNI = *i;
+    unsigned VN = VNI->id;
+    if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
+      continue;
+    // If this value number isn't a copy from the LHS, it's a new number.
+    if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
+      NewVNInfo.push_back(VNI);
+      RHSValNoAssignments[VN] = NewVNInfo.size()-1;
+      continue;
+    }
+
+    ComputeUltimateVN(VNI, NewVNInfo,
+                      RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
+                      RHSValNoAssignments, LHSValNoAssignments);
+  }
+
+  // Armed with the mappings of LHS/RHS values to ultimate values, walk the
+  // interval lists to see if these intervals are coalescable.
+  LiveInterval::const_iterator I = LHS.begin();
+  LiveInterval::const_iterator IE = LHS.end();
+  LiveInterval::const_iterator J = RHS.begin();
+  LiveInterval::const_iterator JE = RHS.end();
+
+  // Skip ahead until the first place of potential sharing.
+  if (I != IE && J != JE) {
+    if (I->start < J->start) {
+      I = std::upper_bound(I, IE, J->start);
+      if (I != LHS.begin()) --I;
+    } else if (J->start < I->start) {
+      J = std::upper_bound(J, JE, I->start);
+      if (J != RHS.begin()) --J;
+    }
+  }
+
+  while (I != IE && J != JE) {
+    // Determine if these two live ranges overlap.
+    bool Overlaps;
+    if (I->start < J->start) {
+      Overlaps = I->end > J->start;
+    } else {
+      Overlaps = J->end > I->start;
+    }
+
+    // If so, check value # info to determine if they are really different.
+    if (Overlaps) {
+      // If the overlapping live ranges map to the same value number in the
+      // result live range, we can still coalesce them.  If not, we can't.
+      if (LHSValNoAssignments[I->valno->id] !=
+          RHSValNoAssignments[J->valno->id])
+        return false;
+      // If it's re-defined by an early clobber somewhere in the live range,
+      // then conservatively abort coalescing.
+      if (NewVNInfo[LHSValNoAssignments[I->valno->id]]->hasRedefByEC())
+        return false;
+    }
+
+    if (I->end < J->end)
+      ++I;
+    else
+      ++J;
+  }
+
+  // Update kill info. Some live ranges are extended due to copy coalescing.
+  for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
+         E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
+    VNInfo *VNI = I->first;
+    unsigned LHSValID = LHSValNoAssignments[VNI->id];
+    if (VNI->hasPHIKill())
+      NewVNInfo[LHSValID]->setHasPHIKill(true);
+  }
+
+  // Update kill info. Some live ranges are extended due to copy coalescing.
+  for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
+         E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
+    VNInfo *VNI = I->first;
+    unsigned RHSValID = RHSValNoAssignments[VNI->id];
+    if (VNI->hasPHIKill())
+      NewVNInfo[RHSValID]->setHasPHIKill(true);
+  }
+
+  if (LHSValNoAssignments.empty())
+    LHSValNoAssignments.push_back(-1);
+  if (RHSValNoAssignments.empty())
+    RHSValNoAssignments.push_back(-1);
+
+  for (SmallVector<MachineInstr*, 8>::iterator I = DupCopies.begin(),
+         E = DupCopies.end(); I != E; ++I) {
+    MachineInstr *MI = *I;
+
+    // We have pretended that the assignment to B in
+    // A = X
+    // B = X
+    // was actually a copy from A. Now that we decided to coalesce A and B,
+    // transform the code into
+    // A = X
+    // X = X
+    // and mark the resulting identity copy as coalesced to keep the illusion.
+    unsigned Src = MI->getOperand(1).getReg();
+    MI->getOperand(0).substVirtReg(Src, 0, *tri_);
+
+    markAsJoined(MI);
+  }
+
+  // If we get here, we know that we can coalesce the live ranges.  Ask the
+  // intervals to coalesce themselves now.
+  LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
+           mri_);
+  return true;
+}
+
+namespace {
+  // DepthMBBCompare - Comparison predicate that sorts first on the loop depth
+  // of the basic block (the unsigned), then on CFG connectivity, and finally
+  // on the MBB number.
+  struct DepthMBBCompare {
+    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
+    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
+      // Deeper loops first
+      if (LHS.first != RHS.first)
+        return LHS.first > RHS.first;
+
+      // Prefer blocks that are more connected in the CFG. This takes care of
+      // the most difficult copies first while intervals are short.
+      unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
+      unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
+      if (cl != cr)
+        return cl > cr;
+
+      // As a last resort, sort by block number.
+      return LHS.second->getNumber() < RHS.second->getNumber();
+    }
+  };
+}
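
The ordering this predicate imposes can be exercised in isolation.  Below is a
minimal standalone sketch, with a plain struct standing in for
MachineBasicBlock; the struct, the comparison function and the sample blocks
are illustrative only, not LLVM API.

    // Deeper loops first, then more CFG-connected blocks, then block number.
    #include <algorithm>
    #include <cassert>
    #include <utility>
    #include <vector>

    struct Block {                  // stand-in for MachineBasicBlock
      int Number;
      unsigned Preds, Succs;        // pred_size() / succ_size()
    };
    typedef std::pair<unsigned, Block*> DepthBlockPair;  // (loop depth, block)

    static bool DeeperFirst(const DepthBlockPair &L, const DepthBlockPair &R) {
      if (L.first != R.first)
        return L.first > R.first;                   // deeper loops first
      unsigned cl = L.second->Preds + L.second->Succs;
      unsigned cr = R.second->Preds + R.second->Succs;
      if (cl != cr)
        return cl > cr;                             // more connected first
      return L.second->Number < R.second->Number;   // then lower block number
    }

    int main() {
      Block Entry = {0, 0, 1}, Header = {1, 2, 2}, Latch = {2, 1, 2},
            Exit = {3, 1, 0};
      std::vector<DepthBlockPair> MBBs;
      MBBs.push_back(std::make_pair(0u, &Entry));
      MBBs.push_back(std::make_pair(1u, &Header));
      MBBs.push_back(std::make_pair(1u, &Latch));
      MBBs.push_back(std::make_pair(0u, &Exit));
      std::sort(MBBs.begin(), MBBs.end(), DeeperFirst);
      // Loop blocks come first; Header (4 CFG edges) precedes Latch (3);
      // outside the loop, Entry precedes Exit by block number.
      assert(MBBs[0].second == &Header && MBBs[1].second == &Latch);
      assert(MBBs[2].second == &Entry  && MBBs[3].second == &Exit);
      return 0;
    }
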
+
+void RegisterCoalescer::CopyCoalesceInMBB(MachineBasicBlock *MBB,
+                                          std::vector<MachineInstr*> &TryAgain) {
+  DEBUG(dbgs() << MBB->getName() << ":\n");
+
+  SmallVector<MachineInstr*, 8> VirtCopies;
+  SmallVector<MachineInstr*, 8> PhysCopies;
+  SmallVector<MachineInstr*, 8> ImpDefCopies;
+  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
+       MII != E;) {
+    MachineInstr *Inst = MII++;
+
+    // If this isn't a copy or an extract_subreg, we can't join intervals.
+    unsigned SrcReg, DstReg;
+    if (Inst->isCopy()) {
+      DstReg = Inst->getOperand(0).getReg();
+      SrcReg = Inst->getOperand(1).getReg();
+    } else if (Inst->isSubregToReg()) {
+      DstReg = Inst->getOperand(0).getReg();
+      SrcReg = Inst->getOperand(2).getReg();
+    } else
+      continue;
+
+    bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
+    bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
+    if (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty())
+      ImpDefCopies.push_back(Inst);
+    else if (SrcIsPhys || DstIsPhys)
+      PhysCopies.push_back(Inst);
+    else
+      VirtCopies.push_back(Inst);
+  }
+
+  // Try coalescing implicit copies and insert_subreg <undef> first,
+  // followed by copies to / from physical registers, then finally copies
+  // from virtual registers to virtual registers.
+  for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) {
+    MachineInstr *TheCopy = ImpDefCopies[i];
+    bool Again = false;
+    if (!JoinCopy(TheCopy, Again))
+      if (Again)
+        TryAgain.push_back(TheCopy);
+  }
+  for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
+    MachineInstr *TheCopy = PhysCopies[i];
+    bool Again = false;
+    if (!JoinCopy(TheCopy, Again))
+      if (Again)
+        TryAgain.push_back(TheCopy);
+  }
+  for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
+    MachineInstr *TheCopy = VirtCopies[i];
+    bool Again = false;
+    if (!JoinCopy(TheCopy, Again))
+      if (Again)
+        TryAgain.push_back(TheCopy);
+  }
+}
+
+void RegisterCoalescer::joinIntervals() {
+  DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
+
+  std::vector<MachineInstr*> TryAgainList;
+  if (loopInfo->empty()) {
+    // If there are no loops in the function, join intervals in function order.
+    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
+         I != E; ++I)
+      CopyCoalesceInMBB(I, TryAgainList);
+  } else {
+    // Otherwise, join intervals in inner loops before other intervals.
+    // Unfortunately we can't just iterate over loop hierarchy here because
+    // there may be more MBB's than BB's.  Collect MBB's for sorting.
+
+    // Join intervals in the function prolog first. We want to join physical
+    // registers with virtual registers before the intervals get too long.
+    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
+    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();I != E;++I){
+      MachineBasicBlock *MBB = I;
+      MBBs.push_back(std::make_pair(loopInfo->getLoopDepth(MBB), I));
+    }
+
+    // Sort by loop depth.
+    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());
+
+    // Finally, join intervals in loop nest order.
+    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
+      CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
+  }
+
+  // Joining intervals can allow other intervals to be joined.  Iteratively join
+  // until we make no progress.
+  bool ProgressMade = true;
+  while (ProgressMade) {
+    ProgressMade = false;
+
+    for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
+      MachineInstr *&TheCopy = TryAgainList[i];
+      if (!TheCopy)
+        continue;
+
+      bool Again = false;
+      bool Success = JoinCopy(TheCopy, Again);
+      if (Success || !Again) {
+        TheCopy = 0;  // Mark this one as done.
+        ProgressMade = true;
+      }
+    }
+  }
+}
+
+void RegisterCoalescer::releaseMemory() {
+  JoinedCopies.clear();
+  ReMatCopies.clear();
+  ReMatDefs.clear();
+}
+
+bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
+  mf_ = &fn;
+  mri_ = &fn.getRegInfo();
+  tm_ = &fn.getTarget();
+  tri_ = tm_->getRegisterInfo();
+  tii_ = tm_->getInstrInfo();
+  li_ = &getAnalysis<LiveIntervals>();
+  ldv_ = &getAnalysis<LiveDebugVariables>();
+  AA = &getAnalysis<AliasAnalysis>();
+  loopInfo = &getAnalysis<MachineLoopInfo>();
+
+  DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
+               << "********** Function: "
+               << ((Value*)mf_->getFunction())->getName() << '\n');
+
+  if (VerifyCoalescing)
+    mf_->verify(this, "Before register coalescing");
+
+  RegClassInfo.runOnMachineFunction(fn);
+
+  // Join (coalesce) intervals if requested.
+  if (EnableJoining) {
+    joinIntervals();
+    DEBUG({
+        dbgs() << "********** INTERVALS POST JOINING **********\n";
+        for (LiveIntervals::iterator I = li_->begin(), E = li_->end();
+             I != E; ++I){
+          I->second->print(dbgs(), tri_);
+          dbgs() << "\n";
+        }
+      });
+  }
+
+  // Perform a final pass over the instructions: delete the coalesced copies
+  // (or turn them into KILLs), remove now-dead remat'ed defs, and clear stale
+  // kill flags.
+  SmallVector<unsigned, 4> DeadDefs;
+  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
+       mbbi != mbbe; ++mbbi) {
+    MachineBasicBlock* mbb = mbbi;
+    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
+         mii != mie; ) {
+      MachineInstr *MI = mii;
+      if (JoinedCopies.count(MI)) {
+        // Delete all coalesced copies.
+        bool DoDelete = true;
+        assert(MI->isCopyLike() && "Unrecognized copy instruction");
+        unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
+        if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+            MI->getNumOperands() > 2)
+          // Do not delete an extract_subreg or insert_subreg of a physical
+          // register unless the definition is dead, e.g.
+          // %D0<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
+          // or else the scavenger may complain. LowerSubregs will
+          // delete them later.
+          DoDelete = false;
+
+        if (MI->allDefsAreDead()) {
+          if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
+              li_->hasInterval(SrcReg))
+            li_->shrinkToUses(&li_->getInterval(SrcReg));
+          DoDelete = true;
+        }
+        if (!DoDelete) {
+          // We need the instruction to adjust liveness, so make it a KILL.
+          if (MI->isSubregToReg()) {
+            MI->RemoveOperand(3);
+            MI->RemoveOperand(1);
+          }
+          MI->setDesc(tii_->get(TargetOpcode::KILL));
+          mii = llvm::next(mii);
+        } else {
+          li_->RemoveMachineInstrFromMaps(MI);
+          mii = mbbi->erase(mii);
+          ++numPeep;
+        }
+        continue;
+      }
+
+      // Now check if this is a remat'ed def instruction which is now dead.
+      if (ReMatDefs.count(MI)) {
+        bool isDead = true;
+        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+          const MachineOperand &MO = MI->getOperand(i);
+          if (!MO.isReg())
+            continue;
+          unsigned Reg = MO.getReg();
+          if (!Reg)
+            continue;
+          if (TargetRegisterInfo::isVirtualRegister(Reg))
+            DeadDefs.push_back(Reg);
+          if (MO.isDead())
+            continue;
+          if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
+              !mri_->use_nodbg_empty(Reg)) {
+            isDead = false;
+            break;
+          }
+        }
+        if (isDead) {
+          while (!DeadDefs.empty()) {
+            unsigned DeadDef = DeadDefs.back();
+            DeadDefs.pop_back();
+            RemoveDeadDef(li_->getInterval(DeadDef), MI);
+          }
+          li_->RemoveMachineInstrFromMaps(mii);
+          mii = mbbi->erase(mii);
+          continue;
+        } else
+          DeadDefs.clear();
+      }
+
+      ++mii;
+
+      // Check for now unnecessary kill flags.
+      if (li_->isNotInMIMap(MI)) continue;
+      SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
+      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+        MachineOperand &MO = MI->getOperand(i);
+        if (!MO.isReg() || !MO.isKill()) continue;
+        unsigned reg = MO.getReg();
+        if (!reg || !li_->hasInterval(reg)) continue;
+        if (!li_->getInterval(reg).killedAt(DefIdx)) {
+          MO.setIsKill(false);
+          continue;
+        }
+        // When leaving a kill flag on a physreg, check if any subregs should
+        // remain alive.
+        if (!TargetRegisterInfo::isPhysicalRegister(reg))
+          continue;
+        for (const unsigned *SR = tri_->getSubRegisters(reg);
+             unsigned S = *SR; ++SR)
+          if (li_->hasInterval(S) && li_->getInterval(S).liveAt(DefIdx))
+            MI->addRegisterDefined(S, tri_);
+      }
+    }
+  }
+
+  DEBUG(dump());
+  DEBUG(ldv_->dump());
+  if (VerifyCoalescing)
+    mf_->verify(this, "After register coalescing");
+  return true;
+}
+
+/// print - Implement the dump method.
+void RegisterCoalescer::print(raw_ostream &O, const Module* m) const {
+   li_->print(O, m);
+}
+
+RegisterCoalescer *llvm::createRegisterCoalescer() {
+  return new RegisterCoalescer();
+}

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAG.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAG.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAG.cpp Sat Jul  2 22:28:07 2011
@@ -45,7 +45,7 @@
 ScheduleDAG::~ScheduleDAG() {}
 
 /// getInstrDesc helper to handle SDNodes.
-const TargetInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
+const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
   if (!Node || !Node->isMachineOpcode()) return NULL;
   return &TII->get(Node->getMachineOpcode());
 }

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAGInstrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAGInstrs.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAGInstrs.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/ScheduleDAGInstrs.cpp Sat Jul  2 22:28:07 2011
@@ -21,10 +21,11 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/ADT/SmallSet.h"
@@ -205,7 +206,7 @@
   bool UnitLatencies = ForceUnitLatencies();
 
   // Ask the target if address-backscheduling is desirable, and if so how much.
-  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
+  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
   unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
 
   // Remove any stale debug info; sometimes BuildSchedGraph is called again
@@ -236,13 +237,13 @@
       continue;
     }
 
-    const TargetInstrDesc &TID = MI->getDesc();
-    assert(!TID.isTerminator() && !MI->isLabel() &&
+    const MCInstrDesc &MCID = MI->getDesc();
+    assert(!MCID.isTerminator() && !MI->isLabel() &&
            "Cannot schedule terminators or labels!");
     // Create the SUnit for this MI.
     SUnit *SU = NewSUnit(MI);
-    SU->isCall = TID.isCall();
-    SU->isCommutable = TID.isCommutable();
+    SU->isCall = MCID.isCall();
+    SU->isCommutable = MCID.isCommutable();
 
     // Assign the Latency field of SU using target-provided information.
     if (UnitLatencies)
@@ -309,13 +310,13 @@
           if (SpecialAddressLatency != 0 && !UnitLatencies &&
               UseSU != &ExitSU) {
             MachineInstr *UseMI = UseSU->getInstr();
-            const TargetInstrDesc &UseTID = UseMI->getDesc();
+            const MCInstrDesc &UseMCID = UseMI->getDesc();
             int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
             assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
             if (RegUseIndex >= 0 &&
-                (UseTID.mayLoad() || UseTID.mayStore()) &&
-                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
-                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
+                (UseMCID.mayLoad() || UseMCID.mayStore()) &&
+                (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
+                UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
               LDataLatency += SpecialAddressLatency;
           }
           // Adjust the dependence latency using operand def/use
@@ -352,17 +353,17 @@
             unsigned Count = I->second.second;
             const MachineInstr *UseMI = UseMO->getParent();
             unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
-            const TargetInstrDesc &UseTID = UseMI->getDesc();
+            const MCInstrDesc &UseMCID = UseMI->getDesc();
             // TODO: If we knew the total depth of the region here, we could
             // handle the case where the whole loop is inside the region but
             // is large enough that the isScheduleHigh trick isn't needed.
-            if (UseMOIdx < UseTID.getNumOperands()) {
+            if (UseMOIdx < UseMCID.getNumOperands()) {
               // Currently, we only support scheduling regions consisting of
               // single basic blocks. Check to see if the instruction is in
               // the same region by checking to see if it has the same parent.
               if (UseMI->getParent() != MI->getParent()) {
                 unsigned Latency = SU->Latency;
-                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
+                if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                   Latency += SpecialAddressLatency;
                 // This is a wild guess as to the portion of the latency which
                 // will be overlapped by work done outside the current
@@ -374,7 +375,7 @@
                                     /*isMustAlias=*/false,
                                     /*isArtificial=*/true));
               } else if (SpecialAddressLatency > 0 &&
-                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
+                         UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                 // The entire loop body is within the current scheduling region
                 // and the latency of this operation is assumed to be greater
                 // than the latency of the loop.
@@ -417,9 +418,9 @@
     // produce more precise dependence information.
 #define STORE_LOAD_LATENCY 1
     unsigned TrueMemOrderLatency = 0;
-    if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
+    if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasVolatileMemoryRef() &&
-         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+         (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
       // Be conservative with these and add dependencies on all memory
       // references, even those that are known to not alias.
       for (std::map<const Value *, SUnit *>::iterator I =
@@ -458,7 +459,7 @@
       PendingLoads.clear();
       AliasMemDefs.clear();
       AliasMemUses.clear();
-    } else if (TID.mayStore()) {
+    } else if (MCID.mayStore()) {
       bool MayAlias = true;
       TrueMemOrderLatency = STORE_LOAD_LATENCY;
       if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
@@ -514,7 +515,7 @@
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
-    } else if (TID.mayLoad()) {
+    } else if (MCID.mayLoad()) {
       bool MayAlias = true;
       TrueMemOrderLatency = 0;
       if (MI->isInvariantLoad(AA)) {

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/ScoreboardHazardRecognizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/ScoreboardHazardRecognizer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/ScoreboardHazardRecognizer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/ScoreboardHazardRecognizer.cpp Sat Jul  2 22:28:07 2011
@@ -16,11 +16,11 @@
 #define DEBUG_TYPE ::llvm::ScoreboardHazardRecognizer::DebugType
 #include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrItineraries.h"
 
 using namespace llvm;
 
@@ -115,12 +115,12 @@
   // Use the itinerary for the underlying instruction to check for
   // free FU's in the scoreboard at the appropriate future cycles.
 
-  const TargetInstrDesc *TID = DAG->getInstrDesc(SU);
-  if (TID == NULL) {
+  const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
+  if (MCID == NULL) {
     // Don't check hazards for non-machineinstr Nodes.
     return NoHazard;
   }
-  unsigned idx = TID->getSchedClass();
+  unsigned idx = MCID->getSchedClass();
   for (const InstrStage *IS = ItinData->beginStage(idx),
          *E = ItinData->endStage(idx); IS != E; ++IS) {
     // We must find one of the stage's units free for every cycle the
@@ -173,16 +173,16 @@
 
   // Use the itinerary for the underlying instruction to reserve FU's
   // in the scoreboard at the appropriate future cycles.
-  const TargetInstrDesc *TID = DAG->getInstrDesc(SU);
-  assert(TID && "The scheduler must filter non-machineinstrs");
-  if (DAG->TII->isZeroCost(TID->Opcode))
+  const MCInstrDesc *MCID = DAG->getInstrDesc(SU);
+  assert(MCID && "The scheduler must filter non-machineinstrs");
+  if (DAG->TII->isZeroCost(MCID->Opcode))
     return;
 
   ++IssueCount;
 
   unsigned cycle = 0;
 
-  unsigned idx = TID->getSchedClass();
+  unsigned idx = MCID->getSchedClass();
   for (const InstrStage *IS = ItinData->beginStage(idx),
          *E = ItinData->endStage(idx); IS != E; ++IS) {
     // We must reserve one of the stage's units for every cycle the

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Sat Jul  2 22:28:07 2011
@@ -238,6 +238,9 @@
     SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
     SDValue BuildSDIV(SDNode *N);
     SDValue BuildUDIV(SDNode *N);
+    SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
+                               bool DemandHighBits = true);
+    SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
     SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
     SDValue ReduceLoadWidth(SDNode *N);
     SDValue ReduceLoadOpStoreWidth(SDNode *N);
@@ -1307,16 +1310,6 @@
   return SDValue();
 }
 
-/// isCarryMaterialization - Returns true if V is an ADDE node that is known to
-/// return 0 or 1 depending on the carry flag.
-static bool isCarryMaterialization(SDValue V) {
-  if (V.getOpcode() != ISD::ADDE)
-    return false;
-
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(0));
-  return C && C->isNullValue() && V.getOperand(0) == V.getOperand(1);
-}
-
 SDValue DAGCombiner::visitADD(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -1480,18 +1473,6 @@
     return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt);
   }
 
-  // add (adde 0, 0, glue), X -> adde X, 0, glue
-  if (N0->hasOneUse() && isCarryMaterialization(N0))
-    return DAG.getNode(ISD::ADDE, N->getDebugLoc(),
-                       DAG.getVTList(VT, MVT::Glue), N1, N0.getOperand(0),
-                       N0.getOperand(2));
-
-  // add X, (adde 0, 0, glue) -> adde X, 0, glue
-  if (N1->hasOneUse() && isCarryMaterialization(N1))
-    return DAG.getNode(ISD::ADDE, N->getDebugLoc(),
-                       DAG.getVTList(VT, MVT::Glue), N0, N1.getOperand(0),
-                       N1.getOperand(2));
-
   return SDValue();
 }
 
@@ -1535,16 +1516,6 @@
                                    N->getDebugLoc(), MVT::Glue));
   }
 
-  // addc (adde 0, 0, glue), X -> adde X, 0, glue
-  if (N0->hasOneUse() && isCarryMaterialization(N0))
-    return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(), N1,
-                       DAG.getConstant(0, VT), N0.getOperand(2));
-
-  // addc X, (adde 0, 0, glue) -> adde X, 0, glue
-  if (N1->hasOneUse() && isCarryMaterialization(N1))
-    return DAG.getNode(ISD::ADDE, N->getDebugLoc(), N->getVTList(), N0,
-                       DAG.getConstant(0, VT), N1.getOperand(2));
-
   return SDValue();
 }
 
@@ -2512,6 +2483,244 @@
   return SDValue();
 }
 
+/// MatchBSwapHWordLow - Match (a >> 8) | (a << 8) as (bswap a) >> 16
+///
+SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
+                                        bool DemandHighBits) {
+  if (!LegalOperations)
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
+    return SDValue();
+  if (!TLI.isOperationLegal(ISD::BSWAP, VT))
+    return SDValue();
+
+  // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
+  bool LookPassAnd0 = false;
+  bool LookPassAnd1 = false;
+  if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL)
+      std::swap(N0, N1);
+  if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL)
+      std::swap(N0, N1);
+  if (N0.getOpcode() == ISD::AND) {
+    if (!N0.getNode()->hasOneUse())
+      return SDValue();
+    ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+    if (!N01C || N01C->getZExtValue() != 0xFF00)
+      return SDValue();
+    N0 = N0.getOperand(0);
+    LookPassAnd0 = true;
+  }
+
+  if (N1.getOpcode() == ISD::AND) {
+    if (!N1.getNode()->hasOneUse())
+      return SDValue();
+    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+    if (!N11C || N11C->getZExtValue() != 0xFF)
+      return SDValue();
+    N1 = N1.getOperand(0);
+    LookPassAnd1 = true;
+  }
+
+  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
+    std::swap(N0, N1);
+  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
+    return SDValue();
+  if (!N0.getNode()->hasOneUse() ||
+      !N1.getNode()->hasOneUse())
+    return SDValue();
+
+  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+  ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
+  if (!N01C || !N11C)
+    return SDValue();
+  if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8)
+    return SDValue();
+
+  // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
+  SDValue N00 = N0->getOperand(0);
+  if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) {
+    if (!N00.getNode()->hasOneUse())
+      return SDValue();
+    ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1));
+    if (!N001C || N001C->getZExtValue() != 0xFF)
+      return SDValue();
+    N00 = N00.getOperand(0);
+    LookPassAnd0 = true;
+  }
+
+  SDValue N10 = N1->getOperand(0);
+  if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) {
+    if (!N10.getNode()->hasOneUse())
+      return SDValue();
+    ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1));
+    if (!N101C || N101C->getZExtValue() != 0xFF00)
+      return SDValue();
+    N10 = N10.getOperand(0);
+    LookPassAnd1 = true;
+  }
+
+  if (N00 != N10)
+    return SDValue();
+
+  // Make sure everything beyond the low halfword is zero since the SRL 16
+  // will clear the top bits.
+  unsigned OpSizeInBits = VT.getSizeInBits();
+  if (DemandHighBits && OpSizeInBits > 16 &&
+      (!LookPassAnd0 || !LookPassAnd1) &&
+      !DAG.MaskedValueIsZero(N10, APInt::getHighBitsSet(OpSizeInBits, 16)))
+    return SDValue();
+  
+  SDValue Res = DAG.getNode(ISD::BSWAP, N->getDebugLoc(), VT, N00);
+  if (OpSizeInBits > 16)
+    Res = DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, Res,
+                      DAG.getConstant(OpSizeInBits-16, getShiftAmountTy(VT)));
+  return Res;
+}
+
+/// isBSwapHWordElement - Return true if the specified node is an element
+/// that makes up a 32-bit packed halfword byteswap. i.e.
+/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
+static bool isBSwapHWordElement(SDValue N, SmallVector<SDNode*,4> &Parts) {
+  if (!N.getNode()->hasOneUse())
+    return false;
+
+  unsigned Opc = N.getOpcode();
+  if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL)
+    return false;
+
+  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+  if (!N1C)
+    return false;
+
+  unsigned Num;
+  switch (N1C->getZExtValue()) {
+  default:
+    return false;
+  case 0xFF:       Num = 0; break;
+  case 0xFF00:     Num = 1; break;
+  case 0xFF0000:   Num = 2; break;
+  case 0xFF000000: Num = 3; break;
+  }
+
+  // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00).
+  SDValue N0 = N.getOperand(0);
+  if (Opc == ISD::AND) {
+    if (Num == 0 || Num == 2) {
+      // (x >> 8) & 0xff
+      // (x >> 8) & 0xff0000
+      if (N0.getOpcode() != ISD::SRL)
+        return false;
+      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+      if (!C || C->getZExtValue() != 8)
+        return false;
+    } else {
+      // (x << 8) & 0xff00
+      // (x << 8) & 0xff000000
+      if (N0.getOpcode() != ISD::SHL)
+        return false;
+      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+      if (!C || C->getZExtValue() != 8)
+        return false;
+    }
+  } else if (Opc == ISD::SHL) {
+    // (x & 0xff) << 8
+    // (x & 0xff0000) << 8
+    if (Num != 0 && Num != 2)
+      return false;
+    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    if (!C || C->getZExtValue() != 8)
+      return false;
+  } else { // Opc == ISD::SRL
+    // (x & 0xff00) >> 8
+    // (x & 0xff000000) >> 8
+    if (Num != 1 && Num != 3)
+      return false;
+    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    if (!C || C->getZExtValue() != 8)
+      return false;
+  }
+
+  if (Parts[Num])
+    return false;
+
+  Parts[Num] = N0.getOperand(0).getNode();
+  return true;
+}
+
+/// MatchBSwapHWord - Match a 32-bit packed halfword bswap. That is
+/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
+/// => (rotl (bswap x), 16)
+SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
+  if (!LegalOperations)
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+  if (VT != MVT::i32)
+    return SDValue();
+  if (!TLI.isOperationLegal(ISD::BSWAP, VT))
+    return SDValue();
+
+  SmallVector<SDNode*,4> Parts(4, (SDNode*)0);
+  // Look for either
+  // (or (or (and), (and)), (or (and), (and)))
+  // (or (or (or (and), (and)), (and)), (and))
+  if (N0.getOpcode() != ISD::OR)
+    return SDValue();
+  SDValue N00 = N0.getOperand(0);
+  SDValue N01 = N0.getOperand(1);
+
+  if (N1.getOpcode() == ISD::OR) {
+    // (or (or (and), (and)), (or (and), (and)))
+    SDValue N000 = N00.getOperand(0);
+    if (!isBSwapHWordElement(N000, Parts))
+      return SDValue();
+
+    SDValue N001 = N00.getOperand(1);
+    if (!isBSwapHWordElement(N001, Parts))
+      return SDValue();
+    SDValue N010 = N01.getOperand(0);
+    if (!isBSwapHWordElement(N010, Parts))
+      return SDValue();
+    SDValue N011 = N01.getOperand(1);
+    if (!isBSwapHWordElement(N011, Parts))
+      return SDValue();
+  } else {
+    // (or (or (or (and), (and)), (and)), (and))
+    if (!isBSwapHWordElement(N1, Parts))
+      return SDValue();
+    if (!isBSwapHWordElement(N01, Parts))
+      return SDValue();
+    if (N00.getOpcode() != ISD::OR)
+      return SDValue();
+    SDValue N000 = N00.getOperand(0);
+    if (!isBSwapHWordElement(N000, Parts))
+      return SDValue();
+    SDValue N001 = N00.getOperand(1);
+    if (!isBSwapHWordElement(N001, Parts))
+      return SDValue();
+  }
+
+  // Make sure the parts are all coming from the same node.
+  if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3])
+    return SDValue();
+
+  SDValue BSwap = DAG.getNode(ISD::BSWAP, N->getDebugLoc(), VT,
+                              SDValue(Parts[0],0));
+
+  // Result of the bswap should be rotated by 16. If it's not legal, then
+  // do (x << 16) | (x >> 16).
+  SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT));
+  if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
+    return DAG.getNode(ISD::ROTL, N->getDebugLoc(), VT, BSwap, ShAmt);
+  else if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT))
+    return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, BSwap, ShAmt);
+  return DAG.getNode(ISD::OR, N->getDebugLoc(), VT,
+                     DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, BSwap, ShAmt),
+                     DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, BSwap, ShAmt));
+}
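
As a quick sanity check of the two byte-swap identities that MatchBSwapHWordLow
and MatchBSwapHWord above rely on, here is a small standalone program.  It is
only an illustration; bswap32 and rotl32 are hand-rolled helpers, not LLVM or
libc calls.

    #include <cassert>
    #include <stdint.h>

    static uint32_t bswap32(uint32_t x) {
      return (x >> 24) | ((x >> 8) & 0xff00u) |
             ((x << 8) & 0xff0000u) | (x << 24);
    }
    static uint32_t rotl32(uint32_t x, unsigned n) {
      return (x << n) | (x >> (32 - n));   // only called with 0 < n < 32
    }

    int main() {
      // Packed halfword bswap: swapping the bytes inside each halfword of x
      // is the same as rotating bswap(x) by 16.
      uint32_t x = 0x12345678u;
      uint32_t hw = ((x & 0xffu)       << 8) | ((x & 0xff00u)     >> 8) |
                    ((x & 0x00ff0000u) << 8) | ((x & 0xff000000u) >> 8);
      assert(hw == rotl32(bswap32(x), 16));        // both are 0x34127856

      // Low-halfword bswap: if the high halfword of a is zero, swapping the
      // two low bytes is the same as (bswap a) >> 16.
      uint32_t a = 0x0000b1b0u;
      uint32_t lo = ((a << 8) & 0xff00u) | ((a >> 8) & 0x00ffu);
      assert(lo == (bswap32(a) >> 16));            // both are 0x0000b0b1
      return 0;
    }
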
+
 SDValue DAGCombiner::visitOR(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -2547,6 +2756,15 @@
   // fold (or x, c) -> c iff (x & ~c) == 0
   if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
     return N1;
+
+  // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
+  SDValue BSwap = MatchBSwapHWord(N, N0, N1);
+  if (BSwap.getNode() != 0)
+    return BSwap;
+  BSwap = MatchBSwapHWordLow(N, N0, N1);
+  if (BSwap.getNode() != 0)
+    return BSwap;
+
   // reassociate or
   SDValue ROR = ReassociateOps(ISD::OR, N->getDebugLoc(), N0, N1);
   if (ROR.getNode() != 0)
@@ -4606,6 +4824,16 @@
     CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1));
     return SDValue(N, 0);   // Return N so it doesn't get rechecked!
   }
+
+  // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
+  if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
+    SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
+                                       N0.getOperand(1), false);
+    if (BSwap.getNode() != 0)
+      return DAG.getNode(ISD::SIGN_EXTEND_INREG, N->getDebugLoc(), VT,
+                         BSwap, N1);
+  }
+
   return SDValue();
 }
 
@@ -5231,7 +5459,8 @@
   // fold (sint_to_fp c1) -> c1fp
   if (N0C && OpVT != MVT::ppcf128 &&
       // ...but only if the target supports immediate floating-point values
-      (Level == llvm::Unrestricted || TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
+      (Level == llvm::Unrestricted ||
+       TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
     return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
 
   // If the input is a legal type, and SINT_TO_FP is not legal on this target,
@@ -5255,7 +5484,8 @@
   // fold (uint_to_fp c1) -> c1fp
   if (N0C && OpVT != MVT::ppcf128 &&
       // ...but only if the target supports immediate floating-point values
-      (Level == llvm::Unrestricted || TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
+      (Level == llvm::Unrestricted ||
+       TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT)))
     return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
 
   // If the input is a legal type, and UINT_TO_FP is not legal on this target,
@@ -7208,7 +7438,7 @@
         const TargetData &TD = *TLI.getTargetData();
 
         // Create a ConstantArray of the two constants.
-        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2);
+        Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
         SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
                                             TD.getPrefTypeAlignment(FPTy));
         unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/FastISel.cpp Sat Jul  2 22:28:07 2011
@@ -547,7 +547,7 @@
   case Intrinsic::dbg_value: {
     // This form of DBG_VALUE is target-independent.
     const DbgValueInst *DI = cast<DbgValueInst>(Call);
-    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
+    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
     const Value *V = DI->getValue();
     if (!V) {
       // Currently the optimizer can produce this; insert an undef to
@@ -556,9 +556,14 @@
         .addReg(0U).addImm(DI->getOffset())
         .addMetadata(DI->getVariable());
     } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
-        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
-        .addMetadata(DI->getVariable());
+      if (CI->getBitWidth() > 64)
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+          .addCImm(CI).addImm(DI->getOffset())
+          .addMetadata(DI->getVariable());
+      else 
+        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
+          .addMetadata(DI->getVariable());
     } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
         .addFPImm(CF).addImm(DI->getOffset())
@@ -1085,7 +1090,7 @@
 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                  const TargetRegisterClass* RC) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
   return ResultReg;
@@ -1095,7 +1100,7 @@
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1115,7 +1120,7 @@
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1137,7 +1142,7 @@
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1160,7 +1165,7 @@
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1181,7 +1186,7 @@
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1204,7 +1209,7 @@
                                    unsigned Op0, bool Op0IsKill,
                                    const ConstantFP *FPImm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1226,7 +1231,7 @@
                                     unsigned Op1, bool Op1IsKill,
                                     uint64_t Imm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1248,7 +1253,7 @@
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
@@ -1264,7 +1269,7 @@
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.cpp Sat Jul  2 22:28:07 2011
@@ -106,10 +106,10 @@
             continue;
           Match = false;
           if (User->isMachineOpcode()) {
-            const TargetInstrDesc &II = TII->get(User->getMachineOpcode());
+            const MCInstrDesc &II = TII->get(User->getMachineOpcode());
             const TargetRegisterClass *RC = 0;
             if (i+II.getNumDefs() < II.getNumOperands())
-              RC = II.OpInfo[i+II.getNumDefs()].getRegClass(TRI);
+              RC = TII->getRegClass(II, i+II.getNumDefs(), TRI);
             if (!UseRC)
               UseRC = RC;
             else if (RC) {
@@ -178,7 +178,7 @@
 }
 
 void InstrEmitter::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
-                                       const TargetInstrDesc &II,
+                                       const MCInstrDesc &II,
                                        bool IsClone, bool IsCloned,
                                        DenseMap<SDValue, unsigned> &VRBaseMap) {
   assert(Node->getMachineOpcode() != TargetOpcode::IMPLICIT_DEF &&
@@ -189,7 +189,7 @@
     // is a vreg in the same register class, use the CopyToReg'd destination
     // register instead of creating a new vreg.
     unsigned VRBase = 0;
-    const TargetRegisterClass *RC = II.OpInfo[i].getRegClass(TRI);
+    const TargetRegisterClass *RC = TII->getRegClass(II, i, TRI);
     if (II.OpInfo[i].isOptionalDef()) {
       // Optional def must be a physical register.
       unsigned NumResults = CountResults(Node);
@@ -242,7 +242,7 @@
       Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
     // Add an IMPLICIT_DEF instruction before every use.
     unsigned VReg = getDstOfOnlyCopyToRegUse(Op.getNode(), Op.getResNo());
-    // IMPLICIT_DEF can produce any type of result so its TargetInstrDesc
+    // IMPLICIT_DEF can produce any type of result so its MCInstrDesc
     // does not include operand register class info.
     if (!VReg) {
       const TargetRegisterClass *RC = TLI->getRegClassFor(Op.getValueType());
@@ -265,7 +265,7 @@
 void
 InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op,
                                  unsigned IIOpNum,
-                                 const TargetInstrDesc *II,
+                                 const MCInstrDesc *II,
                                  DenseMap<SDValue, unsigned> &VRBaseMap,
                                  bool IsDebug, bool IsClone, bool IsCloned) {
   assert(Op.getValueType() != MVT::Other &&
@@ -275,9 +275,9 @@
   unsigned VReg = getVR(Op, VRBaseMap);
   assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
 
-  const TargetInstrDesc &TID = MI->getDesc();
-  bool isOptDef = IIOpNum < TID.getNumOperands() &&
-    TID.OpInfo[IIOpNum].isOptionalDef();
+  const MCInstrDesc &MCID = MI->getDesc();
+  bool isOptDef = IIOpNum < MCID.getNumOperands() &&
+    MCID.OpInfo[IIOpNum].isOptionalDef();
 
   // If the instruction requires a register in a different class, create
   // a new virtual register and copy the value into it.
@@ -285,8 +285,8 @@
     const TargetRegisterClass *SrcRC = MRI->getRegClass(VReg);
     const TargetRegisterClass *DstRC = 0;
     if (IIOpNum < II->getNumOperands())
-      DstRC = II->OpInfo[IIOpNum].getRegClass(TRI);
-    assert((DstRC || (TID.isVariadic() && IIOpNum >= TID.getNumOperands())) &&
+      DstRC = TII->getRegClass(*II, IIOpNum, TRI);
+    assert((DstRC || (MCID.isVariadic() && IIOpNum >= MCID.getNumOperands())) &&
            "Don't have operand info for this instruction!");
     if (DstRC && !SrcRC->hasSuperClassEq(DstRC)) {
       unsigned NewVReg = MRI->createVirtualRegister(DstRC);
@@ -312,7 +312,7 @@
     while (Idx > 0 &&
            MI->getOperand(Idx-1).isReg() && MI->getOperand(Idx-1).isImplicit())
       --Idx;
-    bool isTied = MI->getDesc().getOperandConstraint(Idx, TOI::TIED_TO) != -1;
+    bool isTied = MI->getDesc().getOperandConstraint(Idx, MCOI::TIED_TO) != -1;
     if (isTied)
       isKill = false;
   }
@@ -330,7 +330,7 @@
 /// assertions only.
 void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
                               unsigned IIOpNum,
-                              const TargetInstrDesc *II,
+                              const MCInstrDesc *II,
                               DenseMap<SDValue, unsigned> &VRBaseMap,
                               bool IsDebug, bool IsClone, bool IsCloned) {
   if (Op.isMachineOpcode()) {
@@ -556,7 +556,7 @@
   unsigned NumOps = Node->getNumOperands();
   assert((NumOps & 1) == 1 &&
          "REG_SEQUENCE must have an odd number of operands!");
-  const TargetInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
+  const MCInstrDesc &II = TII->get(TargetOpcode::REG_SEQUENCE);
   for (unsigned i = 1; i != NumOps; ++i) {
     SDValue Op = Node->getOperand(i);
     if ((i & 1) == 0) {
@@ -597,7 +597,7 @@
     return TII->emitFrameIndexDebugValue(*MF, FrameIx, Offset, MDPtr, DL);
   }
   // Otherwise, we're going to create an instruction here.
-  const TargetInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
+  const MCInstrDesc &II = TII->get(TargetOpcode::DBG_VALUE);
   MachineInstrBuilder MIB = BuildMI(*MF, DL, II);
   if (SD->getKind() == SDDbgValue::SDNODE) {
     SDNode *Node = SD->getSDNode();
@@ -616,12 +616,8 @@
   } else if (SD->getKind() == SDDbgValue::CONST) {
     const Value *V = SD->getConst();
     if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-      // FIXME: SDDbgValue constants aren't updated with legalization, so it's 
-      // possible to have i128 constants in them at this point. Dwarf writer
-      // does not handle i128 constants at the moment so, as a crude workaround,
-      // just drop the debug info if this happens.
-      if (!CI->getValue().isSignedIntN(64))
-        MIB.addReg(0U);
+      if (CI->getBitWidth() > 64)
+        MIB.addCImm(CI);
       else
         MIB.addImm(CI->getSExtValue());
     } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
@@ -672,7 +668,7 @@
     // We want a unique VR for each IMPLICIT_DEF use.
     return;
   
-  const TargetInstrDesc &II = TII->get(Opc);
+  const MCInstrDesc &II = TII->get(Opc);
   unsigned NumResults = CountResults(Node);
   unsigned NodeOperands = CountOperands(Node);
   bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0;
@@ -701,9 +697,9 @@
         UsedRegs.push_back(cast<RegisterSDNode>(F->getOperand(1))->getReg());
       else {
         // Collect declared implicit uses.
-        const TargetInstrDesc &TID = TII->get(F->getMachineOpcode());
-        UsedRegs.append(TID.getImplicitUses(),
-                        TID.getImplicitUses() + TID.getNumImplicitUses());
+        const MCInstrDesc &MCID = TII->get(F->getMachineOpcode());
+        UsedRegs.append(MCID.getImplicitUses(),
+                        MCID.getImplicitUses() + MCID.getNumImplicitUses());
         // In addition to declared implicit uses, we must also check for
         // direct RegisterSDNode operands.
         for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
@@ -855,6 +851,7 @@
         }
         break;
       case InlineAsm::Kind_RegDefEarlyClobber:
+      case InlineAsm::Kind_Clobber:
         for (; NumVals; --NumVals, ++i) {
           unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
           MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/ true,

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/InstrEmitter.h Sat Jul  2 22:28:07 2011
@@ -22,7 +22,7 @@
 
 namespace llvm {
 
-class TargetInstrDesc;
+class MCInstrDesc;
 class SDDbgValue;
 
 class InstrEmitter {
@@ -49,7 +49,7 @@
                                     unsigned ResNo) const;
 
   void CreateVirtualRegisters(SDNode *Node, MachineInstr *MI,
-                              const TargetInstrDesc &II,
+                              const MCInstrDesc &II,
                               bool IsClone, bool IsCloned,
                               DenseMap<SDValue, unsigned> &VRBaseMap);
 
@@ -63,7 +63,7 @@
   /// not in the required register class.
   void AddRegisterOperand(MachineInstr *MI, SDValue Op,
                           unsigned IIOpNum,
-                          const TargetInstrDesc *II,
+                          const MCInstrDesc *II,
                           DenseMap<SDValue, unsigned> &VRBaseMap,
                           bool IsDebug, bool IsClone, bool IsCloned);
 
@@ -73,7 +73,7 @@
   /// assertions only.
   void AddOperand(MachineInstr *MI, SDValue Op,
                   unsigned IIOpNum,
-                  const TargetInstrDesc *II,
+                  const MCInstrDesc *II,
                   DenseMap<SDValue, unsigned> &VRBaseMap,
                   bool IsDebug, bool IsClone, bool IsCloned);
 

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp Sat Jul  2 22:28:07 2011
@@ -520,20 +520,44 @@
 SDValue DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) {
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
   SDValue Res;
+  SDValue InOp = N->getOperand(0);
+  DebugLoc dl = N->getDebugLoc();
 
-  switch (getTypeAction(N->getOperand(0).getValueType())) {
+  switch (getTypeAction(InOp.getValueType())) {
   default: llvm_unreachable("Unknown type action!");
   case TargetLowering::TypeLegal:
   case TargetLowering::TypeExpandInteger:
-    Res = N->getOperand(0);
+    Res = InOp;
     break;
   case TargetLowering::TypePromoteInteger:
-    Res = GetPromotedInteger(N->getOperand(0));
+    Res = GetPromotedInteger(InOp);
     break;
+  case TargetLowering::TypeSplitVector:
+    EVT InVT = InOp.getValueType();
+    assert(InVT.isVector() && "Cannot split scalar types");
+    unsigned NumElts = InVT.getVectorNumElements();
+    assert(NumElts == NVT.getVectorNumElements() &&
+           "Dst and Src must have the same number of elements");
+    EVT EltVT = InVT.getScalarType();
+    assert(isPowerOf2_32(NumElts) &&
+           "Promoted vector type must be a power of two");
+
+    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts/2);
+    EVT HalfNVT = EVT::getVectorVT(*DAG.getContext(), NVT.getScalarType(),
+                                   NumElts/2);
+
+    SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfVT, InOp,
+                               DAG.getIntPtrConstant(0));
+    SDValue EOp2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, HalfVT, InOp,
+                               DAG.getIntPtrConstant(NumElts/2));
+    EOp1 = DAG.getNode(ISD::TRUNCATE, dl, HalfNVT, EOp1);
+    EOp2 = DAG.getNode(ISD::TRUNCATE, dl, HalfNVT, EOp2);
+
+    return DAG.getNode(ISD::CONCAT_VECTORS, dl, NVT, EOp1, EOp2);
   }
 
   // Truncate to NVT instead of VT
-  return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), NVT, Res);
+  return DAG.getNode(ISD::TRUNCATE, dl, NVT, Res);
 }
 
 SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {

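The new TypeSplitVector case above handles a truncate whose input will be split: it extracts the two halves, truncates each to the narrower element type, and concatenates the results. A standalone sketch of the same shape, with std::vector standing in for DAG nodes (names are illustrative, not LLVM API):

#include <cassert>
#include <cstdint>
#include <vector>

// Truncate a vector of 32-bit lanes to 8-bit lanes by processing the two
// halves independently and concatenating the results, mirroring the
// EXTRACT_SUBVECTOR + TRUNCATE + CONCAT_VECTORS sequence built above.
static std::vector<uint8_t> truncateByHalves(const std::vector<uint32_t> &In) {
  assert(!In.empty() && (In.size() & (In.size() - 1)) == 0 &&
         "element count must be a power of two");
  const size_t Half = In.size() / 2;
  std::vector<uint8_t> Out;
  for (size_t i = 0; i != Half; ++i)            // low half
    Out.push_back(static_cast<uint8_t>(In[i]));
  for (size_t i = Half; i != In.size(); ++i)    // high half
    Out.push_back(static_cast<uint8_t>(In[i]));
  return Out;                                   // concatenation
}
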
Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp Sat Jul  2 22:28:07 2011
@@ -249,14 +249,14 @@
     assert(N->getNodeId() == -1 && "Node already inserted!");
     N->setNodeId(NewSU->NodeNum);
       
-    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
-    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
-      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
+    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
+    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
+      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
         NewSU->isTwoAddress = true;
         break;
       }
     }
-    if (TID.isCommutable())
+    if (MCID.isCommutable())
       NewSU->isCommutable = true;
 
     // LoadNode may already exist. This can happen when there is another
@@ -422,10 +422,10 @@
 /// FIXME: Move to SelectionDAG?
 static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                  const TargetInstrInfo *TII) {
-  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
-  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
-  unsigned NumRes = TID.getNumDefs();
-  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
+  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
+  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
+  unsigned NumRes = MCID.getNumDefs();
+  for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
     if (Reg == *ImpDef)
       break;
     ++NumRes;
@@ -490,7 +490,8 @@
 
         ++i; // Skip the ID value.
         if (InlineAsm::isRegDefKind(Flags) ||
-            InlineAsm::isRegDefEarlyClobberKind(Flags)) {
+            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
+            InlineAsm::isClobberKind(Flags)) {
           // Check for def of register or earlyclobber register.
           for (; NumVals; --NumVals, ++i) {
             unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
@@ -504,10 +505,10 @@
     }
     if (!Node->isMachineOpcode())
       continue;
-    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
-    if (!TID.ImplicitDefs)
+    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
+    if (!MCID.ImplicitDefs)
       continue;
-    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
+    for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg) {
       CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
     }
   }
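
The scheduler changes above are a mechanical switch from TargetInstrDesc/TOI to MCInstrDesc/MCOI; the queries themselves (tied operands, commutability, implicit defs) are unchanged. A minimal sketch of the tied-operand query in its new form (the wrapper function is illustrative only):

#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Target/TargetInstrInfo.h"

// Does any operand of this opcode carry a TIED_TO constraint?
// hasTiedOperand is an illustrative wrapper around the calls used above.
static bool hasTiedOperand(const llvm::TargetInstrInfo *TII, unsigned Opcode) {
  const llvm::MCInstrDesc &MCID = TII->get(Opcode);
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (MCID.getOperandConstraint(i, llvm::MCOI::TIED_TO) != -1)
      return true;
  return false;
}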

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp Sat Jul  2 22:28:07 2011
@@ -290,10 +290,20 @@
   // Special handling for untyped values.  These values can only come from
   // the expansion of custom DAG-to-DAG patterns.
   if (VT == MVT::untyped) {
-    unsigned Opcode = RegDefPos.GetNode()->getMachineOpcode();
+    const SDNode *Node = RegDefPos.GetNode();
+    unsigned Opcode = Node->getMachineOpcode();
+
+    if (Opcode == TargetOpcode::REG_SEQUENCE) {
+      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
+      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
+      RegClass = RC->getID();
+      Cost = 1;
+      return;
+    }
+
     unsigned Idx = RegDefPos.GetIdx();
-    const TargetInstrDesc Desc = TII->get(Opcode);
-    const TargetRegisterClass *RC = Desc.getRegClass(Idx, TRI);
+    const MCInstrDesc Desc = TII->get(Opcode);
+    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
     RegClass = RC->getID();
     // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
     // better way to determine it.
@@ -827,14 +837,14 @@
     assert(N->getNodeId() == -1 && "Node already inserted!");
     N->setNodeId(NewSU->NodeNum);
 
-    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
-    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
-      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
+    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
+    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
+      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
         NewSU->isTwoAddress = true;
         break;
       }
     }
-    if (TID.isCommutable())
+    if (MCID.isCommutable())
       NewSU->isCommutable = true;
 
     InitNumRegDefsLeft(NewSU);
@@ -1014,10 +1024,10 @@
 /// FIXME: Move to SelectionDAG?
 static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                  const TargetInstrInfo *TII) {
-  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
-  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
-  unsigned NumRes = TID.getNumDefs();
-  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
+  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
+  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
+  unsigned NumRes = MCID.getNumDefs();
+  for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
     if (Reg == *ImpDef)
       break;
     ++NumRes;
@@ -1082,7 +1092,8 @@
 
         ++i; // Skip the ID value.
         if (InlineAsm::isRegDefKind(Flags) ||
-            InlineAsm::isRegDefEarlyClobberKind(Flags)) {
+            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
+            InlineAsm::isClobberKind(Flags)) {
           // Check for def of register or earlyclobber register.
           for (; NumVals; --NumVals, ++i) {
             unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
@@ -1097,10 +1108,10 @@
 
     if (!Node->isMachineOpcode())
       continue;
-    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
-    if (!TID.ImplicitDefs)
+    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
+    if (!MCID.ImplicitDefs)
       continue;
-    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
+    for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
       CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
   }
 
@@ -2018,13 +2029,9 @@
     unsigned POpc = PN->getMachineOpcode();
     if (POpc == TargetOpcode::IMPLICIT_DEF)
       continue;
-    if (POpc == TargetOpcode::EXTRACT_SUBREG) {
-      EVT VT = PN->getOperand(0).getValueType();
-      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
-      continue;
-    } else if (POpc == TargetOpcode::INSERT_SUBREG ||
-               POpc == TargetOpcode::SUBREG_TO_REG) {
+    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
+        POpc == TargetOpcode::INSERT_SUBREG ||
+        POpc == TargetOpcode::SUBREG_TO_REG) {
       EVT VT = PN->getValueType(0);
       unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
       RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
@@ -2599,11 +2606,11 @@
 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
   if (SU->isTwoAddress) {
     unsigned Opc = SU->getNode()->getMachineOpcode();
-    const TargetInstrDesc &TID = TII->get(Opc);
-    unsigned NumRes = TID.getNumDefs();
-    unsigned NumOps = TID.getNumOperands() - NumRes;
+    const MCInstrDesc &MCID = TII->get(Opc);
+    unsigned NumRes = MCID.getNumDefs();
+    unsigned NumOps = MCID.getNumOperands() - NumRes;
     for (unsigned i = 0; i != NumOps; ++i) {
-      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
+      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
         SDNode *DU = SU->getNode()->getOperand(i).getNode();
         if (DU->getNodeId() != -1 &&
             Op->OrigNode == &(*SUnits)[DU->getNodeId()])
@@ -2783,11 +2790,11 @@
 
     bool isLiveOut = hasOnlyLiveOutUses(SU);
     unsigned Opc = Node->getMachineOpcode();
-    const TargetInstrDesc &TID = TII->get(Opc);
-    unsigned NumRes = TID.getNumDefs();
-    unsigned NumOps = TID.getNumOperands() - NumRes;
+    const MCInstrDesc &MCID = TII->get(Opc);
+    unsigned NumRes = MCID.getNumDefs();
+    unsigned NumOps = MCID.getNumOperands() - NumRes;
     for (unsigned j = 0; j != NumOps; ++j) {
-      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
+      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
         continue;
       SDNode *DU = SU->getNode()->getOperand(j).getNode();
       if (DU->getNodeId() == -1)

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp Sat Jul  2 22:28:07 2011
@@ -17,11 +17,12 @@
 #include "ScheduleDAGSDNodes.h"
 #include "InstrEmitter.h"
 #include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallSet.h"
@@ -111,7 +112,7 @@
 
   unsigned ResNo = User->getOperand(2).getResNo();
   if (Def->isMachineOpcode()) {
-    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
+    const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
     if (ResNo >= II.getNumDefs() &&
         II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
       PhysReg = Reg;
@@ -255,8 +256,8 @@
       continue;
 
     unsigned Opc = Node->getMachineOpcode();
-    const TargetInstrDesc &TID = TII->get(Opc);
-    if (TID.mayLoad())
+    const MCInstrDesc &MCID = TII->get(Opc);
+    if (MCID.mayLoad())
       // Cluster loads from "near" addresses into combined SUnits.
       ClusterNeighboringLoads(Node);
   }
@@ -378,7 +379,7 @@
 }
 
 void ScheduleDAGSDNodes::AddSchedEdges() {
-  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
+  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
 
   // Check to see if the scheduler cares about latencies.
   bool UnitLatencies = ForceUnitLatencies();
@@ -390,14 +391,14 @@
 
     if (MainNode->isMachineOpcode()) {
       unsigned Opc = MainNode->getMachineOpcode();
-      const TargetInstrDesc &TID = TII->get(Opc);
-      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
-        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
+      const MCInstrDesc &MCID = TII->get(Opc);
+      for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
+        if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
           SU->isTwoAddress = true;
           break;
         }
       }
-      if (TID.isCommutable())
+      if (MCID.isCommutable())
         SU->isCommutable = true;
     }
 
@@ -520,14 +521,7 @@
     for (;DefIdx < NodeNumDefs; ++DefIdx) {
       if (!Node->hasAnyUseOfValue(DefIdx))
         continue;
-      if (Node->isMachineOpcode() &&
-          Node->getMachineOpcode() == TargetOpcode::EXTRACT_SUBREG) {
-        // Propagate the incoming (full-register) type. I doubt it's needed.
-        ValueType = Node->getOperand(0).getValueType();
-      }
-      else {
-        ValueType = Node->getValueType(DefIdx);
-      }
+      ValueType = Node->getValueType(DefIdx);
       ++DefIdx;
       return; // Found a normal regdef.
     }

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h Sat Jul  2 22:28:07 2011
@@ -140,7 +140,7 @@
       }
 
       unsigned GetIdx() const {
-        return DefIdx;
+        return DefIdx-1;
       }
 
       void Advance();

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Sat Jul  2 22:28:07 2011
@@ -5428,55 +5428,6 @@
 
 } // end anonymous namespace
 
-/// isAllocatableRegister - If the specified register is safe to allocate,
-/// i.e. it isn't a stack pointer or some other special register, return the
-/// register class for the register.  Otherwise, return null.
-static const TargetRegisterClass *
-isAllocatableRegister(unsigned Reg, MachineFunction &MF,
-                      const TargetLowering &TLI,
-                      const TargetRegisterInfo *TRI) {
-  EVT FoundVT = MVT::Other;
-  const TargetRegisterClass *FoundRC = 0;
-  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
-       E = TRI->regclass_end(); RCI != E; ++RCI) {
-    EVT ThisVT = MVT::Other;
-
-    const TargetRegisterClass *RC = *RCI;
-    if (!RC->isAllocatable())
-      continue;
-    // If none of the value types for this register class are valid, we
-    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
-    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
-         I != E; ++I) {
-      if (TLI.isTypeLegal(*I)) {
-        // If we have already found this register in a different register class,
-        // choose the one with the largest VT specified.  For example, on
-        // PowerPC, we favor f64 register classes over f32.
-        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
-          ThisVT = *I;
-          break;
-        }
-      }
-    }
-
-    if (ThisVT == MVT::Other) continue;
-
-    // NOTE: This isn't ideal.  In particular, this might allocate the
-    // frame pointer in functions that need it (due to them not being taken
-    // out of allocation, because a variable sized allocation hasn't been seen
-    // yet).  This is a slight code pessimization, but should still work.
-    ArrayRef<unsigned> RawOrder = RC->getRawAllocationOrder(MF);
-    if (std::find(RawOrder.begin(), RawOrder.end(), Reg) != RawOrder.end()) {
-      // We found a matching register class.  Keep looking at others in case
-      // we find one with larger registers that this physreg is also in.
-      FoundRC = RC;
-      FoundVT = ThisVT;
-      break;
-    }
-  }
-  return FoundRC;
-}
-
 /// GetRegistersForValue - Assign registers (virtual or physical) for the
 /// specified operand.  We prefer to assign virtual registers, to allow the
 /// register allocator to handle the assignment process.  However, if the asm
@@ -5611,58 +5562,6 @@
     return;
   }
 
-  // This is a reference to a register class that doesn't directly correspond
-  // to an LLVM register class.  Allocate NumRegs consecutive, available,
-  // registers from the class.
-  std::vector<unsigned> RegClassRegs
-    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
-                                            OpInfo.ConstraintVT);
-
-  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
-  BitVector Reserved = TRI->getReservedRegs(MF);
-  unsigned NumAllocated = 0;
-  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
-    unsigned Reg = RegClassRegs[i];
-    // Filter out the reserved registers, but note that reserved registers are
-    // not fully determined at this point. We may still decide we need a frame
-    // pointer.
-    if (Reserved.test(Reg))
-      continue;
-    // See if this register is available.
-    if ((isOutReg && OutputRegs.count(Reg)) ||   // Already used.
-        (isInReg  && InputRegs.count(Reg))) {    // Already used.
-      // Make sure we find consecutive registers.
-      NumAllocated = 0;
-      continue;
-    }
-
-    // Check to see if this register is allocatable (i.e. don't give out the
-    // stack pointer).
-    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
-    if (!RC) {        // Couldn't allocate this register.
-      // Reset NumAllocated to make sure we return consecutive registers.
-      NumAllocated = 0;
-      continue;
-    }
-
-    // Okay, this register is good, we can use it.
-    ++NumAllocated;
-
-    // If we allocated enough consecutive registers, succeed.
-    if (NumAllocated == NumRegs) {
-      unsigned RegStart = (i-NumAllocated)+1;
-      unsigned RegEnd   = i+1;
-      // Mark all of the allocated registers used.
-      for (unsigned i = RegStart; i != RegEnd; ++i)
-        Regs.push_back(RegClassRegs[i]);
-
-      OpInfo.AssignedRegs = RegsForValue(Regs, *RC->vt_begin(),
-                                         OpInfo.ConstraintVT);
-      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
-      return;
-    }
-  }
-
   // Otherwise, we couldn't allocate enough registers for this.
 }
 
@@ -6051,8 +5950,7 @@
       // Add the clobbered value to the operand list, so that the register
       // allocator is aware that the physreg got clobbered.
       if (!OpInfo.AssignedRegs.Regs.empty())
-        OpInfo.AssignedRegs.AddInlineAsmOperands(
-                                            InlineAsm::Kind_RegDefEarlyClobber,
+        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
                                                  false, 0, DAG,
                                                  AsmNodeOperands);
       break;
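
Clobbered physregs are now emitted with the dedicated InlineAsm::Kind_Clobber operand kind instead of being folded into Kind_RegDefEarlyClobber, and the schedulers accept that kind wherever they previously looked for register defs. A minimal sketch of that predicate (the helper name is illustrative only):

#include "llvm/InlineAsm.h"

// Treat clobbers like register defs when scanning inline-asm operand groups.
// groupDefinesOrClobbers is an illustrative helper around the checks above.
static bool groupDefinesOrClobbers(unsigned Flags) {
  return llvm::InlineAsm::isRegDefKind(Flags) ||
         llvm::InlineAsm::isRegDefEarlyClobberKind(Flags) ||
         llvm::InlineAsm::isClobberKind(Flags);
}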

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Sat Jul  2 22:28:07 2011
@@ -354,9 +354,9 @@
       const MachineBasicBlock *MBB = I;
       for (MachineBasicBlock::const_iterator
              II = MBB->begin(), IE = MBB->end(); II != IE; ++II) {
-        const TargetInstrDesc &TID = TM.getInstrInfo()->get(II->getOpcode());
+        const MCInstrDesc &MCID = TM.getInstrInfo()->get(II->getOpcode());
 
-        if ((TID.isCall() && !TID.isReturn()) ||
+        if ((MCID.isCall() && !MCID.isReturn()) ||
             II->isStackAligningInlineAsm()) {
           MFI->setHasCalls(true);
           goto done;
@@ -681,7 +681,7 @@
   // landing pad can thus be detected via the MachineModuleInfo.
   MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB);
 
-  const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
+  const MCInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL);
   BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
     .addSym(Label);
 
@@ -2611,9 +2611,9 @@
       if (EmitNodeInfo & OPFL_MemRefs) {
         // Only attach load or store memory operands if the generated
         // instruction may load or store.
-        const TargetInstrDesc &TID = TM.getInstrInfo()->get(TargetOpc);
-        bool mayLoad = TID.mayLoad();
-        bool mayStore = TID.mayStore();
+        const MCInstrDesc &MCID = TM.getInstrInfo()->get(TargetOpc);
+        bool mayLoad = MCID.mayLoad();
+        bool mayStore = MCID.mayStore();
 
         unsigned NumMemRefs = 0;
         for (SmallVector<MachineMemOperand*, 2>::const_iterator I =

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SelectionDAG/TargetLowering.cpp Sat Jul  2 22:28:07 2011
@@ -2737,13 +2737,6 @@
   }
 }
 
-std::vector<unsigned> TargetLowering::
-getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  EVT VT) const {
-  return std::vector<unsigned>();
-}
-
-
 std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
 getRegForInlineAsmConstraint(const std::string &Constraint,
                              EVT VT) const {

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/ShadowStackGC.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/ShadowStackGC.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/ShadowStackGC.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/ShadowStackGC.cpp Sat Jul  2 22:28:07 2011
@@ -202,6 +202,7 @@
       NumMeta = I + 1;
     Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
   }
+  Metadata.resize(NumMeta);
 
   const Type *Int32Ty = Type::getInt32Ty(F.getContext());
   
@@ -212,8 +213,7 @@
 
   Constant *DescriptorElts[] = {
     ConstantStruct::get(FrameMapTy, BaseElts),
-    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta),
-                       Metadata.begin(), NumMeta)
+    ConstantArray::get(ArrayType::get(VoidPtr, NumMeta), Metadata)
   };
 
   Type *EltTys[] = { DescriptorElts[0]->getType(),DescriptorElts[1]->getType()};
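
The ShadowStackGC change above trims the metadata vector to the real element count and uses the ArrayRef-taking ConstantArray::get overload rather than the old (begin, length) form. A minimal sketch of that usage, assuming illustrative names:

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include <vector>

// Trim the element vector to the live entries (as in the patch, Count is no
// larger than the vector), then pass the whole container to the
// ArrayRef-taking ConstantArray::get. buildConstantArray is an illustrative
// helper, not part of the patch.
static llvm::Constant *buildConstantArray(llvm::Type *EltTy,
                                          std::vector<llvm::Constant*> &Elts,
                                          unsigned Count) {
  Elts.resize(Count);
  return llvm::ConstantArray::get(llvm::ArrayType::get(EltTy, Count), Elts);
}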

Removed: llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.cpp?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.cpp (removed)
@@ -1,1539 +0,0 @@
-//===-- SimpleRegisterCoalescing.cpp - Register Coalescing ----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements a simple register coalescing pass that attempts to
-// aggressively coalesce every register copy that it can.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "regcoalescing"
-#include "SimpleRegisterCoalescing.h"
-#include "VirtRegMap.h"
-#include "LiveDebugVariables.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/Value.h"
-#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/OwningPtr.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
-#include <algorithm>
-#include <cmath>
-using namespace llvm;
-
-STATISTIC(numJoins    , "Number of interval joins performed");
-STATISTIC(numCrossRCs , "Number of cross class joins performed");
-STATISTIC(numCommutes , "Number of instruction commuting performed");
-STATISTIC(numExtends  , "Number of copies extended");
-STATISTIC(NumReMats   , "Number of instructions re-materialized");
-STATISTIC(numPeep     , "Number of identity moves eliminated after coalescing");
-STATISTIC(numAborts   , "Number of times interval joining aborted");
-
-char SimpleRegisterCoalescing::ID = 0;
-static cl::opt<bool>
-EnableJoining("join-liveintervals",
-              cl::desc("Coalesce copies (default=true)"),
-              cl::init(true));
-
-static cl::opt<bool>
-DisableCrossClassJoin("disable-cross-class-join",
-               cl::desc("Avoid coalescing cross register class copies"),
-               cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-EnablePhysicalJoin("join-physregs",
-                   cl::desc("Join physical register copies"),
-                   cl::init(false), cl::Hidden);
-
-static cl::opt<bool>
-VerifyCoalescing("verify-coalescing",
-         cl::desc("Verify machine instrs before and after register coalescing"),
-         cl::Hidden);
-
-INITIALIZE_AG_PASS_BEGIN(SimpleRegisterCoalescing, RegisterCoalescer,
-                "simple-register-coalescing", "Simple Register Coalescing", 
-                false, false, true)
-INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
-INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
-INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
-INITIALIZE_PASS_DEPENDENCY(PHIElimination)
-INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
-INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
-INITIALIZE_AG_PASS_END(SimpleRegisterCoalescing, RegisterCoalescer,
-                "simple-register-coalescing", "Simple Register Coalescing", 
-                false, false, true)
-
-char &llvm::SimpleRegisterCoalescingID = SimpleRegisterCoalescing::ID;
-
-void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
-  AU.setPreservesCFG();
-  AU.addRequired<AliasAnalysis>();
-  AU.addRequired<LiveIntervals>();
-  AU.addPreserved<LiveIntervals>();
-  AU.addRequired<LiveDebugVariables>();
-  AU.addPreserved<LiveDebugVariables>();
-  AU.addPreserved<SlotIndexes>();
-  AU.addRequired<MachineLoopInfo>();
-  AU.addPreserved<MachineLoopInfo>();
-  AU.addPreservedID(MachineDominatorsID);
-  AU.addPreservedID(StrongPHIEliminationID);
-  AU.addPreservedID(PHIEliminationID);
-  AU.addPreservedID(TwoAddressInstructionPassID);
-  MachineFunctionPass::getAnalysisUsage(AU);
-}
-
-void SimpleRegisterCoalescing::markAsJoined(MachineInstr *CopyMI) {
-  /// Joined copies are not deleted immediately, but kept in JoinedCopies.
-  JoinedCopies.insert(CopyMI);
-
-  /// Mark all register operands of CopyMI as <undef> so they won't affect dead
-  /// code elimination.
-  for (MachineInstr::mop_iterator I = CopyMI->operands_begin(),
-       E = CopyMI->operands_end(); I != E; ++I)
-    if (I->isReg())
-      I->setIsUndef(true);
-}
-
-/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
-/// being the source and IntB being the dest, thus this defines a value number
-/// in IntB.  If the source value number (in IntA) is defined by a copy from B,
-/// see if we can merge these two pieces of B into a single value number,
-/// eliminating a copy.  For example:
-///
-///  A3 = B0
-///    ...
-///  B1 = A3      <- this copy
-///
-/// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
-/// value number to be replaced with B0 (which simplifies the B liveinterval).
-///
-/// This returns true if an interval was modified.
-///
-bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP,
-                                                    MachineInstr *CopyMI) {
-  // Bail if there is no dst interval - can happen when merging physical subreg
-  // operations.
-  if (!li_->hasInterval(CP.getDstReg()))
-    return false;
-
-  LiveInterval &IntA =
-    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
-  LiveInterval &IntB =
-    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
-  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
-
-  // BValNo is a value number in B that is defined by a copy from A.  'B3' in
-  // the example above.
-  LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
-  if (BLR == IntB.end()) return false;
-  VNInfo *BValNo = BLR->valno;
-
-  // Get the location that B is defined at.  Two options: either this value has
-  // an unknown definition point or it is defined at CopyIdx.  If unknown, we
-  // can't process it.
-  if (!BValNo->isDefByCopy()) return false;
-  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
-
-  // AValNo is the value number in A that defines the copy, A3 in the example.
-  SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
-  LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
-  // The live range might not exist after fun with physreg coalescing.
-  if (ALR == IntA.end()) return false;
-  VNInfo *AValNo = ALR->valno;
-  // If it's re-defined by an early clobber somewhere in the live range, then
-  // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
-  // See PR3149:
-  // 172     %ECX<def> = MOV32rr %reg1039<kill>
-  // 180     INLINEASM <es:subl $5,$1
-  //         sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9,
-  //         %EAX<kill>,
-  // 36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
-  // 188     %EAX<def> = MOV32rr %EAX<kill>
-  // 196     %ECX<def> = MOV32rr %ECX<kill>
-  // 204     %ECX<def> = MOV32rr %ECX<kill>
-  // 212     %EAX<def> = MOV32rr %EAX<kill>
-  // 220     %EAX<def> = MOV32rr %EAX
-  // 228     %reg1039<def> = MOV32rr %ECX<kill>
-  // The early clobber operand ties ECX input to the ECX def.
-  //
-  // The live interval of ECX is represented as this:
-  // %reg20,inf = [46,47:1)[174,230:0)  0@174-(230) 1@46-(47)
-  // The coalescer has no idea there was a def in the middle of [174,230].
-  if (AValNo->hasRedefByEC())
-    return false;
-
-  // If AValNo is defined as a copy from IntB, we can potentially process this.
-  // Get the instruction that defines this value number.
-  if (!CP.isCoalescable(AValNo->getCopy()))
-    return false;
-
-  // Get the LiveRange in IntB that this value number starts with.
-  LiveInterval::iterator ValLR =
-    IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
-  if (ValLR == IntB.end())
-    return false;
-
-  // Make sure that the end of the live range is inside the same block as
-  // CopyMI.
-  MachineInstr *ValLREndInst =
-    li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
-  if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
-    return false;
-
-  // Okay, we now know that ValLR ends in the same block that the CopyMI
-  // live-range starts.  If there are no intervening live ranges between them in
-  // IntB, we can merge them.
-  if (ValLR+1 != BLR) return false;
-
-  // If a live interval is a physical register, conservatively check if any
-  // of its aliases is overlapping the live interval of the virtual register.
-  // If so, do not coalesce.
-  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
-    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
-      if (li_->hasInterval(*AS) && IntA.overlaps(li_->getInterval(*AS))) {
-        DEBUG({
-            dbgs() << "\t\tInterfere with alias ";
-            li_->getInterval(*AS).print(dbgs(), tri_);
-          });
-        return false;
-      }
-  }
-
-  DEBUG({
-      dbgs() << "Extending: ";
-      IntB.print(dbgs(), tri_);
-    });
-
-  SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
-  // We are about to delete CopyMI, so need to remove it as the 'instruction
-  // that defines this value #'. Update the valnum with the new defining
-  // instruction #.
-  BValNo->def  = FillerStart;
-  BValNo->setCopy(0);
-
-  // Okay, we can merge them.  We need to insert a new liverange:
-  // [ValLR.end, BLR.begin) of either value number, then we merge the
-  // two value numbers.
-  IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));
-
-  // If the IntB live range is assigned to a physical register, and if that
-  // physreg has sub-registers, update their live intervals as well.
-  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
-    for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
-      if (!li_->hasInterval(*SR))
-        continue;
-      LiveInterval &SRLI = li_->getInterval(*SR);
-      SRLI.addRange(LiveRange(FillerStart, FillerEnd,
-                              SRLI.getNextValue(FillerStart, 0,
-                                                li_->getVNInfoAllocator())));
-    }
-  }
-
-  // Okay, merge "B1" into the same value number as "B0".
-  if (BValNo != ValLR->valno) {
-    // If B1 is killed by a PHI, then the merged live range must also be killed
-    // by the same PHI, as B0 and B1 can not overlap.
-    bool HasPHIKill = BValNo->hasPHIKill();
-    IntB.MergeValueNumberInto(BValNo, ValLR->valno);
-    if (HasPHIKill)
-      ValLR->valno->setHasPHIKill(true);
-  }
-  DEBUG({
-      dbgs() << "   result = ";
-      IntB.print(dbgs(), tri_);
-      dbgs() << "\n";
-    });
-
-  // If the source instruction was killing the source register before the
-  // merge, unset the isKill marker given the live range has been extended.
-  int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
-  if (UIdx != -1) {
-    ValLREndInst->getOperand(UIdx).setIsKill(false);
-  }
-
-  // If the copy instruction was killing the destination register before the
-  // merge, find the last use and trim the live range. That will also add the
-  // isKill marker.
-  if (ALR->end == CopyIdx)
-    li_->shrinkToUses(&IntA);
-
-  ++numExtends;
-  return true;
-}
-
-/// HasOtherReachingDefs - Return true if there are definitions of IntB
-/// other than BValNo val# that can reach uses of AValno val# of IntA.
-bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA,
-                                                    LiveInterval &IntB,
-                                                    VNInfo *AValNo,
-                                                    VNInfo *BValNo) {
-  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
-       AI != AE; ++AI) {
-    if (AI->valno != AValNo) continue;
-    LiveInterval::Ranges::iterator BI =
-      std::upper_bound(IntB.ranges.begin(), IntB.ranges.end(), AI->start);
-    if (BI != IntB.ranges.begin())
-      --BI;
-    for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
-      if (BI->valno == BValNo)
-        continue;
-      if (BI->start <= AI->start && BI->end > AI->start)
-        return true;
-      if (BI->start > AI->start && BI->start < AI->end)
-        return true;
-    }
-  }
-  return false;
-}
-
-/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
-/// IntA being the source and IntB being the dest, thus this defines a value
-/// number in IntB.  If the source value number (in IntA) is defined by a
-/// commutable instruction and its other operand is coalesced to the copy dest
-/// register, see if we can transform the copy into a noop by commuting the
-/// definition. For example,
-///
-///  A3 = op A2 B0<kill>
-///    ...
-///  B1 = A3      <- this copy
-///    ...
-///     = op A3   <- more uses
-///
-/// ==>
-///
-///  B2 = op B0 A2<kill>
-///    ...
-///  B1 = B2      <- now an identity copy
-///    ...
-///     = op B2   <- more uses
-///
-/// This returns true if an interval was modified.
-///
-bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP,
-                                                        MachineInstr *CopyMI) {
-  // FIXME: For now, only eliminate the copy by commuting its def when the
-  // source register is a virtual register. We want to guard against cases
-  // where the copy is a back edge copy and commuting the def lengthens the
-  // live interval of the source register to the entire loop.
-  if (CP.isPhys() && CP.isFlipped())
-    return false;
-
-  // Bail if there is no dst interval.
-  if (!li_->hasInterval(CP.getDstReg()))
-    return false;
-
-  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
-
-  LiveInterval &IntA =
-    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
-  LiveInterval &IntB =
-    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
-
-  // BValNo is a value number in B that is defined by a copy from A. 'B3' in
-  // the example above.
-  VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
-  if (!BValNo || !BValNo->isDefByCopy())
-    return false;
-
-  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
-
-  // AValNo is the value number in A that defines the copy, A3 in the example.
-  VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex());
-  assert(AValNo && "COPY source not live");
-
-  // If other defs can reach uses of this def, then it's not safe to perform
-  // the optimization.
-  if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill())
-    return false;
-  MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
-  if (!DefMI)
-    return false;
-  const TargetInstrDesc &TID = DefMI->getDesc();
-  if (!TID.isCommutable())
-    return false;
-  // If DefMI is a two-address instruction then commuting it will change the
-  // destination register.
-  int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
-  assert(DefIdx != -1);
-  unsigned UseOpIdx;
-  if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
-    return false;
-  unsigned Op1, Op2, NewDstIdx;
-  if (!tii_->findCommutedOpIndices(DefMI, Op1, Op2))
-    return false;
-  if (Op1 == UseOpIdx)
-    NewDstIdx = Op2;
-  else if (Op2 == UseOpIdx)
-    NewDstIdx = Op1;
-  else
-    return false;
-
-  MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
-  unsigned NewReg = NewDstMO.getReg();
-  if (NewReg != IntB.reg || !NewDstMO.isKill())
-    return false;
-
-  // Make sure there are no other definitions of IntB that would reach the
-  // uses which the new definition can reach.
-  if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
-    return false;
-
-  // Abort if the aliases of IntB.reg have values that are not simply the
-  // clobbers from the superreg.
-  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
-    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
-      if (li_->hasInterval(*AS) &&
-          HasOtherReachingDefs(IntA, li_->getInterval(*AS), AValNo, 0))
-        return false;
-
-  // If some of the uses of IntA.reg are already coalesced away, return false.
-  // It's not possible to determine whether it's safe to perform the coalescing.
-  for (MachineRegisterInfo::use_nodbg_iterator UI = 
-         mri_->use_nodbg_begin(IntA.reg), 
-       UE = mri_->use_nodbg_end(); UI != UE; ++UI) {
-    MachineInstr *UseMI = &*UI;
-    SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
-    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
-    if (ULR == IntA.end())
-      continue;
-    if (ULR->valno == AValNo && JoinedCopies.count(UseMI))
-      return false;
-  }
-
-  DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t'
-               << *DefMI);
-
-  // At this point we have decided that it is legal to do this
-  // transformation.  Start by commuting the instruction.
-  MachineBasicBlock *MBB = DefMI->getParent();
-  MachineInstr *NewMI = tii_->commuteInstruction(DefMI);
-  if (!NewMI)
-    return false;
-  if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
-      TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
-      !mri_->constrainRegClass(IntB.reg, mri_->getRegClass(IntA.reg)))
-    return false;
-  if (NewMI != DefMI) {
-    li_->ReplaceMachineInstrInMaps(DefMI, NewMI);
-    MBB->insert(DefMI, NewMI);
-    MBB->erase(DefMI);
-  }
-  unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
-  NewMI->getOperand(OpIdx).setIsKill();
-
-  // If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
-  // A = or A, B
-  // ...
-  // B = A
-  // ...
-  // C = A<kill>
-  // ...
-  //   = B
-
-  // Update uses of IntA of the specific Val# with IntB.
-  for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
-         UE = mri_->use_end(); UI != UE;) {
-    MachineOperand &UseMO = UI.getOperand();
-    MachineInstr *UseMI = &*UI;
-    ++UI;
-    if (JoinedCopies.count(UseMI))
-      continue;
-    if (UseMI->isDebugValue()) {
-      // FIXME These don't have an instruction index.  Not clear we have enough
-      // info to decide whether to do this replacement or not.  For now do it.
-      UseMO.setReg(NewReg);
-      continue;
-    }
-    SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
-    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
-    if (ULR == IntA.end() || ULR->valno != AValNo)
-      continue;
-    if (TargetRegisterInfo::isPhysicalRegister(NewReg))
-      UseMO.substPhysReg(NewReg, *tri_);
-    else
-      UseMO.setReg(NewReg);
-    if (UseMI == CopyMI)
-      continue;
-    if (!UseMI->isCopy())
-      continue;
-    if (UseMI->getOperand(0).getReg() != IntB.reg ||
-        UseMI->getOperand(0).getSubReg())
-      continue;
-
-    // This copy will become a noop. If it's defining a new val#, merge it into
-    // BValNo.
-    SlotIndex DefIdx = UseIdx.getDefIndex();
-    VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
-    if (!DVNI)
-      continue;
-    DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
-    assert(DVNI->def == DefIdx);
-    BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
-    markAsJoined(UseMI);
-  }
-
-  // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
-  // is updated.
-  VNInfo *ValNo = BValNo;
-  ValNo->def = AValNo->def;
-  ValNo->setCopy(0);
-  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
-       AI != AE; ++AI) {
-    if (AI->valno != AValNo) continue;
-    IntB.addRange(LiveRange(AI->start, AI->end, ValNo));
-  }
-  DEBUG(dbgs() << "\t\textended: " << IntB << '\n');
-
-  IntA.removeValNo(AValNo);
-  DEBUG(dbgs() << "\t\ttrimmed:  " << IntA << '\n');
-  ++numCommutes;
-  return true;
-}
-
-/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
-/// computation, replace the copy by rematerializing the definition.
-bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
-                                                       bool preserveSrcInt,
-                                                       unsigned DstReg,
-                                                       unsigned DstSubIdx,
-                                                       MachineInstr *CopyMI) {
-  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
-  LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
-  assert(SrcLR != SrcInt.end() && "Live range not found!");
-  VNInfo *ValNo = SrcLR->valno;
-  // If other defs can reach uses of this def, then it's not safe to perform
-  // the optimization.
-  if (ValNo->isPHIDef() || ValNo->isUnused() || ValNo->hasPHIKill())
-    return false;
-  MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
-  if (!DefMI)
-    return false;
-  assert(DefMI && "Defining instruction disappeared");
-  const TargetInstrDesc &TID = DefMI->getDesc();
-  if (!TID.isAsCheapAsAMove())
-    return false;
-  if (!tii_->isTriviallyReMaterializable(DefMI, AA))
-    return false;
-  bool SawStore = false;
-  if (!DefMI->isSafeToMove(tii_, AA, SawStore))
-    return false;
-  if (TID.getNumDefs() != 1)
-    return false;
-  if (!DefMI->isImplicitDef()) {
-    // Make sure the copy destination register class fits the instruction
-    // definition register class. The mismatch can happen as a result of earlier
-    // extract_subreg, insert_subreg, subreg_to_reg coalescing.
-    const TargetRegisterClass *RC = TID.OpInfo[0].getRegClass(tri_);
-    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
-      if (mri_->getRegClass(DstReg) != RC)
-        return false;
-    } else if (!RC->contains(DstReg))
-      return false;
-  }
-
-  // If destination register has a sub-register index on it, make sure it
-  // matches the instruction register class.
-  if (DstSubIdx) {
-    const TargetInstrDesc &TID = DefMI->getDesc();
-    if (TID.getNumDefs() != 1)
-      return false;
-    const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
-    const TargetRegisterClass *DstSubRC =
-      DstRC->getSubRegisterRegClass(DstSubIdx);
-    const TargetRegisterClass *DefRC = TID.OpInfo[0].getRegClass(tri_);
-    if (DefRC == DstRC)
-      DstSubIdx = 0;
-    else if (DefRC != DstSubRC)
-      return false;
-  }
-
-  RemoveCopyFlag(DstReg, CopyMI);
-
-  MachineBasicBlock *MBB = CopyMI->getParent();
-  MachineBasicBlock::iterator MII =
-    llvm::next(MachineBasicBlock::iterator(CopyMI));
-  tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
-  MachineInstr *NewMI = prior(MII);
-
-  // CopyMI may have implicit operands, transfer them over to the newly
-  // rematerialized instruction. And update implicit def interval valnos.
-  for (unsigned i = CopyMI->getDesc().getNumOperands(),
-         e = CopyMI->getNumOperands(); i != e; ++i) {
-    MachineOperand &MO = CopyMI->getOperand(i);
-    if (MO.isReg() && MO.isImplicit())
-      NewMI->addOperand(MO);
-    if (MO.isDef())
-      RemoveCopyFlag(MO.getReg(), CopyMI);
-  }
-
-  NewMI->copyImplicitOps(CopyMI);
-  li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
-  CopyMI->eraseFromParent();
-  ReMatCopies.insert(CopyMI);
-  ReMatDefs.insert(DefMI);
-  DEBUG(dbgs() << "Remat: " << *NewMI);
-  ++NumReMats;
-
-  // The source interval can become smaller because we removed a use.
-  if (preserveSrcInt)
-    li_->shrinkToUses(&SrcInt);
-
-  return true;
-}
-
-/// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
-/// update the subregister number if it is not zero. If DstReg is a
-/// physical register and the existing subregister number of the def / use
-/// being updated is not zero, make sure to set it to the correct physical
-/// subregister.
-void
-SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) {
-  bool DstIsPhys = CP.isPhys();
-  unsigned SrcReg = CP.getSrcReg();
-  unsigned DstReg = CP.getDstReg();
-  unsigned SubIdx = CP.getSubIdx();
-
-  // Update LiveDebugVariables.
-  ldv_->renameRegister(SrcReg, DstReg, SubIdx);
-
-  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
-       MachineInstr *UseMI = I.skipInstruction();) {
-    // A PhysReg copy that won't be coalesced can perhaps be rematerialized
-    // instead.
-    if (DstIsPhys) {
-      if (UseMI->isCopy() &&
-          !UseMI->getOperand(1).getSubReg() &&
-          !UseMI->getOperand(0).getSubReg() &&
-          UseMI->getOperand(1).getReg() == SrcReg &&
-          UseMI->getOperand(0).getReg() != SrcReg &&
-          UseMI->getOperand(0).getReg() != DstReg &&
-          !JoinedCopies.count(UseMI) &&
-          ReMaterializeTrivialDef(li_->getInterval(SrcReg), false,
-                                  UseMI->getOperand(0).getReg(), 0, UseMI))
-        continue;
-    }
-
-    SmallVector<unsigned,8> Ops;
-    bool Reads, Writes;
-    tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
-    bool Kills = false, Deads = false;
-
-    // Replace SrcReg with DstReg in all UseMI operands.
-    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
-      MachineOperand &MO = UseMI->getOperand(Ops[i]);
-      Kills |= MO.isKill();
-      Deads |= MO.isDead();
-
-      if (DstIsPhys)
-        MO.substPhysReg(DstReg, *tri_);
-      else
-        MO.substVirtReg(DstReg, SubIdx, *tri_);
-    }
-
-    // This instruction is a copy that will be removed.
-    if (JoinedCopies.count(UseMI))
-      continue;
-
-    if (SubIdx) {
-      // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
-      // read-modify-write of DstReg.
-      if (Deads)
-        UseMI->addRegisterDead(DstReg, tri_);
-      else if (!Reads && Writes)
-        UseMI->addRegisterDefined(DstReg, tri_);
-
-      // Kill flags apply to the whole physical register.
-      if (DstIsPhys && Kills)
-        UseMI->addRegisterKilled(DstReg, tri_);
-    }
-
-    DEBUG({
-        dbgs() << "\t\tupdated: ";
-        if (!UseMI->isDebugValue())
-          dbgs() << li_->getInstructionIndex(UseMI) << "\t";
-        dbgs() << *UseMI;
-      });
-  }
-}
-
-/// removeIntervalIfEmpty - Check if the live interval of a physical register
-/// is empty, if so remove it and also remove the empty intervals of its
-/// sub-registers. Return true if live interval is removed.
-static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
-                                  const TargetRegisterInfo *tri_) {
-  if (li.empty()) {
-    if (TargetRegisterInfo::isPhysicalRegister(li.reg))
-      for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) {
-        if (!li_->hasInterval(*SR))
-          continue;
-        LiveInterval &sli = li_->getInterval(*SR);
-        if (sli.empty())
-          li_->removeInterval(*SR);
-      }
-    li_->removeInterval(li.reg);
-    return true;
-  }
-  return false;
-}
-
-/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
-/// the val# it defines. If the live interval becomes empty, remove it as well.
-bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
-                                             MachineInstr *DefMI) {
-  SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
-  LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
-  if (DefIdx != MLR->valno->def)
-    return false;
-  li.removeValNo(MLR->valno);
-  return removeIntervalIfEmpty(li, li_, tri_);
-}
-
-void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg,
-                                              const MachineInstr *CopyMI) {
-  SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
-  if (li_->hasInterval(DstReg)) {
-    LiveInterval &LI = li_->getInterval(DstReg);
-    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
-      if (LR->valno->def == DefIdx)
-        LR->valno->setCopy(0);
-  }
-  if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
-    return;
-  for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
-    if (!li_->hasInterval(*AS))
-      continue;
-    LiveInterval &LI = li_->getInterval(*AS);
-    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
-      if (LR->valno->def == DefIdx)
-        LR->valno->setCopy(0);
-  }
-}
-
-/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
-/// We need to be careful about coalescing a source physical register with a
-/// virtual register. Once the coalescing is done, it cannot be broken and these
-/// are not spillable! If the destination interval uses are far away, think
-/// twice about coalescing them!
-bool SimpleRegisterCoalescing::shouldJoinPhys(CoalescerPair &CP) {
-  bool Allocatable = li_->isAllocatable(CP.getDstReg());
-  LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());
-
-  /// Always join simple intervals that are defined by a single copy from a
-  /// reserved register. This doesn't increase register pressure, so it is
-  /// always beneficial.
-  if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue())
-    return true;
-
-  if (!EnablePhysicalJoin) {
-    DEBUG(dbgs() << "\tPhysreg joins disabled.\n");
-    return false;
-  }
-
-  // Only coalesce to allocatable physreg, we don't want to risk modifying
-  // reserved registers.
-  if (!Allocatable) {
-    DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
-    return false;  // Not coalescable.
-  }
-
-  // Don't join with physregs that have a ridiculous number of live
-  // ranges. The data structure performance is really bad when that
-  // happens.
-  if (li_->hasInterval(CP.getDstReg()) &&
-      li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
-    ++numAborts;
-    DEBUG(dbgs()
-          << "\tPhysical register live interval too complicated, abort!\n");
-    return false;
-  }
-
-  // FIXME: Why are we skipping this test for partial copies?
-  //        CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
-  if (!CP.isPartial()) {
-    const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
-    unsigned Threshold = RegClassInfo.getNumAllocatableRegs(RC) * 2;
-    unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
-    if (Length > Threshold) {
-      ++numAborts;
-      DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
-      return false;
-    }
-  }
-  return true;
-}
-
-/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
-/// two virtual registers from different register classes.
-bool
-SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg,
-                                                unsigned DstReg,
-                                             const TargetRegisterClass *SrcRC,
-                                             const TargetRegisterClass *DstRC,
-                                             const TargetRegisterClass *NewRC) {
-  unsigned NewRCCount = RegClassInfo.getNumAllocatableRegs(NewRC);
-  // This heuristic is good enough in practice, but it's obviously not *right*.
-  // 4 is a magic number that works well enough for x86, ARM, etc. It filters
-  // out all but the most restrictive register classes.
-  if (NewRCCount > 4 ||
-      // Early exit if the function is fairly small; coalesce aggressively in
-      // that case. For really special register classes with 3 or
-      // fewer registers, be a bit more careful.
-      (li_->getFuncInstructionCount() / NewRCCount) < 8)
-    return true;
-  LiveInterval &SrcInt = li_->getInterval(SrcReg);
-  LiveInterval &DstInt = li_->getInterval(DstReg);
-  unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
-  unsigned DstSize = li_->getApproximateInstructionCount(DstInt);
-
-  // Coalesce aggressively if the intervals are small compared to the number of
-  // registers in the new class. The number 4 is fairly arbitrary, chosen to be
-  // less aggressive than the 8 used for the whole function size.
-  const unsigned ThresSize = 4 * NewRCCount;
-  if (SrcSize <= ThresSize && DstSize <= ThresSize)
-    return true;
-
-  // Estimate *register use density*. If it doubles or more, abort.
-  unsigned SrcUses = std::distance(mri_->use_nodbg_begin(SrcReg),
-                                   mri_->use_nodbg_end());
-  unsigned DstUses = std::distance(mri_->use_nodbg_begin(DstReg),
-                                   mri_->use_nodbg_end());
-  unsigned NewUses = SrcUses + DstUses;
-  unsigned NewSize = SrcSize + DstSize;
-  if (SrcRC != NewRC && SrcSize > ThresSize) {
-    unsigned SrcRCCount = RegClassInfo.getNumAllocatableRegs(SrcRC);
-    if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount)
-      return false;
-  }
-  if (DstRC != NewRC && DstSize > ThresSize) {
-    unsigned DstRCCount = RegClassInfo.getNumAllocatableRegs(DstRC);
-    if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount)
-      return false;
-  }
-  return true;
-}
-
-
-/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
-/// which are the src/dst of the copy instruction CopyMI.  This returns true
-/// if the copy was successfully coalesced away. If it is not currently
-/// possible to coalesce this interval, but it may be possible if other
-/// things get coalesced, then it returns true by reference in 'Again'.
-bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
-  MachineInstr *CopyMI = TheCopy.MI;
-
-  Again = false;
-  if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
-    return false; // Already done.
-
-  DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);
-
-  CoalescerPair CP(*tii_, *tri_);
-  if (!CP.setRegisters(CopyMI)) {
-    DEBUG(dbgs() << "\tNot coalescable.\n");
-    return false;
-  }
-
-  // If they are already joined we continue.
-  if (CP.getSrcReg() == CP.getDstReg()) {
-    markAsJoined(CopyMI);
-    DEBUG(dbgs() << "\tCopy already coalesced.\n");
-    return false;  // Not coalescable.
-  }
-
-  DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_)
-               << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
-               << "\n");
-
-  // Enforce policies.
-  if (CP.isPhys()) {
-    if (!shouldJoinPhys(CP)) {
-      // Before giving up on coalescing, if the source is defined by a
-      // trivial computation, try rematerializing it.
-      if (!CP.isFlipped() &&
-          ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
-                                  CP.getDstReg(), 0, CopyMI))
-        return true;
-      return false;
-    }
-  } else {
-    // Avoid constraining virtual register regclass too much.
-    if (CP.isCrossClass()) {
-      DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n");
-      if (DisableCrossClassJoin) {
-        DEBUG(dbgs() << "\tCross-class joins disabled.\n");
-        return false;
-      }
-      if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
-                                 mri_->getRegClass(CP.getSrcReg()),
-                                 mri_->getRegClass(CP.getDstReg()),
-                                 CP.getNewRC())) {
-        DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n");
-        Again = true;  // May be possible to coalesce later.
-        return false;
-      }
-    }
-
-    // When possible, let DstReg be the larger interval.
-    if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
-                           li_->getInterval(CP.getDstReg()).ranges.size())
-      CP.flip();
-  }
-
-  // Okay, attempt to join these two intervals.  On failure, this returns false.
-  // Otherwise, if one of the intervals being joined is a physreg, this method
-  // always canonicalizes DstInt to be it.  The output "SrcInt" will not have
-  // been modified, so we can use this information below to update aliases.
-  if (!JoinIntervals(CP)) {
-    // Coalescing failed.
-
-    // If the source is defined by a trivial computation, try
-    // rematerializing it.
-    if (!CP.isFlipped() &&
-        ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
-                                CP.getDstReg(), 0, CopyMI))
-      return true;
-
-    // If we can eliminate the copy without merging the live ranges, do so now.
-    if (!CP.isPartial()) {
-      if (AdjustCopiesBackFrom(CP, CopyMI) ||
-          RemoveCopyByCommutingDef(CP, CopyMI)) {
-        markAsJoined(CopyMI);
-        DEBUG(dbgs() << "\tTrivial!\n");
-        return true;
-      }
-    }
-
-    // Otherwise, we are unable to join the intervals.
-    DEBUG(dbgs() << "\tInterference!\n");
-    Again = true;  // May be possible to coalesce later.
-    return false;
-  }
-
-  // Coalescing to a virtual register that is of a sub-register class of the
-  // other. Make sure the resulting register is set to the right register class.
-  if (CP.isCrossClass()) {
-    ++numCrossRCs;
-    mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
-  }
-
-  // Remember to delete the copy instruction.
-  markAsJoined(CopyMI);
-
-  UpdateRegDefsUses(CP);
-
-  // If we have extended the live range of a physical register, make sure we
-  // update live-in lists as well.
-  if (CP.isPhys()) {
-    SmallVector<MachineBasicBlock*, 16> BlockSeq;
-    // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
-    // ranges for this, and they are preserved.
-    LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
-    for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
-         I != E; ++I ) {
-      li_->findLiveInMBBs(I->start, I->end, BlockSeq);
-      for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
-        MachineBasicBlock &block = *BlockSeq[idx];
-        if (!block.isLiveIn(CP.getDstReg()))
-          block.addLiveIn(CP.getDstReg());
-      }
-      BlockSeq.clear();
-    }
-  }
-
-  // SrcReg is guaranteed to be the register whose live interval is
-  // being merged.
-  li_->removeInterval(CP.getSrcReg());
-
-  // Update regalloc hint.
-  tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);
-
-  DEBUG({
-    LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
-    dbgs() << "\tJoined. Result = ";
-    DstInt.print(dbgs(), tri_);
-    dbgs() << "\n";
-  });
-
-  ++numJoins;
-  return true;
-}
-
-/// ComputeUltimateVN - Assuming we are going to join two live intervals,
-/// compute what the resultant value numbers for each value in the input two
-/// ranges will be.  This is complicated by copies between the two which can
-/// and will commonly cause multiple value numbers to be merged into one.
-///
-/// VNI is the value number that we're trying to resolve.  NewVNInfo keeps
-/// track of the new value number assignments for the result LiveInterval.
-/// ThisFromOther/OtherFromThis are maps that keep track of whether a value
-/// in this or the other interval is a copy from the opposite interval.
-/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
-/// already been assigned.
-///
-/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
-/// contains the value number the copy is from.
-///
-static unsigned ComputeUltimateVN(VNInfo *VNI,
-                                  SmallVector<VNInfo*, 16> &NewVNInfo,
-                                  DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
-                                  DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
-                                  SmallVector<int, 16> &ThisValNoAssignments,
-                                  SmallVector<int, 16> &OtherValNoAssignments) {
-  unsigned VN = VNI->id;
-
-  // If the VN has already been computed, just return it.
-  if (ThisValNoAssignments[VN] >= 0)
-    return ThisValNoAssignments[VN];
-  assert(ThisValNoAssignments[VN] != -2 && "Cyclic value numbers");
-
-  // If this val is not a copy from the other val, then it must be a new value
-  // number in the destination.
-  DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
-  if (I == ThisFromOther.end()) {
-    NewVNInfo.push_back(VNI);
-    return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
-  }
-  VNInfo *OtherValNo = I->second;
-
-  // Otherwise, this *is* a copy from the RHS.  If the other side has already
-  // been computed, return it.
-  if (OtherValNoAssignments[OtherValNo->id] >= 0)
-    return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];
-
-  // Mark this value number as currently being computed, then ask what the
-  // ultimate value # of the other value is.
-  ThisValNoAssignments[VN] = -2;
-  unsigned UltimateVN =
-    ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
-                      OtherValNoAssignments, ThisValNoAssignments);
-  return ThisValNoAssignments[VN] = UltimateVN;
-}
-
-/// JoinIntervals - Attempt to join these two intervals.  On failure, this
-/// returns false.
-bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) {
-  LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
-  DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });
-
-  // If a live interval is a physical register, check for interference with any
-  // aliases. The interference check implemented here is a bit more conservative
-  // than the full interference check below. We allow overlapping live ranges
-  // only when one is a copy of the other.
-  if (CP.isPhys()) {
-    for (const unsigned *AS = tri_->getAliasSet(CP.getDstReg()); *AS; ++AS){
-      if (!li_->hasInterval(*AS))
-        continue;
-      const LiveInterval &LHS = li_->getInterval(*AS);
-      LiveInterval::const_iterator LI = LHS.begin();
-      for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
-           RI != RE; ++RI) {
-        LI = std::lower_bound(LI, LHS.end(), RI->start);
-        // Does LHS have an overlapping live range starting before RI?
-        if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
-            (RI->start != RI->valno->def ||
-             !CP.isCoalescable(li_->getInstructionFromIndex(RI->start)))) {
-          DEBUG({
-            dbgs() << "\t\tInterference from alias: ";
-            LHS.print(dbgs(), tri_);
-            dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
-          });
-          return false;
-        }
-
-        // Check that LHS ranges beginning in this range are copies.
-        for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
-          if (LI->start != LI->valno->def ||
-              !CP.isCoalescable(li_->getInstructionFromIndex(LI->start))) {
-            DEBUG({
-              dbgs() << "\t\tInterference from alias: ";
-              LHS.print(dbgs(), tri_);
-              dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
-            });
-            return false;
-          }
-        }
-      }
-    }
-  }
-
-  // Compute the final value assignment, assuming that the live ranges can be
-  // coalesced.
-  SmallVector<int, 16> LHSValNoAssignments;
-  SmallVector<int, 16> RHSValNoAssignments;
-  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
-  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
-  SmallVector<VNInfo*, 16> NewVNInfo;
-
-  LiveInterval &LHS = li_->getOrCreateInterval(CP.getDstReg());
-  DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });
-
-  // Loop over the value numbers of the LHS, seeing if any are defined from
-  // the RHS.
-  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
-       i != e; ++i) {
-    VNInfo *VNI = *i;
-    if (VNI->isUnused() || !VNI->isDefByCopy())  // Src not defined by a copy?
-      continue;
-
-    // Never join with a register that has EarlyClobber redefs.
-    if (VNI->hasRedefByEC())
-      return false;
-
-    // DstReg is known to be a register in the LHS interval.  If the src is
-    // from the RHS interval, we can use its value #.
-    if (!CP.isCoalescable(VNI->getCopy()))
-      continue;
-
-    // Figure out the value # from the RHS.
-    LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
-    // The copy could be to an aliased physreg.
-    if (!lr) continue;
-    LHSValsDefinedFromRHS[VNI] = lr->valno;
-  }
-
-  // Loop over the value numbers of the RHS, seeing if any are defined from
-  // the LHS.
-  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
-       i != e; ++i) {
-    VNInfo *VNI = *i;
-    if (VNI->isUnused() || !VNI->isDefByCopy())  // Src not defined by a copy?
-      continue;
-
-    // Never join with a register that has EarlyClobber redefs.
-    if (VNI->hasRedefByEC())
-      return false;
-
-    // DstReg is known to be a register in the RHS interval.  If the src is
-    // from the LHS interval, we can use its value #.
-    if (!CP.isCoalescable(VNI->getCopy()))
-      continue;
-
-    // Figure out the value # from the LHS.
-    LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
-    // The copy could be to an aliased physreg.
-    if (!lr) continue;
-    RHSValsDefinedFromLHS[VNI] = lr->valno;
-  }
-
-  LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
-  RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
-  NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());
-
-  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
-       i != e; ++i) {
-    VNInfo *VNI = *i;
-    unsigned VN = VNI->id;
-    if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
-      continue;
-    ComputeUltimateVN(VNI, NewVNInfo,
-                      LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
-                      LHSValNoAssignments, RHSValNoAssignments);
-  }
-  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
-       i != e; ++i) {
-    VNInfo *VNI = *i;
-    unsigned VN = VNI->id;
-    if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
-      continue;
-    // If this value number isn't a copy from the LHS, it's a new number.
-    if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
-      NewVNInfo.push_back(VNI);
-      RHSValNoAssignments[VN] = NewVNInfo.size()-1;
-      continue;
-    }
-
-    ComputeUltimateVN(VNI, NewVNInfo,
-                      RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
-                      RHSValNoAssignments, LHSValNoAssignments);
-  }
-
-  // Armed with the mappings of LHS/RHS values to ultimate values, walk the
-  // interval lists to see if these intervals are coalescable.
-  LiveInterval::const_iterator I = LHS.begin();
-  LiveInterval::const_iterator IE = LHS.end();
-  LiveInterval::const_iterator J = RHS.begin();
-  LiveInterval::const_iterator JE = RHS.end();
-
-  // Skip ahead until the first place of potential sharing.
-  if (I != IE && J != JE) {
-    if (I->start < J->start) {
-      I = std::upper_bound(I, IE, J->start);
-      if (I != LHS.begin()) --I;
-    } else if (J->start < I->start) {
-      J = std::upper_bound(J, JE, I->start);
-      if (J != RHS.begin()) --J;
-    }
-  }
-
-  while (I != IE && J != JE) {
-    // Determine if these two live ranges overlap.
-    bool Overlaps;
-    if (I->start < J->start) {
-      Overlaps = I->end > J->start;
-    } else {
-      Overlaps = J->end > I->start;
-    }
-
-    // If so, check value # info to determine if they are really different.
-    if (Overlaps) {
-      // If the live range overlap will map to the same value number in the
-      // result liverange, we can still coalesce them.  If not, we can't.
-      if (LHSValNoAssignments[I->valno->id] !=
-          RHSValNoAssignments[J->valno->id])
-        return false;
-      // If it's re-defined by an early clobber somewhere in the live range,
-      // then conservatively abort coalescing.
-      if (NewVNInfo[LHSValNoAssignments[I->valno->id]]->hasRedefByEC())
-        return false;
-    }
-
-    if (I->end < J->end)
-      ++I;
-    else
-      ++J;
-  }
-
-  // Update kill info. Some live ranges are extended due to copy coalescing.
-  for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
-         E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
-    VNInfo *VNI = I->first;
-    unsigned LHSValID = LHSValNoAssignments[VNI->id];
-    if (VNI->hasPHIKill())
-      NewVNInfo[LHSValID]->setHasPHIKill(true);
-  }
-
-  // Update kill info. Some live ranges are extended due to copy coalescing.
-  for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
-         E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
-    VNInfo *VNI = I->first;
-    unsigned RHSValID = RHSValNoAssignments[VNI->id];
-    if (VNI->hasPHIKill())
-      NewVNInfo[RHSValID]->setHasPHIKill(true);
-  }
-
-  if (LHSValNoAssignments.empty())
-    LHSValNoAssignments.push_back(-1);
-  if (RHSValNoAssignments.empty())
-    RHSValNoAssignments.push_back(-1);
-
-  // If we get here, we know that we can coalesce the live ranges.  Ask the
-  // intervals to coalesce themselves now.
-  LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
-           mri_);
-  return true;
-}
-
-namespace {
-  // DepthMBBCompare - Comparison predicate that sorts first based on the loop
-  // depth of the basic block (the unsigned), and then on the MBB number.
-  struct DepthMBBCompare {
-    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
-    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
-      // Deeper loops first
-      if (LHS.first != RHS.first)
-        return LHS.first > RHS.first;
-
-      // Prefer blocks that are more connected in the CFG. This takes care of
-      // the most difficult copies first while intervals are short.
-      unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
-      unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
-      if (cl != cr)
-        return cl > cr;
-
-      // As a last resort, sort by block number.
-      return LHS.second->getNumber() < RHS.second->getNumber();
-    }
-  };
-}
-
-void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB,
-                                               std::vector<CopyRec> &TryAgain) {
-  DEBUG(dbgs() << MBB->getName() << ":\n");
-
-  SmallVector<CopyRec, 8> VirtCopies;
-  SmallVector<CopyRec, 8> PhysCopies;
-  SmallVector<CopyRec, 8> ImpDefCopies;
-  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
-       MII != E;) {
-    MachineInstr *Inst = MII++;
-
-    // If this isn't a copy or an extract_subreg, we can't join intervals.
-    unsigned SrcReg, DstReg;
-    if (Inst->isCopy()) {
-      DstReg = Inst->getOperand(0).getReg();
-      SrcReg = Inst->getOperand(1).getReg();
-    } else if (Inst->isSubregToReg()) {
-      DstReg = Inst->getOperand(0).getReg();
-      SrcReg = Inst->getOperand(2).getReg();
-    } else
-      continue;
-
-    bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
-    bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
-    if (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty())
-      ImpDefCopies.push_back(CopyRec(Inst, 0));
-    else if (SrcIsPhys || DstIsPhys)
-      PhysCopies.push_back(CopyRec(Inst, 0));
-    else
-      VirtCopies.push_back(CopyRec(Inst, 0));
-  }
-
-  // Try coalescing implicit copies and insert_subreg <undef> first,
-  // followed by copies to / from physical registers, then finally copies
-  // from virtual registers to virtual registers.
-  for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) {
-    CopyRec &TheCopy = ImpDefCopies[i];
-    bool Again = false;
-    if (!JoinCopy(TheCopy, Again))
-      if (Again)
-        TryAgain.push_back(TheCopy);
-  }
-  for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
-    CopyRec &TheCopy = PhysCopies[i];
-    bool Again = false;
-    if (!JoinCopy(TheCopy, Again))
-      if (Again)
-        TryAgain.push_back(TheCopy);
-  }
-  for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
-    CopyRec &TheCopy = VirtCopies[i];
-    bool Again = false;
-    if (!JoinCopy(TheCopy, Again))
-      if (Again)
-        TryAgain.push_back(TheCopy);
-  }
-}
-
-void SimpleRegisterCoalescing::joinIntervals() {
-  DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
-
-  std::vector<CopyRec> TryAgainList;
-  if (loopInfo->empty()) {
-    // If there are no loops in the function, join intervals in function order.
-    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
-         I != E; ++I)
-      CopyCoalesceInMBB(I, TryAgainList);
-  } else {
-    // Otherwise, join intervals in inner loops before other intervals.
-    // Unfortunately we can't just iterate over loop hierarchy here because
-    // there may be more MBB's than BB's.  Collect MBB's for sorting.
-
-    // Join intervals in the function prolog first. We want to join physical
-    // registers with virtual registers before the intervals get too long.
-    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
-    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();I != E;++I){
-      MachineBasicBlock *MBB = I;
-      MBBs.push_back(std::make_pair(loopInfo->getLoopDepth(MBB), I));
-    }
-
-    // Sort by loop depth.
-    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());
-
-    // Finally, join intervals in loop nest order.
-    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
-      CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
-  }
-
-  // Joining intervals can allow other intervals to be joined.  Iteratively join
-  // until we make no progress.
-  bool ProgressMade = true;
-  while (ProgressMade) {
-    ProgressMade = false;
-
-    for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
-      CopyRec &TheCopy = TryAgainList[i];
-      if (!TheCopy.MI)
-        continue;
-
-      bool Again = false;
-      bool Success = JoinCopy(TheCopy, Again);
-      if (Success || !Again) {
-        TheCopy.MI = 0;   // Mark this one as done.
-        ProgressMade = true;
-      }
-    }
-  }
-}
-
-void SimpleRegisterCoalescing::releaseMemory() {
-  JoinedCopies.clear();
-  ReMatCopies.clear();
-  ReMatDefs.clear();
-}
-
-bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
-  mf_ = &fn;
-  mri_ = &fn.getRegInfo();
-  tm_ = &fn.getTarget();
-  tri_ = tm_->getRegisterInfo();
-  tii_ = tm_->getInstrInfo();
-  li_ = &getAnalysis<LiveIntervals>();
-  ldv_ = &getAnalysis<LiveDebugVariables>();
-  AA = &getAnalysis<AliasAnalysis>();
-  loopInfo = &getAnalysis<MachineLoopInfo>();
-
-  DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
-               << "********** Function: "
-               << ((Value*)mf_->getFunction())->getName() << '\n');
-
-  if (VerifyCoalescing)
-    mf_->verify(this, "Before register coalescing");
-
-  RegClassInfo.runOnMachineFunction(fn);
-
-  // Join (coalesce) intervals if requested.
-  if (EnableJoining) {
-    joinIntervals();
-    DEBUG({
-        dbgs() << "********** INTERVALS POST JOINING **********\n";
-        for (LiveIntervals::iterator I = li_->begin(), E = li_->end();
-             I != E; ++I){
-          I->second->print(dbgs(), tri_);
-          dbgs() << "\n";
-        }
-      });
-  }
-
-  // Perform a final pass over the instructions and compute spill weights
-  // and remove identity moves.
-  SmallVector<unsigned, 4> DeadDefs;
-  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
-       mbbi != mbbe; ++mbbi) {
-    MachineBasicBlock* mbb = mbbi;
-    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
-         mii != mie; ) {
-      MachineInstr *MI = mii;
-      if (JoinedCopies.count(MI)) {
-        // Delete all coalesced copies.
-        bool DoDelete = true;
-        assert(MI->isCopyLike() && "Unrecognized copy instruction");
-        unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
-        if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
-            MI->getNumOperands() > 2)
-          // Do not delete extract_subreg, insert_subreg of physical
-          // registers unless the definition is dead. e.g.
-          // %D0<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
-          // or else the scavenger may complain. LowerSubregs will
-          // delete them later.
-          DoDelete = false;
-
-        if (MI->allDefsAreDead()) {
-          if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
-              li_->hasInterval(SrcReg))
-            li_->shrinkToUses(&li_->getInterval(SrcReg));
-          DoDelete = true;
-        }
-        if (!DoDelete) {
-          // We need the instruction to adjust liveness, so make it a KILL.
-          if (MI->isSubregToReg()) {
-            MI->RemoveOperand(3);
-            MI->RemoveOperand(1);
-          }
-          MI->setDesc(tii_->get(TargetOpcode::KILL));
-          mii = llvm::next(mii);
-        } else {
-          li_->RemoveMachineInstrFromMaps(MI);
-          mii = mbbi->erase(mii);
-          ++numPeep;
-        }
-        continue;
-      }
-
-      // Now check if this is a remat'ed def instruction which is now dead.
-      if (ReMatDefs.count(MI)) {
-        bool isDead = true;
-        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-          const MachineOperand &MO = MI->getOperand(i);
-          if (!MO.isReg())
-            continue;
-          unsigned Reg = MO.getReg();
-          if (!Reg)
-            continue;
-          if (TargetRegisterInfo::isVirtualRegister(Reg))
-            DeadDefs.push_back(Reg);
-          if (MO.isDead())
-            continue;
-          if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
-              !mri_->use_nodbg_empty(Reg)) {
-            isDead = false;
-            break;
-          }
-        }
-        if (isDead) {
-          while (!DeadDefs.empty()) {
-            unsigned DeadDef = DeadDefs.back();
-            DeadDefs.pop_back();
-            RemoveDeadDef(li_->getInterval(DeadDef), MI);
-          }
-          li_->RemoveMachineInstrFromMaps(mii);
-          mii = mbbi->erase(mii);
-          continue;
-        } else
-          DeadDefs.clear();
-      }
-
-      ++mii;
-
-      // Check for kill flags that are now unnecessary.
-      if (li_->isNotInMIMap(MI)) continue;
-      SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
-      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-        MachineOperand &MO = MI->getOperand(i);
-        if (!MO.isReg() || !MO.isKill()) continue;
-        unsigned reg = MO.getReg();
-        if (!reg || !li_->hasInterval(reg)) continue;
-        if (!li_->getInterval(reg).killedAt(DefIdx)) {
-          MO.setIsKill(false);
-          continue;
-        }
-        // When leaving a kill flag on a physreg, check if any subregs should
-        // remain alive.
-        if (!TargetRegisterInfo::isPhysicalRegister(reg))
-          continue;
-        for (const unsigned *SR = tri_->getSubRegisters(reg);
-             unsigned S = *SR; ++SR)
-          if (li_->hasInterval(S) && li_->getInterval(S).liveAt(DefIdx))
-            MI->addRegisterDefined(S, tri_);
-      }
-    }
-  }
-
-  DEBUG(dump());
-  DEBUG(ldv_->dump());
-  if (VerifyCoalescing)
-    mf_->verify(this, "After register coalescing");
-  return true;
-}
-
-/// print - Implement the dump method.
-void SimpleRegisterCoalescing::print(raw_ostream &O, const Module* m) const {
-   li_->print(O, m);
-}
-
-RegisterCoalescer* llvm::createSimpleRegisterCoalescer() {
-  return new SimpleRegisterCoalescing();
-}
-
-// Make sure that anything that uses RegisterCoalescer pulls in this file...
-DEFINING_FILE_FOR(SimpleRegisterCoalescing)
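
The cross-class profitability test in the removed isWinToJoinCrossClass above
gives up, for sufficiently large intervals, when the merged interval's use
density (uses per instruction, scaled by the number of allocatable registers
in the class) would more than double relative to the source or destination
interval. A minimal standalone sketch of that comparison follows; it is an
illustration only, not part of this patch, and the helper name and example
numbers are invented.

#include <cassert>

// Same inequality as the removed heuristic:
//   NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount
// i.e. (NewUses/NewSize)/NewRCCount > 2 * (SrcUses/SrcSize)/SrcRCCount,
// written with cross-multiplication to avoid integer division.
static bool useDensityMoreThanDoubles(unsigned SrcUses, unsigned SrcSize,
                                      unsigned SrcRCCount,
                                      unsigned NewUses, unsigned NewSize,
                                      unsigned NewRCCount) {
  return NewUses * SrcSize * SrcRCCount > 2 * SrcUses * NewSize * NewRCCount;
}

int main() {
  // SrcReg: 10 uses over ~40 instructions in a class with 16 allocatable regs.
  // Merged interval: 30 uses over ~50 instructions in a class with 4 regs.
  // Density rises far more than 2x, so the heuristic would abort coalescing.
  assert(useDensityMoreThanDoubles(10, 40, 16, 30, 50, 4));
  return 0;
}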

Removed: llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.h?rev=134362&view=auto
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SimpleRegisterCoalescing.h (removed)
@@ -1,162 +0,0 @@
-//===-- SimpleRegisterCoalescing.h - Register Coalescing --------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements a simple register copy coalescing phase.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_SIMPLE_REGISTER_COALESCING_H
-#define LLVM_CODEGEN_SIMPLE_REGISTER_COALESCING_H
-
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/RegisterCoalescer.h"
-#include "RegisterClassInfo.h"
-
-namespace llvm {
-  class SimpleRegisterCoalescing;
-  class LiveDebugVariables;
-  class TargetRegisterInfo;
-  class TargetInstrInfo;
-  class VirtRegMap;
-  class MachineLoopInfo;
-
-  /// CopyRec - Representation for copy instructions in coalescer queue.
-  ///
-  struct CopyRec {
-    MachineInstr *MI;
-    unsigned LoopDepth;
-    CopyRec(MachineInstr *mi, unsigned depth)
-      : MI(mi), LoopDepth(depth) {}
-  };
-
-  class SimpleRegisterCoalescing : public MachineFunctionPass,
-                                   public RegisterCoalescer {
-    MachineFunction* mf_;
-    MachineRegisterInfo* mri_;
-    const TargetMachine* tm_;
-    const TargetRegisterInfo* tri_;
-    const TargetInstrInfo* tii_;
-    LiveIntervals *li_;
-    LiveDebugVariables *ldv_;
-    const MachineLoopInfo* loopInfo;
-    AliasAnalysis *AA;
-    RegisterClassInfo RegClassInfo;
-
-    /// JoinedCopies - Keep track of copies eliminated due to coalescing.
-    ///
-    SmallPtrSet<MachineInstr*, 32> JoinedCopies;
-
-    /// ReMatCopies - Keep track of copies eliminated due to remat.
-    ///
-    SmallPtrSet<MachineInstr*, 32> ReMatCopies;
-
-    /// ReMatDefs - Keep track of definition instructions which have
-    /// been remat'ed.
-    SmallPtrSet<MachineInstr*, 8> ReMatDefs;
-
-  public:
-    static char ID; // Pass identification, replacement for typeid
-    SimpleRegisterCoalescing() : MachineFunctionPass(ID) {
-      initializeSimpleRegisterCoalescingPass(*PassRegistry::getPassRegistry());
-    }
-
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
-    virtual void releaseMemory();
-
-    /// runOnMachineFunction - pass entry point
-    virtual bool runOnMachineFunction(MachineFunction&);
-
-    bool coalesceFunction(MachineFunction &mf, RegallocQuery &) {
-      // This runs as an independent pass, so don't do anything.
-      return false;
-    }
-
-    /// print - Implement the dump method.
-    virtual void print(raw_ostream &O, const Module* = 0) const;
-
-  private:
-    /// joinIntervals - join compatible live intervals
-    void joinIntervals();
-
-    /// CopyCoalesceInMBB - Coalesce copies in the specified MBB, putting
-    /// copies that cannot yet be coalesced into the "TryAgain" list.
-    void CopyCoalesceInMBB(MachineBasicBlock *MBB,
-                           std::vector<CopyRec> &TryAgain);
-
-    /// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
-    /// which are the src/dst of the copy instruction CopyMI.  This returns true
-    /// if the copy was successfully coalesced away. If it is not currently
-    /// possible to coalesce this interval, but it may be possible if other
-    /// things get coalesced, then it returns true by reference in 'Again'.
-    bool JoinCopy(CopyRec &TheCopy, bool &Again);
-
-    /// JoinIntervals - Attempt to join these two intervals.  On failure, this
-    /// returns false.  The output "SrcInt" will not have been modified, so we can
-    /// use this information below to update aliases.
-    bool JoinIntervals(CoalescerPair &CP);
-
-    /// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy. If
-    /// the source value number is defined by a copy from the destination reg,
-    /// see if we can merge these two destination reg valno#s into a single
-    /// value number, eliminating a copy.
-    bool AdjustCopiesBackFrom(const CoalescerPair &CP, MachineInstr *CopyMI);
-
-    /// HasOtherReachingDefs - Return true if there are definitions of IntB
-    /// other than BValNo val# that can reach uses of AValNo val# of IntA.
-    bool HasOtherReachingDefs(LiveInterval &IntA, LiveInterval &IntB,
-                              VNInfo *AValNo, VNInfo *BValNo);
-
-    /// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy.
-    /// If the source value number is defined by a commutable instruction and
-    /// its other operand is coalesced to the copy dest register, see if we
-    /// can transform the copy into a noop by commuting the definition.
-    bool RemoveCopyByCommutingDef(const CoalescerPair &CP,MachineInstr *CopyMI);
-
-    /// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
-    /// computation, replace the copy by rematerializing the definition.
-    /// If PreserveSrcInt is true, make sure SrcInt is valid after the call.
-    bool ReMaterializeTrivialDef(LiveInterval &SrcInt, bool PreserveSrcInt,
-                                 unsigned DstReg, unsigned DstSubIdx,
-                                 MachineInstr *CopyMI);
-
-    /// shouldJoinPhys - Return true if a physreg copy should be joined.
-    bool shouldJoinPhys(CoalescerPair &CP);
-
-    /// isWinToJoinCrossClass - Return true if it's profitable to coalesce
-    /// two virtual registers from different register classes.
-    bool isWinToJoinCrossClass(unsigned SrcReg,
-                               unsigned DstReg,
-                               const TargetRegisterClass *SrcRC,
-                               const TargetRegisterClass *DstRC,
-                               const TargetRegisterClass *NewRC);
-
-    /// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
-    /// update the subregister number if it is not zero. If DstReg is a
-    /// physical register and the existing subregister number of the def / use
-    /// being updated is not zero, make sure to set it to the correct physical
-    /// subregister.
-    void UpdateRegDefsUses(const CoalescerPair &CP);
-
-    /// RemoveDeadDef - If a def of a live interval is now determined dead,
-    /// remove the val# it defines. If the live interval becomes empty, remove
-    /// it as well.
-    bool RemoveDeadDef(LiveInterval &li, MachineInstr *DefMI);
-
-    /// RemoveCopyFlag - If DstReg is no longer defined by CopyMI, clear the
-    /// VNInfo copy flag for DstReg and all aliases.
-    void RemoveCopyFlag(unsigned DstReg, const MachineInstr *CopyMI);
-
-    /// markAsJoined - Remember that CopyMI has already been joined.
-    void markAsJoined(MachineInstr *CopyMI);
-  };
-
-} // End llvm namespace
-
-#endif

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.cpp Sat Jul  2 22:28:07 2011
@@ -76,12 +76,14 @@
       return LSP.first;
     // There may not be a call instruction (?) in which case we ignore LPad.
     LSP.second = LSP.first;
-    for (MachineBasicBlock::const_iterator I = FirstTerm, E = MBB->begin();
-         I != E; --I)
+    for (MachineBasicBlock::const_iterator I = MBB->end(), E = MBB->begin();
+         I != E;) {
+      --I;
       if (I->getDesc().isCall()) {
         LSP.second = LIS.getInstructionIndex(I);
         break;
       }
+    }
   }
 
   // If CurLI is live into a landing pad successor, move the last split point
@@ -122,7 +124,7 @@
   // Compute per-live block info.
   if (!calcLiveBlockInfo()) {
     // FIXME: calcLiveBlockInfo found inconsistencies in the live range.
-    // I am looking at you, SimpleRegisterCoalescing!
+    // I am looking at you, RegisterCoalescer!
     DidRepairRange = true;
     ++NumRepairs;
     DEBUG(dbgs() << "*** Fixing inconsistent live interval! ***\n");
@@ -165,7 +167,7 @@
     tie(Start, Stop) = LIS.getSlotIndexes()->getMBBRange(BI.MBB);
 
     // If the block contains no uses, the range must be live through. At one
-    // point, SimpleRegisterCoalescing could create dangling ranges that ended
+    // point, RegisterCoalescer could create dangling ranges that ended
     // mid-block.
     if (UseI == UseE || *UseI >= Stop) {
       ++NumThroughBlocks;
@@ -634,6 +636,7 @@
 void SplitEditor::selectIntv(unsigned Idx) {
   assert(Idx != 0 && "Cannot select the complement interval");
   assert(Idx < Edit->size() && "Can only select previously opened interval");
+  DEBUG(dbgs() << "    selectIntv " << OpenIdx << " -> " << Idx << '\n');
   OpenIdx = Idx;
 }
 
@@ -654,6 +657,24 @@
   return VNI->def;
 }
 
+SlotIndex SplitEditor::enterIntvAfter(SlotIndex Idx) {
+  assert(OpenIdx && "openIntv not called before enterIntvAfter");
+  DEBUG(dbgs() << "    enterIntvAfter " << Idx);
+  Idx = Idx.getBoundaryIndex();
+  VNInfo *ParentVNI = Edit->getParent().getVNInfoAt(Idx);
+  if (!ParentVNI) {
+    DEBUG(dbgs() << ": not live\n");
+    return Idx;
+  }
+  DEBUG(dbgs() << ": valno " << ParentVNI->id << '\n');
+  MachineInstr *MI = LIS.getInstructionFromIndex(Idx);
+  assert(MI && "enterIntvAfter called with invalid index");
+
+  VNInfo *VNI = defFromParent(OpenIdx, ParentVNI, Idx, *MI->getParent(),
+                              llvm::next(MachineBasicBlock::iterator(MI)));
+  return VNI->def;
+}
+
 SlotIndex SplitEditor::enterIntvAtEnd(MachineBasicBlock &MBB) {
   assert(OpenIdx && "openIntv not called before enterIntvAtEnd");
   SlotIndex End = LIS.getMBBEndIdx(&MBB);
@@ -1005,12 +1026,6 @@
         markComplexMapped(i, ParentVNI);
   }
 
-#ifndef NDEBUG
-  // Every new interval must have a def by now, otherwise the split is bogus.
-  for (LiveRangeEdit::iterator I = Edit->begin(), E = Edit->end(); I != E; ++I)
-    assert((*I)->hasAtLeastOneValue() && "Split interval has no value");
-#endif
-
   // Transfer the simply mapped values, check if any are skipped.
   bool Skipped = transferValues();
   if (Skipped)

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.h (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/SplitKit.h Sat Jul  2 22:28:07 2011
@@ -81,6 +81,12 @@
     bool LiveThrough;     ///< Live in whole block (Templ 5. above).
     bool LiveIn;          ///< Current reg is live in.
     bool LiveOut;         ///< Current reg is live out.
+
+    /// isOneInstr - Returns true when this BlockInfo describes a single
+    /// instruction.
+    bool isOneInstr() const {
+      return SlotIndex::isSameInstr(FirstUse, LastUse);
+    }
   };
 
 private:
@@ -360,6 +366,10 @@
   /// Return the beginning of the new live range.
   SlotIndex enterIntvBefore(SlotIndex Idx);
 
+  /// enterIntvAfter - Enter the open interval after the instruction at Idx.
+  /// Return the beginning of the new live range.
+  SlotIndex enterIntvAfter(SlotIndex Idx);
+
   /// enterIntvAtEnd - Enter the open interval at the end of MBB.
   /// Use the open interval from the inserted copy to the MBB end.
   /// Return the beginning of the new live range.

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/Splitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/Splitter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/Splitter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/Splitter.cpp Sat Jul  2 22:28:07 2011
@@ -11,7 +11,7 @@
 
 #include "Splitter.h"
 
-#include "SimpleRegisterCoalescing.h"
+#include "RegisterCoalescer.h"
 #include "llvm/Module.h"
 #include "llvm/CodeGen/CalcSpillWeights.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/StackSlotColoring.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/StackSlotColoring.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/StackSlotColoring.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/StackSlotColoring.cpp Sat Jul  2 22:28:07 2011
@@ -504,7 +504,7 @@
     bool FoundDef = false;  // Not counting 2address def.
 
     Uses.clear();
-    const TargetInstrDesc &TID = MII->getDesc();
+    const MCInstrDesc &MCID = MII->getDesc();
     for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MII->getOperand(i);
       if (!MO.isReg())
@@ -521,7 +521,7 @@
         if (MO.getSubReg() || MII->isSubregToReg())
           return false;
 
-        const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
+        const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
         if (RC && !RC->contains(NewReg))
           return false;
 
@@ -566,7 +566,7 @@
   SmallVector<MachineOperand*, 4> Uses;
   while (++MII != MBB->end()) {
     bool FoundKill = false;
-    const TargetInstrDesc &TID = MII->getDesc();
+    const MCInstrDesc &MCID = MII->getDesc();
     for (unsigned i = 0, e = MII->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = MII->getOperand(i);
       if (!MO.isReg())
@@ -583,7 +583,7 @@
         if (MO.getSubReg())
           return false;
 
-        const TargetRegisterClass *RC = TID.OpInfo[i].getRegClass(TRI);
+        const TargetRegisterClass *RC = TII->getRegClass(MCID, i, TRI);
         if (RC && !RC->contains(NewReg))
           return false;
         if (MO.isKill())
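
The StackSlotColoring hunks above are part of the TargetInstrDesc to
MCInstrDesc migration: operand register-class constraints are now obtained
through TargetInstrInfo::getRegClass rather than read directly out of
TargetInstrDesc::OpInfo. A minimal sketch of the new call pattern follows
(assuming the post-r134362 headers); the wrapper function itself is invented
for illustration and is not part of this patch.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

// Returns the register-class constraint of operand OpNum, or null if the
// operand is unconstrained.
static const TargetRegisterClass *
getOperandRegClass(const MachineInstr *MI, unsigned OpNum,
                   const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) {
  const MCInstrDesc &MCID = MI->getDesc();   // was: const TargetInstrDesc &TID
  return TII->getRegClass(MCID, OpNum, TRI); // was: TID.OpInfo[OpNum].getRegClass(TRI)
}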

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/TailDuplication.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/TailDuplication.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/TailDuplication.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/TailDuplication.cpp Sat Jul  2 22:28:07 2011
@@ -95,9 +95,9 @@
                               SmallSetVector<MachineBasicBlock*, 8> &Succs);
     bool TailDuplicateBlocks(MachineFunction &MF);
     bool shouldTailDuplicate(const MachineFunction &MF,
-                             MachineBasicBlock &TailBB);
+                             bool IsSimple, MachineBasicBlock &TailBB);
     bool isSimpleBB(MachineBasicBlock *TailBB);
-    bool canCompletelyDuplicateSimpleBB(MachineBasicBlock &BB);
+    bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
     bool duplicateSimpleBB(MachineBasicBlock *TailBB,
                            SmallVector<MachineBasicBlock*, 8> &TDBBs,
                            const DenseSet<unsigned> &RegsUsedByPhi,
@@ -207,7 +207,7 @@
       // TailBB's immediate successors are now successors of those predecessors
       // which duplicated TailBB. Add the predecessors as sources to the PHI
       // instructions.
-      bool isDead = MBB->pred_empty();
+      bool isDead = MBB->pred_empty() && !MBB->hasAddressTaken();
       if (PreRegAlloc)
         UpdateSuccessorsPHIs(MBB, isDead, TDBBs, Succs);
 
@@ -501,6 +501,7 @@
 /// shouldTailDuplicate - Determine if it is profitable to duplicate this block.
 bool
 TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
+                                       bool IsSimple,
                                        MachineBasicBlock &TailBB) {
   // Only duplicate blocks that end with unconditional branches.
   if (TailBB.canFallThrough())
@@ -526,10 +527,13 @@
   // to allow undoing the effects of tail merging and other optimizations
   // that rearrange the predecessors of the indirect branch.
 
+  bool hasIndirectBR = false;
   if (PreRegAlloc && !TailBB.empty()) {
-    const TargetInstrDesc &TID = TailBB.back().getDesc();
-    if (TID.isIndirectBranch())
+    const MCInstrDesc &MCID = TailBB.back().getDesc();
+    if (MCID.isIndirectBranch()) {
       MaxDuplicateCount = 20;
+      hasIndirectBR = true;
+    }
   }
 
   // Check the instructions in the block to determine whether tail-duplication
@@ -560,7 +564,16 @@
       return false;
   }
 
-  return true;
+  if (hasIndirectBR)
+    return true;
+
+  if (IsSimple)
+    return true;
+
+  if (!PreRegAlloc)
+    return true;
+
+  return canCompletelyDuplicateBB(TailBB);
 }
 
 /// isSimpleBB - True if this BB has only one unconditional jump.
@@ -568,9 +581,11 @@
 TailDuplicatePass::isSimpleBB(MachineBasicBlock *TailBB) {
   if (TailBB->succ_size() != 1)
     return false;
-  MachineBasicBlock::iterator I = TailBB->getFirstNonPHI();
+  if (TailBB->pred_empty())
+    return false;
+  MachineBasicBlock::iterator I = TailBB->begin();
   MachineBasicBlock::iterator E = TailBB->end();
-  while (I->isDebugValue() && I != E)
+  while (I != E && I->isDebugValue())
     ++I;
   if (I == E)
     return true;
@@ -591,20 +606,23 @@
 }
 
 bool
-TailDuplicatePass::canCompletelyDuplicateSimpleBB(MachineBasicBlock &BB) {
+TailDuplicatePass::canCompletelyDuplicateBB(MachineBasicBlock &BB) {
   SmallPtrSet<MachineBasicBlock*, 8> Succs(BB.succ_begin(), BB.succ_end());
 
   for (MachineBasicBlock::pred_iterator PI = BB.pred_begin(),
        PE = BB.pred_end(); PI != PE; ++PI) {
     MachineBasicBlock *PredBB = *PI;
-    if (PredBB->getLandingPadSuccessor())
-      return false;
-    if (bothUsedInPHI(*PredBB, Succs))
+
+    if (PredBB->succ_size() > 1)
       return false;
+
     MachineBasicBlock *PredTBB = NULL, *PredFBB = NULL;
     SmallVector<MachineOperand, 4> PredCond;
     if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
       return false;
+
+    if (!PredCond.empty())
+      return false;
   }
   return true;
 }
@@ -614,44 +632,33 @@
                                      SmallVector<MachineBasicBlock*, 8> &TDBBs,
                                      const DenseSet<unsigned> &UsedByPhi,
                                      SmallVector<MachineInstr*, 16> &Copies) {
-  if (!canCompletelyDuplicateSimpleBB(*TailBB))
-    return false;
-
-  bool Changed = false;
+  SmallPtrSet<MachineBasicBlock*, 8> Succs(TailBB->succ_begin(),
+                                           TailBB->succ_end());
   SmallVector<MachineBasicBlock*, 8> Preds(TailBB->pred_begin(),
                                            TailBB->pred_end());
+  bool Changed = false;
   for (SmallSetVector<MachineBasicBlock *, 8>::iterator PI = Preds.begin(),
        PE = Preds.end(); PI != PE; ++PI) {
     MachineBasicBlock *PredBB = *PI;
 
+    if (PredBB->getLandingPadSuccessor())
+      continue;
+
+    if (bothUsedInPHI(*PredBB, Succs))
+      continue;
+
     MachineBasicBlock *PredTBB = NULL, *PredFBB = NULL;
     SmallVector<MachineOperand, 4> PredCond;
-    bool NotAnalyzable =
-      TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true);
-    (void)NotAnalyzable;
-    assert(!NotAnalyzable && "Cannot duplicate this!");
+    if (TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true))
+      continue;
 
+    Changed = true;
     DEBUG(dbgs() << "\nTail-duplicating into PredBB: " << *PredBB
                  << "From simple Succ: " << *TailBB);
 
     MachineBasicBlock *NewTarget = *TailBB->succ_begin();
     MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(PredBB));
 
-    DenseMap<unsigned, unsigned> LocalVRMap;
-    SmallVector<std::pair<unsigned,unsigned>, 4> CopyInfos;
-    for (MachineBasicBlock::iterator I = TailBB->begin();
-         I != TailBB->end() && I->isPHI();) {
-      MachineInstr *MI = &*I;
-      ++I;
-      ProcessPHI(MI, TailBB, PredBB, LocalVRMap, CopyInfos, UsedByPhi, true);
-    }
-    MachineBasicBlock::iterator Loc = PredBB->getFirstTerminator();
-    for (unsigned i = 0, e = CopyInfos.size(); i != e; ++i) {
-      Copies.push_back(BuildMI(*PredBB, Loc, DebugLoc(),
-                               TII->get(TargetOpcode::COPY),
-                               CopyInfos[i].first).addReg(CopyInfos[i].second));
-    }
-
     // Make PredFBB explicit.
     if (PredCond.empty())
       PredFBB = PredTBB;
@@ -669,8 +676,10 @@
       PredTBB = NewTarget;
 
     // Make the branch unconditional if possible
-    if (PredTBB == PredFBB)
+    if (PredTBB == PredFBB) {
+      PredCond.clear();
       PredFBB = NULL;
+    }
 
     // Avoid adding fall through branches.
     if (PredFBB == NextBB)
@@ -684,11 +693,12 @@
       TII->InsertBranch(*PredBB, PredTBB, PredFBB, PredCond, DebugLoc());
 
     PredBB->removeSuccessor(TailBB);
-    PredBB->addSuccessor(NewTarget);
+    unsigned NumSuccessors = PredBB->succ_size();
+    assert(NumSuccessors <= 1);
+    if (NumSuccessors == 0 || *PredBB->succ_begin() != NewTarget)
+      PredBB->addSuccessor(NewTarget);
 
     TDBBs.push_back(PredBB);
-
-    Changed = true;
   }
   return Changed;
 }
@@ -699,7 +709,9 @@
 TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB, MachineFunction &MF,
                                  SmallVector<MachineBasicBlock*, 8> &TDBBs,
                                  SmallVector<MachineInstr*, 16> &Copies) {
-  if (!shouldTailDuplicate(MF, *TailBB))
+  bool IsSimple = isSimpleBB(TailBB);
+
+  if (!shouldTailDuplicate(MF, IsSimple, *TailBB))
     return false;
 
   DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');
@@ -707,7 +719,7 @@
   DenseSet<unsigned> UsedByPhi;
   getRegsUsedByPHIs(*TailBB, &UsedByPhi);
 
-  if (isSimpleBB(TailBB))
+  if (IsSimple)
     return duplicateSimpleBB(TailBB, TDBBs, UsedByPhi, Copies);
 
   // Iterate through all the unique predecessors and tail-duplicate this

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/TargetInstrInfoImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/TargetInstrInfoImpl.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/TargetInstrInfoImpl.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/TargetInstrInfoImpl.cpp Sat Jul  2 22:28:07 2011
@@ -59,8 +59,8 @@
 // the two operands returned by findCommutedOpIndices.
 MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                       bool NewMI) const {
-  const TargetInstrDesc &TID = MI->getDesc();
-  bool HasDef = TID.getNumDefs();
+  const MCInstrDesc &MCID = MI->getDesc();
+  bool HasDef = MCID.getNumDefs();
   if (HasDef && !MI->getOperand(0).isReg())
     // No idea how to commute this instruction. Target should implement its own.
     return 0;
@@ -81,7 +81,7 @@
   bool ChangeReg0 = false;
   if (HasDef && MI->getOperand(0).getReg() == Reg1) {
     // Must be two address instruction!
-    assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
+    assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
            "Expecting a two-address instruction!");
     Reg2IsKill = false;
     ChangeReg0 = true;
@@ -119,12 +119,12 @@
 bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                 unsigned &SrcOpIdx1,
                                                 unsigned &SrcOpIdx2) const {
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.isCommutable())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.isCommutable())
     return false;
   // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
   // is not true, then the target must implement this.
-  SrcOpIdx1 = TID.getNumDefs();
+  SrcOpIdx1 = MCID.getNumDefs();
   SrcOpIdx2 = SrcOpIdx1 + 1;
   if (!MI->getOperand(SrcOpIdx1).isReg() ||
       !MI->getOperand(SrcOpIdx2).isReg())
@@ -137,12 +137,12 @@
 bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                             const SmallVectorImpl<MachineOperand> &Pred) const {
   bool MadeChange = false;
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.isPredicable())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.isPredicable())
     return false;
 
   for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    if (TID.OpInfo[i].isPredicate()) {
+    if (MCID.OpInfo[i].isPredicate()) {
       MachineOperand &MO = MI->getOperand(i);
       if (MO.isReg()) {
         MO.setReg(Pred[j].getReg());
@@ -332,10 +332,10 @@
       MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
     return true;
 
-  const TargetInstrDesc &TID = MI->getDesc();
+  const MCInstrDesc &MCID = MI->getDesc();
 
   // Avoid instructions obviously unsafe for remat.
-  if (TID.isNotDuplicable() || TID.mayStore() ||
+  if (MCID.isNotDuplicable() || MCID.mayStore() ||
       MI->hasUnmodeledSideEffects())
     return false;
 
@@ -345,7 +345,7 @@
     return false;
 
   // Avoid instructions which load from potentially varying memory.
-  if (TID.mayLoad() && !MI->isInvariantLoad(AA))
+  if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
     return false;
 
   // If any of the registers accessed are non-constant, conservatively assume

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/TargetLoweringObjectFileImpl.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/TargetLoweringObjectFileImpl.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/TargetLoweringObjectFileImpl.cpp Sat Jul  2 22:28:07 2011
@@ -605,6 +605,13 @@
   // Exception Handling.
   LSDASection = getContext().getMachOSection("__TEXT", "__gcc_except_tab", 0,
                                              SectionKind::getReadOnlyWithRel());
+
+  if (T.isMacOSX() && !T.isMacOSXVersionLT(10, 6))
+    CompactUnwindSection =
+      getContext().getMachOSection("__LD", "__compact_unwind",
+                                   MCSectionMachO::S_ATTR_DEBUG,
+                                   SectionKind::getReadOnly());
+
   // Debug Information.
   DwarfAbbrevSection =
     getContext().getMachOSection("__DWARF", "__debug_abbrev",

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/TwoAddressInstructionPass.cpp Sat Jul  2 22:28:07 2011
@@ -280,8 +280,8 @@
 /// isTwoAddrUse - Return true if the specified MI is using the specified
 /// register as a two-address operand.
 static bool isTwoAddrUse(MachineInstr *UseMI, unsigned Reg) {
-  const TargetInstrDesc &TID = UseMI->getDesc();
-  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i) {
+  const MCInstrDesc &MCID = UseMI->getDesc();
+  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = UseMI->getOperand(i);
     if (MO.isReg() && MO.getReg() == Reg &&
         (MO.isDef() || UseMI->isRegTiedToDefOperand(i)))
@@ -443,8 +443,9 @@
 /// isTwoAddrUse - Return true if the specified MI uses the specified register
 /// as a two-address use. If so, return the destination register by reference.
 static bool isTwoAddrUse(MachineInstr &MI, unsigned Reg, unsigned &DstReg) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  unsigned NumOps = MI.isInlineAsm() ? MI.getNumOperands():TID.getNumOperands();
+  const MCInstrDesc &MCID = MI.getDesc();
+  unsigned NumOps = MI.isInlineAsm()
+    ? MI.getNumOperands() : MCID.getNumOperands();
   for (unsigned i = 0; i != NumOps; ++i) {
     const MachineOperand &MO = MI.getOperand(i);
     if (!MO.isReg() || !MO.isUse() || MO.getReg() != Reg)
@@ -761,10 +762,10 @@
 static bool isSafeToDelete(MachineInstr *MI,
                            const TargetInstrInfo *TII,
                            SmallVector<unsigned, 4> &Kills) {
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (TID.mayStore() || TID.isCall())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (MCID.mayStore() || MCID.isCall())
     return false;
-  if (TID.isTerminator() || MI->hasUnmodeledSideEffects())
+  if (MCID.isTerminator() || MI->hasUnmodeledSideEffects())
     return false;
 
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -854,7 +855,7 @@
                         MachineFunction::iterator &mbbi,
                         unsigned SrcIdx, unsigned DstIdx, unsigned Dist,
                         SmallPtrSet<MachineInstr*, 8> &Processed) {
-  const TargetInstrDesc &TID = mi->getDesc();
+  const MCInstrDesc &MCID = mi->getDesc();
   unsigned regA = mi->getOperand(DstIdx).getReg();
   unsigned regB = mi->getOperand(SrcIdx).getReg();
 
@@ -876,7 +877,7 @@
   unsigned regCIdx = ~0U;
   bool TryCommute = false;
   bool AggressiveCommute = false;
-  if (TID.isCommutable() && mi->getNumOperands() >= 3 &&
+  if (MCID.isCommutable() && mi->getNumOperands() >= 3 &&
       TII->findCommutedOpIndices(mi, SrcOp1, SrcOp2)) {
     if (SrcIdx == SrcOp1)
       regCIdx = SrcOp2;
@@ -907,7 +908,7 @@
   if (TargetRegisterInfo::isVirtualRegister(regA))
     ScanUses(regA, &*mbbi, Processed);
 
-  if (TID.isConvertibleTo3Addr()) {
+  if (MCID.isConvertibleTo3Addr()) {
     // This instruction is potentially convertible to a true
     // three-address instruction.  Check if it is profitable.
     if (!regBKilled || isProfitableToConv3Addr(regA, regB)) {
@@ -927,7 +928,7 @@
   //   movq (%rax), %rcx
   //   addq %rdx, %rcx
   // because it's preferable to schedule a load than a register copy.
-  if (TID.mayLoad() && !regBKilled) {
+  if (MCID.mayLoad() && !regBKilled) {
     // Determine if a load can be unfolded.
     unsigned LoadRegIndex;
     unsigned NewOpc =
@@ -936,14 +937,14 @@
                                       /*UnfoldStore=*/false,
                                       &LoadRegIndex);
     if (NewOpc != 0) {
-      const TargetInstrDesc &UnfoldTID = TII->get(NewOpc);
-      if (UnfoldTID.getNumDefs() == 1) {
+      const MCInstrDesc &UnfoldMCID = TII->get(NewOpc);
+      if (UnfoldMCID.getNumDefs() == 1) {
         MachineFunction &MF = *mbbi->getParent();
 
         // Unfold the load.
         DEBUG(dbgs() << "2addr:   UNFOLDING: " << *mi);
         const TargetRegisterClass *RC =
-          UnfoldTID.OpInfo[LoadRegIndex].getRegClass(TRI);
+          TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI);
         unsigned Reg = MRI->createVirtualRegister(RC);
         SmallVector<MachineInstr *, 2> NewMIs;
         if (!TII->unfoldMemoryOperand(MF, mi, Reg,
@@ -1067,7 +1068,7 @@
       if (mi->isRegSequence())
         RegSequences.push_back(&*mi);
 
-      const TargetInstrDesc &TID = mi->getDesc();
+      const MCInstrDesc &MCID = mi->getDesc();
       bool FirstTied = true;
 
       DistanceMap.insert(std::make_pair(mi, ++Dist));
@@ -1077,7 +1078,7 @@
       // First scan through all the tied register uses in this instruction
       // and record a list of pairs of tied operands for each register.
       unsigned NumOps = mi->isInlineAsm()
-        ? mi->getNumOperands() : TID.getNumOperands();
+        ? mi->getNumOperands() : MCID.getNumOperands();
       for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) {
         unsigned DstIdx = 0;
         if (!mi->isRegTiedToDefOperand(SrcIdx, &DstIdx))

Modified: llvm/branches/type-system-rewrite/lib/CodeGen/VirtRegRewriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/CodeGen/VirtRegRewriter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/CodeGen/VirtRegRewriter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/CodeGen/VirtRegRewriter.cpp Sat Jul  2 22:28:07 2011
@@ -679,8 +679,8 @@
                           VirtRegMap &VRM) {
   MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
 #ifndef NDEBUG
-  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
-  assert(TID.getNumDefs() == 1 &&
+  const MCInstrDesc &MCID = ReMatDefMI->getDesc();
+  assert(MCID.getNumDefs() == 1 &&
          "Don't know how to remat instructions that define > 1 values!");
 #endif
   TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
@@ -1483,11 +1483,11 @@
 /// where SrcReg is r1 and it is tied to r0. Return true if after
 /// commuting this instruction it will be r0 = op r2, r1.
 static bool CommuteChangesDestination(MachineInstr *DefMI,
-                                      const TargetInstrDesc &TID,
+                                      const MCInstrDesc &MCID,
                                       unsigned SrcReg,
                                       const TargetInstrInfo *TII,
                                       unsigned &DstIdx) {
-  if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
+  if (MCID.getNumDefs() != 1 && MCID.getNumOperands() != 3)
     return false;
   if (!DefMI->getOperand(1).isReg() ||
       DefMI->getOperand(1).getReg() != SrcReg)
@@ -1527,11 +1527,11 @@
   MachineInstr &MI = *MII;
   MachineBasicBlock::iterator DefMII = prior(MII);
   MachineInstr *DefMI = DefMII;
-  const TargetInstrDesc &TID = DefMI->getDesc();
+  const MCInstrDesc &MCID = DefMI->getDesc();
   unsigned NewDstIdx;
   if (DefMII != MBB->begin() &&
-      TID.isCommutable() &&
-      CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
+      MCID.isCommutable() &&
+      CommuteChangesDestination(DefMI, MCID, SrcReg, TII, NewDstIdx)) {
     MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
     unsigned NewReg = NewDstMO.getReg();
     if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
@@ -1658,9 +1658,9 @@
 /// isSafeToDelete - Return true if this instruction doesn't produce any side
 /// effect and all of its defs are dead.
 static bool isSafeToDelete(MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  if (TID.mayLoad() || TID.mayStore() || TID.isTerminator() ||
-      TID.isCall() || TID.isBarrier() || TID.isReturn() ||
+  const MCInstrDesc &MCID = MI.getDesc();
+  if (MCID.mayLoad() || MCID.mayStore() || MCID.isTerminator() ||
+      MCID.isCall() || MCID.isBarrier() || MCID.isReturn() ||
       MI.isLabel() || MI.isDebugValue() ||
       MI.hasUnmodeledSideEffects())
     return false;

Modified: llvm/branches/type-system-rewrite/lib/ExecutionEngine/TargetSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/ExecutionEngine/TargetSelect.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/ExecutionEngine/TargetSelect.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/ExecutionEngine/TargetSelect.cpp Sat Jul  2 22:28:07 2011
@@ -16,10 +16,10 @@
 #include "llvm/ExecutionEngine/ExecutionEngine.h"
 #include "llvm/Module.h"
 #include "llvm/ADT/Triple.h"
+#include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/Host.h"
-#include "llvm/Target/SubtargetFeature.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegistry.h"
 using namespace llvm;
@@ -75,9 +75,8 @@
 
   // Package up features to be passed to target/subtarget
   std::string FeaturesStr;
-  if (!MCPU.empty() || !MAttrs.empty()) {
+  if (!MAttrs.empty()) {
     SubtargetFeatures Features;
-    Features.setCPU(MCPU);
     for (unsigned i = 0; i != MAttrs.size(); ++i)
       Features.AddFeature(MAttrs[i]);
     FeaturesStr = Features.getString();
@@ -85,7 +84,7 @@
 
   // Allocate a target...
   TargetMachine *Target =
-    TheTarget->createTargetMachine(TheTriple.getTriple(), FeaturesStr);
+    TheTarget->createTargetMachine(TheTriple.getTriple(), MCPU, FeaturesStr);
   assert(Target && "Could not allocate target machine!");
   return Target;
 }
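
createTargetMachine() now takes the CPU name as its own argument instead of having callers fold it into the feature string. A minimal sketch of the updated calling convention, assuming the post-r134362 interfaces; createTM and its parameters are illustrative, not code from this patch:

  #include "llvm/MC/SubtargetFeature.h"
  #include "llvm/Target/TargetMachine.h"
  #include "llvm/Target/TargetRegistry.h"
  #include <string>
  #include <vector>

  llvm::TargetMachine *createTM(const llvm::Target *TheTarget,
                                const std::string &TripleStr,
                                const std::string &MCPU,
                                const std::vector<std::string> &MAttrs) {
    // Only explicit attributes go into the feature string now.
    llvm::SubtargetFeatures Features;
    for (unsigned i = 0, e = MAttrs.size(); i != e; ++i)
      Features.AddFeature(MAttrs[i]);

    // The CPU name travels as a separate parameter.
    return TheTarget->createTargetMachine(TripleStr, MCPU,
                                          Features.getString());
  }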

Modified: llvm/branches/type-system-rewrite/lib/MC/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/CMakeLists.txt?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/CMakeLists.txt (original)
+++ llvm/branches/type-system-rewrite/lib/MC/CMakeLists.txt Sat Jul  2 22:28:07 2011
@@ -28,12 +28,14 @@
   MCSectionELF.cpp
   MCSectionMachO.cpp
   MCStreamer.cpp
+  MCSubtargetInfo.cpp
   MCSymbol.cpp
   MCValue.cpp
   MCWin64EH.cpp
   MachObjectWriter.cpp
   WinCOFFStreamer.cpp
   WinCOFFObjectWriter.cpp
+  SubtargetFeature.cpp
   TargetAsmBackend.cpp
   )
 

Modified: llvm/branches/type-system-rewrite/lib/MC/MCAsmStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/MCAsmStreamer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/MCAsmStreamer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/MC/MCAsmStreamer.cpp Sat Jul  2 22:28:07 2011
@@ -1244,391 +1244,12 @@
   if (!UseCFI)
     EmitFrames(false);
 }
-
-//===----------------------------------------------------------------------===//
-/// MCLSDADecoderAsmStreamer - This is identical to the MCAsmStreamer, but
-/// outputs a description of the LSDA in a human readable format.
-///
-namespace {
-
-class MCLSDADecoderAsmStreamer : public MCAsmStreamer {
-  const MCSymbol *PersonalitySymbol;
-  const MCSymbol *LSDASymbol;
-  bool InLSDA;
-  bool ReadingULEB128;
-
-  uint64_t BytesRead;
-  uint64_t ActionTableBytes;
-  uint64_t LSDASize;
-
-  SmallVector<char, 2> ULEB128Value;
-  std::vector<int64_t> LSDAEncoding;
-  std::vector<const MCExpr*> Assignments;
-
-  /// GetULEB128Value - A helper function to convert the value in the
-  /// ULEB128Value vector into a uint64_t.
-  uint64_t GetULEB128Value(SmallVectorImpl<char> &ULEB128Value) {
-    uint64_t Val = 0;
-    for (unsigned i = 0, e = ULEB128Value.size(); i != e; ++i)
-      Val |= (ULEB128Value[i] & 0x7F) << (7 * i);
-    return Val;
-  }
-
-  /// ResetState - Reset the state variables.
-  void ResetState() {
-    PersonalitySymbol = 0;
-    LSDASymbol = 0;
-    LSDASize = 0;
-    BytesRead = 0;
-    ActionTableBytes = 0;
-    InLSDA = false;
-    ReadingULEB128 = false;
-    ULEB128Value.clear();
-    LSDAEncoding.clear();
-    Assignments.clear();
-  }
-
-  void EmitEHTableDescription();
-
-  const char *DecodeDWARFEncoding(unsigned Encoding) {
-    switch (Encoding) {
-    case dwarf::DW_EH_PE_absptr: return "absptr";
-    case dwarf::DW_EH_PE_omit:   return "omit";
-    case dwarf::DW_EH_PE_pcrel:  return "pcrel";
-    case dwarf::DW_EH_PE_udata4: return "udata4";
-    case dwarf::DW_EH_PE_udata8: return "udata8";
-    case dwarf::DW_EH_PE_sdata4: return "sdata4";
-    case dwarf::DW_EH_PE_sdata8: return "sdata8";
-    case dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata4: return "pcrel udata4";
-    case dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata4: return "pcrel sdata4";
-    case dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata8: return "pcrel udata8";
-    case dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata8: return "pcrel sdata8";
-    case dwarf::DW_EH_PE_indirect|dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata4:
-      return "indirect pcrel udata4";
-    case dwarf::DW_EH_PE_indirect|dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata4:
-      return "indirect pcrel sdata4";
-    case dwarf::DW_EH_PE_indirect|dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata8:
-      return "indirect pcrel udata8";
-    case dwarf::DW_EH_PE_indirect|dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata8:
-      return "indirect pcrel sdata8";
-    }
-  
-    return "<unknown encoding>";
-  }
-public:
-  MCLSDADecoderAsmStreamer(MCContext &Context, formatted_raw_ostream &os,
-                           bool isVerboseAsm, bool useLoc, bool useCFI,
-                           MCInstPrinter *printer, MCCodeEmitter *emitter,
-                           TargetAsmBackend *asmbackend,
-                           bool showInst)
-    : MCAsmStreamer(Context, os, isVerboseAsm, useLoc, useCFI,
-                    printer, emitter, asmbackend, showInst) {
-    ResetState();
-  }
-  ~MCLSDADecoderAsmStreamer() {}
-
-  virtual void Finish() {
-    ResetState();
-    MCAsmStreamer::Finish();
-  }
-
-  virtual void EmitLabel(MCSymbol *Symbol) {
-    if (Symbol == LSDASymbol)
-      InLSDA = true;
-    MCAsmStreamer::EmitLabel(Symbol);
-  }
-  virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
-    if (InLSDA)
-      Assignments.push_back(Value);
-    MCAsmStreamer::EmitAssignment(Symbol, Value);
-  }
-  virtual void EmitBytes(StringRef Data, unsigned AddrSpace);
-  virtual void EmitIntValue(uint64_t Value, unsigned Size,
-                            unsigned AddrSpace = 0);
-  virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
-                             unsigned AddrSpace);
-  virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue,
-                        unsigned AddrSpace);
-  virtual void EmitCFIPersonality(const MCSymbol *Sym, unsigned Encoding) {
-    PersonalitySymbol = Sym;
-    MCAsmStreamer::EmitCFIPersonality(Sym, Encoding);
-  }
-  virtual void EmitCFILsda(const MCSymbol *Sym, unsigned Encoding) {
-    LSDASymbol = Sym;
-    MCAsmStreamer::EmitCFILsda(Sym, Encoding);
-  }
-};
-
-} // end anonymous namespace
-
-void MCLSDADecoderAsmStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) {
-  if (InLSDA && Data.size() == 1) {
-    LSDAEncoding.push_back((unsigned)(unsigned char)Data[0]);
-    ++BytesRead;
-
-    if (LSDAEncoding.size() == 4)
-      // The fourth value tells us where the bottom of the type table is.
-      LSDASize = BytesRead + LSDAEncoding[3];
-    else if (LSDAEncoding.size() == 6)
-      // The sixth value tells us where the start of the action table is.
-      ActionTableBytes = BytesRead;
-  }
-
-  MCAsmStreamer::EmitBytes(Data, AddrSpace);
-}
-
-void MCLSDADecoderAsmStreamer::EmitIntValue(uint64_t Value, unsigned Size,
-                                            unsigned AddrSpace) {
-  if (!InLSDA)
-    return MCAsmStreamer::EmitIntValue(Value, Size, AddrSpace);
-
-  BytesRead += Size;
-
-  // We place the LSDA into the LSDAEncoding vector for later analysis. If we
-  // have a ULEB128, we read that in separate iterations through here and then
-  // get its value.
-  if (!ReadingULEB128) {
-    LSDAEncoding.push_back(Value);
-    int EncodingSize = LSDAEncoding.size();
-
-    if (EncodingSize == 1 || EncodingSize == 3) {
-      // The LPStart and TType encodings.
-      if (Value != dwarf::DW_EH_PE_omit) {
-        // The encoding is next and is a ULEB128 value.
-        ReadingULEB128 = true;
-        ULEB128Value.clear();
-      } else {
-        // The encoding was omitted. Put a 0 here as a placeholder.
-        LSDAEncoding.push_back(0);
-      }
-    } else if (EncodingSize == 5) {
-      // The next value is a ULEB128 value that tells us how long the call site
-      // table is -- where the start of the action tab
-      ReadingULEB128 = true;
-      ULEB128Value.clear();
-    }
-
-    InLSDA = (LSDASize == 0 || BytesRead < LSDASize);
-  } else {
-    // We're reading a ULEB128. Make it so!
-    ULEB128Value.push_back(Value);
-
-    if ((Value & 0x80) == 0) {
-      uint64_t Val = GetULEB128Value(ULEB128Value);
-      LSDAEncoding.push_back(Val);
-      ULEB128Value.clear();
-      ReadingULEB128 = false;
-
-      if (LSDAEncoding.size() == 4)
-        // The fourth value tells us where the bottom of the type table is.
-        LSDASize = BytesRead + LSDAEncoding[3];
-      else if (LSDAEncoding.size() == 6)
-        // The sixth value tells us where the start of the action table is.
-        ActionTableBytes = BytesRead;
-    }
-  }
-
-  MCAsmStreamer::EmitValueImpl(MCConstantExpr::Create(Value, getContext()),
-                               Size, AddrSpace);
-
-  if (LSDASize != 0 && !InLSDA)
-    EmitEHTableDescription();
-}
-
-void MCLSDADecoderAsmStreamer::EmitValueImpl(const MCExpr *Value,
-                                             unsigned Size,
-                                             unsigned AddrSpace) {
-  if (InLSDA && LSDASize != 0) {
-    assert(BytesRead + Size <= LSDASize  && "EH table too small!");
-
-    if (BytesRead > uint64_t(LSDAEncoding[5]) + ActionTableBytes)
-      // Insert the type values.
-      Assignments.push_back(Value);
-
-    LSDAEncoding.push_back(Assignments.size());
-    BytesRead += Size;
-    InLSDA = (LSDASize == 0 || BytesRead < LSDASize);
-  }
-
-  MCAsmStreamer::EmitValueImpl(Value, Size, AddrSpace);
-
-  if (LSDASize != 0 && !InLSDA)
-    EmitEHTableDescription();
-}
-
-void MCLSDADecoderAsmStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue,
-                                        unsigned AddrSpace) {
-  if (InLSDA && ReadingULEB128) {
-    for (uint64_t I = NumBytes; I != 0; --I)
-      ULEB128Value.push_back(FillValue);
-
-    BytesRead += NumBytes;
-
-    if ((FillValue & 0x80) == 0) {
-      uint64_t Val = GetULEB128Value(ULEB128Value);
-      LSDAEncoding.push_back(Val);
-      ULEB128Value.clear();
-      ReadingULEB128 = false;
-
-      if (LSDAEncoding.size() == 4)
-        // The fourth value tells us where the bottom of the type table is.
-        LSDASize = BytesRead + LSDAEncoding[3];
-      else if (LSDAEncoding.size() == 6)
-        // The sixth value tells us where the start of the action table is.
-        ActionTableBytes = BytesRead;
-    }
-  }
-
-  MCAsmStreamer::EmitFill(NumBytes, FillValue, AddrSpace);
-}
-
-/// EmitEHTableDescription - Emit a human readable version of the LSDA.
-void MCLSDADecoderAsmStreamer::EmitEHTableDescription() {
-  assert(LSDAEncoding.size() > 6 && "Invalid LSDA!");
-
-  // Emit header information.
-  StringRef C = MAI.getCommentString();
-#define CMT OS << C << ' '
-  CMT << "Exception Handling Table: " << LSDASymbol->getName() << "\n";
-  CMT << " @LPStart Encoding: " << DecodeDWARFEncoding(LSDAEncoding[0]) << "\n";
-  if (LSDAEncoding[1])
-    CMT << "@LPStart: 0x" << LSDAEncoding[1] << "\n";
-  CMT << "   @TType Encoding: " << DecodeDWARFEncoding(LSDAEncoding[2]) << "\n";
-  CMT << "       @TType Base: " << LSDAEncoding[3] << " bytes\n";
-  CMT << "@CallSite Encoding: " << DecodeDWARFEncoding(LSDAEncoding[4]) << "\n";
-  CMT << "@Action Table Size: " << LSDAEncoding[5] << " bytes\n\n";
-
-  bool isSjLjEH = (MAI.getExceptionHandlingType() == ExceptionHandling::SjLj);
-
-  int64_t CallSiteTableSize = LSDAEncoding[5];
-  unsigned CallSiteEntrySize;
-  if (!isSjLjEH)
-    CallSiteEntrySize = 4 + // Region start.
-                        4 + // Region end.
-                        4 + // Landing pad.
-                        1;  // TType index.
-  else
-    CallSiteEntrySize = 1 + // Call index.
-                        1;  // TType index.
-
-  unsigned NumEntries = CallSiteTableSize / CallSiteEntrySize;
-  assert(CallSiteTableSize % CallSiteEntrySize == 0 &&
-         "The action table size is not a multiple of what it should be!");
-  unsigned TTypeIdx = 5 +              // Action table size index.
-     (isSjLjEH ? 2 : 4) * NumEntries + // Action table entries.
-                      1;               // Just because.
-
-  // Emit the action table.
-  unsigned Action = 1;
-  for (unsigned I = 6; I < TTypeIdx; ) {
-    CMT << "Action " << Action++ << ":\n";
-
-    // The beginning of the throwing region.
-    uint64_t Idx = LSDAEncoding[I++];
-
-    if (!isSjLjEH) {
-      CMT << "  A throw between "
-          << *cast<MCBinaryExpr>(Assignments[Idx - 1])->getLHS() << " and ";
-
-      // The end of the throwing region.
-      Idx = LSDAEncoding[I++];
-      OS << *cast<MCBinaryExpr>(Assignments[Idx - 1])->getLHS();
-
-      // The landing pad.
-      Idx = LSDAEncoding[I++];
-      if (Idx) {
-        OS << " jumps to "
-           << *cast<MCBinaryExpr>(Assignments[Idx - 1])->getLHS()
-           << " on an exception.\n";
-      } else {
-        OS << " does not have a landing pad.\n";
-        ++I;
-        continue;
-      }
-    } else {
-      CMT << "  A throw from call " << Idx << "\n";
-    }
-
-    // The index into the action table.
-    Idx = LSDAEncoding[I++];
-    if (!Idx) {
-      CMT << "    :cleanup:\n";
-      continue;
-    }
-
-    // A semi-graphical representation of what the different indexes are in the
-    // loop below.
-    //
-    //    Idx    - Index into the action table.
-    //    Action - Index into the type table from the type table base.
-    //    Next   - Offset from Idx to the next action type.
-    //
-    //                               Idx---.
-    //                                     |
-    //                                     v
-    //            [call site table] _1 _2 _3
-    // TTypeIdx--> .........................
-    //            [action 1] _1 _2
-    //            [action 2] _1 _2
-    //              ...
-    //            [action n] _1 _2
-    //            [type m]      ^
-    //              ...         |
-    //            [type 1]      `---Next
-    //
-
-    int Action = LSDAEncoding[TTypeIdx + Idx - 1];
-    if ((Action & 0x40) != 0)
-      // Ignore exception specifications.
-      continue;
-
-    // Emit the types that are caught by this exception.
-    CMT << "    For type(s): ";
-    for (;;) {
-      if ((Action & 0x40) != 0)
-        // Ignore exception specifications.
-        break;
-
-      if (uint64_t Ty = LSDAEncoding[LSDAEncoding.size() - Action]) {
-        OS << " " << *Assignments[Ty - 1];
-
-        // Types can be chained together. Typically, it's a negative offset from
-        // the current type to a different one in the type table.
-        int Next = LSDAEncoding[TTypeIdx + Idx];
-        if (Next == 0)
-          break;
-        if ((Next & 0x40) != 0)
-          Next = (int)(signed char)(Next | 0x80);
-        Idx += Next + 1;
-        Action = LSDAEncoding[TTypeIdx + Idx - 1];
-        continue;
-      } else {
-        OS << " :catchall:";
-      }
-      break;
-    }
-
-    OS << "\n";
-  }
-
-  OS << "\n";
-  ResetState();
-}
-
 MCStreamer *llvm::createAsmStreamer(MCContext &Context,
                                     formatted_raw_ostream &OS,
                                     bool isVerboseAsm, bool useLoc,
                                     bool useCFI, MCInstPrinter *IP,
                                     MCCodeEmitter *CE, TargetAsmBackend *TAB,
                                     bool ShowInst) {
-  ExceptionHandling::ExceptionsType ET =
-    Context.getAsmInfo().getExceptionHandlingType();
-
-  if (useCFI && isVerboseAsm &&
-      (ET == ExceptionHandling::SjLj || ET == ExceptionHandling::DwarfCFI))
-    return new MCLSDADecoderAsmStreamer(Context, OS, isVerboseAsm, useLoc,
-                                        useCFI, IP, CE, TAB, ShowInst);
-
   return new MCAsmStreamer(Context, OS, isVerboseAsm, useLoc, useCFI,
                            IP, CE, TAB, ShowInst);
 }
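
The deleted MCLSDADecoderAsmStreamer recovered its call-site and action-table offsets by accumulating ULEB128 bytes (see GetULEB128Value above). For reference, a standalone sketch of that decoding scheme; decodeULEB128 is a hypothetical helper, not code from the tree:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Decode one unsigned LEB128 value starting at Bytes[i]; each byte
  // contributes its low 7 bits, and a clear high bit marks the last byte.
  static uint64_t decodeULEB128(const std::vector<unsigned char> &Bytes,
                                std::size_t &i) {
    uint64_t Value = 0;
    unsigned Shift = 0;
    while (i < Bytes.size()) {
      unsigned char Byte = Bytes[i++];
      Value |= uint64_t(Byte & 0x7F) << Shift; // 7 payload bits per byte
      if ((Byte & 0x80) == 0)                  // high bit clear: done
        break;
      Shift += 7;
    }
    return Value;
  }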

Modified: llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/Disassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/Disassembler.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/Disassembler.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/Disassembler.cpp Sat Jul  2 22:28:07 2011
@@ -55,11 +55,13 @@
 
   // Package up features to be passed to target/subtarget
   std::string FeaturesStr;
+  std::string CPU;
 
   // FIXME: We shouldn't need to do this (and link in codegen).
   //        When we split this out, we should do it in a way that makes
   //        it straightforward to switch subtargets on the fly.
-  TargetMachine *TM = TheTarget->createTargetMachine(TripleName, FeaturesStr);
+  TargetMachine *TM = TheTarget->createTargetMachine(TripleName, CPU,
+                                                     FeaturesStr);
   assert(TM && "Unable to create target machine!");
 
   // Get the target assembler info needed to setup the context.

Modified: llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/EDDisassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/EDDisassembler.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/EDDisassembler.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/MC/MCDisassembler/EDDisassembler.cpp Sat Jul  2 22:28:07 2011
@@ -167,9 +167,9 @@
   if (!Tgt)
     return;
   
+  std::string CPU;
   std::string featureString;
-  
-  TargetMachine.reset(Tgt->createTargetMachine(tripleString,
+  TargetMachine.reset(Tgt->createTargetMachine(tripleString, CPU,
                                                featureString));
   
   const TargetRegisterInfo *registerInfo = TargetMachine->getRegisterInfo();

Modified: llvm/branches/type-system-rewrite/lib/MC/MCDwarf.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/MCDwarf.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/MCDwarf.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/MC/MCDwarf.cpp Sat Jul  2 22:28:07 2011
@@ -30,28 +30,27 @@
 #define SPECIAL_ADDR(op) (((op) - DWARF2_LINE_OPCODE_BASE)/DWARF2_LINE_RANGE)
 
 // The maximum address skip amount that can be encoded with a special op.
-#define MAX_SPECIAL_ADDR_DELTA		SPECIAL_ADDR(255)
+#define MAX_SPECIAL_ADDR_DELTA         SPECIAL_ADDR(255)
 
 // First special line opcode - leave room for the standard opcodes.
 // Note: If you want to change this, you'll have to update the
 // "standard_opcode_lengths" table that is emitted in DwarfFileTable::Emit().  
-#define DWARF2_LINE_OPCODE_BASE		13
+#define DWARF2_LINE_OPCODE_BASE         13
 
 // Minimum line offset in a special line info. opcode.  This value
 // was chosen to give a reasonable range of values.
-#define DWARF2_LINE_BASE		-5
+#define DWARF2_LINE_BASE                -5
 
 // Range of line offsets in a special line info. opcode.
-# define DWARF2_LINE_RANGE		14
+#define DWARF2_LINE_RANGE               14
 
 // Define the architecture-dependent minimum instruction length (in bytes).
 // This value should be rather too small than too big.
-# define DWARF2_LINE_MIN_INSN_LENGTH	1
+#define DWARF2_LINE_MIN_INSN_LENGTH     1
 
 // Note: when DWARF2_LINE_MIN_INSN_LENGTH == 1 which is the current setting,
 // this routine is a nop and will be optimized away.
-static inline uint64_t ScaleAddrDelta(uint64_t AddrDelta)
-{
+static inline uint64_t ScaleAddrDelta(uint64_t AddrDelta) {
   if (DWARF2_LINE_MIN_INSN_LENGTH == 1)
     return AddrDelta;
   if (AddrDelta % DWARF2_LINE_MIN_INSN_LENGTH != 0) {
@@ -291,7 +290,7 @@
   const std::vector<const MCSection *> &MCLineSectionOrder =
     MCOS->getContext().getMCLineSectionOrder();
   for (std::vector<const MCSection*>::const_iterator it =
-	MCLineSectionOrder.begin(), ie = MCLineSectionOrder.end(); it != ie;
+         MCLineSectionOrder.begin(), ie = MCLineSectionOrder.end(); it != ie;
        ++it) {
     const MCSection *Sec = *it;
     const MCLineSection *Line = MCLineSections.lookup(Sec);
@@ -461,13 +460,14 @@
 }
 
 static void EmitSymbol(MCStreamer &streamer, const MCSymbol &symbol,
-                       unsigned symbolEncoding) {
+                       unsigned symbolEncoding, const char *comment = 0) {
   MCContext &context = streamer.getContext();
   const MCAsmInfo &asmInfo = context.getAsmInfo();
   const MCExpr *v = asmInfo.getExprForFDESymbol(&symbol,
                                                 symbolEncoding,
                                                 streamer);
   unsigned size = getSizeForEncoding(streamer, symbolEncoding);
+  if (streamer.isVerboseAsm() && comment) streamer.AddComment(comment);
   streamer.EmitAbsValue(v, size);
 }
 
@@ -507,6 +507,12 @@
       SectionStart(sectionStart) {
     }
 
+    /// EmitCompactUnwind - Emit the unwind information in a compact way. If
+    /// we're successful, return 'true'. Otherwise, return 'false' and it will
+    /// emit the normal CIE and FDE.
+    bool EmitCompactUnwind(MCStreamer &streamer,
+                           const MCDwarfFrameInfo &frame);
+
     const MCSymbol &EmitCIE(MCStreamer &streamer,
                             const MCSymbol *personality,
                             unsigned personalityEncoding,
@@ -521,11 +527,46 @@
     void EmitCFIInstruction(MCStreamer &Streamer,
                             const MCCFIInstruction &Instr);
   };
+
+} // end anonymous namespace
+
+static void EmitEncodingByte(MCStreamer &Streamer, unsigned Encoding,
+                             StringRef Prefix) {
+  if (Streamer.isVerboseAsm()) {
+    const char *EncStr = 0;
+    switch (Encoding) {
+    default: EncStr = "<unknown encoding>";
+    case dwarf::DW_EH_PE_absptr: EncStr = "absptr";
+    case dwarf::DW_EH_PE_omit:   EncStr = "omit";
+    case dwarf::DW_EH_PE_pcrel:  EncStr = "pcrel";
+    case dwarf::DW_EH_PE_udata4: EncStr = "udata4";
+    case dwarf::DW_EH_PE_udata8: EncStr = "udata8";
+    case dwarf::DW_EH_PE_sdata4: EncStr = "sdata4";
+    case dwarf::DW_EH_PE_sdata8: EncStr = "sdata8";
+    case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata4: EncStr = "pcrel udata4";
+    case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata4: EncStr = "pcrel sdata4";
+    case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_udata8: EncStr = "pcrel udata8";
+    case dwarf::DW_EH_PE_pcrel |dwarf::DW_EH_PE_sdata8: EncStr = "pcrel sdata8";
+    case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata4:
+      EncStr = "indirect pcrel udata4";
+    case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata4:
+      EncStr = "indirect pcrel sdata4";
+    case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata8:
+      EncStr = "indirect pcrel udata8";
+    case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata8:
+      EncStr = "indirect pcrel sdata8";
+    }
+
+    Streamer.AddComment(Twine(Prefix) + " = " + EncStr);
+  }
+
+  Streamer.EmitIntValue(Encoding, 1);
 }
 
 void FrameEmitterImpl::EmitCFIInstruction(MCStreamer &Streamer,
                                           const MCCFIInstruction &Instr) {
   int dataAlignmentFactor = getDataAlignmentFactor(Streamer);
+  bool VerboseAsm = Streamer.isVerboseAsm();
 
   switch (Instr.getOperation()) {
   case MCCFIInstruction::Move:
@@ -537,9 +578,13 @@
     // If advancing cfa.
     if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) {
       if (Src.getReg() == MachineLocation::VirtualFP) {
+        if (VerboseAsm) Streamer.AddComment("DW_CFA_def_cfa_offset");
         Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa_offset, 1);
       } else {
+        if (VerboseAsm) Streamer.AddComment("DW_CFA_def_cfa");
         Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa, 1);
+        if (VerboseAsm) Streamer.AddComment(Twine("Reg ") +
+                                            Twine(Src.getReg()));
         Streamer.EmitULEB128IntValue(Src.getReg());
       }
 
@@ -548,47 +593,62 @@
       else
         CFAOffset = -Src.getOffset();
 
+      if (VerboseAsm) Streamer.AddComment(Twine("Offset " + Twine(CFAOffset)));
       Streamer.EmitULEB128IntValue(CFAOffset);
       return;
     }
 
     if (Src.isReg() && Src.getReg() == MachineLocation::VirtualFP) {
       assert(Dst.isReg() && "Machine move not supported yet.");
+      if (VerboseAsm) Streamer.AddComment("DW_CFA_def_cfa_register");
       Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa_register, 1);
+      if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Dst.getReg()));
       Streamer.EmitULEB128IntValue(Dst.getReg());
       return;
     }
 
     unsigned Reg = Src.getReg();
-
     int Offset = Dst.getOffset();
     if (IsRelative)
       Offset -= CFAOffset;
     Offset = Offset / dataAlignmentFactor;
 
     if (Offset < 0) {
+      if (VerboseAsm) Streamer.AddComment("DW_CFA_offset_extended_sf");
       Streamer.EmitIntValue(dwarf::DW_CFA_offset_extended_sf, 1);
+      if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Reg));
       Streamer.EmitULEB128IntValue(Reg);
+      if (VerboseAsm) Streamer.AddComment(Twine("Offset ") + Twine(Offset));
       Streamer.EmitSLEB128IntValue(Offset);
     } else if (Reg < 64) {
+      if (VerboseAsm) Streamer.AddComment(Twine("DW_CFA_offset + Reg(") +
+                                          Twine(Reg) + ")");
       Streamer.EmitIntValue(dwarf::DW_CFA_offset + Reg, 1);
+      if (VerboseAsm) Streamer.AddComment(Twine("Offset ") + Twine(Offset));
       Streamer.EmitULEB128IntValue(Offset);
     } else {
+      if (VerboseAsm) Streamer.AddComment("DW_CFA_offset_extended");
       Streamer.EmitIntValue(dwarf::DW_CFA_offset_extended, 1);
+      if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Reg));
       Streamer.EmitULEB128IntValue(Reg);
+      if (VerboseAsm) Streamer.AddComment(Twine("Offset ") + Twine(Offset));
       Streamer.EmitULEB128IntValue(Offset);
     }
     return;
   }
   case MCCFIInstruction::Remember:
+    if (VerboseAsm) Streamer.AddComment("DW_CFA_remember_state");
     Streamer.EmitIntValue(dwarf::DW_CFA_remember_state, 1);
     return;
   case MCCFIInstruction::Restore:
+    if (VerboseAsm) Streamer.AddComment("DW_CFA_restore_state");
     Streamer.EmitIntValue(dwarf::DW_CFA_restore_state, 1);
     return;
   case MCCFIInstruction::SameValue: {
     unsigned Reg = Instr.getDestination().getReg();
+    if (VerboseAsm) Streamer.AddComment("DW_CFA_same_value");
     Streamer.EmitIntValue(dwarf::DW_CFA_same_value, 1);
+    if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Reg));
     Streamer.EmitULEB128IntValue(Reg);
     return;
   }
@@ -611,6 +671,7 @@
     if (BaseLabel && Label) {
       MCSymbol *ThisSym = Label;
       if (ThisSym != BaseLabel) {
+        if (streamer.isVerboseAsm()) streamer.AddComment("DW_CFA_advance_loc4");
         streamer.EmitDwarfAdvanceFrameAddr(BaseLabel, ThisSym);
         BaseLabel = ThisSym;
       }
@@ -620,6 +681,82 @@
   }
 }
 
+/// EmitCompactUnwind - Emit the unwind information in a compact way. If we're
+/// successful, return 'true'. Otherwise, return 'false' and it will emit the
+/// normal CIE and FDE.
+bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer,
+                                         const MCDwarfFrameInfo &Frame) {
+#if 1
+  return false;
+#else
+  MCContext &Context = Streamer.getContext();
+  const TargetAsmInfo &TAI = Context.getTargetAsmInfo();
+  bool VerboseAsm = Streamer.isVerboseAsm();
+
+  // range-start range-length  compact-unwind-enc personality-func   lsda
+  //  _foo       LfooEnd-_foo  0x00000023          0                 0
+  //  _bar       LbarEnd-_bar  0x00000025         __gxx_personality  except_tab1
+  //
+  //   .section __LD,__compact_unwind,regular,debug
+  //
+  //   # compact unwind for _foo
+  //   .quad _foo
+  //   .set L1,LfooEnd-_foo
+  //   .long L1
+  //   .long 0x01010001
+  //   .quad 0
+  //   .quad 0
+  //
+  //   # compact unwind for _bar
+  //   .quad _bar
+  //   .set L2,LbarEnd-_bar
+  //   .long L2
+  //   .long 0x01020011
+  //   .quad __gxx_personality
+  //   .quad except_tab1
+
+  Streamer.SwitchSection(TAI.getCompactUnwindSection());
+
+  // Range Start
+  unsigned FDEEncoding = TAI.getFDEEncoding(UsingCFI);
+  unsigned Size = getSizeForEncoding(Streamer, FDEEncoding);
+  if (VerboseAsm) Streamer.AddComment("Range Start");
+  Streamer.EmitSymbolValue(Frame.Function, Size);
+
+  // Range Length
+  const MCExpr *Range = MakeStartMinusEndExpr(Streamer, *Frame.Begin,
+                                              *Frame.End, 0);
+  if (VerboseAsm) Streamer.AddComment("Range Length");
+  Streamer.EmitAbsValue(Range, 4);
+
+  // FIXME:
+  // Compact Encoding
+  const std::vector<MachineMove> &Moves = TAI.getInitialFrameState();
+  uint32_t Encoding = 0;
+  Size = getSizeForEncoding(Streamer, dwarf::DW_EH_PE_udata4);
+  if (VerboseAsm) Streamer.AddComment("Compact Unwind Encoding");
+  Streamer.EmitIntValue(Encoding, Size);
+
+  // Personality Function
+  Size = getSizeForEncoding(Streamer, Frame.PersonalityEncoding);
+  if (VerboseAsm) Streamer.AddComment("Personality Function");
+  if (Frame.Personality)
+    Streamer.EmitSymbolValue(Frame.Personality, Size);
+  else
+    Streamer.EmitIntValue(0, Size); // No personality fn
+
+  // LSDA
+  Size = getSizeForEncoding(Streamer, Frame.LsdaEncoding);
+  if (VerboseAsm) Streamer.AddComment("LSDA");
+  if (Frame.Lsda)
+    Streamer.EmitSymbolValue(Frame.Lsda, Size);
+  else
+    Streamer.EmitIntValue(0, Size); // No LSDA
+
+  return true;
+#endif
+}
+
 const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
                                           const MCSymbol *personality,
                                           unsigned personalityEncoding,
@@ -627,6 +764,7 @@
                                           unsigned lsdaEncoding) {
   MCContext &context = streamer.getContext();
   const TargetAsmInfo &asmInfo = context.getTargetAsmInfo();
+  bool verboseAsm = streamer.isVerboseAsm();
 
   MCSymbol *sectionStart;
   if (asmInfo.isFunctionEHFrameSymbolPrivate() || !IsEH)
@@ -634,6 +772,7 @@
   else
     sectionStart = context.GetOrCreateSymbol(Twine("EH_frame") + Twine(CIENum));
 
+  streamer.EmitLabel(sectionStart);
   CIENum++;
 
   MCSymbol *sectionEnd = streamer.getContext().CreateTempSymbol();
@@ -641,19 +780,22 @@
   // Length
   const MCExpr *Length = MakeStartMinusEndExpr(streamer, *sectionStart,
                                                *sectionEnd, 4);
-  streamer.EmitLabel(sectionStart);
+  if (verboseAsm) streamer.AddComment("CIE Length");
   streamer.EmitAbsValue(Length, 4);
 
   // CIE ID
   unsigned CIE_ID = IsEH ? 0 : -1;
+  if (verboseAsm) streamer.AddComment("CIE ID Tag");
   streamer.EmitIntValue(CIE_ID, 4);
 
   // Version
+  if (verboseAsm) streamer.AddComment("DW_CIE_VERSION");
   streamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1);
 
   // Augmentation String
   SmallString<8> Augmentation;
   if (IsEH) {
+    if (verboseAsm) streamer.AddComment("CIE Augmentation");
     Augmentation += "z";
     if (personality)
       Augmentation += "P";
@@ -665,12 +807,15 @@
   streamer.EmitIntValue(0, 1);
 
   // Code Alignment Factor
+  if (verboseAsm) streamer.AddComment("CIE Code Alignment Factor");
   streamer.EmitULEB128IntValue(1);
 
   // Data Alignment Factor
+  if (verboseAsm) streamer.AddComment("CIE Data Alignment Factor");
   streamer.EmitSLEB128IntValue(getDataAlignmentFactor(streamer));
 
   // Return Address Register
+  if (verboseAsm) streamer.AddComment("CIE Return Address Column");
   streamer.EmitULEB128IntValue(asmInfo.getDwarfRARegNum(true));
 
   // Augmentation Data Length (optional)
@@ -688,24 +833,30 @@
     // Encoding of the FDE pointers
     augmentationLength += 1;
 
+    if (verboseAsm) streamer.AddComment("Augmentation Size");
     streamer.EmitULEB128IntValue(augmentationLength);
 
     // Augmentation Data (optional)
     if (personality) {
       // Personality Encoding
-      streamer.EmitIntValue(personalityEncoding, 1);
+      EmitEncodingByte(streamer, personalityEncoding,
+                       "Personality Encoding");
       // Personality
+      if (verboseAsm) streamer.AddComment("Personality");
       EmitPersonality(streamer, *personality, personalityEncoding);
     }
+
     if (lsda)
-      streamer.EmitIntValue(lsdaEncoding, 1); // LSDA Encoding
+      EmitEncodingByte(streamer, lsdaEncoding, "LSDA Encoding");
+
     // Encoding of the FDE pointers
-    streamer.EmitIntValue(asmInfo.getFDEEncoding(UsingCFI), 1);
+    EmitEncodingByte(streamer, asmInfo.getFDEEncoding(UsingCFI),
+                     "FDE Encoding");
   }
 
   // Initial Instructions
 
-  const std::vector<MachineMove> Moves = asmInfo.getInitialFrameState();
+  const std::vector<MachineMove> &Moves = asmInfo.getInitialFrameState();
   std::vector<MCCFIInstruction> Instructions;
 
   for (int i = 0, n = Moves.size(); i != n; ++i) {
@@ -734,16 +885,18 @@
   MCSymbol *fdeStart = context.CreateTempSymbol();
   MCSymbol *fdeEnd = context.CreateTempSymbol();
   const TargetAsmInfo &TAsmInfo = context.getTargetAsmInfo();
+  bool verboseAsm = streamer.isVerboseAsm();
 
   if (!TAsmInfo.isFunctionEHFrameSymbolPrivate() && IsEH) {
-    MCSymbol *EHSym = context.GetOrCreateSymbol(
-      frame.Function->getName() + Twine(".eh"));
+    MCSymbol *EHSym =
+      context.GetOrCreateSymbol(frame.Function->getName() + Twine(".eh"));
     streamer.EmitEHSymAttributes(frame.Function, EHSym);
     streamer.EmitLabel(EHSym);
   }
 
   // Length
   const MCExpr *Length = MakeStartMinusEndExpr(streamer, *fdeStart, *fdeEnd, 0);
+  if (verboseAsm) streamer.AddComment("FDE Length");
   streamer.EmitAbsValue(Length, 4);
 
   streamer.EmitLabel(fdeStart);
@@ -753,6 +906,7 @@
   if (IsEH) {
     const MCExpr *offset = MakeStartMinusEndExpr(streamer, cieStart, *fdeStart,
                                                  0);
+    if (verboseAsm) streamer.AddComment("FDE CIE Offset");
     streamer.EmitAbsValue(offset, 4);
   } else if (!asmInfo.doesDwarfRequireRelocationForSectionOffset()) {
     const MCExpr *offset = MakeStartMinusEndExpr(streamer, *SectionStart,
@@ -761,6 +915,7 @@
   } else {
     streamer.EmitSymbolValue(&cieStart, 4);
   }
+
   unsigned fdeEncoding = TAsmInfo.getFDEEncoding(UsingCFI);
   unsigned size = getSizeForEncoding(streamer, fdeEncoding);
 
@@ -768,11 +923,12 @@
   unsigned PCBeginEncoding = IsEH ? fdeEncoding :
     (unsigned)dwarf::DW_EH_PE_absptr;
   unsigned PCBeginSize = getSizeForEncoding(streamer, PCBeginEncoding);
-  EmitSymbol(streamer, *frame.Begin, PCBeginEncoding);
+  EmitSymbol(streamer, *frame.Begin, PCBeginEncoding, "FDE initial location");
 
   // PC Range
   const MCExpr *Range = MakeStartMinusEndExpr(streamer, *frame.Begin,
                                               *frame.End, 0);
+  if (verboseAsm) streamer.AddComment("FDE address range");
   streamer.EmitAbsValue(Range, size);
 
   if (IsEH) {
@@ -782,11 +938,13 @@
     if (frame.Lsda)
       augmentationLength += getSizeForEncoding(streamer, frame.LsdaEncoding);
 
+    if (verboseAsm) streamer.AddComment("Augmentation size");
     streamer.EmitULEB128IntValue(augmentationLength);
 
     // Augmentation Data
     if (frame.Lsda)
-      EmitSymbol(streamer, *frame.Lsda, frame.LsdaEncoding);
+      EmitSymbol(streamer, *frame.Lsda, frame.LsdaEncoding,
+                 "Language Specific Data Area");
   }
 
   // Call Frame Instructions
@@ -840,39 +998,45 @@
   };
 }
 
-void MCDwarfFrameEmitter::Emit(MCStreamer &streamer,
-                               bool usingCFI,
-                               bool isEH) {
-  MCContext &context = streamer.getContext();
-  const TargetAsmInfo &asmInfo = context.getTargetAsmInfo();
-  const MCSection &section = isEH ?
-    *asmInfo.getEHFrameSection() : *asmInfo.getDwarfFrameSection();
-  streamer.SwitchSection(&section);
-  MCSymbol *SectionStart = context.CreateTempSymbol();
-  streamer.EmitLabel(SectionStart);
+void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer,
+                               bool UsingCFI,
+                               bool IsEH) {
+  MCContext &Context = Streamer.getContext();
+  const TargetAsmInfo &AsmInfo = Context.getTargetAsmInfo();
+  const MCSection &Section = IsEH ? *AsmInfo.getEHFrameSection() :
+                                    *AsmInfo.getDwarfFrameSection();
+  Streamer.SwitchSection(&Section);
+  MCSymbol *SectionStart = Context.CreateTempSymbol();
+  Streamer.EmitLabel(SectionStart);
 
-  MCSymbol *fdeEnd = NULL;
+  MCSymbol *FDEEnd = NULL;
   DenseMap<CIEKey, const MCSymbol*> CIEStarts;
-  FrameEmitterImpl Emitter(usingCFI, isEH, SectionStart);
+  FrameEmitterImpl Emitter(UsingCFI, IsEH, SectionStart);
 
   const MCSymbol *DummyDebugKey = NULL;
-  for (unsigned i = 0, n = streamer.getNumFrameInfos(); i < n; ++i) {
-    const MCDwarfFrameInfo &frame = streamer.getFrameInfo(i);
-    CIEKey key(frame.Personality, frame.PersonalityEncoding,
-               frame.LsdaEncoding);
-    const MCSymbol *&cieStart = isEH ? CIEStarts[key] : DummyDebugKey;
-    if (!cieStart)
-      cieStart = &Emitter.EmitCIE(streamer, frame.Personality,
-                                  frame.PersonalityEncoding, frame.Lsda,
-                                  frame.LsdaEncoding);
-    fdeEnd = Emitter.EmitFDE(streamer, *cieStart, frame);
+  for (unsigned i = 0, n = Streamer.getNumFrameInfos(); i < n; ++i) {
+    const MCDwarfFrameInfo &Frame = Streamer.getFrameInfo(i);
+    if (IsEH && AsmInfo.getCompactUnwindSection() &&
+        Emitter.EmitCompactUnwind(Streamer, Frame))
+      continue;
+
+    CIEKey Key(Frame.Personality, Frame.PersonalityEncoding,
+               Frame.LsdaEncoding);
+    const MCSymbol *&CIEStart = IsEH ? CIEStarts[Key] : DummyDebugKey;
+    if (!CIEStart)
+      CIEStart = &Emitter.EmitCIE(Streamer, Frame.Personality,
+                                  Frame.PersonalityEncoding, Frame.Lsda,
+                                  Frame.LsdaEncoding);
+
+    FDEEnd = Emitter.EmitFDE(Streamer, *CIEStart, Frame);
+
     if (i != n - 1)
-      streamer.EmitLabel(fdeEnd);
+      Streamer.EmitLabel(FDEEnd);
   }
 
-  streamer.EmitValueToAlignment(asmInfo.getPointerSize());
-  if (fdeEnd)
-    streamer.EmitLabel(fdeEnd);
+  Streamer.EmitValueToAlignment(AsmInfo.getPointerSize());
+  if (FDEEnd)
+    Streamer.EmitLabel(FDEEnd);
 }
 
 void MCDwarfFrameEmitter::EmitAdvanceLoc(MCStreamer &Streamer,
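
The whitespace-cleaned macros at the top of this hunk feed the usual DWARF line-table special-opcode arithmetic. As a standalone illustration of that formula with the values used here (line base -5, line range 14, opcode base 13); specialOpcode is a hypothetical helper, not code from the tree:

  #include <cstdint>

  static const int      LineBase   = -5;  // DWARF2_LINE_BASE
  static const unsigned LineRange  = 14;  // DWARF2_LINE_RANGE
  static const unsigned OpcodeBase = 13;  // DWARF2_LINE_OPCODE_BASE

  // Return the one-byte special opcode encoding (LineDelta, AddrDelta),
  // or 0 if the pair is out of range and standard opcodes are needed.
  // With these constants the largest encodable address delta is
  // (255 - 13) / 14 == 17, matching MAX_SPECIAL_ADDR_DELTA.
  static unsigned specialOpcode(int LineDelta, uint64_t AddrDelta) {
    if (LineDelta < LineBase || LineDelta >= LineBase + int(LineRange))
      return 0;
    uint64_t Opcode =
        uint64_t(LineDelta - LineBase) + LineRange * AddrDelta + OpcodeBase;
    return Opcode <= 255 ? unsigned(Opcode) : 0;
  }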

Modified: llvm/branches/type-system-rewrite/lib/MC/MCParser/AsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/MCParser/AsmParser.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/MCParser/AsmParser.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/MC/MCParser/AsmParser.cpp Sat Jul  2 22:28:07 2011
@@ -28,6 +28,7 @@
 #include "llvm/MC/MCSymbol.h"
 #include "llvm/MC/MCDwarf.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Support/SourceMgr.h"
 #include "llvm/Support/raw_ostream.h"
@@ -1612,13 +1613,18 @@
 
     for (;;) {
       const MCExpr *Value;
+      SMLoc ExprLoc = getLexer().getLoc();
       if (ParseExpression(Value))
         return true;
 
       // Special case constant expressions to match code generator.
-      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value))
-        getStreamer().EmitIntValue(MCE->getValue(), Size, DEFAULT_ADDRSPACE);
-      else
+      if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
+        assert(Size <= 8 && "Invalid size");
+        uint64_t IntValue = MCE->getValue();
+        if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue))
+          return Error(ExprLoc, "literal value out of range for directive");
+        getStreamer().EmitIntValue(IntValue, Size, DEFAULT_ADDRSPACE);
+      } else
         getStreamer().EmitValue(Value, Size, DEFAULT_ADDRSPACE);
 
       if (getLexer().is(AsmToken::EndOfStatement))
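
The new AsmParser check rejects data-directive constants that fit neither the unsigned nor the signed interpretation of the directive's width (isUIntN/isIntN from the freshly included MathExtras.h). A standalone sketch of the same test; fitsDirective is a hypothetical helper, not code from the tree:

  #include <cstdint>

  // A value fits an N-byte directive if it is representable either as an
  // unsigned or as a signed two's-complement integer of that width.
  static bool fitsDirective(uint64_t Value, unsigned SizeInBytes) {
    unsigned Bits = 8 * SizeInBytes;
    if (Bits >= 64)
      return true;                              // 8-byte data always fits
    uint64_t UMax = (uint64_t(1) << Bits) - 1;  // unsigned upper bound
    int64_t  SMax = int64_t(UMax >> 1);         //  2^(Bits-1) - 1
    int64_t  SMin = -SMax - 1;                  // -2^(Bits-1)
    bool FitsUnsigned = Value <= UMax;
    bool FitsSigned   = int64_t(Value) >= SMin && int64_t(Value) <= SMax;
    return FitsUnsigned || FitsSigned;
  }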

Modified: llvm/branches/type-system-rewrite/lib/MC/MachObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/MC/MachObjectWriter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/MC/MachObjectWriter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/MC/MachObjectWriter.cpp Sat Jul  2 22:28:07 2011
@@ -23,34 +23,12 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Target/TargetAsmBackend.h"
 
-// FIXME: Gross.
-#include "../Target/ARM/ARMFixupKinds.h"
-#include "../Target/X86/X86FixupKinds.h"
-
 #include <vector>
 using namespace llvm;
 using namespace llvm::object;
 
-// FIXME: this has been copied from (or to) X86AsmBackend.cpp
-static unsigned getFixupKindLog2Size(unsigned Kind) {
-  switch (Kind) {
-  default:
-    llvm_unreachable("invalid fixup kind!");
-  case FK_PCRel_1:
-  case FK_Data_1: return 0;
-  case FK_PCRel_2:
-  case FK_Data_2: return 1;
-  case FK_PCRel_4:
-    // FIXME: Remove these!!!
-  case X86::reloc_riprel_4byte:
-  case X86::reloc_riprel_4byte_movq_load:
-  case X86::reloc_signed_4byte:
-  case FK_Data_4: return 2;
-  case FK_Data_8: return 3;
-  }
-}
-
-static bool doesSymbolRequireExternRelocation(MCSymbolData *SD) {
+bool MachObjectWriter::
+doesSymbolRequireExternRelocation(const MCSymbolData *SD) {
   // Undefined symbols are always extern.
   if (SD->Symbol->isUndefined())
     return true;
@@ -64,1557 +42,740 @@
   return false;
 }
 
-namespace {
-
-class MachObjectWriter : public MCObjectWriter {
-  /// MachSymbolData - Helper struct for containing some precomputed information
-  /// on symbols.
-  struct MachSymbolData {
-    MCSymbolData *SymbolData;
-    uint64_t StringIndex;
-    uint8_t SectionIndex;
-
-    // Support lexicographic sorting.
-    bool operator<(const MachSymbolData &RHS) const {
-      return SymbolData->getSymbol().getName() <
-             RHS.SymbolData->getSymbol().getName();
-    }
-  };
-
-  /// The target specific Mach-O writer instance.
-  llvm::OwningPtr<MCMachObjectTargetWriter> TargetObjectWriter;
-
-  /// @name Relocation Data
-  /// @{
-
-  llvm::DenseMap<const MCSectionData*,
-                 std::vector<macho::RelocationEntry> > Relocations;
-  llvm::DenseMap<const MCSectionData*, unsigned> IndirectSymBase;
-
-  /// @}
-  /// @name Symbol Table Data
-  /// @{
-
-  SmallString<256> StringTable;
-  std::vector<MachSymbolData> LocalSymbolData;
-  std::vector<MachSymbolData> ExternalSymbolData;
-  std::vector<MachSymbolData> UndefinedSymbolData;
-
-  /// @}
-
-private:
-  /// @name Utility Methods
-  /// @{
+bool MachObjectWriter::
+MachSymbolData::operator<(const MachSymbolData &RHS) const {
+  return SymbolData->getSymbol().getName() <
+    RHS.SymbolData->getSymbol().getName();
+}
 
-  bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind) {
-    const MCFixupKindInfo &FKI = Asm.getBackend().getFixupKindInfo(
-      (MCFixupKind) Kind);
+bool MachObjectWriter::isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind) {
+  const MCFixupKindInfo &FKI = Asm.getBackend().getFixupKindInfo(
+    (MCFixupKind) Kind);
 
-    return FKI.Flags & MCFixupKindInfo::FKF_IsPCRel;
-  }
+  return FKI.Flags & MCFixupKindInfo::FKF_IsPCRel;
+}
 
-  /// @}
+uint64_t MachObjectWriter::getFragmentAddress(const MCFragment *Fragment,
+                                              const MCAsmLayout &Layout) const {
+  return getSectionAddress(Fragment->getParent()) +
+    Layout.getFragmentOffset(Fragment);
+}
 
-  SectionAddrMap SectionAddress;
-  uint64_t getSectionAddress(const MCSectionData* SD) const {
-    return SectionAddress.lookup(SD);
-  }
-  uint64_t getSymbolAddress(const MCSymbolData* SD,
-                            const MCAsmLayout &Layout) const {
-    const MCSymbol &S = SD->getSymbol();
-
-    // If this is a variable, then recursively evaluate now.
-    if (S.isVariable()) {
-      MCValue Target;
-      if (!S.getVariableValue()->EvaluateAsRelocatable(Target, Layout))
-        report_fatal_error("unable to evaluate offset for variable '" +
-                           S.getName() + "'");
-
-      // Verify that any used symbols are defined.
-      if (Target.getSymA() && Target.getSymA()->getSymbol().isUndefined())
-        report_fatal_error("unable to evaluate offset to undefined symbol '" +
-                           Target.getSymA()->getSymbol().getName() + "'");
-      if (Target.getSymB() && Target.getSymB()->getSymbol().isUndefined())
-        report_fatal_error("unable to evaluate offset to undefined symbol '" +
-                           Target.getSymB()->getSymbol().getName() + "'");
-
-      uint64_t Address = Target.getConstant();
-      if (Target.getSymA())
-        Address += getSymbolAddress(&Layout.getAssembler().getSymbolData(
-                                      Target.getSymA()->getSymbol()), Layout);
-      if (Target.getSymB())
-        Address += getSymbolAddress(&Layout.getAssembler().getSymbolData(
-                                      Target.getSymB()->getSymbol()), Layout);
-      return Address;
-    }
+uint64_t MachObjectWriter::getSymbolAddress(const MCSymbolData* SD,
+                                            const MCAsmLayout &Layout) const {
+  const MCSymbol &S = SD->getSymbol();
+
+  // If this is a variable, then recursively evaluate now.
+  if (S.isVariable()) {
+    MCValue Target;
+    if (!S.getVariableValue()->EvaluateAsRelocatable(Target, Layout))
+      report_fatal_error("unable to evaluate offset for variable '" +
+                         S.getName() + "'");
+
+    // Verify that any used symbols are defined.
+    if (Target.getSymA() && Target.getSymA()->getSymbol().isUndefined())
+      report_fatal_error("unable to evaluate offset to undefined symbol '" +
+                         Target.getSymA()->getSymbol().getName() + "'");
+    if (Target.getSymB() && Target.getSymB()->getSymbol().isUndefined())
+      report_fatal_error("unable to evaluate offset to undefined symbol '" +
+                         Target.getSymB()->getSymbol().getName() + "'");
 
-    return getSectionAddress(SD->getFragment()->getParent()) +
-      Layout.getSymbolOffset(SD);
-  }
-  uint64_t getFragmentAddress(const MCFragment *Fragment,
-                            const MCAsmLayout &Layout) const {
-    return getSectionAddress(Fragment->getParent()) +
-      Layout.getFragmentOffset(Fragment);
+    uint64_t Address = Target.getConstant();
+    if (Target.getSymA())
+      Address += getSymbolAddress(&Layout.getAssembler().getSymbolData(
+                                    Target.getSymA()->getSymbol()), Layout);
+    if (Target.getSymB())
+      Address += getSymbolAddress(&Layout.getAssembler().getSymbolData(
+                                    Target.getSymB()->getSymbol()), Layout);
+    return Address;
   }
 
-  uint64_t getPaddingSize(const MCSectionData *SD,
-                          const MCAsmLayout &Layout) const {
-    uint64_t EndAddr = getSectionAddress(SD) + Layout.getSectionAddressSize(SD);
-    unsigned Next = SD->getLayoutOrder() + 1;
-    if (Next >= Layout.getSectionOrder().size())
-      return 0;
-
-    const MCSectionData &NextSD = *Layout.getSectionOrder()[Next];
-    if (NextSD.getSection().isVirtualSection())
-      return 0;
-    return OffsetToAlignment(EndAddr, NextSD.getAlignment());
-  }
+  return getSectionAddress(SD->getFragment()->getParent()) +
+    Layout.getSymbolOffset(SD);
+}
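
The variable case above amounts to folding a relocatable expression of the form constant + A + B into a flat address once every referenced symbol has been laid out. A minimal standalone sketch of that folding, using a hypothetical SymbolicValue type and a plain address map instead of the MC data structures:

    #include <cstdint>
    #include <map>
    #include <stdexcept>
    #include <string>

    // Hypothetical flattened form of an evaluated MCValue: a constant plus up
    // to two symbol references (an empty string means "no symbol").
    struct SymbolicValue {
      int64_t Constant = 0;
      std::string SymA;
      std::string SymB;
    };

    // Fold the expression against a table of already-computed addresses,
    // mirroring the recursion in getSymbolAddress above.
    uint64_t resolve(const SymbolicValue &V,
                     const std::map<std::string, uint64_t> &Addr) {
      uint64_t Address = V.Constant;
      for (const std::string *Sym : {&V.SymA, &V.SymB}) {
        if (Sym->empty())
          continue;
        auto It = Addr.find(*Sym);
        if (It == Addr.end())
          throw std::runtime_error("undefined symbol '" + *Sym + "'");
        Address += It->second;
      }
      return Address;
    }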
 
-public:
-  MachObjectWriter(MCMachObjectTargetWriter *MOTW, raw_ostream &_OS,
-                   bool _IsLittleEndian)
-    : MCObjectWriter(_OS, _IsLittleEndian), TargetObjectWriter(MOTW) {
-  }
+uint64_t MachObjectWriter::getPaddingSize(const MCSectionData *SD,
+                                          const MCAsmLayout &Layout) const {
+  uint64_t EndAddr = getSectionAddress(SD) + Layout.getSectionAddressSize(SD);
+  unsigned Next = SD->getLayoutOrder() + 1;
+  if (Next >= Layout.getSectionOrder().size())
+    return 0;
+
+  const MCSectionData &NextSD = *Layout.getSectionOrder()[Next];
+  if (NextSD.getSection().isVirtualSection())
+    return 0;
+  return OffsetToAlignment(EndAddr, NextSD.getAlignment());
+}
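
The padding computed here is simply the distance from the end of one section to the alignment boundary required by the next. A small illustration of that arithmetic with a hypothetical helper (assuming, as the asserts elsewhere in this file do, power-of-two alignments):

    #include <cassert>
    #include <cstdint>

    // Distance from Addr up to the next multiple of Align; 0 if already
    // aligned. The bitmask form requires Align to be a power of two.
    uint64_t paddingToAlignment(uint64_t Addr, uint64_t Align) {
      assert(Align != 0 && (Align & (Align - 1)) == 0 &&
             "alignment must be a power of two");
      return (Align - (Addr & (Align - 1))) & (Align - 1);
    }

    // Example: a section ending at address 0x103 followed by a section with
    // 16-byte alignment needs 13 bytes of padding (0x103 -> 0x110).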
 
-  /// @name Target Writer Proxy Accessors
-  /// @{
+void MachObjectWriter::WriteHeader(unsigned NumLoadCommands,
+                                   unsigned LoadCommandsSize,
+                                   bool SubsectionsViaSymbols) {
+  uint32_t Flags = 0;
+
+  if (SubsectionsViaSymbols)
+    Flags |= macho::HF_SubsectionsViaSymbols;
+
+  // struct mach_header (28 bytes) or
+  // struct mach_header_64 (32 bytes)
+
+  uint64_t Start = OS.tell();
+  (void) Start;
+
+  Write32(is64Bit() ? macho::HM_Object64 : macho::HM_Object32);
+
+  Write32(TargetObjectWriter->getCPUType());
+  Write32(TargetObjectWriter->getCPUSubtype());
+
+  Write32(macho::HFT_Object);
+  Write32(NumLoadCommands);
+  Write32(LoadCommandsSize);
+  Write32(Flags);
+  if (is64Bit())
+    Write32(0); // reserved
 
-  bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
-  bool isARM() const {
-    uint32_t CPUType = TargetObjectWriter->getCPUType() & ~mach::CTFM_ArchMask;
-    return CPUType == mach::CTM_ARM;
-  }
+  assert(OS.tell() - Start ==
+         (is64Bit() ? macho::Header64Size : macho::Header32Size));
+}
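
The sequence of Write32 calls above follows the mach_header layout from <mach-o/loader.h>; spelling out the 64-bit variant makes the size assertion easy to check by eye (a reference sketch, not code used by the writer):

    #include <cstdint>

    // Field-for-field mirror of struct mach_header_64; the 32-bit header is
    // identical except that it stops at 'flags' (28 bytes instead of 32).
    struct MachHeader64 {
      uint32_t magic;       // MH_MAGIC_64 / MH_MAGIC
      uint32_t cputype;
      uint32_t cpusubtype;
      uint32_t filetype;    // MH_OBJECT for relocatable .o files
      uint32_t ncmds;       // number of load commands
      uint32_t sizeofcmds;  // total size of the load commands
      uint32_t flags;       // e.g. MH_SUBSECTIONS_VIA_SYMBOLS
      uint32_t reserved;    // 64-bit only
    };

    static_assert(sizeof(MachHeader64) == 32,
                  "matches the 32-byte size asserted above");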
 
-  /// @}
+/// WriteSegmentLoadCommand - Write a segment load command.
+///
+/// \arg NumSections - The number of sections in this segment.
+/// \arg VMSize - The total virtual memory size of the segment.
+/// \arg SectionDataStartOffset - The file offset at which the section data
+/// begins.
+/// \arg SectionDataSize - The total size of the sections.
+void MachObjectWriter::WriteSegmentLoadCommand(unsigned NumSections,
+                                               uint64_t VMSize,
+                                               uint64_t SectionDataStartOffset,
+                                               uint64_t SectionDataSize) {
+  // struct segment_command (56 bytes) or
+  // struct segment_command_64 (72 bytes)
+
+  uint64_t Start = OS.tell();
+  (void) Start;
+
+  unsigned SegmentLoadCommandSize =
+    is64Bit() ? macho::SegmentLoadCommand64Size :
+                macho::SegmentLoadCommand32Size;
+  Write32(is64Bit() ? macho::LCT_Segment64 : macho::LCT_Segment);
+  Write32(SegmentLoadCommandSize +
+          NumSections * (is64Bit() ? macho::Section64Size :
+                         macho::Section32Size));
+
+  WriteBytes("", 16);
+  if (is64Bit()) {
+    Write64(0); // vmaddr
+    Write64(VMSize); // vmsize
+    Write64(SectionDataStartOffset); // file offset
+    Write64(SectionDataSize); // file size
+  } else {
+    Write32(0); // vmaddr
+    Write32(VMSize); // vmsize
+    Write32(SectionDataStartOffset); // file offset
+    Write32(SectionDataSize); // file size
+  }
+  Write32(0x7); // maxprot
+  Write32(0x7); // initprot
+  Write32(NumSections);
+  Write32(0); // flags
 
-  void WriteHeader(unsigned NumLoadCommands, unsigned LoadCommandsSize,
-                   bool SubsectionsViaSymbols) {
-    uint32_t Flags = 0;
+  assert(OS.tell() - Start == SegmentLoadCommandSize);
+}
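
Likewise, the 72-byte size asserted for the 64-bit segment load command corresponds to the segment_command_64 layout below (again a reference sketch; field names as in <mach-o/loader.h>):

    #include <cstdint>

    // Mirror of struct segment_command_64. The writer above emits these
    // fields in order, leaving segname as 16 zero bytes and hardcoding rwx
    // (0x7) for both protection fields.
    struct SegmentCommand64 {
      uint32_t cmd;          // LC_SEGMENT_64
      uint32_t cmdsize;      // includes the trailing section_64 headers
      char     segname[16];
      uint64_t vmaddr;
      uint64_t vmsize;
      uint64_t fileoff;
      uint64_t filesize;
      uint32_t maxprot;
      uint32_t initprot;
      uint32_t nsects;
      uint32_t flags;
    };

    static_assert(sizeof(SegmentCommand64) == 72,
                  "matches macho::SegmentLoadCommand64Size");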
 
-    if (SubsectionsViaSymbols)
-      Flags |= macho::HF_SubsectionsViaSymbols;
+void MachObjectWriter::WriteSection(const MCAssembler &Asm,
+                                    const MCAsmLayout &Layout,
+                                    const MCSectionData &SD,
+                                    uint64_t FileOffset,
+                                    uint64_t RelocationsStart,
+                                    unsigned NumRelocations) {
+  uint64_t SectionSize = Layout.getSectionAddressSize(&SD);
+
+  // The offset is unused for virtual sections.
+  if (SD.getSection().isVirtualSection()) {
+    assert(Layout.getSectionFileSize(&SD) == 0 && "Invalid file size!");
+    FileOffset = 0;
+  }
+
+  // struct section (68 bytes) or
+  // struct section_64 (80 bytes)
+
+  uint64_t Start = OS.tell();
+  (void) Start;
+
+  const MCSectionMachO &Section = cast<MCSectionMachO>(SD.getSection());
+  WriteBytes(Section.getSectionName(), 16);
+  WriteBytes(Section.getSegmentName(), 16);
+  if (is64Bit()) {
+    Write64(getSectionAddress(&SD)); // address
+    Write64(SectionSize); // size
+  } else {
+    Write32(getSectionAddress(&SD)); // address
+    Write32(SectionSize); // size
+  }
+  Write32(FileOffset);
+
+  unsigned Flags = Section.getTypeAndAttributes();
+  if (SD.hasInstructions())
+    Flags |= MCSectionMachO::S_ATTR_SOME_INSTRUCTIONS;
+
+  assert(isPowerOf2_32(SD.getAlignment()) && "Invalid alignment!");
+  Write32(Log2_32(SD.getAlignment()));
+  Write32(NumRelocations ? RelocationsStart : 0);
+  Write32(NumRelocations);
+  Write32(Flags);
+  Write32(IndirectSymBase.lookup(&SD)); // reserved1
+  Write32(Section.getStubSize()); // reserved2
+  if (is64Bit())
+    Write32(0); // reserved3
 
-    // struct mach_header (28 bytes) or
-    // struct mach_header_64 (32 bytes)
+  assert(OS.tell() - Start == (is64Bit() ? macho::Section64Size :
+                               macho::Section32Size));
+}
 
-    uint64_t Start = OS.tell();
-    (void) Start;
+void MachObjectWriter::WriteSymtabLoadCommand(uint32_t SymbolOffset,
+                                              uint32_t NumSymbols,
+                                              uint32_t StringTableOffset,
+                                              uint32_t StringTableSize) {
+  // struct symtab_command (24 bytes)
+
+  uint64_t Start = OS.tell();
+  (void) Start;
+
+  Write32(macho::LCT_Symtab);
+  Write32(macho::SymtabLoadCommandSize);
+  Write32(SymbolOffset);
+  Write32(NumSymbols);
+  Write32(StringTableOffset);
+  Write32(StringTableSize);
 
-    Write32(is64Bit() ? macho::HM_Object64 : macho::HM_Object32);
+  assert(OS.tell() - Start == macho::SymtabLoadCommandSize);
+}
 
-    Write32(TargetObjectWriter->getCPUType());
-    Write32(TargetObjectWriter->getCPUSubtype());
+void MachObjectWriter::WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
+                                                uint32_t NumLocalSymbols,
+                                                uint32_t FirstExternalSymbol,
+                                                uint32_t NumExternalSymbols,
+                                                uint32_t FirstUndefinedSymbol,
+                                                uint32_t NumUndefinedSymbols,
+                                                uint32_t IndirectSymbolOffset,
+                                                uint32_t NumIndirectSymbols) {
+  // struct dysymtab_command (80 bytes)
+
+  uint64_t Start = OS.tell();
+  (void) Start;
+
+  Write32(macho::LCT_Dysymtab);
+  Write32(macho::DysymtabLoadCommandSize);
+  Write32(FirstLocalSymbol);
+  Write32(NumLocalSymbols);
+  Write32(FirstExternalSymbol);
+  Write32(NumExternalSymbols);
+  Write32(FirstUndefinedSymbol);
+  Write32(NumUndefinedSymbols);
+  Write32(0); // tocoff
+  Write32(0); // ntoc
+  Write32(0); // modtaboff
+  Write32(0); // nmodtab
+  Write32(0); // extrefsymoff
+  Write32(0); // nextrefsyms
+  Write32(IndirectSymbolOffset);
+  Write32(NumIndirectSymbols);
+  Write32(0); // extreloff
+  Write32(0); // nextrel
+  Write32(0); // locreloff
+  Write32(0); // nlocrel
 
-    Write32(macho::HFT_Object);
-    Write32(NumLoadCommands);
-    Write32(LoadCommandsSize);
-    Write32(Flags);
-    if (is64Bit())
-      Write32(0); // reserved
+  assert(OS.tell() - Start == macho::DysymtabLoadCommandSize);
+}
 
-    assert(OS.tell() - Start ==
-           (is64Bit() ? macho::Header64Size : macho::Header32Size));
-  }
+void MachObjectWriter::WriteNlist(MachSymbolData &MSD,
+                                  const MCAsmLayout &Layout) {
+  MCSymbolData &Data = *MSD.SymbolData;
+  const MCSymbol &Symbol = Data.getSymbol();
+  uint8_t Type = 0;
+  uint16_t Flags = Data.getFlags();
+  uint32_t Address = 0;
 
-  /// WriteSegmentLoadCommand - Write a segment load command.
-  ///
-  /// \arg NumSections - The number of sections in this segment.
-  /// \arg SectionDataSize - The total size of the sections.
-  void WriteSegmentLoadCommand(unsigned NumSections,
-                               uint64_t VMSize,
-                               uint64_t SectionDataStartOffset,
-                               uint64_t SectionDataSize) {
-    // struct segment_command (56 bytes) or
-    // struct segment_command_64 (72 bytes)
-
-    uint64_t Start = OS.tell();
-    (void) Start;
-
-    unsigned SegmentLoadCommandSize =
-      is64Bit() ? macho::SegmentLoadCommand64Size:
-      macho::SegmentLoadCommand32Size;
-    Write32(is64Bit() ? macho::LCT_Segment64 : macho::LCT_Segment);
-    Write32(SegmentLoadCommandSize +
-            NumSections * (is64Bit() ? macho::Section64Size :
-                           macho::Section32Size));
-
-    WriteBytes("", 16);
-    if (is64Bit()) {
-      Write64(0); // vmaddr
-      Write64(VMSize); // vmsize
-      Write64(SectionDataStartOffset); // file offset
-      Write64(SectionDataSize); // file size
+  // Set the N_TYPE bits. See <mach-o/nlist.h>.
+  //
+  // FIXME: Are the prebound or indirect fields possible here?
+  if (Symbol.isUndefined())
+    Type = macho::STT_Undefined;
+  else if (Symbol.isAbsolute())
+    Type = macho::STT_Absolute;
+  else
+    Type = macho::STT_Section;
+
+  // FIXME: Set STAB bits.
+
+  if (Data.isPrivateExtern())
+    Type |= macho::STF_PrivateExtern;
+
+  // Set external bit.
+  if (Data.isExternal() || Symbol.isUndefined())
+    Type |= macho::STF_External;
+
+  // Compute the symbol address.
+  if (Symbol.isDefined()) {
+    if (Symbol.isAbsolute()) {
+      Address = cast<MCConstantExpr>(Symbol.getVariableValue())->getValue();
     } else {
-      Write32(0); // vmaddr
-      Write32(VMSize); // vmsize
-      Write32(SectionDataStartOffset); // file offset
-      Write32(SectionDataSize); // file size
+      Address = getSymbolAddress(&Data, Layout);
     }
-    Write32(0x7); // maxprot
-    Write32(0x7); // initprot
-    Write32(NumSections);
-    Write32(0); // flags
-
-    assert(OS.tell() - Start == SegmentLoadCommandSize);
-  }
-
-  void WriteSection(const MCAssembler &Asm, const MCAsmLayout &Layout,
-                    const MCSectionData &SD, uint64_t FileOffset,
-                    uint64_t RelocationsStart, unsigned NumRelocations) {
-    uint64_t SectionSize = Layout.getSectionAddressSize(&SD);
-
-    // The offset is unused for virtual sections.
-    if (SD.getSection().isVirtualSection()) {
-      assert(Layout.getSectionFileSize(&SD) == 0 && "Invalid file size!");
-      FileOffset = 0;
+  } else if (Data.isCommon()) {
+    // Common symbols are encoded with the size in the address
+    // field, and their alignment in the flags.
+    Address = Data.getCommonSize();
+
+    // Common alignment is packed into the 'desc' bits.
+    if (unsigned Align = Data.getCommonAlignment()) {
+      unsigned Log2Size = Log2_32(Align);
+      assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
+      if (Log2Size > 15)
+        report_fatal_error("invalid 'common' alignment '" +
+                           Twine(Align) + "'");
+      // FIXME: Keep this mask with the SymbolFlags enumeration.
+      Flags = (Flags & 0xF0FF) | (Log2Size << 8);
     }
+  }
 
-    // struct section (68 bytes) or
-    // struct section_64 (80 bytes)
+  // struct nlist (12 bytes)
+
+  Write32(MSD.StringIndex);
+  Write8(Type);
+  Write8(MSD.SectionIndex);
+
+  // The Mach-O streamer uses the lowest 16 bits of the flags for the 'desc'
+  // value.
+  Write16(Flags);
+  if (is64Bit())
+    Write64(Address);
+  else
+    Write32(Address);
+}
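
WriteNlist emits one nlist/nlist_64 record per symbol; the 64-bit shape, and the "common alignment in the desc bits" packing used above, look roughly like this (a sketch for reference, with a hypothetical packCommonAlign helper):

    #include <cstdint>

    // 64-bit symbol table entry as emitted above; the 32-bit nlist differs
    // mainly in having a 32-bit n_value (12 bytes total).
    struct Nlist64 {
      uint32_t n_strx;   // offset of the name in the string table
      uint8_t  n_type;   // N_TYPE | N_EXT | N_PEXT bits
      uint8_t  n_sect;   // 1-based section index, or 0 (NO_SECT)
      uint16_t n_desc;   // 'desc' flags; bits 8-11 hold common alignment
      uint64_t n_value;  // address, or size for common symbols
    };

    // The packing used for common symbols above: log2 of the alignment goes
    // into bits 8-11 of n_desc, e.g. a 16-byte aligned common stores 4.
    constexpr uint16_t packCommonAlign(uint16_t Desc, unsigned Log2Align) {
      return static_cast<uint16_t>((Desc & 0xF0FF) | (Log2Align << 8));
    }
    static_assert(packCommonAlign(0, 4) == 0x0400, "16-byte common alignment");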
 
-    uint64_t Start = OS.tell();
-    (void) Start;
+void MachObjectWriter::RecordRelocation(const MCAssembler &Asm,
+                                        const MCAsmLayout &Layout,
+                                        const MCFragment *Fragment,
+                                        const MCFixup &Fixup,
+                                        MCValue Target,
+                                        uint64_t &FixedValue) {
+  TargetObjectWriter->RecordRelocation(this, Asm, Layout, Fragment, Fixup,
+                                       Target, FixedValue);
+}
 
-    const MCSectionMachO &Section = cast<MCSectionMachO>(SD.getSection());
-    WriteBytes(Section.getSectionName(), 16);
-    WriteBytes(Section.getSegmentName(), 16);
-    if (is64Bit()) {
-      Write64(getSectionAddress(&SD)); // address
-      Write64(SectionSize); // size
-    } else {
-      Write32(getSectionAddress(&SD)); // address
-      Write32(SectionSize); // size
-    }
-    Write32(FileOffset);
+void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) {
+  // This is the point where 'as' creates actual symbols for indirect symbols
+  // (in the following two passes). It would be easier for us to do this sooner
+  // when we see the attribute, but that makes getting the order in the symbol
+  // table much more complicated than it is worth.
+  //
+  // FIXME: Revisit this when the dust settles.
 
-    unsigned Flags = Section.getTypeAndAttributes();
-    if (SD.hasInstructions())
-      Flags |= MCSectionMachO::S_ATTR_SOME_INSTRUCTIONS;
-
-    assert(isPowerOf2_32(SD.getAlignment()) && "Invalid alignment!");
-    Write32(Log2_32(SD.getAlignment()));
-    Write32(NumRelocations ? RelocationsStart : 0);
-    Write32(NumRelocations);
-    Write32(Flags);
-    Write32(IndirectSymBase.lookup(&SD)); // reserved1
-    Write32(Section.getStubSize()); // reserved2
-    if (is64Bit())
-      Write32(0); // reserved3
+  // Bind non-lazy symbol pointers first.
+  unsigned IndirectIndex = 0;
+  for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
+         ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
+    const MCSectionMachO &Section =
+      cast<MCSectionMachO>(it->SectionData->getSection());
 
-    assert(OS.tell() - Start == (is64Bit() ? macho::Section64Size :
-           macho::Section32Size));
-  }
+    if (Section.getType() != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS)
+      continue;
 
-  void WriteSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
-                              uint32_t StringTableOffset,
-                              uint32_t StringTableSize) {
-    // struct symtab_command (24 bytes)
-
-    uint64_t Start = OS.tell();
-    (void) Start;
-
-    Write32(macho::LCT_Symtab);
-    Write32(macho::SymtabLoadCommandSize);
-    Write32(SymbolOffset);
-    Write32(NumSymbols);
-    Write32(StringTableOffset);
-    Write32(StringTableSize);
+    // Initialize the section indirect symbol base, if necessary.
+    if (!IndirectSymBase.count(it->SectionData))
+      IndirectSymBase[it->SectionData] = IndirectIndex;
 
-    assert(OS.tell() - Start == macho::SymtabLoadCommandSize);
+    Asm.getOrCreateSymbolData(*it->Symbol);
   }
 
-  void WriteDysymtabLoadCommand(uint32_t FirstLocalSymbol,
-                                uint32_t NumLocalSymbols,
-                                uint32_t FirstExternalSymbol,
-                                uint32_t NumExternalSymbols,
-                                uint32_t FirstUndefinedSymbol,
-                                uint32_t NumUndefinedSymbols,
-                                uint32_t IndirectSymbolOffset,
-                                uint32_t NumIndirectSymbols) {
-    // struct dysymtab_command (80 bytes)
-
-    uint64_t Start = OS.tell();
-    (void) Start;
-
-    Write32(macho::LCT_Dysymtab);
-    Write32(macho::DysymtabLoadCommandSize);
-    Write32(FirstLocalSymbol);
-    Write32(NumLocalSymbols);
-    Write32(FirstExternalSymbol);
-    Write32(NumExternalSymbols);
-    Write32(FirstUndefinedSymbol);
-    Write32(NumUndefinedSymbols);
-    Write32(0); // tocoff
-    Write32(0); // ntoc
-    Write32(0); // modtaboff
-    Write32(0); // nmodtab
-    Write32(0); // extrefsymoff
-    Write32(0); // nextrefsyms
-    Write32(IndirectSymbolOffset);
-    Write32(NumIndirectSymbols);
-    Write32(0); // extreloff
-    Write32(0); // nextrel
-    Write32(0); // locreloff
-    Write32(0); // nlocrel
+  // Then lazy symbol pointers and symbol stubs.
+  IndirectIndex = 0;
+  for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
+         ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
+    const MCSectionMachO &Section =
+      cast<MCSectionMachO>(it->SectionData->getSection());
 
-    assert(OS.tell() - Start == macho::DysymtabLoadCommandSize);
-  }
+    if (Section.getType() != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
+        Section.getType() != MCSectionMachO::S_SYMBOL_STUBS)
+      continue;
 
-  void WriteNlist(MachSymbolData &MSD, const MCAsmLayout &Layout) {
-    MCSymbolData &Data = *MSD.SymbolData;
-    const MCSymbol &Symbol = Data.getSymbol();
-    uint8_t Type = 0;
-    uint16_t Flags = Data.getFlags();
-    uint32_t Address = 0;
+    // Initialize the section indirect symbol base, if necessary.
+    if (!IndirectSymBase.count(it->SectionData))
+      IndirectSymBase[it->SectionData] = IndirectIndex;
 
-    // Set the N_TYPE bits. See <mach-o/nlist.h>.
+    // Set the symbol type to undefined lazy, but only on construction.
     //
-    // FIXME: Are the prebound or indirect fields possible here?
-    if (Symbol.isUndefined())
-      Type = macho::STT_Undefined;
-    else if (Symbol.isAbsolute())
-      Type = macho::STT_Absolute;
-    else
-      Type = macho::STT_Section;
-
-    // FIXME: Set STAB bits.
-
-    if (Data.isPrivateExtern())
-      Type |= macho::STF_PrivateExtern;
-
-    // Set external bit.
-    if (Data.isExternal() || Symbol.isUndefined())
-      Type |= macho::STF_External;
-
-    // Compute the symbol address.
-    if (Symbol.isDefined()) {
-      if (Symbol.isAbsolute()) {
-        Address = cast<MCConstantExpr>(Symbol.getVariableValue())->getValue();
-      } else {
-        Address = getSymbolAddress(&Data, Layout);
-      }
-    } else if (Data.isCommon()) {
-      // Common symbols are encoded with the size in the address
-      // field, and their alignment in the flags.
-      Address = Data.getCommonSize();
-
-      // Common alignment is packed into the 'desc' bits.
-      if (unsigned Align = Data.getCommonAlignment()) {
-        unsigned Log2Size = Log2_32(Align);
-        assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
-        if (Log2Size > 15)
-          report_fatal_error("invalid 'common' alignment '" +
-                            Twine(Align) + "'");
-        // FIXME: Keep this mask with the SymbolFlags enumeration.
-        Flags = (Flags & 0xF0FF) | (Log2Size << 8);
-      }
-    }
-
-    // struct nlist (12 bytes)
-
-    Write32(MSD.StringIndex);
-    Write8(Type);
-    Write8(MSD.SectionIndex);
-
-    // The Mach-O streamer uses the lowest 16-bits of the flags for the 'desc'
-    // value.
-    Write16(Flags);
-    if (is64Bit())
-      Write64(Address);
-    else
-      Write32(Address);
+    // FIXME: Do not hardcode.
+    bool Created;
+    MCSymbolData &Entry = Asm.getOrCreateSymbolData(*it->Symbol, &Created);
+    if (Created)
+      Entry.setFlags(Entry.getFlags() | 0x0001);
   }
+}
 
-  // FIXME: We really need to improve the relocation validation. Basically, we
-  // want to implement a separate computation which evaluates the relocation
-  // entry as the linker would, and verifies that the resultant fixup value is
-  // exactly what the encoder wanted. This will catch several classes of
-  // problems:
-  //
-  //  - Relocation entry bugs, the two algorithms are unlikely to have the same
-  //    exact bug.
-  //
-  //  - Relaxation issues, where we forget to relax something.
-  //
-  //  - Input errors, where something cannot be correctly encoded. 'as' allows
-  //    these through in many cases.
+/// ComputeSymbolTable - Compute the symbol table data.
+///
+/// \param StringTable [out] - The string table data.
+/// \param LocalSymbolData [out] - The defined local (non-external) symbols.
+/// \param ExternalSymbolData [out] - The defined external symbols.
+/// \param UndefinedSymbolData [out] - The undefined symbols.
+void MachObjectWriter::
+ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
+                   std::vector<MachSymbolData> &LocalSymbolData,
+                   std::vector<MachSymbolData> &ExternalSymbolData,
+                   std::vector<MachSymbolData> &UndefinedSymbolData) {
+  // Build section lookup table.
+  DenseMap<const MCSection*, uint8_t> SectionIndexMap;
+  unsigned Index = 1;
+  for (MCAssembler::iterator it = Asm.begin(),
+         ie = Asm.end(); it != ie; ++it, ++Index)
+    SectionIndexMap[&it->getSection()] = Index;
+  assert(Index <= 256 && "Too many sections!");
+
+  // Index 0 is always the empty string.
+  StringMap<uint64_t> StringIndexMap;
+  StringTable += '\x00';
 
-  static bool isFixupKindRIPRel(unsigned Kind) {
-    return Kind == X86::reloc_riprel_4byte ||
-      Kind == X86::reloc_riprel_4byte_movq_load;
-  }
-  void RecordX86_64Relocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
-                              const MCFragment *Fragment,
-                              const MCFixup &Fixup, MCValue Target,
-                              uint64_t &FixedValue) {
-    unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
-    unsigned IsRIPRel = isFixupKindRIPRel(Fixup.getKind());
-    unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
-
-    // See <reloc.h>.
-    uint32_t FixupOffset =
-      Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
-    uint32_t FixupAddress =
-      getFragmentAddress(Fragment, Layout) + Fixup.getOffset();
-    int64_t Value = 0;
-    unsigned Index = 0;
-    unsigned IsExtern = 0;
-    unsigned Type = 0;
-
-    Value = Target.getConstant();
-
-    if (IsPCRel) {
-      // Compensate for the relocation offset, Darwin x86_64 relocations only
-      // have the addend and appear to have attempted to define it to be the
-      // actual expression addend without the PCrel bias. However, instructions
-      // with data following the relocation are not accommodated for (see comment
-      // below regarding SIGNED{1,2,4}), so it isn't exactly that either.
-      Value += 1LL << Log2Size;
+  // Build the symbol arrays and the string table, but only for non-local
+  // symbols.
+  //
+  // The particular order that we collect the symbols and create the string
+  // table, then sort the symbols is chosen to match 'as'. Even though it
+  // doesn't matter for correctness, this is important for letting us diff .o
+  // files.
+  for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+         ie = Asm.symbol_end(); it != ie; ++it) {
+    const MCSymbol &Symbol = it->getSymbol();
+
+    // Ignore non-linker visible symbols.
+    if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
+      continue;
+
+    if (!it->isExternal() && !Symbol.isUndefined())
+      continue;
+
+    uint64_t &Entry = StringIndexMap[Symbol.getName()];
+    if (!Entry) {
+      Entry = StringTable.size();
+      StringTable += Symbol.getName();
+      StringTable += '\x00';
     }
 
-    if (Target.isAbsolute()) { // constant
-      // SymbolNum of 0 indicates the absolute section.
-      Type = macho::RIT_X86_64_Unsigned;
-      Index = 0;
-
-      // FIXME: I believe this is broken, I don't think the linker can
-      // understand it. I think it would require a local relocation, but I'm not
-      // sure if that would work either. The official way to get an absolute
-      // PCrel relocation is to use an absolute symbol (which we don't support
-      // yet).
-      if (IsPCRel) {
-        IsExtern = 1;
-        Type = macho::RIT_X86_64_Branch;
-      }
-    } else if (Target.getSymB()) { // A - B + constant
-      const MCSymbol *A = &Target.getSymA()->getSymbol();
-      MCSymbolData &A_SD = Asm.getSymbolData(*A);
-      const MCSymbolData *A_Base = Asm.getAtom(&A_SD);
-
-      const MCSymbol *B = &Target.getSymB()->getSymbol();
-      MCSymbolData &B_SD = Asm.getSymbolData(*B);
-      const MCSymbolData *B_Base = Asm.getAtom(&B_SD);
-
-      // Neither symbol can be modified.
-      if (Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None ||
-          Target.getSymB()->getKind() != MCSymbolRefExpr::VK_None)
-        report_fatal_error("unsupported relocation of modified symbol");
-
-      // We don't support PCrel relocations of differences. Darwin 'as' doesn't
-      // implement most of these correctly.
-      if (IsPCRel)
-        report_fatal_error("unsupported pc-relative relocation of difference");
-
-      // The support for the situation where one or both of the symbols would
-      // require a local relocation is handled just like if the symbols were
-      // external.  This is certainly used in the case of debug sections where
-      // the section has only temporary symbols and thus the symbols don't have
-      // base symbols.  This is encoded using the section ordinal and
-      // non-extern relocation entries.
-
-      // Darwin 'as' doesn't emit correct relocations for this (it ends up with
-      // a single SIGNED relocation); reject it for now.  Except the case where
-      // both symbols don't have a base, equal but both NULL.
-      if (A_Base == B_Base && A_Base)
-        report_fatal_error("unsupported relocation with identical base");
-
-      Value += getSymbolAddress(&A_SD, Layout) -
-        (A_Base == NULL ? 0 : getSymbolAddress(A_Base, Layout));
-      Value -= getSymbolAddress(&B_SD, Layout) -
-        (B_Base == NULL ? 0 : getSymbolAddress(B_Base, Layout));
-
-      if (A_Base) {
-        Index = A_Base->getIndex();
-        IsExtern = 1;
-      }
-      else {
-        Index = A_SD.getFragment()->getParent()->getOrdinal() + 1;
-        IsExtern = 0;
-      }
-      Type = macho::RIT_X86_64_Unsigned;
-
-      macho::RelocationEntry MRE;
-      MRE.Word0 = FixupOffset;
-      MRE.Word1 = ((Index     <<  0) |
-                   (IsPCRel   << 24) |
-                   (Log2Size  << 25) |
-                   (IsExtern  << 27) |
-                   (Type      << 28));
-      Relocations[Fragment->getParent()].push_back(MRE);
-
-      if (B_Base) {
-        Index = B_Base->getIndex();
-        IsExtern = 1;
-      }
-      else {
-        Index = B_SD.getFragment()->getParent()->getOrdinal() + 1;
-        IsExtern = 0;
-      }
-      Type = macho::RIT_X86_64_Subtractor;
+    MachSymbolData MSD;
+    MSD.SymbolData = it;
+    MSD.StringIndex = Entry;
+
+    if (Symbol.isUndefined()) {
+      MSD.SectionIndex = 0;
+      UndefinedSymbolData.push_back(MSD);
+    } else if (Symbol.isAbsolute()) {
+      MSD.SectionIndex = 0;
+      ExternalSymbolData.push_back(MSD);
     } else {
-      const MCSymbol *Symbol = &Target.getSymA()->getSymbol();
-      MCSymbolData &SD = Asm.getSymbolData(*Symbol);
-      const MCSymbolData *Base = Asm.getAtom(&SD);
-
-      // Relocations inside debug sections always use local relocations when
-      // possible. This seems to be done because the debugger doesn't fully
-      // understand x86_64 relocation entries, and expects to find values that
-      // have already been fixed up.
-      if (Symbol->isInSection()) {
-        const MCSectionMachO &Section = static_cast<const MCSectionMachO&>(
-          Fragment->getParent()->getSection());
-        if (Section.hasAttribute(MCSectionMachO::S_ATTR_DEBUG))
-          Base = 0;
-      }
-
-      // x86_64 almost always uses external relocations, except when there is no
-      // symbol to use as a base address (a local symbol with no preceding
-      // non-local symbol).
-      if (Base) {
-        Index = Base->getIndex();
-        IsExtern = 1;
-
-        // Add the local offset, if needed.
-        if (Base != &SD)
-          Value += Layout.getSymbolOffset(&SD) - Layout.getSymbolOffset(Base);
-      } else if (Symbol->isInSection() && !Symbol->isVariable()) {
-        // The index is the section ordinal (1-based).
-        Index = SD.getFragment()->getParent()->getOrdinal() + 1;
-        IsExtern = 0;
-        Value += getSymbolAddress(&SD, Layout);
-
-        if (IsPCRel)
-          Value -= FixupAddress + (1 << Log2Size);
-      } else if (Symbol->isVariable()) {
-        const MCExpr *Value = Symbol->getVariableValue();
-        int64_t Res;
-        bool isAbs = Value->EvaluateAsAbsolute(Res, Layout, SectionAddress);
-        if (isAbs) {
-          FixedValue = Res;
-          return;
-        } else {
-          report_fatal_error("unsupported relocation of variable '" +
-                             Symbol->getName() + "'");
-        }
-      } else {
-        report_fatal_error("unsupported relocation of undefined symbol '" +
-                           Symbol->getName() + "'");
-      }
-
-      MCSymbolRefExpr::VariantKind Modifier = Target.getSymA()->getKind();
-      if (IsPCRel) {
-        if (IsRIPRel) {
-          if (Modifier == MCSymbolRefExpr::VK_GOTPCREL) {
-            // x86_64 distinguishes movq foo at GOTPCREL so that the linker can
-            // rewrite the movq to an leaq at link time if the symbol ends up in
-            // the same linkage unit.
-            if (unsigned(Fixup.getKind()) == X86::reloc_riprel_4byte_movq_load)
-              Type = macho::RIT_X86_64_GOTLoad;
-            else
-              Type = macho::RIT_X86_64_GOT;
-          }  else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
-            Type = macho::RIT_X86_64_TLV;
-          }  else if (Modifier != MCSymbolRefExpr::VK_None) {
-            report_fatal_error("unsupported symbol modifier in relocation");
-          } else {
-            Type = macho::RIT_X86_64_Signed;
-
-            // The Darwin x86_64 relocation format has a problem where it cannot
-            // encode an address (L<foo> + <constant>) which is outside the atom
-            // containing L<foo>. Generally, this shouldn't occur but it does
-            // happen when we have a RIPrel instruction with data following the
-            // relocation entry (e.g., movb $012, L0(%rip)). Even with the PCrel
-            // adjustment Darwin x86_64 uses, the offset is still negative and
-            // the linker has no way to recognize this.
-            //
-            // To work around this, Darwin uses several special relocation types
-            // to indicate the offsets. However, the specification or
-            // implementation of these seems to also be incomplete; they should
-            // adjust the addend as well based on the actual encoded instruction
-            // (the additional bias), but instead appear to just look at the
-            // final offset.
-            switch (-(Target.getConstant() + (1LL << Log2Size))) {
-            case 1: Type = macho::RIT_X86_64_Signed1; break;
-            case 2: Type = macho::RIT_X86_64_Signed2; break;
-            case 4: Type = macho::RIT_X86_64_Signed4; break;
-            }
-          }
-        } else {
-          if (Modifier != MCSymbolRefExpr::VK_None)
-            report_fatal_error("unsupported symbol modifier in branch "
-                              "relocation");
-
-          Type = macho::RIT_X86_64_Branch;
-        }
-      } else {
-        if (Modifier == MCSymbolRefExpr::VK_GOT) {
-          Type = macho::RIT_X86_64_GOT;
-        } else if (Modifier == MCSymbolRefExpr::VK_GOTPCREL) {
-          // GOTPCREL is allowed as a modifier on non-PCrel instructions, in
-          // which case all we do is set the PCrel bit in the relocation entry;
-          // this is used with exception handling, for example. The source is
-          // required to include any necessary offset directly.
-          Type = macho::RIT_X86_64_GOT;
-          IsPCRel = 1;
-        } else if (Modifier == MCSymbolRefExpr::VK_TLVP) {
-          report_fatal_error("TLVP symbol modifier should have been rip-rel");
-        } else if (Modifier != MCSymbolRefExpr::VK_None)
-          report_fatal_error("unsupported symbol modifier in relocation");
-        else
-          Type = macho::RIT_X86_64_Unsigned;
-      }
+      MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+      assert(MSD.SectionIndex && "Invalid section index!");
+      ExternalSymbolData.push_back(MSD);
     }
-
-    // x86_64 always writes custom values into the fixups.
-    FixedValue = Value;
-
-    // struct relocation_info (8 bytes)
-    macho::RelocationEntry MRE;
-    MRE.Word0 = FixupOffset;
-    MRE.Word1 = ((Index     <<  0) |
-                 (IsPCRel   << 24) |
-                 (Log2Size  << 25) |
-                 (IsExtern  << 27) |
-                 (Type      << 28));
-    Relocations[Fragment->getParent()].push_back(MRE);
   }
 
-  void RecordScatteredRelocation(const MCAssembler &Asm,
-                                 const MCAsmLayout &Layout,
-                                 const MCFragment *Fragment,
-                                 const MCFixup &Fixup, MCValue Target,
-                                 unsigned Log2Size,
-                                 uint64_t &FixedValue) {
-    uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
-    unsigned Type = macho::RIT_Vanilla;
-
-    // See <reloc.h>.
-    const MCSymbol *A = &Target.getSymA()->getSymbol();
-    MCSymbolData *A_SD = &Asm.getSymbolData(*A);
-
-    if (!A_SD->getFragment())
-      report_fatal_error("symbol '" + A->getName() +
-                        "' can not be undefined in a subtraction expression");
-
-    uint32_t Value = getSymbolAddress(A_SD, Layout);
-    uint64_t SecAddr = getSectionAddress(A_SD->getFragment()->getParent());
-    FixedValue += SecAddr;
-    uint32_t Value2 = 0;
-
-    if (const MCSymbolRefExpr *B = Target.getSymB()) {
-      MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
-
-      if (!B_SD->getFragment())
-        report_fatal_error("symbol '" + B->getSymbol().getName() +
-                          "' can not be undefined in a subtraction expression");
-
-      // Select the appropriate difference relocation type.
-      //
-      // Note that there is no longer any semantic difference between these two
-      // relocation types from the linkers point of view, this is done solely
-      // for pedantic compatibility with 'as'.
-      Type = A_SD->isExternal() ? (unsigned)macho::RIT_Difference :
-        (unsigned)macho::RIT_Generic_LocalDifference;
-      Value2 = getSymbolAddress(B_SD, Layout);
-      FixedValue -= getSectionAddress(B_SD->getFragment()->getParent());
-    }
-
-    // Relocations are written out in reverse order, so the PAIR comes first.
-    if (Type == macho::RIT_Difference ||
-        Type == macho::RIT_Generic_LocalDifference) {
-      macho::RelocationEntry MRE;
-      MRE.Word0 = ((0         <<  0) |
-                   (macho::RIT_Pair  << 24) |
-                   (Log2Size  << 28) |
-                   (IsPCRel   << 30) |
-                   macho::RF_Scattered);
-      MRE.Word1 = Value2;
-      Relocations[Fragment->getParent()].push_back(MRE);
-    }
+  // Now add the data for local symbols.
+  for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
+         ie = Asm.symbol_end(); it != ie; ++it) {
+    const MCSymbol &Symbol = it->getSymbol();
 
-    macho::RelocationEntry MRE;
-    MRE.Word0 = ((FixupOffset <<  0) |
-                 (Type        << 24) |
-                 (Log2Size    << 28) |
-                 (IsPCRel     << 30) |
-                 macho::RF_Scattered);
-    MRE.Word1 = Value;
-    Relocations[Fragment->getParent()].push_back(MRE);
-  }
+    // Ignore non-linker visible symbols.
+    if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
+      continue;
 
-  void RecordARMScatteredRelocation(const MCAssembler &Asm,
-                                    const MCAsmLayout &Layout,
-                                    const MCFragment *Fragment,
-                                    const MCFixup &Fixup, MCValue Target,
-                                    unsigned Log2Size,
-                                    uint64_t &FixedValue) {
-    uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
-    unsigned Type = macho::RIT_Vanilla;
-
-    // See <reloc.h>.
-    const MCSymbol *A = &Target.getSymA()->getSymbol();
-    MCSymbolData *A_SD = &Asm.getSymbolData(*A);
-
-    if (!A_SD->getFragment())
-      report_fatal_error("symbol '" + A->getName() +
-                        "' can not be undefined in a subtraction expression");
-
-    uint32_t Value = getSymbolAddress(A_SD, Layout);
-    uint64_t SecAddr = getSectionAddress(A_SD->getFragment()->getParent());
-    FixedValue += SecAddr;
-    uint32_t Value2 = 0;
-
-    if (const MCSymbolRefExpr *B = Target.getSymB()) {
-      MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
-
-      if (!B_SD->getFragment())
-        report_fatal_error("symbol '" + B->getSymbol().getName() +
-                          "' can not be undefined in a subtraction expression");
-
-      // Select the appropriate difference relocation type.
-      Type = macho::RIT_Difference;
-      Value2 = getSymbolAddress(B_SD, Layout);
-      FixedValue -= getSectionAddress(B_SD->getFragment()->getParent());
-    }
+    if (it->isExternal() || Symbol.isUndefined())
+      continue;
 
-    // Relocations are written out in reverse order, so the PAIR comes first.
-    if (Type == macho::RIT_Difference ||
-        Type == macho::RIT_Generic_LocalDifference) {
-      macho::RelocationEntry MRE;
-      MRE.Word0 = ((0         <<  0) |
-                   (macho::RIT_Pair  << 24) |
-                   (Log2Size  << 28) |
-                   (IsPCRel   << 30) |
-                   macho::RF_Scattered);
-      MRE.Word1 = Value2;
-      Relocations[Fragment->getParent()].push_back(MRE);
-    }
-
-    macho::RelocationEntry MRE;
-    MRE.Word0 = ((FixupOffset <<  0) |
-                 (Type        << 24) |
-                 (Log2Size    << 28) |
-                 (IsPCRel     << 30) |
-                 macho::RF_Scattered);
-    MRE.Word1 = Value;
-    Relocations[Fragment->getParent()].push_back(MRE);
-  }
-
-  void RecordARMMovwMovtRelocation(const MCAssembler &Asm,
-                                   const MCAsmLayout &Layout,
-                                   const MCFragment *Fragment,
-                                   const MCFixup &Fixup, MCValue Target,
-                                   uint64_t &FixedValue) {
-    uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
-    unsigned Type = macho::RIT_ARM_Half;
-
-    // See <reloc.h>.
-    const MCSymbol *A = &Target.getSymA()->getSymbol();
-    MCSymbolData *A_SD = &Asm.getSymbolData(*A);
-
-    if (!A_SD->getFragment())
-      report_fatal_error("symbol '" + A->getName() +
-                        "' can not be undefined in a subtraction expression");
-
-    uint32_t Value = getSymbolAddress(A_SD, Layout);
-    uint32_t Value2 = 0;
-    uint64_t SecAddr = getSectionAddress(A_SD->getFragment()->getParent());
-    FixedValue += SecAddr;
-
-    if (const MCSymbolRefExpr *B = Target.getSymB()) {
-      MCSymbolData *B_SD = &Asm.getSymbolData(B->getSymbol());
-
-      if (!B_SD->getFragment())
-        report_fatal_error("symbol '" + B->getSymbol().getName() +
-                          "' can not be undefined in a subtraction expression");
-
-      // Select the appropriate difference relocation type.
-      Type = macho::RIT_ARM_HalfDifference;
-      Value2 = getSymbolAddress(B_SD, Layout);
-      FixedValue -= getSectionAddress(B_SD->getFragment()->getParent());
-    }
-
-    // Relocations are written out in reverse order, so the PAIR comes first.
-    // ARM_RELOC_HALF and ARM_RELOC_HALF_SECTDIFF abuse the r_length field:
-    //
-    // For these two r_type relocations they always have a pair following them
-    // and the r_length bits are used differently.  The encoding of the
-    // r_length is as follows:
-    // low bit of r_length:
-    //  0 - :lower16: for movw instructions
-    //  1 - :upper16: for movt instructions
-    // high bit of r_length:
-    //  0 - arm instructions
-    //  1 - thumb instructions
-    // the other half of the relocated expression is in the following pair
-    // relocation entry in the low 16 bits of the r_address field.
-    unsigned ThumbBit = 0;
-    unsigned MovtBit = 0;
-    switch ((unsigned)Fixup.getKind()) {
-    default: break;
-    case ARM::fixup_arm_movt_hi16:
-    case ARM::fixup_arm_movt_hi16_pcrel:
-      MovtBit = 1;
-      break;
-    case ARM::fixup_t2_movt_hi16:
-    case ARM::fixup_t2_movt_hi16_pcrel:
-      MovtBit = 1;
-      // Fallthrough
-    case ARM::fixup_t2_movw_lo16:
-    case ARM::fixup_t2_movw_lo16_pcrel:
-      ThumbBit = 1;
-      break;
-    }
-
-
-    if (Type == macho::RIT_ARM_HalfDifference) {
-      uint32_t OtherHalf = MovtBit
-        ? (FixedValue & 0xffff) : ((FixedValue & 0xffff0000) >> 16);
-
-      macho::RelocationEntry MRE;
-      MRE.Word0 = ((OtherHalf       <<  0) |
-                   (macho::RIT_Pair << 24) |
-                   (MovtBit         << 28) |
-                   (ThumbBit        << 29) |
-                   (IsPCRel         << 30) |
-                   macho::RF_Scattered);
-      MRE.Word1 = Value2;
-      Relocations[Fragment->getParent()].push_back(MRE);
+    uint64_t &Entry = StringIndexMap[Symbol.getName()];
+    if (!Entry) {
+      Entry = StringTable.size();
+      StringTable += Symbol.getName();
+      StringTable += '\x00';
     }
 
-    macho::RelocationEntry MRE;
-    MRE.Word0 = ((FixupOffset <<  0) |
-                 (Type        << 24) |
-                 (MovtBit     << 28) |
-                 (ThumbBit    << 29) |
-                 (IsPCRel     << 30) |
-                 macho::RF_Scattered);
-    MRE.Word1 = Value;
-    Relocations[Fragment->getParent()].push_back(MRE);
-  }
-
-  void RecordTLVPRelocation(const MCAssembler &Asm,
-                            const MCAsmLayout &Layout,
-                            const MCFragment *Fragment,
-                            const MCFixup &Fixup, MCValue Target,
-                            uint64_t &FixedValue) {
-    assert(Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP &&
-           !is64Bit() &&
-           "Should only be called with a 32-bit TLVP relocation!");
-
-    unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
-    uint32_t Value = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    unsigned IsPCRel = 0;
-
-    // Get the symbol data.
-    MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
-    unsigned Index = SD_A->getIndex();
-
-    // We're only going to have a second symbol in pic mode and it'll be a
-    // subtraction from the picbase. For 32-bit pic the addend is the difference
-    // between the picbase and the next address.  For 32-bit static the addend
-    // is zero.
-    if (Target.getSymB()) {
-      // If this is a subtraction then we're pcrel.
-      uint32_t FixupAddress =
-        getFragmentAddress(Fragment, Layout) + Fixup.getOffset();
-      MCSymbolData *SD_B = &Asm.getSymbolData(Target.getSymB()->getSymbol());
-      IsPCRel = 1;
-      FixedValue = (FixupAddress - getSymbolAddress(SD_B, Layout) +
-                    Target.getConstant());
-      FixedValue += 1ULL << Log2Size;
+    MachSymbolData MSD;
+    MSD.SymbolData = it;
+    MSD.StringIndex = Entry;
+
+    if (Symbol.isAbsolute()) {
+      MSD.SectionIndex = 0;
+      LocalSymbolData.push_back(MSD);
     } else {
-      FixedValue = 0;
+      MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
+      assert(MSD.SectionIndex && "Invalid section index!");
+      LocalSymbolData.push_back(MSD);
     }
-
-    // struct relocation_info (8 bytes)
-    macho::RelocationEntry MRE;
-    MRE.Word0 = Value;
-    MRE.Word1 = ((Index                  <<  0) |
-                 (IsPCRel                << 24) |
-                 (Log2Size               << 25) |
-                 (1                      << 27) | // Extern
-                 (macho::RIT_Generic_TLV << 28)); // Type
-    Relocations[Fragment->getParent()].push_back(MRE);
   }
 
-  static bool getARMFixupKindMachOInfo(unsigned Kind, unsigned &RelocType,
-                                       unsigned &Log2Size) {
-    RelocType = unsigned(macho::RIT_Vanilla);
-    Log2Size = ~0U;
+  // External and undefined symbols are required to be in lexicographic order.
+  std::sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
+  std::sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
+
+  // Set the symbol indices.
+  Index = 0;
+  for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
+    LocalSymbolData[i].SymbolData->setIndex(Index++);
+  for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
+    ExternalSymbolData[i].SymbolData->setIndex(Index++);
+  for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
+    UndefinedSymbolData[i].SymbolData->setIndex(Index++);
 
-    switch (Kind) {
-    default:
-      return false;
-
-    case FK_Data_1:
-      Log2Size = llvm::Log2_32(1);
-      return true;
-    case FK_Data_2:
-      Log2Size = llvm::Log2_32(2);
-      return true;
-    case FK_Data_4:
-      Log2Size = llvm::Log2_32(4);
-      return true;
-    case FK_Data_8:
-      Log2Size = llvm::Log2_32(8);
-      return true;
-
-      // Handle 24-bit branch kinds.
-    case ARM::fixup_arm_ldst_pcrel_12:
-    case ARM::fixup_arm_pcrel_10:
-    case ARM::fixup_arm_adr_pcrel_12:
-    case ARM::fixup_arm_condbranch:
-    case ARM::fixup_arm_uncondbranch:
-      RelocType = unsigned(macho::RIT_ARM_Branch24Bit);
-      // Report as 'long', even though that is not quite accurate.
-      Log2Size = llvm::Log2_32(4);
-      return true;
-
-      // Handle Thumb branches.
-    case ARM::fixup_arm_thumb_br:
-      RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit);
-      Log2Size = llvm::Log2_32(2);
-      return true;
-      
-    case ARM::fixup_arm_thumb_bl:
-    case ARM::fixup_arm_thumb_blx:
-      RelocType = unsigned(macho::RIT_ARM_ThumbBranch22Bit);
-      Log2Size = llvm::Log2_32(4);
-      return true;
-
-    case ARM::fixup_arm_movt_hi16:
-    case ARM::fixup_arm_movt_hi16_pcrel:
-    case ARM::fixup_t2_movt_hi16:
-    case ARM::fixup_t2_movt_hi16_pcrel:
-      RelocType = unsigned(macho::RIT_ARM_HalfDifference);
-      // Report as 'long', even though that is not quite accurate.
-      Log2Size = llvm::Log2_32(4);
-      return true;
+  // The string table is padded to a multiple of 4.
+  while (StringTable.size() % 4)
+    StringTable += '\x00';
+}
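
The string table produced above has a fixed shape: one leading NUL so that offset 0 means "no name", each symbol name stored once and NUL-terminated at its recorded offset, and trailing NULs to pad the table to a multiple of four bytes. A tiny self-contained reconstruction of just that layout (hypothetical buildStringTable helper):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    // Build a Mach-O style string table and record each name's offset.
    std::string buildStringTable(const std::vector<std::string> &Names,
                                 std::map<std::string, uint64_t> &Offsets) {
      std::string Table(1, '\0');            // offset 0 is the empty string
      for (const std::string &Name : Names) {
        if (Offsets.count(Name))
          continue;                          // store each name only once
        Offsets[Name] = Table.size();
        Table += Name;
        Table += '\0';
      }
      while (Table.size() % 4)               // pad to a 4-byte multiple
        Table += '\0';
      return Table;
    }

    // Example: {"_foo", "_bar"} yields "\0_foo\0_bar\0" padded to 12 bytes,
    // with "_foo" at offset 1 and "_bar" at offset 6.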
 
-    case ARM::fixup_arm_movw_lo16:
-    case ARM::fixup_arm_movw_lo16_pcrel:
-    case ARM::fixup_t2_movw_lo16:
-    case ARM::fixup_t2_movw_lo16_pcrel:
-      RelocType = unsigned(macho::RIT_ARM_Half);
-      // Report as 'long', even though that is not quite accurate.
-      Log2Size = llvm::Log2_32(4);
-      return true;
-    }
+void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm,
+                                               const MCAsmLayout &Layout) {
+  uint64_t StartAddress = 0;
+  const SmallVectorImpl<MCSectionData*> &Order = Layout.getSectionOrder();
+  for (int i = 0, n = Order.size(); i != n; ++i) {
+    const MCSectionData *SD = Order[i];
+    StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());
+    SectionAddress[SD] = StartAddress;
+    StartAddress += Layout.getSectionAddressSize(SD);
+
+    // Explicitly pad the section to match the alignment requirements of the
+    // following one. This is for 'gas' compatibility; it shouldn't
+    // strictly be necessary.
+    StartAddress += getPaddingSize(SD, Layout);
   }
-  void RecordARMRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
-                           const MCFragment *Fragment, const MCFixup &Fixup,
-                           MCValue Target, uint64_t &FixedValue) {
-    unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
-    unsigned Log2Size;
-    unsigned RelocType = macho::RIT_Vanilla;
-    if (!getARMFixupKindMachOInfo(Fixup.getKind(), RelocType, Log2Size)) {
-      report_fatal_error("unknown ARM fixup kind!");
-      return;
-    }
-
-    // If this is a difference or a defined symbol plus an offset, then we need
-    // a scattered relocation entry.  Differences always require scattered
-    // relocations.
-    if (Target.getSymB()) {
-      if (RelocType == macho::RIT_ARM_Half ||
-          RelocType == macho::RIT_ARM_HalfDifference)
-        return RecordARMMovwMovtRelocation(Asm, Layout, Fragment, Fixup,
-                                           Target, FixedValue);
-      return RecordARMScatteredRelocation(Asm, Layout, Fragment, Fixup,
-                                          Target, Log2Size, FixedValue);
-    }
+}
 
-    // Get the symbol data, if any.
-    MCSymbolData *SD = 0;
-    if (Target.getSymA())
-      SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm,
+                                                const MCAsmLayout &Layout) {
+  computeSectionAddresses(Asm, Layout);
+
+  // Create symbol data for any indirect symbols.
+  BindIndirectSymbols(Asm);
+
+  // Compute symbol table information and bind symbol indices.
+  ComputeSymbolTable(Asm, StringTable, LocalSymbolData, ExternalSymbolData,
+                     UndefinedSymbolData);
+}
 
-    // FIXME: For other platforms, we need to use scattered relocations for
-    // internal relocations with offsets.  If this is an internal relocation
-    // with an offset, it also needs a scattered relocation entry.
-    //
-    // Is this right for ARM?
-    uint32_t Offset = Target.getConstant();
-    if (IsPCRel && RelocType == macho::RIT_Vanilla)
-      Offset += 1 << Log2Size;
-    if (Offset && SD && !doesSymbolRequireExternRelocation(SD))
-      return RecordARMScatteredRelocation(Asm, Layout, Fragment, Fixup, Target,
-                                          Log2Size, FixedValue);
-
-    // See <reloc.h>.
-    uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    unsigned Index = 0;
-    unsigned IsExtern = 0;
-    unsigned Type = 0;
-
-    if (Target.isAbsolute()) { // constant
-      // FIXME!
-      report_fatal_error("FIXME: relocations to absolute targets "
-                         "not yet implemented");
-    } else {
-      // Resolve constant variables.
-      if (SD->getSymbol().isVariable()) {
-        int64_t Res;
-        if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute(
-              Res, Layout, SectionAddress)) {
-          FixedValue = Res;
-          return;
-        }
-      }
+bool MachObjectWriter::
+IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+                                       const MCSymbolData &DataA,
+                                       const MCFragment &FB,
+                                       bool InSet,
+                                       bool IsPCRel) const {
+  if (InSet)
+    return true;
 
-      // Check whether we need an external or internal relocation.
-      if (doesSymbolRequireExternRelocation(SD)) {
-        IsExtern = 1;
-        Index = SD->getIndex();
-        // For external relocations, make sure to offset the fixup value to
-        // compensate for the addend of the symbol address, if it was
-        // undefined. This occurs with weak definitions, for example.
-        if (!SD->Symbol->isUndefined())
-          FixedValue -= Layout.getSymbolOffset(SD);
-      } else {
-        // The index is the section ordinal (1-based).
-        const MCSectionData &SymSD = Asm.getSectionData(
-          SD->getSymbol().getSection());
-        Index = SymSD.getOrdinal() + 1;
-        FixedValue += getSectionAddress(&SymSD);
-      }
-      if (IsPCRel)
-        FixedValue -= getSectionAddress(Fragment->getParent());
+  // The effective address is
+  //     addr(atom(A)) + offset(A)
+  //   - addr(atom(B)) - offset(B)
+  // and the offsets are not relocatable, so the fixup is fully resolved when
+  //  addr(atom(A)) - addr(atom(B)) == 0.
+  const MCSymbolData *A_Base = 0, *B_Base = 0;
+
+  const MCSymbol &SA = DataA.getSymbol().AliasedSymbol();
+  const MCSection &SecA = SA.getSection();
+  const MCSection &SecB = FB.getParent()->getSection();
+
+  if (IsPCRel) {
+    // The simple (Darwin, except on x86_64) way of dealing with this was to
+    // assume that any reference to a temporary symbol *must* be to a
+    // temporary symbol in the same atom, unless the sections differ.
+    // Therefore, any PCrel relocation to a temporary symbol (in the same
+    // section) is fully resolved. This also works in conjunction with
+    // absolutized .set, which requires the compiler to use .set to absolutize
+    // differences between symbols it knows to be assembly-time constants, so
+    // this code does not need to treat symbol differences as fully resolved.
 
-      // The type is determined by the fixup kind.
-      Type = RelocType;
+    if (!Asm.getBackend().hasReliableSymbolDifference()) {
+      if (!SA.isTemporary() || !SA.isInSection() || &SecA != &SecB)
+        return false;
+      return true;
     }
-
-    // struct relocation_info (8 bytes)
-    macho::RelocationEntry MRE;
-    MRE.Word0 = FixupOffset;
-    MRE.Word1 = ((Index     <<  0) |
-                 (IsPCRel   << 24) |
-                 (Log2Size  << 25) |
-                 (IsExtern  << 27) |
-                 (Type      << 28));
-    Relocations[Fragment->getParent()].push_back(MRE);
+  } else {
+    if (!TargetObjectWriter->useAggressiveSymbolFolding())
+      return false;
   }
 
-  void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
-                        const MCFragment *Fragment, const MCFixup &Fixup,
-                        MCValue Target, uint64_t &FixedValue) {
-    // FIXME: These needs to be factored into the target Mach-O writer.
-    if (isARM()) {
-      RecordARMRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
-      return;
-    }
-    if (is64Bit()) {
-      RecordX86_64Relocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
-      return;
-    }
+  const MCFragment &FA = *Asm.getSymbolData(SA).getFragment();
 
-    unsigned IsPCRel = isFixupKindPCRel(Asm, Fixup.getKind());
-    unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+  A_Base = FA.getAtom();
+  if (!A_Base)
+    return false;
 
-    // If this is a 32-bit TLVP reloc it's handled a bit differently.
-    if (Target.getSymA() &&
-        Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP) {
-      RecordTLVPRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
-      return;
-    }
+  B_Base = FB.getAtom();
+  if (!B_Base)
+    return false;
 
-    // If this is a difference or a defined symbol plus an offset, then we need
-    // a scattered relocation entry.
-    // Differences always require scattered relocations.
-    if (Target.getSymB())
-        return RecordScatteredRelocation(Asm, Layout, Fragment, Fixup,
-                                         Target, Log2Size, FixedValue);
+  // If the atoms are the same, they are guaranteed to have the same address.
+  if (A_Base == B_Base)
+    return true;
 
-    // Get the symbol data, if any.
-    MCSymbolData *SD = 0;
-    if (Target.getSymA())
-      SD = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+  // Otherwise, we can't prove this is fully resolved.
+  return false;
+}
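
Stripped of the Darwin-specific special cases, the check above reduces to: the A - B difference is an assembly-time constant exactly when both references resolve into the same atom, so the atom's final address cancels out. A compact restatement with a hypothetical AtomRef type (not the MC classes):

    #include <cstdint>

    // Hypothetical flattened view of one side of the difference: the atom it
    // is anchored to (0 == no enclosing atom) and its offset within it.
    struct AtomRef {
      uint64_t AtomId;
      uint64_t Offset;
    };

    // addr(atom(A)) + offset(A) - addr(atom(B)) - offset(B) is constant iff
    // the two atom addresses are guaranteed to cancel, i.e. the same atom.
    bool differenceIsFullyResolved(const AtomRef &A, const AtomRef &B) {
      return A.AtomId != 0 && B.AtomId != 0 && A.AtomId == B.AtomId;
    }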
 
-    // If this is an internal relocation with an offset, it also needs a
-    // scattered relocation entry.
-    uint32_t Offset = Target.getConstant();
-    if (IsPCRel)
-      Offset += 1 << Log2Size;
-    if (Offset && SD && !doesSymbolRequireExternRelocation(SD))
-      return RecordScatteredRelocation(Asm, Layout, Fragment, Fixup,
-                                       Target, Log2Size, FixedValue);
-
-    // See <reloc.h>.
-    uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
-    unsigned Index = 0;
-    unsigned IsExtern = 0;
-    unsigned Type = 0;
-
-    if (Target.isAbsolute()) { // constant
-      // SymbolNum of 0 indicates the absolute section.
-      //
-      // FIXME: Currently, these are never generated (see code below). I cannot
-      // find a case where they are actually emitted.
-      Type = macho::RIT_Vanilla;
-    } else {
-      // Resolve constant variables.
-      if (SD->getSymbol().isVariable()) {
-        int64_t Res;
-        if (SD->getSymbol().getVariableValue()->EvaluateAsAbsolute(
-              Res, Layout, SectionAddress)) {
-          FixedValue = Res;
-          return;
-        }
-      }
+void MachObjectWriter::WriteObject(MCAssembler &Asm,
+                                   const MCAsmLayout &Layout) {
+  unsigned NumSections = Asm.size();
 
-      // Check whether we need an external or internal relocation.
-      if (doesSymbolRequireExternRelocation(SD)) {
-        IsExtern = 1;
-        Index = SD->getIndex();
-        // For external relocations, make sure to offset the fixup value to
-        // compensate for the addend of the symbol address, if it was
-        // undefined. This occurs with weak definitions, for example.
-        if (!SD->Symbol->isUndefined())
-          FixedValue -= Layout.getSymbolOffset(SD);
-      } else {
-        // The index is the section ordinal (1-based).
-        const MCSectionData &SymSD = Asm.getSectionData(
-          SD->getSymbol().getSection());
-        Index = SymSD.getOrdinal() + 1;
-        FixedValue += getSectionAddress(&SymSD);
-      }
-      if (IsPCRel)
-        FixedValue -= getSectionAddress(Fragment->getParent());
+  // The section data starts after the header, the segment load command (and
+  // section headers) and the symbol table.
+  unsigned NumLoadCommands = 1;
+  uint64_t LoadCommandsSize = is64Bit() ?
+    macho::SegmentLoadCommand64Size + NumSections * macho::Section64Size :
+    macho::SegmentLoadCommand32Size + NumSections * macho::Section32Size;
+
+  // Add the symbol table load command sizes, if used.
+  unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
+    UndefinedSymbolData.size();
+  if (NumSymbols) {
+    NumLoadCommands += 2;
+    LoadCommandsSize += (macho::SymtabLoadCommandSize +
+                         macho::DysymtabLoadCommandSize);
+  }
+
+  // Compute the total size of the section data, as well as its file size and vm
+  // size.
+  uint64_t SectionDataStart = (is64Bit() ? macho::Header64Size :
+                               macho::Header32Size) + LoadCommandsSize;
+  uint64_t SectionDataSize = 0;
+  uint64_t SectionDataFileSize = 0;
+  uint64_t VMSize = 0;
+  for (MCAssembler::const_iterator it = Asm.begin(),
+         ie = Asm.end(); it != ie; ++it) {
+    const MCSectionData &SD = *it;
+    uint64_t Address = getSectionAddress(&SD);
+    uint64_t Size = Layout.getSectionAddressSize(&SD);
+    uint64_t FileSize = Layout.getSectionFileSize(&SD);
+    FileSize += getPaddingSize(&SD, Layout);
+
+    VMSize = std::max(VMSize, Address + Size);
 
-      Type = macho::RIT_Vanilla;
-    }
+    if (SD.getSection().isVirtualSection())
+      continue;
 
-    // struct relocation_info (8 bytes)
-    macho::RelocationEntry MRE;
-    MRE.Word0 = FixupOffset;
-    MRE.Word1 = ((Index     <<  0) |
-                 (IsPCRel   << 24) |
-                 (Log2Size  << 25) |
-                 (IsExtern  << 27) |
-                 (Type      << 28));
-    Relocations[Fragment->getParent()].push_back(MRE);
+    SectionDataSize = std::max(SectionDataSize, Address + Size);
+    SectionDataFileSize = std::max(SectionDataFileSize, Address + FileSize);
   }
 
-  void BindIndirectSymbols(MCAssembler &Asm) {
-    // This is the point where 'as' creates actual symbols for indirect symbols
-    // (in the following two passes). It would be easier for us to do this
-    // sooner when we see the attribute, but that makes getting the order in the
-    // symbol table much more complicated than it is worth.
-    //
-    // FIXME: Revisit this when the dust settles.
-
-    // Bind non lazy symbol pointers first.
-    unsigned IndirectIndex = 0;
-    for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
-           ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
-      const MCSectionMachO &Section =
-        cast<MCSectionMachO>(it->SectionData->getSection());
-
-      if (Section.getType() != MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS)
-        continue;
-
-      // Initialize the section indirect symbol base, if necessary.
-      if (!IndirectSymBase.count(it->SectionData))
-        IndirectSymBase[it->SectionData] = IndirectIndex;
-
-      Asm.getOrCreateSymbolData(*it->Symbol);
-    }
-
-    // Then lazy symbol pointers and symbol stubs.
-    IndirectIndex = 0;
-    for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
-           ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
+  // The section data is padded to 4 bytes.
+  //
+  // FIXME: Is this machine dependent?
+  unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
+  SectionDataFileSize += SectionDataPadding;
+
+  // Write the prolog, starting with the header and load command...
+  WriteHeader(NumLoadCommands, LoadCommandsSize,
+              Asm.getSubsectionsViaSymbols());
+  WriteSegmentLoadCommand(NumSections, VMSize,
+                          SectionDataStart, SectionDataSize);
+
+  // ... and then the section headers.
+  uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
+  for (MCAssembler::const_iterator it = Asm.begin(),
+         ie = Asm.end(); it != ie; ++it) {
+    std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
+    unsigned NumRelocs = Relocs.size();
+    uint64_t SectionStart = SectionDataStart + getSectionAddress(it);
+    WriteSection(Asm, Layout, *it, SectionStart, RelocTableEnd, NumRelocs);
+    RelocTableEnd += NumRelocs * macho::RelocationInfoSize;
+  }
+
+  // Write the symbol table load command, if used.
+  if (NumSymbols) {
+    unsigned FirstLocalSymbol = 0;
+    unsigned NumLocalSymbols = LocalSymbolData.size();
+    unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
+    unsigned NumExternalSymbols = ExternalSymbolData.size();
+    unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
+    unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
+    unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
+    unsigned NumSymTabSymbols =
+      NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
+    uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
+    uint64_t IndirectSymbolOffset = 0;
+
+    // If used, the indirect symbols are written after the section data.
+    if (NumIndirectSymbols)
+      IndirectSymbolOffset = RelocTableEnd;
+
+    // The symbol table is written after the indirect symbol data.
+    uint64_t SymbolTableOffset = RelocTableEnd + IndirectSymbolSize;
+
+    // The string table is written after the symbol table.
+    uint64_t StringTableOffset =
+      SymbolTableOffset + NumSymTabSymbols * (is64Bit() ? macho::Nlist64Size :
+                                              macho::Nlist32Size);
+    WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
+                           StringTableOffset, StringTable.size());
+
+    WriteDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
+                             FirstExternalSymbol, NumExternalSymbols,
+                             FirstUndefinedSymbol, NumUndefinedSymbols,
+                             IndirectSymbolOffset, NumIndirectSymbols);
+  }
+
+  // Write the actual section data.
+  for (MCAssembler::const_iterator it = Asm.begin(),
+         ie = Asm.end(); it != ie; ++it) {
+    Asm.WriteSectionData(it, Layout);
+
+    uint64_t Pad = getPaddingSize(it, Layout);
+    for (unsigned int i = 0; i < Pad; ++i)
+      Write8(0);
+  }
+
+  // Write the extra padding.
+  WriteZeros(SectionDataPadding);
+
+  // Write the relocation entries.
+  for (MCAssembler::const_iterator it = Asm.begin(),
+         ie = Asm.end(); it != ie; ++it) {
+    // Write the section relocation entries, in reverse order to match 'as'
+    // (approximately, the exact algorithm is more complicated than this).
+    std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
+    for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
+      Write32(Relocs[e - i - 1].Word0);
+      Write32(Relocs[e - i - 1].Word1);
+    }
+  }
+
+  // Write the symbol table data, if used.
+  if (NumSymbols) {
+    // Write the indirect symbol entries.
+    for (MCAssembler::const_indirect_symbol_iterator
+           it = Asm.indirect_symbol_begin(),
+           ie = Asm.indirect_symbol_end(); it != ie; ++it) {
+      // Indirect symbols in the non-lazy symbol pointer section have some
+      // special handling.
       const MCSectionMachO &Section =
-        cast<MCSectionMachO>(it->SectionData->getSection());
-
-      if (Section.getType() != MCSectionMachO::S_LAZY_SYMBOL_POINTERS &&
-          Section.getType() != MCSectionMachO::S_SYMBOL_STUBS)
-        continue;
-
-      // Initialize the section indirect symbol base, if necessary.
-      if (!IndirectSymBase.count(it->SectionData))
-        IndirectSymBase[it->SectionData] = IndirectIndex;
-
-      // Set the symbol type to undefined lazy, but only on construction.
-      //
-      // FIXME: Do not hardcode.
-      bool Created;
-      MCSymbolData &Entry = Asm.getOrCreateSymbolData(*it->Symbol, &Created);
-      if (Created)
-        Entry.setFlags(Entry.getFlags() | 0x0001);
-    }
-  }
-
-  /// ComputeSymbolTable - Compute the symbol table data
-  ///
-  /// \param StringTable [out] - The string table data.
-  /// \param StringIndexMap [out] - Map from symbol names to offsets in the
-  /// string table.
-  void ComputeSymbolTable(MCAssembler &Asm, SmallString<256> &StringTable,
-                          std::vector<MachSymbolData> &LocalSymbolData,
-                          std::vector<MachSymbolData> &ExternalSymbolData,
-                          std::vector<MachSymbolData> &UndefinedSymbolData) {
-    // Build section lookup table.
-    DenseMap<const MCSection*, uint8_t> SectionIndexMap;
-    unsigned Index = 1;
-    for (MCAssembler::iterator it = Asm.begin(),
-           ie = Asm.end(); it != ie; ++it, ++Index)
-      SectionIndexMap[&it->getSection()] = Index;
-    assert(Index <= 256 && "Too many sections!");
-
-    // Index 0 is always the empty string.
-    StringMap<uint64_t> StringIndexMap;
-    StringTable += '\x00';
-
-    // Build the symbol arrays and the string table, but only for non-local
-    // symbols.
-    //
-    // The particular order that we collect the symbols and create the string
-    // table, then sort the symbols is chosen to match 'as'. Even though it
-    // doesn't matter for correctness, this is important for letting us diff .o
-    // files.
-    for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
-           ie = Asm.symbol_end(); it != ie; ++it) {
-      const MCSymbol &Symbol = it->getSymbol();
-
-      // Ignore non-linker visible symbols.
-      if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
-        continue;
-
-      if (!it->isExternal() && !Symbol.isUndefined())
-        continue;
-
-      uint64_t &Entry = StringIndexMap[Symbol.getName()];
-      if (!Entry) {
-        Entry = StringTable.size();
-        StringTable += Symbol.getName();
-        StringTable += '\x00';
-      }
-
-      MachSymbolData MSD;
-      MSD.SymbolData = it;
-      MSD.StringIndex = Entry;
-
-      if (Symbol.isUndefined()) {
-        MSD.SectionIndex = 0;
-        UndefinedSymbolData.push_back(MSD);
-      } else if (Symbol.isAbsolute()) {
-        MSD.SectionIndex = 0;
-        ExternalSymbolData.push_back(MSD);
-      } else {
-        MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
-        assert(MSD.SectionIndex && "Invalid section index!");
-        ExternalSymbolData.push_back(MSD);
-      }
-    }
-
-    // Now add the data for local symbols.
-    for (MCAssembler::symbol_iterator it = Asm.symbol_begin(),
-           ie = Asm.symbol_end(); it != ie; ++it) {
-      const MCSymbol &Symbol = it->getSymbol();
-
-      // Ignore non-linker visible symbols.
-      if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
-        continue;
-
-      if (it->isExternal() || Symbol.isUndefined())
-        continue;
-
-      uint64_t &Entry = StringIndexMap[Symbol.getName()];
-      if (!Entry) {
-        Entry = StringTable.size();
-        StringTable += Symbol.getName();
-        StringTable += '\x00';
+        static_cast<const MCSectionMachO&>(it->SectionData->getSection());
+      if (Section.getType() == MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS) {
+        // If this symbol is defined and internal, mark it as such.
+        if (it->Symbol->isDefined() &&
+            !Asm.getSymbolData(*it->Symbol).isExternal()) {
+          uint32_t Flags = macho::ISF_Local;
+          if (it->Symbol->isAbsolute())
+            Flags |= macho::ISF_Absolute;
+          Write32(Flags);
+          continue;
+        }
       }
 
-      MachSymbolData MSD;
-      MSD.SymbolData = it;
-      MSD.StringIndex = Entry;
-
-      if (Symbol.isAbsolute()) {
-        MSD.SectionIndex = 0;
-        LocalSymbolData.push_back(MSD);
-      } else {
-        MSD.SectionIndex = SectionIndexMap.lookup(&Symbol.getSection());
-        assert(MSD.SectionIndex && "Invalid section index!");
-        LocalSymbolData.push_back(MSD);
-      }
+      Write32(Asm.getSymbolData(*it->Symbol).getIndex());
     }
 
-    // External and undefined symbols are required to be in lexicographic order.
-    std::sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
-    std::sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
+    // FIXME: Check that offsets match computed ones.
 
-    // Set the symbol indices.
-    Index = 0;
+    // Write the symbol table entries.
     for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
-      LocalSymbolData[i].SymbolData->setIndex(Index++);
+      WriteNlist(LocalSymbolData[i], Layout);
     for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
-      ExternalSymbolData[i].SymbolData->setIndex(Index++);
+      WriteNlist(ExternalSymbolData[i], Layout);
     for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
-      UndefinedSymbolData[i].SymbolData->setIndex(Index++);
-
-    // The string table is padded to a multiple of 4.
-    while (StringTable.size() % 4)
-      StringTable += '\x00';
-  }
-
-  void computeSectionAddresses(const MCAssembler &Asm,
-                               const MCAsmLayout &Layout) {
-    uint64_t StartAddress = 0;
-    const SmallVectorImpl<MCSectionData*> &Order = Layout.getSectionOrder();
-    for (int i = 0, n = Order.size(); i != n ; ++i) {
-      const MCSectionData *SD = Order[i];
-      StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());
-      SectionAddress[SD] = StartAddress;
-      StartAddress += Layout.getSectionAddressSize(SD);
-      // Explicitly pad the section to match the alignment requirements of the
-      // following one. This is for 'gas' compatibility, it shouldn't
-      /// strictly be necessary.
-      StartAddress += getPaddingSize(SD, Layout);
-    }
-  }
-
-  void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout) {
-    computeSectionAddresses(Asm, Layout);
-
-    // Create symbol data for any indirect symbols.
-    BindIndirectSymbols(Asm);
-
-    // Compute symbol table information and bind symbol indices.
-    ComputeSymbolTable(Asm, StringTable, LocalSymbolData, ExternalSymbolData,
-                       UndefinedSymbolData);
-  }
-
-  virtual bool IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
-                                                      const MCSymbolData &DataA,
-                                                      const MCFragment &FB,
-                                                      bool InSet,
-                                                      bool IsPCRel) const {
-    if (InSet)
-      return true;
-
-    // The effective address is
-    //     addr(atom(A)) + offset(A)
-    //   - addr(atom(B)) - offset(B)
-    // and the offsets are not relocatable, so the fixup is fully resolved when
-    //  addr(atom(A)) - addr(atom(B)) == 0.
-    const MCSymbolData *A_Base = 0, *B_Base = 0;
-
-    const MCSymbol &SA = DataA.getSymbol().AliasedSymbol();
-    const MCSection &SecA = SA.getSection();
-    const MCSection &SecB = FB.getParent()->getSection();
-
-    if (IsPCRel) {
-      // The simple (Darwin, except on x86_64) way of dealing with this was to
-      // assume that any reference to a temporary symbol *must* be a temporary
-      // symbol in the same atom, unless the sections differ. Therefore, any
-      // PCrel relocation to a temporary symbol (in the same section) is fully
-      // resolved. This also works in conjunction with absolutized .set, which
-      // requires the compiler to use .set to absolutize the differences between
-      // symbols which the compiler knows to be assembly time constants, so we
-      // don't need to worry about considering symbol differences fully
-      // resolved.
-
-      if (!Asm.getBackend().hasReliableSymbolDifference()) {
-        if (!SA.isTemporary() || !SA.isInSection() || &SecA != &SecB)
-          return false;
-        return true;
-      }
-    } else {
-      if (!TargetObjectWriter->useAggressiveSymbolFolding())
-        return false;
-    }
-
-    const MCFragment &FA = *Asm.getSymbolData(SA).getFragment();
-
-    A_Base = FA.getAtom();
-    if (!A_Base)
-      return false;
+      WriteNlist(UndefinedSymbolData[i], Layout);
 
-    B_Base = FB.getAtom();
-    if (!B_Base)
-      return false;
-
-    // If the atoms are the same, they are guaranteed to have the same address.
-    if (A_Base == B_Base)
-      return true;
-
-    // Otherwise, we can't prove this is fully resolved.
-    return false;
+    // Write the string table.
+    OS << StringTable.str();
   }
-
-  void WriteObject(MCAssembler &Asm, const MCAsmLayout &Layout) {
-    unsigned NumSections = Asm.size();
-
-    // The section data starts after the header, the segment load command (and
-    // section headers) and the symbol table.
-    unsigned NumLoadCommands = 1;
-    uint64_t LoadCommandsSize = is64Bit() ?
-      macho::SegmentLoadCommand64Size + NumSections * macho::Section64Size :
-      macho::SegmentLoadCommand32Size + NumSections * macho::Section32Size;
-
-    // Add the symbol table load command sizes, if used.
-    unsigned NumSymbols = LocalSymbolData.size() + ExternalSymbolData.size() +
-      UndefinedSymbolData.size();
-    if (NumSymbols) {
-      NumLoadCommands += 2;
-      LoadCommandsSize += (macho::SymtabLoadCommandSize +
-                           macho::DysymtabLoadCommandSize);
-    }
-
-    // Compute the total size of the section data, as well as its file size and
-    // vm size.
-    uint64_t SectionDataStart = (is64Bit() ? macho::Header64Size :
-                                 macho::Header32Size) + LoadCommandsSize;
-    uint64_t SectionDataSize = 0;
-    uint64_t SectionDataFileSize = 0;
-    uint64_t VMSize = 0;
-    for (MCAssembler::const_iterator it = Asm.begin(),
-           ie = Asm.end(); it != ie; ++it) {
-      const MCSectionData &SD = *it;
-      uint64_t Address = getSectionAddress(&SD);
-      uint64_t Size = Layout.getSectionAddressSize(&SD);
-      uint64_t FileSize = Layout.getSectionFileSize(&SD);
-      FileSize += getPaddingSize(&SD, Layout);
-
-      VMSize = std::max(VMSize, Address + Size);
-
-      if (SD.getSection().isVirtualSection())
-        continue;
-
-      SectionDataSize = std::max(SectionDataSize, Address + Size);
-      SectionDataFileSize = std::max(SectionDataFileSize, Address + FileSize);
-    }
-
-    // The section data is padded to 4 bytes.
-    //
-    // FIXME: Is this machine dependent?
-    unsigned SectionDataPadding = OffsetToAlignment(SectionDataFileSize, 4);
-    SectionDataFileSize += SectionDataPadding;
-
-    // Write the prolog, starting with the header and load command...
-    WriteHeader(NumLoadCommands, LoadCommandsSize,
-                Asm.getSubsectionsViaSymbols());
-    WriteSegmentLoadCommand(NumSections, VMSize,
-                            SectionDataStart, SectionDataSize);
-
-    // ... and then the section headers.
-    uint64_t RelocTableEnd = SectionDataStart + SectionDataFileSize;
-    for (MCAssembler::const_iterator it = Asm.begin(),
-           ie = Asm.end(); it != ie; ++it) {
-      std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
-      unsigned NumRelocs = Relocs.size();
-      uint64_t SectionStart = SectionDataStart + getSectionAddress(it);
-      WriteSection(Asm, Layout, *it, SectionStart, RelocTableEnd, NumRelocs);
-      RelocTableEnd += NumRelocs * macho::RelocationInfoSize;
-    }
-
-    // Write the symbol table load command, if used.
-    if (NumSymbols) {
-      unsigned FirstLocalSymbol = 0;
-      unsigned NumLocalSymbols = LocalSymbolData.size();
-      unsigned FirstExternalSymbol = FirstLocalSymbol + NumLocalSymbols;
-      unsigned NumExternalSymbols = ExternalSymbolData.size();
-      unsigned FirstUndefinedSymbol = FirstExternalSymbol + NumExternalSymbols;
-      unsigned NumUndefinedSymbols = UndefinedSymbolData.size();
-      unsigned NumIndirectSymbols = Asm.indirect_symbol_size();
-      unsigned NumSymTabSymbols =
-        NumLocalSymbols + NumExternalSymbols + NumUndefinedSymbols;
-      uint64_t IndirectSymbolSize = NumIndirectSymbols * 4;
-      uint64_t IndirectSymbolOffset = 0;
-
-      // If used, the indirect symbols are written after the section data.
-      if (NumIndirectSymbols)
-        IndirectSymbolOffset = RelocTableEnd;
-
-      // The symbol table is written after the indirect symbol data.
-      uint64_t SymbolTableOffset = RelocTableEnd + IndirectSymbolSize;
-
-      // The string table is written after symbol table.
-      uint64_t StringTableOffset =
-        SymbolTableOffset + NumSymTabSymbols * (is64Bit() ? macho::Nlist64Size :
-                                                macho::Nlist32Size);
-      WriteSymtabLoadCommand(SymbolTableOffset, NumSymTabSymbols,
-                             StringTableOffset, StringTable.size());
-
-      WriteDysymtabLoadCommand(FirstLocalSymbol, NumLocalSymbols,
-                               FirstExternalSymbol, NumExternalSymbols,
-                               FirstUndefinedSymbol, NumUndefinedSymbols,
-                               IndirectSymbolOffset, NumIndirectSymbols);
-    }
-
-    // Write the actual section data.
-    for (MCAssembler::const_iterator it = Asm.begin(),
-           ie = Asm.end(); it != ie; ++it) {
-      Asm.WriteSectionData(it, Layout);
-
-      uint64_t Pad = getPaddingSize(it, Layout);
-      for (unsigned int i = 0; i < Pad; ++i)
-        Write8(0);
-    }
-
-    // Write the extra padding.
-    WriteZeros(SectionDataPadding);
-
-    // Write the relocation entries.
-    for (MCAssembler::const_iterator it = Asm.begin(),
-           ie = Asm.end(); it != ie; ++it) {
-      // Write the section relocation entries, in reverse order to match 'as'
-      // (approximately, the exact algorithm is more complicated than this).
-      std::vector<macho::RelocationEntry> &Relocs = Relocations[it];
-      for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
-        Write32(Relocs[e - i - 1].Word0);
-        Write32(Relocs[e - i - 1].Word1);
-      }
-    }
-
-    // Write the symbol table data, if used.
-    if (NumSymbols) {
-      // Write the indirect symbol entries.
-      for (MCAssembler::const_indirect_symbol_iterator
-             it = Asm.indirect_symbol_begin(),
-             ie = Asm.indirect_symbol_end(); it != ie; ++it) {
-        // Indirect symbols in the non lazy symbol pointer section have some
-        // special handling.
-        const MCSectionMachO &Section =
-          static_cast<const MCSectionMachO&>(it->SectionData->getSection());
-        if (Section.getType() == MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS) {
-          // If this symbol is defined and internal, mark it as such.
-          if (it->Symbol->isDefined() &&
-              !Asm.getSymbolData(*it->Symbol).isExternal()) {
-            uint32_t Flags = macho::ISF_Local;
-            if (it->Symbol->isAbsolute())
-              Flags |= macho::ISF_Absolute;
-            Write32(Flags);
-            continue;
-          }
-        }
-
-        Write32(Asm.getSymbolData(*it->Symbol).getIndex());
-      }
-
-      // FIXME: Check that offsets match computed ones.
-
-      // Write the symbol table entries.
-      for (unsigned i = 0, e = LocalSymbolData.size(); i != e; ++i)
-        WriteNlist(LocalSymbolData[i], Layout);
-      for (unsigned i = 0, e = ExternalSymbolData.size(); i != e; ++i)
-        WriteNlist(ExternalSymbolData[i], Layout);
-      for (unsigned i = 0, e = UndefinedSymbolData.size(); i != e; ++i)
-        WriteNlist(UndefinedSymbolData[i], Layout);
-
-      // Write the string table.
-      OS << StringTable.str();
-    }
-  }
-};
-
 }
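As an aside for readers tracing the new WriteObject() above: the writer lays the file
out as header, load commands, section data (padded to 4 bytes), relocation entries,
indirect symbols, the nlist symbol table, and finally the string table.  Below is a
minimal standalone sketch of that offset arithmetic; the struct and parameter names
are illustrative placeholders, not the macho:: constants the real writer uses.

    #include <cstdint>

    // Toy file layout: header | load commands | section data (+pad to 4) |
    // relocations | indirect symbols | symbol table | string table.
    struct ToyMachOLayout {
      uint64_t SectionDataStart, RelocTableEnd, IndirectSymbolOffset,
               SymbolTableOffset, StringTableOffset;
    };

    static ToyMachOLayout layoutToyMachO(uint64_t HeaderSize,
                                         uint64_t LoadCommandsSize,
                                         uint64_t SectionDataFileSize,
                                         uint64_t RelocBytes,
                                         uint64_t NumIndirectSymbols,
                                         uint64_t NumSymbols,
                                         uint64_t NlistSize) {
      ToyMachOLayout L;
      L.SectionDataStart = HeaderSize + LoadCommandsSize;
      // Pad the section data file size to a 4-byte boundary, as the writer does.
      uint64_t Padded = (SectionDataFileSize + 3) & ~uint64_t(3);
      L.RelocTableEnd        = L.SectionDataStart + Padded + RelocBytes;
      L.IndirectSymbolOffset = L.RelocTableEnd;  // 4 bytes per indirect symbol.
      L.SymbolTableOffset    = L.IndirectSymbolOffset + NumIndirectSymbols * 4;
      L.StringTableOffset    = L.SymbolTableOffset + NumSymbols * NlistSize;
      return L;
    }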
 
 MCObjectWriter *llvm::createMachObjectWriter(MCMachObjectTargetWriter *MOTW,

Modified: llvm/branches/type-system-rewrite/lib/Object/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Object/CMakeLists.txt?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Object/CMakeLists.txt (original)
+++ llvm/branches/type-system-rewrite/lib/Object/CMakeLists.txt Sat Jul  2 22:28:07 2011
@@ -1,6 +1,8 @@
 add_llvm_library(LLVMObject
+  Binary.cpp
   COFFObjectFile.cpp
   ELFObjectFile.cpp
+  Error.cpp
   MachOObject.cpp
   MachOObjectFile.cpp
   Object.cpp

Modified: llvm/branches/type-system-rewrite/lib/Object/COFFObjectFile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Object/COFFObjectFile.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Object/COFFObjectFile.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Object/COFFObjectFile.cpp Sat Jul  2 22:28:07 2011
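The changes to this file switch every accessor from returning a value to returning an
error_code and writing the answer through a reference parameter, so malformed inputs
surface as object_error::parse_failed or object_error::unexpected_eof instead of
crashing.  A self-contained sketch of that calling convention follows; it uses toy
names (ToyTable, toy_error) rather than the LLVM types, purely for illustration.

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-ins for llvm::error_code / object_error, so the sketch compiles
    // on its own; the real patch uses llvm/Object/Error.h.
    enum toy_error { toy_success = 0, toy_parse_failed };

    struct ToyTable {
      std::vector<std::string> Names;

      // Same convention as the patched accessors: return an error code and
      // fill the out-parameter only on success.
      toy_error getName(unsigned Index, std::string &Result) const {
        if (Index >= Names.size())
          return toy_parse_failed;
        Result = Names[Index];
        return toy_success;
      }
    };

    int main() {
      ToyTable T;
      T.Names.push_back(".text");
      std::string Name;
      if (toy_error ec = T.getName(0, Name))  // non-zero means failure
        return ec;
      std::printf("%s\n", Name.c_str());
      return 0;
    }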
@@ -11,11 +11,9 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Object/COFF.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/ADT/Triple.h"
-#include "llvm/Object/ObjectFile.h"
-#include "llvm/Support/COFF.h"
-#include "llvm/Support/Endian.h"
 
 using namespace llvm;
 using namespace object;
@@ -28,174 +26,164 @@
 }
 
 namespace {
-struct coff_file_header {
-  ulittle16_t Machine;
-  ulittle16_t NumberOfSections;
-  ulittle32_t TimeDateStamp;
-  ulittle32_t PointerToSymbolTable;
-  ulittle32_t NumberOfSymbols;
-  ulittle16_t SizeOfOptionalHeader;
-  ulittle16_t Characteristics;
-};
+// Returns false (and sets ec) if size is greater than the buffer size.
+bool checkSize(const MemoryBuffer *m, error_code &ec, uint64_t size) {
+  if (m->getBufferSize() < size) {
+    ec = object_error::unexpected_eof;
+    return false;
+  }
+  return true;
 }
 
-extern char coff_file_header_layout_static_assert
-            [sizeof(coff_file_header) == 20 ? 1 : -1];
-
-namespace {
-struct coff_symbol {
-  struct StringTableOffset {
-    ulittle32_t Zeroes;
-    ulittle32_t Offset;
-  };
-
-  union {
-    char ShortName[8];
-    StringTableOffset Offset;
-  } Name;
-
-  ulittle32_t Value;
-  little16_t SectionNumber;
-
-  struct {
-    ulittle8_t BaseType;
-    ulittle8_t ComplexType;
-  } Type;
-
-  ulittle8_t  StorageClass;
-  ulittle8_t  NumberOfAuxSymbols;
-};
+// Returns false (and sets ec) if any bytes in [addr, addr + size) fall outside of m.
+bool checkAddr(const MemoryBuffer *m,
+               error_code &ec,
+               uintptr_t addr,
+               uint64_t size) {
+  if (addr + size < addr ||
+      addr + size < size ||
+      addr + size > uintptr_t(m->getBufferEnd())) {
+    ec = object_error::unexpected_eof;
+    return false;
+  }
+  return true;
+}
 }
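// Aside (comments only, not part of the patch): the constructor below combines
// these two guards -- checkSize() to ensure the buffer can hold a structure at
// all, then checkAddr() to ensure a range computed from header fields stays
// inside the mapped file:
//
//   if (!checkSize(Data, ec, sizeof(coff_file_header))) return;
//   if (!checkAddr(Data, ec, uintptr_t(Header), sizeof(coff_file_header)))
//     return;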
 
-extern char coff_coff_symbol_layout_static_assert
-            [sizeof(coff_symbol) == 18 ? 1 : -1];
+const coff_symbol *COFFObjectFile::toSymb(DataRefImpl Symb) const {
+  const coff_symbol *addr = reinterpret_cast<const coff_symbol*>(Symb.p);
 
-namespace {
-struct coff_section {
-  char Name[8];
-  ulittle32_t VirtualSize;
-  ulittle32_t VirtualAddress;
-  ulittle32_t SizeOfRawData;
-  ulittle32_t PointerToRawData;
-  ulittle32_t PointerToRelocations;
-  ulittle32_t PointerToLinenumbers;
-  ulittle16_t NumberOfRelocations;
-  ulittle16_t NumberOfLinenumbers;
-  ulittle32_t Characteristics;
-};
+# ifndef NDEBUG
+  // Verify that the symbol points to a valid entry in the symbol table.
+  uintptr_t offset = uintptr_t(addr) - uintptr_t(base());
+  if (offset < Header->PointerToSymbolTable
+      || offset >= Header->PointerToSymbolTable
+         + (Header->NumberOfSymbols * sizeof(coff_symbol)))
+    report_fatal_error("Symbol was outside of symbol table.");
+
+  assert((offset - Header->PointerToSymbolTable) % sizeof(coff_symbol)
+         == 0 && "Symbol did not point to the beginning of a symbol");
+# endif
+
+  return addr;
 }
 
-extern char coff_coff_section_layout_static_assert
-            [sizeof(coff_section) == 40 ? 1 : -1];
+const coff_section *COFFObjectFile::toSec(DataRefImpl Sec) const {
+  const coff_section *addr = reinterpret_cast<const coff_section*>(Sec.p);
 
-namespace {
-class COFFObjectFile : public ObjectFile {
-private:
-        uint64_t         HeaderOff;
-  const coff_file_header *Header;
-  const coff_section     *SectionTable;
-  const coff_symbol      *SymbolTable;
-  const char             *StringTable;
-
-  const coff_section     *getSection(std::size_t index) const;
-  const char             *getString(std::size_t offset) const;
-
-protected:
-  virtual SymbolRef getSymbolNext(DataRefImpl Symb) const;
-  virtual StringRef getSymbolName(DataRefImpl Symb) const;
-  virtual uint64_t  getSymbolAddress(DataRefImpl Symb) const;
-  virtual uint64_t  getSymbolSize(DataRefImpl Symb) const;
-  virtual char      getSymbolNMTypeChar(DataRefImpl Symb) const;
-  virtual bool      isSymbolInternal(DataRefImpl Symb) const;
-
-  virtual SectionRef getSectionNext(DataRefImpl Sec) const;
-  virtual StringRef  getSectionName(DataRefImpl Sec) const;
-  virtual uint64_t   getSectionAddress(DataRefImpl Sec) const;
-  virtual uint64_t   getSectionSize(DataRefImpl Sec) const;
-  virtual StringRef  getSectionContents(DataRefImpl Sec) const;
-  virtual bool       isSectionText(DataRefImpl Sec) const;
-
-public:
-  COFFObjectFile(MemoryBuffer *Object);
-  virtual symbol_iterator begin_symbols() const;
-  virtual symbol_iterator end_symbols() const;
-  virtual section_iterator begin_sections() const;
-  virtual section_iterator end_sections() const;
-
-  virtual uint8_t getBytesInAddress() const;
-  virtual StringRef getFileFormatName() const;
-  virtual unsigned getArch() const;
-};
-} // end namespace
+# ifndef NDEBUG
+  // Verify that the section points to a valid entry in the section table.
+  if (addr < SectionTable
+      || addr >= (SectionTable + Header->NumberOfSections))
+    report_fatal_error("Section was outside of section table.");
+
+  uintptr_t offset = uintptr_t(addr) - uintptr_t(SectionTable);
+  assert(offset % sizeof(coff_section) == 0 &&
+         "Section did not point to the beginning of a section");
+# endif
+
+  return addr;
+}
 
-SymbolRef COFFObjectFile::getSymbolNext(DataRefImpl Symb) const {
-  const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+error_code COFFObjectFile::getSymbolNext(DataRefImpl Symb,
+                                         SymbolRef &Result) const {
+  const coff_symbol *symb = toSymb(Symb);
   symb += 1 + symb->NumberOfAuxSymbols;
-  Symb.p = reinterpret_cast<intptr_t>(symb);
-  return SymbolRef(Symb, this);
+  Symb.p = reinterpret_cast<uintptr_t>(symb);
+  Result = SymbolRef(Symb, this);
+  return object_error::success;
 }
 
-StringRef COFFObjectFile::getSymbolName(DataRefImpl Symb) const {
-  const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
+error_code COFFObjectFile::getSymbolName(DataRefImpl Symb,
+                                         StringRef &Result) const {
+  const coff_symbol *symb = toSymb(Symb);
   // Check for string table entry. First 4 bytes are 0.
   if (symb->Name.Offset.Zeroes == 0) {
     uint32_t Offset = symb->Name.Offset.Offset;
-    return StringRef(getString(Offset));
+    if (error_code ec = getString(Offset, Result))
+      return ec;
+    return object_error::success;
   }
 
   if (symb->Name.ShortName[7] == 0)
     // Null terminated, let ::strlen figure out the length.
-    return StringRef(symb->Name.ShortName);
-  // Not null terminated, use all 8 bytes.
-  return StringRef(symb->Name.ShortName, 8);
+    Result = StringRef(symb->Name.ShortName);
+  else
+    // Not null terminated, use all 8 bytes.
+    Result = StringRef(symb->Name.ShortName, 8);
+  return object_error::success;
 }
 
-uint64_t COFFObjectFile::getSymbolAddress(DataRefImpl Symb) const {
-  const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
-  const coff_section *Section = getSection(symb->SectionNumber);
-  char Type = getSymbolNMTypeChar(Symb);
+error_code COFFObjectFile::getSymbolAddress(DataRefImpl Symb,
+                                            uint64_t &Result) const {
+  const coff_symbol *symb = toSymb(Symb);
+  const coff_section *Section;
+  if (error_code ec = getSection(symb->SectionNumber, Section))
+    return ec;
+  char Type;
+  if (error_code ec = getSymbolNMTypeChar(Symb, Type))
+    return ec;
   if (Type == 'U' || Type == 'w')
-    return UnknownAddressOrSize;
-  if (Section)
-    return Section->VirtualAddress + symb->Value;
-  return symb->Value;
+    Result = UnknownAddressOrSize;
+  else if (Section)
+    Result = Section->VirtualAddress + symb->Value;
+  else
+    Result = symb->Value;
+  return object_error::success;
 }
 
-uint64_t COFFObjectFile::getSymbolSize(DataRefImpl Symb) const {
+error_code COFFObjectFile::getSymbolSize(DataRefImpl Symb,
+                                         uint64_t &Result) const {
   // FIXME: Return the correct size. This requires looking at all the symbols
   //        in the same section as this symbol, and looking for either the next
   //        symbol, or the end of the section.
-  const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
-  const coff_section *Section = getSection(symb->SectionNumber);
-  char Type = getSymbolNMTypeChar(Symb);
+  const coff_symbol *symb = toSymb(Symb);
+  const coff_section *Section;
+  if (error_code ec = getSection(symb->SectionNumber, Section))
+    return ec;
+  char Type;
+  if (error_code ec = getSymbolNMTypeChar(Symb, Type))
+    return ec;
   if (Type == 'U' || Type == 'w')
-    return UnknownAddressOrSize;
-  if (Section)
-    return Section->SizeOfRawData - symb->Value;
-  return 0;
+    Result = UnknownAddressOrSize;
+  else if (Section)
+    Result = Section->SizeOfRawData - symb->Value;
+  else
+    Result = 0;
+  return object_error::success;
 }
 
-char COFFObjectFile::getSymbolNMTypeChar(DataRefImpl Symb) const {
-  const coff_symbol *symb = reinterpret_cast<const coff_symbol*>(Symb.p);
-  char ret = StringSwitch<char>(getSymbolName(Symb))
+error_code COFFObjectFile::getSymbolNMTypeChar(DataRefImpl Symb,
+                                               char &Result) const {
+  const coff_symbol *symb = toSymb(Symb);
+  StringRef name;
+  if (error_code ec = getSymbolName(Symb, name))
+    return ec;
+  char ret = StringSwitch<char>(name)
     .StartsWith(".debug", 'N')
     .StartsWith(".sxdata", 'N')
     .Default('?');
 
-  if (ret != '?')
-    return ret;
+  if (ret != '?') {
+    Result = ret;
+    return object_error::success;
+  }
 
   uint32_t Characteristics = 0;
-  if (const coff_section *Section = getSection(symb->SectionNumber)) {
+  if (symb->SectionNumber > 0) {
+    const coff_section *Section;
+    if (error_code ec = getSection(symb->SectionNumber, Section))
+      return ec;
     Characteristics = Section->Characteristics;
   }
 
   switch (symb->SectionNumber) {
   case COFF::IMAGE_SYM_UNDEFINED:
     // Check storage classes.
-    if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL)
-      return 'w'; // Don't do ::toupper.
-    else
+    if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL) {
+      Result = 'w';
+      return object_error::success; // Don't do ::toupper.
+    } else
       ret = 'u';
     break;
   case COFF::IMAGE_SYM_ABSOLUTE:
@@ -227,22 +215,28 @@
   if (symb->StorageClass == COFF::IMAGE_SYM_CLASS_EXTERNAL)
     ret = ::toupper(ret);
 
-  return ret;
+  Result = ret;
+  return object_error::success;
 }
 
-bool COFFObjectFile::isSymbolInternal(DataRefImpl Symb) const {
-  return false;
+error_code COFFObjectFile::isSymbolInternal(DataRefImpl Symb,
+                                            bool &Result) const {
+  Result = false;
+  return object_error::success;
 }
 
-SectionRef COFFObjectFile::getSectionNext(DataRefImpl Sec) const {
-  const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+error_code COFFObjectFile::getSectionNext(DataRefImpl Sec,
+                                          SectionRef &Result) const {
+  const coff_section *sec = toSec(Sec);
   sec += 1;
-  Sec.p = reinterpret_cast<intptr_t>(sec);
-  return SectionRef(Sec, this);
+  Sec.p = reinterpret_cast<uintptr_t>(sec);
+  Result = SectionRef(Sec, this);
+  return object_error::success;
 }
 
-StringRef COFFObjectFile::getSectionName(DataRefImpl Sec) const {
-  const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
+error_code COFFObjectFile::getSectionName(DataRefImpl Sec,
+                                          StringRef &Result) const {
+  const coff_section *sec = toSec(Sec);
   StringRef name;
   if (sec->Name[7] == 0)
     // Null terminated, let ::strlen figure out the length.
@@ -255,64 +249,117 @@
   if (name[0] == '/') {
     uint32_t Offset;
     name.substr(1).getAsInteger(10, Offset);
-    return StringRef(getString(Offset));
+    if (error_code ec = getString(Offset, name))
+      return ec;
   }
 
-  // It's just a normal name.
-  return name;
-}
-
-uint64_t COFFObjectFile::getSectionAddress(DataRefImpl Sec) const {
-  const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
-  return sec->VirtualAddress;
-}
-
-uint64_t COFFObjectFile::getSectionSize(DataRefImpl Sec) const {
-  const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
-  return sec->SizeOfRawData;
+  Result = name;
+  return object_error::success;
 }
 
-StringRef COFFObjectFile::getSectionContents(DataRefImpl Sec) const {
-  const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
-  return StringRef(reinterpret_cast<const char *>(base + sec->PointerToRawData),
-                   sec->SizeOfRawData);
-}
+error_code COFFObjectFile::getSectionAddress(DataRefImpl Sec,
+                                             uint64_t &Result) const {
+  const coff_section *sec = toSec(Sec);
+  Result = sec->VirtualAddress;
+  return object_error::success;
+}
+
+error_code COFFObjectFile::getSectionSize(DataRefImpl Sec,
+                                          uint64_t &Result) const {
+  const coff_section *sec = toSec(Sec);
+  Result = sec->SizeOfRawData;
+  return object_error::success;
+}
+
+error_code COFFObjectFile::getSectionContents(DataRefImpl Sec,
+                                              StringRef &Result) const {
+  const coff_section *sec = toSec(Sec);
+  // The only thing that we need to verify is that the contents are contained
+  // within the file bounds. We don't need to make sure they don't cover other
+  // data, as there's nothing that says that is not allowed.
+  uintptr_t con_start = uintptr_t(base()) + sec->PointerToRawData;
+  uintptr_t con_end = con_start + sec->SizeOfRawData;
+  if (con_end >= uintptr_t(Data->getBufferEnd()))
+    return object_error::parse_failed;
+  Result = StringRef(reinterpret_cast<const char*>(con_start),
+                     sec->SizeOfRawData);
+  return object_error::success;
+}
+
+error_code COFFObjectFile::isSectionText(DataRefImpl Sec,
+                                         bool &Result) const {
+  const coff_section *sec = toSec(Sec);
+  Result = sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE;
+  return object_error::success;
+}
+
+COFFObjectFile::COFFObjectFile(MemoryBuffer *Object, error_code &ec)
+  : ObjectFile(Binary::isCOFF, Object, ec) {
+  // Check that we at least have enough room for a header.
+  if (!checkSize(Data, ec, sizeof(coff_file_header))) return;
+
+  // The actual starting location of the COFF header in the file. This can be
+  // non-zero in PE/COFF files.
+  uint64_t HeaderStart = 0;
 
-bool COFFObjectFile::isSectionText(DataRefImpl Sec) const {
-  const coff_section *sec = reinterpret_cast<const coff_section*>(Sec.p);
-  return sec->Characteristics & COFF::IMAGE_SCN_CNT_CODE;
-}
-
-COFFObjectFile::COFFObjectFile(MemoryBuffer *Object)
-  : ObjectFile(Object) {
-
-  HeaderOff = 0;
-
-  if (base[0] == 0x4d && base[1] == 0x5a) {
+  // Check if this is a PE/COFF file.
+  if (base()[0] == 0x4d && base()[1] == 0x5a) {
     // PE/COFF, seek through MS-DOS compatibility stub and 4-byte
     // PE signature to find 'normal' COFF header.
-    HeaderOff += *reinterpret_cast<const ulittle32_t *>(base + 0x3c);
-    HeaderOff += 4;
+    if (!checkSize(Data, ec, 0x3c + 8)) return;
+    HeaderStart += *reinterpret_cast<const ulittle32_t *>(base() + 0x3c);
+    // Check the PE header. ("PE\0\0")
+    if (StringRef(reinterpret_cast<const char *>(base() + HeaderStart), 4)
+        != "PE\0\0") {
+      ec = object_error::parse_failed;
+      return;
+    }
+    HeaderStart += 4; // Skip the PE Header.
   }
 
-  Header = reinterpret_cast<const coff_file_header *>(base + HeaderOff);
+  Header = reinterpret_cast<const coff_file_header *>(base() + HeaderStart);
+  if (!checkAddr(Data, ec, uintptr_t(Header), sizeof(coff_file_header)))
+    return;
+  
   SectionTable =
-    reinterpret_cast<const coff_section *>( base
-                                          + HeaderOff
+    reinterpret_cast<const coff_section *>( base()
+                                          + HeaderStart
                                           + sizeof(coff_file_header)
                                           + Header->SizeOfOptionalHeader);
+  if (!checkAddr(Data, ec, uintptr_t(SectionTable),
+                 Header->NumberOfSections * sizeof(coff_section)))
+    return;
+
   SymbolTable =
-    reinterpret_cast<const coff_symbol *>(base + Header->PointerToSymbolTable);
+    reinterpret_cast<const coff_symbol *>(base()
+                                          + Header->PointerToSymbolTable);
+  if (!checkAddr(Data, ec, uintptr_t(SymbolTable),
+                 Header->NumberOfSymbols * sizeof(coff_symbol)))
+    return;
 
   // Find string table.
-  StringTable = reinterpret_cast<const char *>(base)
-              + Header->PointerToSymbolTable
-              + Header->NumberOfSymbols * 18;
+  StringTable = reinterpret_cast<const char *>(base())
+                + Header->PointerToSymbolTable
+                + Header->NumberOfSymbols * sizeof(coff_symbol);
+  if (!checkAddr(Data, ec, uintptr_t(StringTable), sizeof(ulittle32_t)))
+    return;
+
+  StringTableSize = *reinterpret_cast<const ulittle32_t *>(StringTable);
+  if (!checkAddr(Data, ec, uintptr_t(StringTable), StringTableSize))
+    return;
+  // Check that the string table is null terminated if it has anything in it.
+  if (StringTableSize < 4
+      || (StringTableSize > 4 && StringTable[StringTableSize - 1] != 0)) {
+    ec = object_error::parse_failed;
+    return;
+  }
+  
+  ec = object_error::success;
 }
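// Aside (comments only, not part of the patch): the header-location logic just
// above, in compressed form.  A bare COFF object begins with coff_file_header
// at offset 0.  A PE/COFF image begins with the "MZ" DOS stub; the 32-bit
// little-endian value at offset 0x3c (e_lfanew) gives the file offset of the
// "PE\0\0" signature, and coff_file_header follows those four bytes.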
 
 ObjectFile::symbol_iterator COFFObjectFile::begin_symbols() const {
   DataRefImpl ret;
-  memset(&ret, 0, sizeof(DataRefImpl));
+  std::memset(&ret, 0, sizeof(DataRefImpl));
   ret.p = reinterpret_cast<intptr_t>(SymbolTable);
   return symbol_iterator(SymbolRef(ret, this));
 }
@@ -320,21 +367,21 @@
 ObjectFile::symbol_iterator COFFObjectFile::end_symbols() const {
   // The symbol table ends where the string table begins.
   DataRefImpl ret;
-  memset(&ret, 0, sizeof(DataRefImpl));
+  std::memset(&ret, 0, sizeof(DataRefImpl));
   ret.p = reinterpret_cast<intptr_t>(StringTable);
   return symbol_iterator(SymbolRef(ret, this));
 }
 
 ObjectFile::section_iterator COFFObjectFile::begin_sections() const {
   DataRefImpl ret;
-  memset(&ret, 0, sizeof(DataRefImpl));
+  std::memset(&ret, 0, sizeof(DataRefImpl));
   ret.p = reinterpret_cast<intptr_t>(SectionTable);
   return section_iterator(SectionRef(ret, this));
 }
 
 ObjectFile::section_iterator COFFObjectFile::end_sections() const {
   DataRefImpl ret;
-  memset(&ret, 0, sizeof(DataRefImpl));
+  std::memset(&ret, 0, sizeof(DataRefImpl));
   ret.p = reinterpret_cast<intptr_t>(SectionTable + Header->NumberOfSections);
   return section_iterator(SectionRef(ret, this));
 }
@@ -365,24 +412,37 @@
   }
 }
 
-const coff_section *COFFObjectFile::getSection(std::size_t index) const {
-  if (index > 0 && index <= Header->NumberOfSections)
-    return SectionTable + (index - 1);
-  return 0;
+error_code COFFObjectFile::getSection(int32_t index,
+                                      const coff_section *&Result) const {
+  // Check for special index values.
+  if (index == COFF::IMAGE_SYM_UNDEFINED ||
+      index == COFF::IMAGE_SYM_ABSOLUTE ||
+      index == COFF::IMAGE_SYM_DEBUG)
+    Result = NULL;
+  else if (index > 0 && index <= Header->NumberOfSections)
+    // We already verified the section table data, so no need to check again.
+    Result = SectionTable + (index - 1);
+  else
+    return object_error::parse_failed;
+  return object_error::success;
 }
 
-const char *COFFObjectFile::getString(std::size_t offset) const {
-  const ulittle32_t *StringTableSize =
-    reinterpret_cast<const ulittle32_t *>(StringTable);
-  if (offset < *StringTableSize)
-    return StringTable + offset;
-  return 0;
+error_code COFFObjectFile::getString(uint32_t offset,
+                                     StringRef &Result) const {
+  if (StringTableSize <= 4)
+    // Tried to get a string from an empty string table.
+    return object_error::parse_failed;
+  if (offset >= StringTableSize)
+    return object_error::unexpected_eof;
+  Result = StringRef(StringTable + offset);
+  return object_error::success;
 }
 
 namespace llvm {
 
   ObjectFile *ObjectFile::createCOFFObjectFile(MemoryBuffer *Object) {
-    return new COFFObjectFile(Object);
+    error_code ec;
+    return new COFFObjectFile(Object, ec);
   }
 
 } // end namespace llvm
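Both the COFF reader above and the ELF reader below ultimately need the same
primitive: an overflow-aware check that a candidate [addr, addr + size) range stays
inside the mapped buffer, as checkAddr() does.  Here is a self-contained sketch of
that check using a toy name (toyCheckRange); the extra lower-bound comparison is an
addition for the sketch, not something the patch itself performs.

    #include <cstdint>
    #include <cstdio>

    // True iff [addr, addr + size) lies inside [start, end).  The first two
    // comparisons catch unsigned wrap-around in addr + size, mirroring the
    // checkAddr helper in the COFF reader.
    static bool toyCheckRange(uintptr_t start, uintptr_t end,
                              uintptr_t addr, uint64_t size) {
      if (addr + size < addr || addr + size < size)  // overflow
        return false;
      return addr >= start && addr + size <= end;
    }

    int main() {
      unsigned char buf[64] = {0};
      uintptr_t start = uintptr_t(buf), end = start + sizeof(buf);
      std::printf("%d %d\n",
                  (int)toyCheckRange(start, end, start + 8, 16),    // 1: fits
                  (int)toyCheckRange(start, end, start + 60, 16));  // 0: past end
      return 0;
    }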

Modified: llvm/branches/type-system-rewrite/lib/Object/ELFObjectFile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Object/ELFObjectFile.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Object/ELFObjectFile.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Object/ELFObjectFile.cpp Sat Jul  2 22:28:07 2011
@@ -222,22 +222,22 @@
   const char     *getString(const Elf_Shdr *section, uint32_t offset) const;
 
 protected:
-  virtual SymbolRef getSymbolNext(DataRefImpl Symb) const;
-  virtual StringRef getSymbolName(DataRefImpl Symb) const;
-  virtual uint64_t  getSymbolAddress(DataRefImpl Symb) const;
-  virtual uint64_t  getSymbolSize(DataRefImpl Symb) const;
-  virtual char      getSymbolNMTypeChar(DataRefImpl Symb) const;
-  virtual bool      isSymbolInternal(DataRefImpl Symb) const;
-
-  virtual SectionRef getSectionNext(DataRefImpl Sec) const;
-  virtual StringRef  getSectionName(DataRefImpl Sec) const;
-  virtual uint64_t   getSectionAddress(DataRefImpl Sec) const;
-  virtual uint64_t   getSectionSize(DataRefImpl Sec) const;
-  virtual StringRef  getSectionContents(DataRefImpl Sec) const;
-  virtual bool       isSectionText(DataRefImpl Sec) const;
+  virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
+  virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const;
+  virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const;
+  virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const;
+  virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const;
+  virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const;
+
+  virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
+  virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
+  virtual error_code getSectionAddress(DataRefImpl Sec, uint64_t &Res) const;
+  virtual error_code getSectionSize(DataRefImpl Sec, uint64_t &Res) const;
+  virtual error_code getSectionContents(DataRefImpl Sec, StringRef &Res) const;
+  virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const;
 
 public:
-  ELFObjectFile(MemoryBuffer *Object);
+  ELFObjectFile(MemoryBuffer *Object, error_code &ec);
   virtual symbol_iterator begin_symbols() const;
   virtual symbol_iterator end_symbols() const;
   virtual section_iterator begin_sections() const;
@@ -259,9 +259,9 @@
   //        an error object around.
   if (!(  symb
         && SymbolTableSection
-        && symb >= (const Elf_Sym*)(base
+        && symb >= (const Elf_Sym*)(base()
                    + SymbolTableSection->sh_offset)
-        && symb <  (const Elf_Sym*)(base
+        && symb <  (const Elf_Sym*)(base()
                    + SymbolTableSection->sh_offset
                    + SymbolTableSection->sh_size)))
     // FIXME: Proper error handling.
@@ -269,8 +269,9 @@
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-SymbolRef ELFObjectFile<target_endianness, is64Bits>
-                       ::getSymbolNext(DataRefImpl Symb) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSymbolNext(DataRefImpl Symb,
+                                        SymbolRef &Result) const {
   validateSymbol(Symb);
   const Elf_Shdr *SymbolTableSection = SymbolTableSections[Symb.d.b];
 
@@ -287,63 +288,80 @@
     }
   }
 
-  return SymbolRef(Symb, this);
+  Result = SymbolRef(Symb, this);
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-StringRef ELFObjectFile<target_endianness, is64Bits>
-                       ::getSymbolName(DataRefImpl Symb) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSymbolName(DataRefImpl Symb,
+                                        StringRef &Result) const {
   validateSymbol(Symb);
   const Elf_Sym  *symb = getSymbol(Symb);
   if (symb->st_name == 0) {
     const Elf_Shdr *section = getSection(symb->st_shndx);
     if (!section)
-      return "";
-    return getString(dot_shstrtab_sec, section->sh_name);
+      Result = "";
+    else
+      Result = getString(dot_shstrtab_sec, section->sh_name);
+    return object_error::success;
   }
 
   // Use the default symbol table name section.
-  return getString(dot_strtab_sec, symb->st_name);
+  Result = getString(dot_strtab_sec, symb->st_name);
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-uint64_t ELFObjectFile<target_endianness, is64Bits>
-                      ::getSymbolAddress(DataRefImpl Symb) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSymbolAddress(DataRefImpl Symb,
+                                           uint64_t &Result) const {
   validateSymbol(Symb);
   const Elf_Sym  *symb = getSymbol(Symb);
   const Elf_Shdr *Section;
   switch (symb->st_shndx) {
   case ELF::SHN_COMMON:
    // Undefined symbols have no address yet.
-  case ELF::SHN_UNDEF: return UnknownAddressOrSize;
-  case ELF::SHN_ABS: return symb->st_value;
+  case ELF::SHN_UNDEF:
+    Result = UnknownAddressOrSize;
+    return object_error::success;
+  case ELF::SHN_ABS:
+    Result = symb->st_value;
+    return object_error::success;
   default: Section = getSection(symb->st_shndx);
   }
 
   switch (symb->getType()) {
-  case ELF::STT_SECTION: return Section ? Section->sh_addr
-                                        : UnknownAddressOrSize;
+  case ELF::STT_SECTION:
+    Result = Section ? Section->sh_addr : UnknownAddressOrSize;
+    return object_error::success;
   case ELF::STT_FUNC:
   case ELF::STT_OBJECT:
   case ELF::STT_NOTYPE:
-    return symb->st_value;
-  default: return UnknownAddressOrSize;
+    Result = symb->st_value;
+    return object_error::success;
+  default:
+    Result = UnknownAddressOrSize;
+    return object_error::success;
   }
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-uint64_t ELFObjectFile<target_endianness, is64Bits>
-                      ::getSymbolSize(DataRefImpl Symb) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSymbolSize(DataRefImpl Symb,
+                                        uint64_t &Result) const {
   validateSymbol(Symb);
   const Elf_Sym  *symb = getSymbol(Symb);
   if (symb->st_size == 0)
-    return UnknownAddressOrSize;
-  return symb->st_size;
+    Result = UnknownAddressOrSize;
+  else
+    Result = symb->st_size;
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-char ELFObjectFile<target_endianness, is64Bits>
-                  ::getSymbolNMTypeChar(DataRefImpl Symb) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSymbolNMTypeChar(DataRefImpl Symb,
+                                              char &Result) const {
   validateSymbol(Symb);
   const Elf_Sym  *symb = getSymbol(Symb);
   const Elf_Shdr *Section = getSection(symb->st_shndx);
@@ -390,89 +408,110 @@
         ret = 'W';
   }
 
-  if (ret == '?' && symb->getType() == ELF::STT_SECTION)
-    return StringSwitch<char>(getSymbolName(Symb))
+  if (ret == '?' && symb->getType() == ELF::STT_SECTION) {
+    StringRef name;
+    if (error_code ec = getSymbolName(Symb, name))
+      return ec;
+    Result = StringSwitch<char>(name)
       .StartsWith(".debug", 'N')
       .StartsWith(".note", 'n');
+    return object_error::success;
+  }
 
-  return ret;
+  Result = ret;
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-bool ELFObjectFile<target_endianness, is64Bits>
-                  ::isSymbolInternal(DataRefImpl Symb) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::isSymbolInternal(DataRefImpl Symb,
+                                           bool &Result) const {
   validateSymbol(Symb);
   const Elf_Sym  *symb = getSymbol(Symb);
 
   if (  symb->getType() == ELF::STT_FILE
      || symb->getType() == ELF::STT_SECTION)
-    return true;
-  return false;
+    Result = true;
+  else
+    Result = false;
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-SectionRef ELFObjectFile<target_endianness, is64Bits>
-                        ::getSectionNext(DataRefImpl Sec) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSectionNext(DataRefImpl Sec,
+                                         SectionRef &Result) const {
   const uint8_t *sec = reinterpret_cast<const uint8_t *>(Sec.p);
   sec += Header->e_shentsize;
   Sec.p = reinterpret_cast<intptr_t>(sec);
-  return SectionRef(Sec, this);
+  Result = SectionRef(Sec, this);
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-StringRef ELFObjectFile<target_endianness, is64Bits>
-                       ::getSectionName(DataRefImpl Sec) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSectionName(DataRefImpl Sec,
+                                         StringRef &Result) const {
   const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
-  return StringRef(getString(dot_shstrtab_sec, sec->sh_name));
+  Result = StringRef(getString(dot_shstrtab_sec, sec->sh_name));
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-uint64_t ELFObjectFile<target_endianness, is64Bits>
-                      ::getSectionAddress(DataRefImpl Sec) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSectionAddress(DataRefImpl Sec,
+                                            uint64_t &Result) const {
   const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
-  return sec->sh_addr;
+  Result = sec->sh_addr;
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-uint64_t ELFObjectFile<target_endianness, is64Bits>
-                      ::getSectionSize(DataRefImpl Sec) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSectionSize(DataRefImpl Sec,
+                                         uint64_t &Result) const {
   const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
-  return sec->sh_size;
+  Result = sec->sh_size;
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-StringRef ELFObjectFile<target_endianness, is64Bits>
-                       ::getSectionContents(DataRefImpl Sec) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::getSectionContents(DataRefImpl Sec,
+                                             StringRef &Result) const {
   const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
-  const char *start = (char*)base + sec->sh_offset;
-  return StringRef(start, sec->sh_size);
+  const char *start = (const char*)base() + sec->sh_offset;
+  Result = StringRef(start, sec->sh_size);
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-bool ELFObjectFile<target_endianness, is64Bits>
-                  ::isSectionText(DataRefImpl Sec) const {
+error_code ELFObjectFile<target_endianness, is64Bits>
+                        ::isSectionText(DataRefImpl Sec,
+                                        bool &Result) const {
   const Elf_Shdr *sec = reinterpret_cast<const Elf_Shdr *>(Sec.p);
   if (sec->sh_flags & ELF::SHF_EXECINSTR)
-    return true;
-  return false;
+    Result = true;
+  else
+    Result = false;
+  return object_error::success;
 }
 
 template<support::endianness target_endianness, bool is64Bits>
-ELFObjectFile<target_endianness, is64Bits>::ELFObjectFile(MemoryBuffer *Object)
-  : ObjectFile(Object)
+ELFObjectFile<target_endianness, is64Bits>::ELFObjectFile(MemoryBuffer *Object
+                                                          , error_code &ec)
+  : ObjectFile(Binary::isELF, Object, ec)
   , SectionHeaderTable(0)
   , dot_shstrtab_sec(0)
   , dot_strtab_sec(0) {
-  Header = reinterpret_cast<const Elf_Ehdr *>(base);
+  Header = reinterpret_cast<const Elf_Ehdr *>(base());
 
   if (Header->e_shoff == 0)
     return;
 
   SectionHeaderTable =
-    reinterpret_cast<const Elf_Shdr *>(base + Header->e_shoff);
+    reinterpret_cast<const Elf_Shdr *>(base() + Header->e_shoff);
   uint32_t SectionTableSize = Header->e_shnum * Header->e_shentsize;
   if (!(  (const uint8_t *)SectionHeaderTable + SectionTableSize
-         <= base + MapFile->getBufferSize()))
+         <= base() + Data->getBufferSize()))
     // FIXME: Proper error handling.
     report_fatal_error("Section table goes past end of file!");
 
@@ -491,7 +530,7 @@
   dot_shstrtab_sec = getSection(Header->e_shstrndx);
   if (dot_shstrtab_sec) {
     // Verify that the last byte in the string table is a null.
-    if (((const char*)base + dot_shstrtab_sec->sh_offset)
+    if (((const char*)base() + dot_shstrtab_sec->sh_offset)
         [dot_shstrtab_sec->sh_size - 1] != 0)
       // FIXME: Proper error handling.
       report_fatal_error("String table must end with a null terminator!");
@@ -509,7 +548,7 @@
           // FIXME: Proper error handling.
           report_fatal_error("Already found section named .strtab!");
         dot_strtab_sec = sh;
-        const char *dot_strtab = (const char*)base + sh->sh_offset;
+        const char *dot_strtab = (const char*)base() + sh->sh_offset;
           if (dot_strtab[sh->sh_size - 1] != 0)
             // FIXME: Proper error handling.
             report_fatal_error("String table must end with a null terminator!");
@@ -548,7 +587,7 @@
                                           ::begin_sections() const {
   DataRefImpl ret;
   memset(&ret, 0, sizeof(DataRefImpl));
-  ret.p = reinterpret_cast<intptr_t>(base + Header->e_shoff);
+  ret.p = reinterpret_cast<intptr_t>(base() + Header->e_shoff);
   return section_iterator(SectionRef(ret, this));
 }
 
@@ -557,7 +596,7 @@
                                           ::end_sections() const {
   DataRefImpl ret;
   memset(&ret, 0, sizeof(DataRefImpl));
-  ret.p = reinterpret_cast<intptr_t>(base
+  ret.p = reinterpret_cast<intptr_t>(base()
                                      + Header->e_shoff
                                      + (Header->e_shentsize * Header->e_shnum));
   return section_iterator(SectionRef(ret, this));
@@ -613,7 +652,7 @@
 ELFObjectFile<target_endianness, is64Bits>::getSymbol(DataRefImpl Symb) const {
   const Elf_Shdr *sec = SymbolTableSections[Symb.d.b];
   return reinterpret_cast<const Elf_Sym *>(
-           base
+           base()
            + sec->sh_offset
            + (Symb.d.a * sec->sh_entsize));
 }
@@ -656,8 +695,8 @@
   assert(section && section->sh_type == ELF::SHT_STRTAB && "Invalid section!");
   if (offset >= section->sh_size)
     // FIXME: Proper error handling.
-    report_fatal_error("Sybol name offset outside of string table!");
-  return (const char *)base + section->sh_offset + offset;
+    report_fatal_error("Symbol name offset outside of string table!");
+  return (const char *)base() + section->sh_offset + offset;
 }
 
 // EI_CLASS, EI_DATA.
@@ -673,14 +712,15 @@
 
   ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
     std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object);
+    error_code ec;
     if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB)
-      return new ELFObjectFile<support::little, false>(Object);
+      return new ELFObjectFile<support::little, false>(Object, ec);
     else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB)
-      return new ELFObjectFile<support::big, false>(Object);
+      return new ELFObjectFile<support::big, false>(Object, ec);
     else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB)
-      return new ELFObjectFile<support::little, true>(Object);
+      return new ELFObjectFile<support::little, true>(Object, ec);
     else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB)
-      return new ELFObjectFile<support::big, true>(Object);
+      return new ELFObjectFile<support::big, true>(Object, ec);
     // FIXME: Proper error handling.
     report_fatal_error("Not an ELF object file!");
   }
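The net effect of the ObjectFile changes above is that every accessor now reports failure through an error_code and returns its value via an out-parameter, and construction takes an error_code as well. A minimal caller-side sketch (illustrative only, not part of this patch; it assumes the SymbolRef/symbol_iterator interface exactly as revised in these hunks):

// Illustrative sketch, not part of the patch: walking symbols with the
// new error_code-based accessors.
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/system_error.h"
using namespace llvm;
using namespace object;

static void printSymbols(ObjectFile *Obj) {
  error_code ec;
  for (ObjectFile::symbol_iterator I = Obj->begin_symbols(),
                                   E = Obj->end_symbols();
       I != E; I.increment(ec)) {
    if (ec) report_fatal_error(ec.message());
    StringRef Name;
    uint64_t Addr;
    if ((ec = I->getName(Name)) || (ec = I->getAddress(Addr)))
      report_fatal_error(ec.message());
    outs() << Name << " " << Addr << "\n";
  }
}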

Modified: llvm/branches/type-system-rewrite/lib/Object/MachOObjectFile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Object/MachOObjectFile.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Object/MachOObjectFile.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Object/MachOObjectFile.cpp Sat Jul  2 22:28:07 2011
@@ -32,8 +32,8 @@
 
 class MachOObjectFile : public ObjectFile {
 public:
-  MachOObjectFile(MemoryBuffer *Object, MachOObject *MOO)
-    : ObjectFile(Object),
+  MachOObjectFile(MemoryBuffer *Object, MachOObject *MOO, error_code &ec)
+    : ObjectFile(Binary::isMachO, Object, ec),
       MachOObj(MOO),
       RegisteredStringTable(std::numeric_limits<uint32_t>::max()) {}
 
@@ -47,19 +47,19 @@
   virtual unsigned getArch() const;
 
 protected:
-  virtual SymbolRef getSymbolNext(DataRefImpl Symb) const;
-  virtual StringRef getSymbolName(DataRefImpl Symb) const;
-  virtual uint64_t  getSymbolAddress(DataRefImpl Symb) const;
-  virtual uint64_t  getSymbolSize(DataRefImpl Symb) const;
-  virtual char      getSymbolNMTypeChar(DataRefImpl Symb) const;
-  virtual bool      isSymbolInternal(DataRefImpl Symb) const;
-
-  virtual SectionRef getSectionNext(DataRefImpl Sec) const;
-  virtual StringRef  getSectionName(DataRefImpl Sec) const;
-  virtual uint64_t   getSectionAddress(DataRefImpl Sec) const;
-  virtual uint64_t   getSectionSize(DataRefImpl Sec) const;
-  virtual StringRef  getSectionContents(DataRefImpl Sec) const;
-  virtual bool       isSectionText(DataRefImpl Sec) const;
+  virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const;
+  virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const;
+  virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const;
+  virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const;
+  virtual error_code getSymbolNMTypeChar(DataRefImpl Symb, char &Res) const;
+  virtual error_code isSymbolInternal(DataRefImpl Symb, bool &Res) const;
+
+  virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const;
+  virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const;
+  virtual error_code getSectionAddress(DataRefImpl Sec, uint64_t &Res) const;
+  virtual error_code getSectionSize(DataRefImpl Sec, uint64_t &Res) const;
+  virtual error_code getSectionContents(DataRefImpl Sec, StringRef &Res) const;
+  virtual error_code isSectionText(DataRefImpl Sec, bool &Res) const;
 
 private:
   MachOObject *MachOObj;
@@ -73,11 +73,12 @@
 };
 
 ObjectFile *ObjectFile::createMachOObjectFile(MemoryBuffer *Buffer) {
+  error_code ec;
   std::string Err;
   MachOObject *MachOObj = MachOObject::LoadFromBuffer(Buffer, &Err);
   if (!MachOObj)
     return NULL;
-  return new MachOObjectFile(Buffer, MachOObj);
+  return new MachOObjectFile(Buffer, MachOObj, ec);
 }
 
 /*===-- Symbols -----------------------------------------------------------===*/
@@ -114,29 +115,38 @@
 }
 
 
-SymbolRef MachOObjectFile::getSymbolNext(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSymbolNext(DataRefImpl DRI,
+                                          SymbolRef &Result) const {
   DRI.d.b++;
   moveToNextSymbol(DRI);
-  return SymbolRef(DRI, this);
+  Result = SymbolRef(DRI, this);
+  return object_error::success;
 }
 
-StringRef MachOObjectFile::getSymbolName(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSymbolName(DataRefImpl DRI,
+                                          StringRef &Result) const {
   InMemoryStruct<macho::SymbolTableEntry> Entry;
   getSymbolTableEntry(DRI, Entry);
-  return MachOObj->getStringAtIndex(Entry->StringIndex);
+  Result = MachOObj->getStringAtIndex(Entry->StringIndex);
+  return object_error::success;
 }
 
-uint64_t MachOObjectFile::getSymbolAddress(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSymbolAddress(DataRefImpl DRI,
+                                             uint64_t &Result) const {
   InMemoryStruct<macho::SymbolTableEntry> Entry;
   getSymbolTableEntry(DRI, Entry);
-  return Entry->Value;
+  Result = Entry->Value;
+  return object_error::success;
 }
 
-uint64_t MachOObjectFile::getSymbolSize(DataRefImpl DRI) const {
-  return UnknownAddressOrSize;
+error_code MachOObjectFile::getSymbolSize(DataRefImpl DRI,
+                                          uint64_t &Result) const {
+  Result = UnknownAddressOrSize;
+  return object_error::success;
 }
 
-char MachOObjectFile::getSymbolNMTypeChar(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSymbolNMTypeChar(DataRefImpl DRI,
+                                                char &Result) const {
   InMemoryStruct<macho::SymbolTableEntry> Entry;
   getSymbolTableEntry(DRI, Entry);
 
@@ -156,13 +166,16 @@
 
   if (Entry->Flags & (macho::STF_External | macho::STF_PrivateExtern))
     Char = toupper(Char);
-  return Char;
+  Result = Char;
+  return object_error::success;
 }
 
-bool MachOObjectFile::isSymbolInternal(DataRefImpl DRI) const {
+error_code MachOObjectFile::isSymbolInternal(DataRefImpl DRI,
+                                             bool &Result) const {
   InMemoryStruct<macho::SymbolTableEntry> Entry;
   getSymbolTableEntry(DRI, Entry);
-  return Entry->Flags & macho::STF_StabsEntryMask;
+  Result = Entry->Flags & macho::STF_StabsEntryMask;
+  return object_error::success;
 }
 
 ObjectFile::symbol_iterator MachOObjectFile::begin_symbols() const {
@@ -204,10 +217,12 @@
   }
 }
 
-SectionRef MachOObjectFile::getSectionNext(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSectionNext(DataRefImpl DRI,
+                                           SectionRef &Result) const {
   DRI.d.b++;
   moveToNextSection(DRI);
-  return SectionRef(DRI, this);
+  Result = SectionRef(DRI, this);
+  return object_error::success;
 }
 
 void
@@ -219,43 +234,53 @@
   MachOObj->ReadSection(LCI, DRI.d.b, Res);
 }
 
-StringRef MachOObjectFile::getSectionName(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSectionName(DataRefImpl DRI,
+                                           StringRef &Result) const {
   InMemoryStruct<macho::SegmentLoadCommand> SLC;
   LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a);
   MachOObj->ReadSegmentLoadCommand(LCI, SLC);
   InMemoryStruct<macho::Section> Sect;
   MachOObj->ReadSection(LCI, DRI.d.b, Sect);
 
-  static char Result[34];
-  strcpy(Result, SLC->Name);
-  strcat(Result, ",");
-  strcat(Result, Sect->Name);
-  return StringRef(Result);
+  static char result[34];
+  strcpy(result, SLC->Name);
+  strcat(result, ",");
+  strcat(result, Sect->Name);
+  Result = StringRef(result);
+  return object_error::success;
 }
 
-uint64_t MachOObjectFile::getSectionAddress(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSectionAddress(DataRefImpl DRI,
+                                              uint64_t &Result) const {
   InMemoryStruct<macho::Section> Sect;
   getSection(DRI, Sect);
-  return Sect->Address;
+  Result = Sect->Address;
+  return object_error::success;
 }
 
-uint64_t MachOObjectFile::getSectionSize(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSectionSize(DataRefImpl DRI,
+                                           uint64_t &Result) const {
   InMemoryStruct<macho::Section> Sect;
   getSection(DRI, Sect);
-  return Sect->Size;
+  Result = Sect->Size;
+  return object_error::success;
 }
 
-StringRef MachOObjectFile::getSectionContents(DataRefImpl DRI) const {
+error_code MachOObjectFile::getSectionContents(DataRefImpl DRI,
+                                               StringRef &Result) const {
   InMemoryStruct<macho::Section> Sect;
   getSection(DRI, Sect);
-  return MachOObj->getData(Sect->Offset, Sect->Size);
+  Result = MachOObj->getData(Sect->Offset, Sect->Size);
+  return object_error::success;
 }
 
-bool MachOObjectFile::isSectionText(DataRefImpl DRI) const {
+error_code MachOObjectFile::isSectionText(DataRefImpl DRI,
+                                          bool &Result) const {
   InMemoryStruct<macho::SegmentLoadCommand> SLC;
   LoadCommandInfo LCI = MachOObj->getLoadCommandInfo(DRI.d.a);
   MachOObj->ReadSegmentLoadCommand(LCI, SLC);
-  return !strcmp(SLC->Name, "__TEXT");
+  Result = !strcmp(SLC->Name, "__TEXT");
+  return object_error::success;
 }
 
 ObjectFile::section_iterator MachOObjectFile::begin_sections() const {

Modified: llvm/branches/type-system-rewrite/lib/Object/Object.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Object/Object.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Object/Object.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Object/Object.cpp Sat Jul  2 22:28:07 2011
@@ -41,19 +41,28 @@
 }
 
 void LLVMMoveToNextSection(LLVMSectionIteratorRef SI) {
-  // We can't use unwrap() here because the argument to ++ must be an lvalue.
-  ++*reinterpret_cast<ObjectFile::section_iterator*>(SI);
+  error_code ec;
+  unwrap(SI)->increment(ec);
+  if (ec) report_fatal_error("LLVMMoveToNextSection failed: " + ec.message());
 }
 
 const char *LLVMGetSectionName(LLVMSectionIteratorRef SI) {
-  return (*unwrap(SI))->getName().data();
+  StringRef ret;
+  if (error_code ec = (*unwrap(SI))->getName(ret))
+    report_fatal_error(ec.message());
+  return ret.data();
 }
 
 uint64_t LLVMGetSectionSize(LLVMSectionIteratorRef SI) {
-  return (*unwrap(SI))->getSize();
+  uint64_t ret;
+  if (error_code ec = (*unwrap(SI))->getSize(ret))
+    report_fatal_error(ec.message());
+  return ret;
 }
 
 const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI) {
-  return (*unwrap(SI))->getContents().data();
+  StringRef ret;
+  if (error_code ec = (*unwrap(SI))->getContents(ret))
+    report_fatal_error(ec.message());
+  return ret.data();
 }
-
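With these bindings, a failure from the underlying C++ accessors is now fatal rather than silently ignored. A caller-side sketch (illustrative only; it assumes the pre-existing llvm-c/Object.h entry points such as LLVMCreateObjectFile and LLVMGetSections, which are not shown in this hunk):

// Illustrative sketch, not part of the patch.
#include "llvm-c/Object.h"
#include <stdio.h>

static void listSections(LLVMMemoryBufferRef Buf) {
  LLVMObjectFileRef OF = LLVMCreateObjectFile(Buf);
  LLVMSectionIteratorRef SI = LLVMGetSections(OF);
  while (!LLVMIsSectionIteratorAtEnd(OF, SI)) {
    // Any accessor failure now aborts via report_fatal_error.
    printf("%s: %llu bytes\n", LLVMGetSectionName(SI),
           (unsigned long long)LLVMGetSectionSize(SI));
    LLVMMoveToNextSection(SI);
  }
  LLVMDisposeSectionIterator(SI);
  LLVMDisposeObjectFile(OF);
}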

Modified: llvm/branches/type-system-rewrite/lib/Object/ObjectFile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Object/ObjectFile.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Object/ObjectFile.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Object/ObjectFile.cpp Sat Jul  2 22:28:07 2011
@@ -21,18 +21,8 @@
 using namespace llvm;
 using namespace object;
 
-ObjectFile::ObjectFile(MemoryBuffer *Object)
-  : MapFile(Object) {
-  assert(MapFile && "Must be a valid MemoryBuffer!");
-  base = reinterpret_cast<const uint8_t *>(MapFile->getBufferStart());
-}
-
-ObjectFile::~ObjectFile() {
-  delete MapFile;
-}
-
-StringRef ObjectFile::getFilename() const {
-  return MapFile->getBufferIdentifier();
+ObjectFile::ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec)
+  : Binary(Type, source) {
 }
 
 ObjectFile *ObjectFile::createObjectFile(MemoryBuffer *Object) {

Modified: llvm/branches/type-system-rewrite/lib/Support/ConstantRange.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Support/ConstantRange.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Support/ConstantRange.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Support/ConstantRange.cpp Sat Jul  2 22:28:07 2011
@@ -529,8 +529,8 @@
     return ConstantRange(getBitWidth(), /*isFullSet=*/true);
 
   APInt Spread_X = getSetSize(), Spread_Y = Other.getSetSize();
-  APInt NewLower = getLower() - Other.getLower();
-  APInt NewUpper = getUpper() - Other.getUpper() + 1;
+  APInt NewLower = getLower() - Other.getUpper() + 1;
+  APInt NewUpper = getUpper() - Other.getLower();
   if (NewLower == NewUpper)
     return ConstantRange(getBitWidth(), /*isFullSet=*/true);
 

Modified: llvm/branches/type-system-rewrite/lib/Support/Triple.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Support/Triple.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Support/Triple.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Support/Triple.cpp Sat Jul  2 22:28:07 2011
@@ -113,6 +113,7 @@
   case Win32: return "win32";
   case Haiku: return "haiku";
   case Minix: return "minix";
+  case RTEMS: return "rtems";
   }
 
   return "<invalid>";

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.h Sat Jul  2 22:28:07 2011
@@ -16,6 +16,7 @@
 #define TARGET_ARM_H
 
 #include "ARMBaseInfo.h"
+#include "llvm/Support/DataTypes.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Target/TargetMachine.h"
 #include <cassert>
@@ -27,6 +28,7 @@
 class JITCodeEmitter;
 class formatted_raw_ostream;
 class MCCodeEmitter;
+class MCObjectWriter;
 class TargetAsmBackend;
 class MachineInstr;
 class ARMAsmPrinter;
@@ -58,6 +60,12 @@
 void LowerARMMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
                                   ARMAsmPrinter &AP);
 
+/// createARMMachObjectWriter - Construct an ARM Mach-O object writer.
+MCObjectWriter *createARMMachObjectWriter(raw_ostream &OS,
+                                          bool Is64Bit,
+                                          uint32_t CPUType,
+                                          uint32_t CPUSubtype);
+
 } // end namespace llvm;
 
 #endif

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.td (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARM.td Sat Jul  2 22:28:07 2011
@@ -75,6 +75,10 @@
                                                "AvoidCPSRPartialUpdate", "true",
                                  "Avoid CPSR partial update for OOO execution">;
 
+/// Some M architectures don't have the DSP extension (v7E-M vs. v7M)
+def FeatureDSPThumb2 : SubtargetFeature<"t2dsp", "Thumb2DSP", "true",
+                                 "Supports v7 DSP instructions in Thumb2.">;
+
 // Multiprocessing extension.
 def FeatureMP : SubtargetFeature<"mp", "HasMPExtension", "true",
                                  "Supports Multiprocessing extension">;
@@ -93,14 +97,20 @@
                                    [FeatureNoARM, FeatureDB]>;
 def ArchV6T2    : SubtargetFeature<"v6t2", "ARMArchVersion", "V6T2",
                                    "ARM v6t2",
-                                   [FeatureThumb2]>;
+                                   [FeatureThumb2, FeatureDSPThumb2]>;
 def ArchV7A     : SubtargetFeature<"v7a", "ARMArchVersion", "V7A",
                                    "ARM v7A",
-                                   [FeatureThumb2, FeatureNEON, FeatureDB]>;
+                                   [FeatureThumb2, FeatureNEON, FeatureDB,
+                                    FeatureDSPThumb2]>;
 def ArchV7M     : SubtargetFeature<"v7m", "ARMArchVersion", "V7M",
                                    "ARM v7M",
                                    [FeatureThumb2, FeatureNoARM, FeatureDB,
                                     FeatureHWDiv]>;
+def ArchV7EM    : SubtargetFeature<"v7em", "ARMArchVersion", "V7EM",
+                                   "ARM v7E-M",
+                                   [FeatureThumb2, FeatureNoARM, FeatureDB,
+                                    FeatureHWDiv, FeatureDSPThumb2,
+                                    FeatureT2XtPk]>;
 
 //===----------------------------------------------------------------------===//
 // ARM Processors supported.
@@ -192,7 +202,7 @@
 
 // V7M Processors.
 def : ProcNoItin<"cortex-m3",       [ArchV7M]>;
-def : ProcNoItin<"cortex-m4",       [ArchV7M, FeatureVFP2, FeatureVFPOnlySP]>;
+def : ProcNoItin<"cortex-m4",       [ArchV7EM, FeatureVFP2, FeatureVFPOnlySP]>;
 
 //===----------------------------------------------------------------------===//
 // Register File Description

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmBackend.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmBackend.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmBackend.cpp Sat Jul  2 22:28:07 2011
@@ -28,14 +28,6 @@
 using namespace llvm;
 
 namespace {
-class ARMMachObjectWriter : public MCMachObjectTargetWriter {
-public:
-  ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType,
-                      uint32_t CPUSubtype)
-    : MCMachObjectTargetWriter(Is64Bit, CPUType, CPUSubtype,
-                               /*UseAggressiveSymbolFolding=*/true) {}
-};
-
 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
 public:
   ARMELFObjectWriter(Triple::OSType OSType)
@@ -182,7 +174,8 @@
     Value >>= 16;
     // Fallthrough
   case ARM::fixup_t2_movw_lo16:
-  case ARM::fixup_t2_movt_hi16_pcrel:
+  case ARM::fixup_t2_movt_hi16_pcrel:  //FIXME: Shouldn't this be shifted like
+                                       // the other hi16 fixup?
   case ARM::fixup_t2_movw_lo16_pcrel: {
     unsigned Hi4 = (Value & 0xF000) >> 12;
     unsigned i = (Value & 0x800) >> 11;
@@ -192,8 +185,10 @@
     // inst{26} = i;
     // inst{14-12} = Mid3;
     // inst{7-0} = Lo8;
-    assert ((((int64_t)Value) >= -0x8000) && (((int64_t)Value) <= 0x7fff) &&
-            "Out of range pc-relative fixup value!");
+    // The value comes in as the whole thing, not just the portion required
+    // for this fixup, so we need to mask off the bits not handled by this
+    // portion (lo vs. hi).
+    Value &= 0xffff;
     Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
     uint64_t swapped = (Value & 0xFFFF0000) >> 16;
     swapped |= (Value & 0x0000FFFF) << 16;
@@ -423,12 +418,9 @@
     : ARMAsmBackend(T), Subtype(st) { }
 
   MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
-    return createMachObjectWriter(new ARMMachObjectWriter(
-                                    /*Is64Bit=*/false,
-                                    object::mach::CTM_ARM,
-                                    Subtype),
-                                  OS,
-                                  /*IsLittleEndian=*/true);
+    return createARMMachObjectWriter(OS, /*Is64Bit=*/false,
+                                     object::mach::CTM_ARM,
+                                     Subtype);
   }
 
   void ApplyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
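The masking change above rests on the fact that a movw/movt pair is handed the full resolved value: the non-pc-relative hi16 flavour shifts right by 16 first (the fall-through above; the new FIXME notes the pc-relative hi16 arguably should too), and then the value is masked to its low 16 bits before being scattered into the Thumb2 encoding. A compact restatement of that rule (illustrative only, not the backend code):

// Illustrative only: which half of the value a movw/movt fixup encodes.
#include <stdint.h>

static unsigned selectHalf(uint64_t Value, bool IsHi16) {
  if (IsHi16)
    Value >>= 16;          // hi16 fixups take the high half first
  return Value & 0xffff;   // then the value is masked down to 16 bits
}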

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmPrinter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmPrinter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMAsmPrinter.cpp Sat Jul  2 22:28:07 2011
@@ -1010,19 +1010,16 @@
         MI->dump();
         assert(0 && "Unsupported opcode for unwinding information");
       case ARM::MOVr:
-      case ARM::tMOVgpr2gpr:
-      case ARM::tMOVgpr2tgpr:
         Offset = 0;
         break;
       case ARM::ADDri:
         Offset = -MI->getOperand(2).getImm();
         break;
       case ARM::SUBri:
-      case ARM::t2SUBrSPi:
-        Offset =  MI->getOperand(2).getImm();
+        Offset = MI->getOperand(2).getImm();
         break;
       case ARM::tSUBspi:
-        Offset =  MI->getOperand(2).getImm()*4;
+        Offset = MI->getOperand(2).getImm()*4;
         break;
       case ARM::tADDspi:
       case ARM::tADDrSPi:
@@ -1097,13 +1094,22 @@
     OutStreamer.EmitInstruction(TmpInst);
     return;
   }
-  case ARM::t2ADDrSPi:
-  case ARM::t2ADDrSPi12:
-  case ARM::t2SUBrSPi:
-  case ARM::t2SUBrSPi12:
-    assert ((MI->getOperand(1).getReg() == ARM::SP) &&
-            "Unexpected source register!");
-    break;
+  case ARM::t2LDMIA_RET: {
+    // As above for LDMIA_RET. Map to the t2LDMIA_UPD instruction.
+    MCInst TmpInst;
+    LowerARMMachineInstrToMCInst(MI, TmpInst, *this);
+    TmpInst.setOpcode(ARM::t2LDMIA_UPD);
+    OutStreamer.EmitInstruction(TmpInst);
+    return;
+  }
+  case ARM::tPOP_RET: {
+    // As above for LDMIA_RET. Map to the tPOP instruction.
+    MCInst TmpInst;
+    LowerARMMachineInstrToMCInst(MI, TmpInst, *this);
+    TmpInst.setOpcode(ARM::tPOP);
+    OutStreamer.EmitInstruction(TmpInst);
+    return;
+  }
 
   case ARM::t2MOVi32imm: assert(0 && "Should be lowered by thumb2it pass");
   case ARM::DBG_VALUE: {
@@ -1215,6 +1221,9 @@
       TmpInst.setOpcode(ARM::tMOVr);
       TmpInst.addOperand(MCOperand::CreateReg(ARM::LR));
       TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
+      // Add predicate operands.
+      TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+      TmpInst.addOperand(MCOperand::CreateReg(0));
       OutStreamer.EmitInstruction(TmpInst);
     }
     {
@@ -1445,7 +1454,7 @@
   case ARM::t2BR_JT: {
     // Lower and emit the instruction itself, then the jump table following it.
     MCInst TmpInst;
-    TmpInst.setOpcode(ARM::tMOVgpr2gpr);
+    TmpInst.setOpcode(ARM::tMOVr);
     TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
     TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
     // Add predicate operands.
@@ -1494,7 +1503,7 @@
     // mov pc, target
     MCInst TmpInst;
     unsigned Opc = MI->getOpcode() == ARM::BR_JTr ?
-      ARM::MOVr : ARM::tMOVgpr2gpr;
+      ARM::MOVr : ARM::tMOVr;
     TmpInst.setOpcode(Opc);
     TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
     TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
@@ -1507,7 +1516,7 @@
     OutStreamer.EmitInstruction(TmpInst);
 
     // Make sure the Thumb jump table is 4-byte aligned.
-    if (Opc == ARM::tMOVgpr2gpr)
+    if (Opc == ARM::tMOVr)
       EmitAlignment(2);
 
     // Output the data for the jump table itself
@@ -1599,11 +1608,12 @@
     MCSymbol *Label = GetARMSJLJEHLabel();
     {
       MCInst TmpInst;
-      TmpInst.setOpcode(ARM::tMOVgpr2tgpr);
+      TmpInst.setOpcode(ARM::tMOVr);
       TmpInst.addOperand(MCOperand::CreateReg(ValReg));
       TmpInst.addOperand(MCOperand::CreateReg(ARM::PC));
-      // 's' bit operand
-      TmpInst.addOperand(MCOperand::CreateReg(ARM::CPSR));
+      // Predicate.
+      TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL));
+      TmpInst.addOperand(MCOperand::CreateReg(0));
       OutStreamer.AddComment("eh_setjmp begin");
       OutStreamer.EmitInstruction(TmpInst);
     }
@@ -1817,7 +1827,7 @@
     }
     {
       MCInst TmpInst;
-      TmpInst.setOpcode(ARM::tMOVtgpr2gpr);
+      TmpInst.setOpcode(ARM::tMOVr);
       TmpInst.addOperand(MCOperand::CreateReg(ARM::SP));
       TmpInst.addOperand(MCOperand::CreateReg(ScratchReg));
       // Predicate.
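A recurring change in this file is that the special-purpose Thumb copies (tMOVgpr2gpr, tMOVtgpr2gpr, tMOVgpr2tgpr) are replaced by the plain, predicable tMOVr, so every lowered move now carries the two explicit predicate operands. The pattern, pulled out as a sketch (illustrative only; register choices are arbitrary and it is written as it would sit inside ARMAsmPrinter::EmitInstruction):

// Illustrative only: a predicated tMOVr as built in the sequences above.
MCInst Mov;
Mov.setOpcode(ARM::tMOVr);
Mov.addOperand(MCOperand::CreateReg(ARM::R0));    // destination (example)
Mov.addOperand(MCOperand::CreateReg(ARM::R1));    // source (example)
Mov.addOperand(MCOperand::CreateImm(ARMCC::AL));  // predicate: always execute
Mov.addOperand(MCOperand::CreateReg(0));          // no predicate register
OutStreamer.EmitInstruction(Mov);                 // emit via the AsmPrinter's streamer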

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInfo.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInfo.h Sat Jul  2 22:28:07 2011
@@ -25,11 +25,13 @@
 // Defines symbolic names for ARM registers.  This defines a mapping from
 // register name to register number.
 //
-#include "ARMGenRegisterNames.inc"
+#define GET_REGINFO_ENUM
+#include "ARMGenRegisterInfo.inc"
 
 // Defines symbolic names for the ARM instructions.
 //
-#include "ARMGenInstrNames.inc"
+#define GET_INSTRINFO_ENUM
+#include "ARMGenInstrInfo.inc"
 
 namespace llvm {
 

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.cpp Sat Jul  2 22:28:07 2011
@@ -18,7 +18,6 @@
 #include "ARMHazardRecognizer.h"
 #include "ARMMachineFunctionInfo.h"
 #include "ARMRegisterInfo.h"
-#include "ARMGenInstrInfo.inc"
 #include "llvm/Constants.h"
 #include "llvm/Function.h"
 #include "llvm/GlobalValue.h"
@@ -35,6 +34,11 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/ADT/STLExtras.h"
+
+#define GET_INSTRINFO_MC_DESC
+#define GET_INSTRINFO_CTOR
+#include "ARMGenInstrInfo.inc"
+
 using namespace llvm;
 
 static cl::opt<bool>
@@ -74,7 +78,7 @@
 };
 
 ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
-  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
+  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
     Subtarget(STI) {
   for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
     if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
@@ -136,9 +140,9 @@
   MachineInstr *UpdateMI = NULL;
   MachineInstr *MemMI = NULL;
   unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
-  const TargetInstrDesc &TID = MI->getDesc();
-  unsigned NumOps = TID.getNumOperands();
-  bool isLoad = !TID.mayStore();
+  const MCInstrDesc &MCID = MI->getDesc();
+  unsigned NumOps = MCID.getNumOperands();
+  bool isLoad = !MCID.mayStore();
   const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
   const MachineOperand &Base = MI->getOperand(2);
   const MachineOperand &Offset = MI->getOperand(NumOps-3);
@@ -475,8 +479,8 @@
 bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                     std::vector<MachineOperand> &Pred) const {
   // FIXME: This confuses implicit_def with optional CPSR def.
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.getImplicitDefs() && !MCID.hasOptionalDef())
     return false;
 
   bool Found = false;
@@ -495,11 +499,11 @@
 /// By default, this returns true for every instruction with a
 /// PredicateOperand.
 bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.isPredicable())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.isPredicable())
     return false;
 
-  if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
+  if ((MCID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
     ARMFunctionInfo *AFI =
       MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
     return AFI->isThumb2Function();
@@ -525,8 +529,8 @@
   const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
 
   // Basic size info comes from the TSFlags field.
-  const TargetInstrDesc &TID = MI->getDesc();
-  uint64_t TSFlags = TID.TSFlags;
+  const MCInstrDesc &MCID = MI->getDesc();
+  uint64_t TSFlags = MCID.TSFlags;
 
   unsigned Opc = MI->getOpcode();
   switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
@@ -588,9 +592,9 @@
       // entry is one byte; TBH two byte each.
       unsigned EntrySize = (Opc == ARM::t2TBB_JT)
         ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
-      unsigned NumOps = TID.getNumOperands();
+      unsigned NumOps = MCID.getNumOperands();
       MachineOperand JTOP =
-        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
+        MI->getOperand(NumOps - (MCID.isPredicable() ? 3 : 2));
       unsigned JTI = JTOP.getIndex();
       const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
       assert(MJTI != 0);
@@ -788,7 +792,7 @@
     break;
   case ARM::STRi12:
   case ARM::t2STRi12:
-  case ARM::tSpill:
+  case ARM::tSTRspi:
   case ARM::VSTRD:
   case ARM::VSTRS:
     if (MI->getOperand(1).isFI() &&
@@ -923,7 +927,7 @@
     break;
   case ARM::LDRi12:
   case ARM::t2LDRi12:
-  case ARM::tRestore:
+  case ARM::tLDRspi:
   case ARM::VLDRD:
   case ARM::VLDRS:
     if (MI->getOperand(1).isFI() &&
@@ -1363,7 +1367,7 @@
                                 unsigned FrameReg, int &Offset,
                                 const ARMBaseInstrInfo &TII) {
   unsigned Opcode = MI.getOpcode();
-  const TargetInstrDesc &Desc = MI.getDesc();
+  const MCInstrDesc &Desc = MI.getDesc();
   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
   bool isSub = false;
 
@@ -1803,7 +1807,7 @@
   if (!ItinData || ItinData->isEmpty())
     return 1;
 
-  const TargetInstrDesc &Desc = MI->getDesc();
+  const MCInstrDesc &Desc = MI->getDesc();
   unsigned Class = Desc.getSchedClass();
   unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
   if (UOps)
@@ -1906,10 +1910,10 @@
 
 int
 ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
-                                  const TargetInstrDesc &DefTID,
+                                  const MCInstrDesc &DefMCID,
                                   unsigned DefClass,
                                   unsigned DefIdx, unsigned DefAlign) const {
-  int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
+  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
   if (RegNo <= 0)
     // Def is the address writeback.
     return ItinData->getOperandCycle(DefClass, DefIdx);
@@ -1924,7 +1928,7 @@
     DefCycle = RegNo;
     bool isSLoad = false;
 
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::VLDMSIA:
     case ARM::VLDMSIA_UPD:
@@ -1947,10 +1951,10 @@
 
 int
 ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
-                                 const TargetInstrDesc &DefTID,
+                                 const MCInstrDesc &DefMCID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
-  int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1;
+  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
   if (RegNo <= 0)
     // Def is the address writeback.
     return ItinData->getOperandCycle(DefClass, DefIdx);
@@ -1982,10 +1986,10 @@
 
 int
 ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
-                                  const TargetInstrDesc &UseTID,
+                                  const MCInstrDesc &UseMCID,
                                   unsigned UseClass,
                                   unsigned UseIdx, unsigned UseAlign) const {
-  int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
+  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
   if (RegNo <= 0)
     return ItinData->getOperandCycle(UseClass, UseIdx);
 
@@ -1999,7 +2003,7 @@
     UseCycle = RegNo;
     bool isSStore = false;
 
-    switch (UseTID.getOpcode()) {
+    switch (UseMCID.getOpcode()) {
     default: break;
     case ARM::VSTMSIA:
     case ARM::VSTMSIA_UPD:
@@ -2022,10 +2026,10 @@
 
 int
 ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
-                                 const TargetInstrDesc &UseTID,
+                                 const MCInstrDesc &UseMCID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
-  int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1;
+  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
   if (RegNo <= 0)
     return ItinData->getOperandCycle(UseClass, UseIdx);
 
@@ -2051,14 +2055,14 @@
 
 int
 ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
-                                    const TargetInstrDesc &DefTID,
+                                    const MCInstrDesc &DefMCID,
                                     unsigned DefIdx, unsigned DefAlign,
-                                    const TargetInstrDesc &UseTID,
+                                    const MCInstrDesc &UseMCID,
                                     unsigned UseIdx, unsigned UseAlign) const {
-  unsigned DefClass = DefTID.getSchedClass();
-  unsigned UseClass = UseTID.getSchedClass();
+  unsigned DefClass = DefMCID.getSchedClass();
+  unsigned UseClass = UseMCID.getSchedClass();
 
-  if (DefIdx < DefTID.getNumDefs() && UseIdx < UseTID.getNumOperands())
+  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
     return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
 
   // This may be a def / use of a variable_ops instruction, the operand
@@ -2066,7 +2070,7 @@
   // figure it out.
   int DefCycle = -1;
   bool LdmBypass = false;
-  switch (DefTID.getOpcode()) {
+  switch (DefMCID.getOpcode()) {
   default:
     DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
     break;
@@ -2077,7 +2081,7 @@
   case ARM::VLDMSIA:
   case ARM::VLDMSIA_UPD:
   case ARM::VLDMSDB_UPD:
-    DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
+    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
     break;
 
   case ARM::LDMIA_RET:
@@ -2098,7 +2102,7 @@
   case ARM::t2LDMIA_UPD:
   case ARM::t2LDMDB_UPD:
     LdmBypass = 1;
-    DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign);
+    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
     break;
   }
 
@@ -2107,7 +2111,7 @@
     DefCycle = 2;
 
   int UseCycle = -1;
-  switch (UseTID.getOpcode()) {
+  switch (UseMCID.getOpcode()) {
   default:
     UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
     break;
@@ -2118,7 +2122,7 @@
   case ARM::VSTMSIA:
   case ARM::VSTMSIA_UPD:
   case ARM::VSTMSDB_UPD:
-    UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
+    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
     break;
 
   case ARM::STMIA:
@@ -2137,7 +2141,7 @@
   case ARM::t2STMDB:
   case ARM::t2STMIA_UPD:
   case ARM::t2STMDB_UPD:
-    UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign);
+    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
     break;
   }
 
@@ -2150,7 +2154,7 @@
     if (LdmBypass) {
       // It's a variable_ops instruction so we can't use DefIdx here. Just use
       // first def operand.
-      if (ItinData->hasPipelineForwarding(DefClass, DefTID.getNumOperands()-1,
+      if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
                                           UseClass, UseIdx))
         --UseCycle;
     } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
@@ -2170,11 +2174,11 @@
       DefMI->isRegSequence() || DefMI->isImplicitDef())
     return 1;
 
-  const TargetInstrDesc &DefTID = DefMI->getDesc();
+  const MCInstrDesc &DefMCID = DefMI->getDesc();
   if (!ItinData || ItinData->isEmpty())
-    return DefTID.mayLoad() ? 3 : 1;
+    return DefMCID.mayLoad() ? 3 : 1;
 
-  const TargetInstrDesc &UseTID = UseMI->getDesc();
+  const MCInstrDesc &UseMCID = UseMI->getDesc();
   const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
   if (DefMO.getReg() == ARM::CPSR) {
     if (DefMI->getOpcode() == ARM::FMSTAT) {
@@ -2183,7 +2187,7 @@
     }
 
     // CPSR set and branch can be paired in the same cycle.
-    if (UseTID.isBranch())
+    if (UseMCID.isBranch())
       return 0;
   }
 
@@ -2191,14 +2195,14 @@
     ? (*DefMI->memoperands_begin())->getAlignment() : 0;
   unsigned UseAlign = UseMI->hasOneMemOperand()
     ? (*UseMI->memoperands_begin())->getAlignment() : 0;
-  int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
-                                  UseTID, UseIdx, UseAlign);
+  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
+                                  UseMCID, UseIdx, UseAlign);
 
   if (Latency > 1 &&
       (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
     // variants are one cycle cheaper.
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::LDRrs:
     case ARM::LDRBrs: {
@@ -2223,7 +2227,7 @@
   }
 
   if (DefAlign < 8 && Subtarget.isCortexA9())
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::VLD1q8:
     case ARM::VLD1q16:
@@ -2327,37 +2331,37 @@
   if (!DefNode->isMachineOpcode())
     return 1;
 
-  const TargetInstrDesc &DefTID = get(DefNode->getMachineOpcode());
+  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
 
-  if (isZeroCost(DefTID.Opcode))
+  if (isZeroCost(DefMCID.Opcode))
     return 0;
 
   if (!ItinData || ItinData->isEmpty())
-    return DefTID.mayLoad() ? 3 : 1;
+    return DefMCID.mayLoad() ? 3 : 1;
 
   if (!UseNode->isMachineOpcode()) {
-    int Latency = ItinData->getOperandCycle(DefTID.getSchedClass(), DefIdx);
+    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
     if (Subtarget.isCortexA9())
       return Latency <= 2 ? 1 : Latency - 1;
     else
       return Latency <= 3 ? 1 : Latency - 2;
   }
 
-  const TargetInstrDesc &UseTID = get(UseNode->getMachineOpcode());
+  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
   const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
   unsigned DefAlign = !DefMN->memoperands_empty()
     ? (*DefMN->memoperands_begin())->getAlignment() : 0;
   const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
   unsigned UseAlign = !UseMN->memoperands_empty()
     ? (*UseMN->memoperands_begin())->getAlignment() : 0;
-  int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
-                                  UseTID, UseIdx, UseAlign);
+  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
+                                  UseMCID, UseIdx, UseAlign);
 
   if (Latency > 1 &&
       (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
     // variants are one cycle cheaper.
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::LDRrs:
     case ARM::LDRBrs: {
@@ -2384,7 +2388,7 @@
   }
 
   if (DefAlign < 8 && Subtarget.isCortexA9())
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::VLD1q8Pseudo:
     case ARM::VLD1q16Pseudo:
@@ -2503,10 +2507,10 @@
   if (!ItinData || ItinData->isEmpty())
     return 1;
 
-  const TargetInstrDesc &TID = MI->getDesc();
-  unsigned Class = TID.getSchedClass();
+  const MCInstrDesc &MCID = MI->getDesc();
+  unsigned Class = MCID.getSchedClass();
   unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
-  if (PredCost && TID.hasImplicitDefOfPhysReg(ARM::CPSR))
+  if (PredCost && MCID.hasImplicitDefOfPhysReg(ARM::CPSR))
     // When predicated, CPSR is an additional source operand for CPSR updating
     // instructions, this apparently increases their latencies.
     *PredCost = 1;

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseInstrInfo.h Sat Jul  2 22:28:07 2011
@@ -20,6 +20,9 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
 
+#define GET_INSTRINFO_HEADER
+#include "ARMGenInstrInfo.inc"
+
 namespace llvm {
   class ARMSubtarget;
   class ARMBaseRegisterInfo;
@@ -172,7 +175,7 @@
   };
 }
 
-class ARMBaseInstrInfo : public TargetInstrInfoImpl {
+class ARMBaseInstrInfo : public ARMGenInstrInfo {
   const ARMSubtarget &Subtarget;
 
 protected:
@@ -353,25 +356,25 @@
                         SDNode *UseNode, unsigned UseIdx) const;
 private:
   int getVLDMDefCycle(const InstrItineraryData *ItinData,
-                      const TargetInstrDesc &DefTID,
+                      const MCInstrDesc &DefMCID,
                       unsigned DefClass,
                       unsigned DefIdx, unsigned DefAlign) const;
   int getLDMDefCycle(const InstrItineraryData *ItinData,
-                     const TargetInstrDesc &DefTID,
+                     const MCInstrDesc &DefMCID,
                      unsigned DefClass,
                      unsigned DefIdx, unsigned DefAlign) const;
   int getVSTMUseCycle(const InstrItineraryData *ItinData,
-                      const TargetInstrDesc &UseTID,
+                      const MCInstrDesc &UseMCID,
                       unsigned UseClass,
                       unsigned UseIdx, unsigned UseAlign) const;
   int getSTMUseCycle(const InstrItineraryData *ItinData,
-                     const TargetInstrDesc &UseTID,
+                     const MCInstrDesc &UseMCID,
                      unsigned UseClass,
                      unsigned UseIdx, unsigned UseAlign) const;
   int getOperandLatency(const InstrItineraryData *ItinData,
-                        const TargetInstrDesc &DefTID,
+                        const MCInstrDesc &DefMCID,
                         unsigned DefIdx, unsigned DefAlign,
-                        const TargetInstrDesc &UseTID,
+                        const MCInstrDesc &UseMCID,
                         unsigned UseIdx, unsigned UseAlign) const;
 
   int getInstrLatency(const InstrItineraryData *ItinData,

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.cpp Sat Jul  2 22:28:07 2011
@@ -40,6 +40,10 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/CommandLine.h"
 
+#define GET_REGINFO_MC_DESC
+#define GET_REGINFO_TARGET_DESC
+#include "ARMGenRegisterInfo.inc"
+
 using namespace llvm;
 
 static cl::opt<bool>
@@ -54,8 +58,7 @@
 
 ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
                                          const ARMSubtarget &sti)
-  : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
-    TII(tii), STI(sti),
+  : ARMGenRegisterInfo(), TII(tii), STI(sti),
     FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
     BasePtr(ARM::R6) {
 }
@@ -955,7 +958,7 @@
 
 int64_t ARMBaseRegisterInfo::
 getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
-  const TargetInstrDesc &Desc = MI->getDesc();
+  const MCInstrDesc &Desc = MI->getDesc();
   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
   int64_t InstrOffs = 0;
   int Scale = 1;
@@ -1105,11 +1108,11 @@
   if (Ins != MBB->end())
     DL = Ins->getDebugLoc();
 
-  const TargetInstrDesc &TID = TII.get(ADDriOpc);
+  const MCInstrDesc &MCID = TII.get(ADDriOpc);
   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
-  MRI.constrainRegClass(BaseReg, TID.OpInfo[0].getRegClass(this));
+  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
 
-  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, TID, BaseReg)
+  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
     .addFrameIndex(FrameIdx).addImm(Offset);
 
   if (!AFI->isThumb1OnlyFunction())
@@ -1145,7 +1148,7 @@
 
 bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                              int64_t Offset) const {
-  const TargetInstrDesc &Desc = MI->getDesc();
+  const MCInstrDesc &Desc = MI->getDesc();
   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
   unsigned i = 0;
 
@@ -1281,11 +1284,5 @@
     }
     // Update the original instruction to use the scratch register.
     MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
-    if (MI.getOpcode() == ARM::t2ADDrSPi)
-      MI.setDesc(TII.get(ARM::t2ADDri));
-    else if (MI.getOpcode() == ARM::t2SUBrSPi)
-      MI.setDesc(TII.get(ARM::t2SUBri));
   }
 }
-
-#include "ARMGenRegisterInfo.inc"

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMBaseRegisterInfo.h Sat Jul  2 22:28:07 2011
@@ -16,7 +16,9 @@
 
 #include "ARM.h"
 #include "llvm/Target/TargetRegisterInfo.h"
-#include "ARMGenRegisterInfo.h.inc"
+
+#define GET_REGINFO_HEADER
+#include "ARMGenRegisterInfo.inc"
 
 namespace llvm {
   class ARMSubtarget;

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMCodeEmitter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMCodeEmitter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMCodeEmitter.cpp Sat Jul  2 22:28:07 2011
@@ -96,13 +96,13 @@
     void addPCLabel(unsigned LabelID);
     void emitPseudoInstruction(const MachineInstr &MI);
     unsigned getMachineSoRegOpValue(const MachineInstr &MI,
-                                    const TargetInstrDesc &TID,
+                                    const MCInstrDesc &MCID,
                                     const MachineOperand &MO,
                                     unsigned OpIdx);
 
     unsigned getMachineSoImmOpValue(unsigned SoImm);
     unsigned getAddrModeSBit(const MachineInstr &MI,
-                             const TargetInstrDesc &TID) const;
+                             const MCInstrDesc &MCID) const;
 
     void emitDataProcessingInstruction(const MachineInstr &MI,
                                        unsigned ImplicitRd = 0,
@@ -443,9 +443,9 @@
   else if (MO.isSymbol())
     emitExternalSymbolAddress(MO.getSymbolName(), ARM::reloc_arm_branch);
   else if (MO.isCPI()) {
-    const TargetInstrDesc &TID = MI.getDesc();
+    const MCInstrDesc &MCID = MI.getDesc();
     // For VFP load, the immediate offset is multiplied by 4.
-    unsigned Reloc =  ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm)
+    unsigned Reloc =  ((MCID.TSFlags & ARMII::FormMask) == ARMII::VFPLdStFrm)
       ? ARM::reloc_arm_vfp_cp_entry : ARM::reloc_arm_cp_entry;
     emitConstPoolAddress(MO.getIndex(), Reloc);
   } else if (MO.isJTI())
@@ -757,7 +757,7 @@
 void ARMCodeEmitter::emitLEApcrelJTInstruction(const MachineInstr &MI) {
   // It's basically add r, pc, (LJTI - $+8)
 
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Emit the 'add' instruction.
   unsigned Binary = 0x4 << 21;  // add: Insts{24-21} = 0b0100
@@ -766,7 +766,7 @@
   Binary |= II->getPredicate(&MI) << ARMII::CondShift;
 
   // Encode S bit if MI modifies CPSR.
-  Binary |= getAddrModeSBit(MI, TID);
+  Binary |= getAddrModeSBit(MI, MCID);
 
   // Encode Rd.
   Binary |= getMachineOpValue(MI, 0) << ARMII::RegRdShift;
@@ -912,7 +912,7 @@
 }
 
 unsigned ARMCodeEmitter::getMachineSoRegOpValue(const MachineInstr &MI,
-                                                const TargetInstrDesc &TID,
+                                                const MCInstrDesc &MCID,
                                                 const MachineOperand &MO,
                                                 unsigned OpIdx) {
   unsigned Binary = getMachineOpValue(MI, MO);
@@ -982,8 +982,8 @@
 }
 
 unsigned ARMCodeEmitter::getAddrModeSBit(const MachineInstr &MI,
-                                         const TargetInstrDesc &TID) const {
-  for (unsigned i = MI.getNumOperands(), e = TID.getNumOperands(); i >= e; --i){
+                                         const MCInstrDesc &MCID) const {
+  for (unsigned i = MI.getNumOperands(), e = MCID.getNumOperands(); i >= e; --i){
     const MachineOperand &MO = MI.getOperand(i-1);
     if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)
       return 1 << ARMII::S_BitShift;
@@ -994,7 +994,7 @@
 void ARMCodeEmitter::emitDataProcessingInstruction(const MachineInstr &MI,
                                                    unsigned ImplicitRd,
                                                    unsigned ImplicitRn) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1003,10 +1003,10 @@
   Binary |= II->getPredicate(&MI) << ARMII::CondShift;
 
   // Encode S bit if MI modifies CPSR.
-  Binary |= getAddrModeSBit(MI, TID);
+  Binary |= getAddrModeSBit(MI, MCID);
 
   // Encode register def if there is one.
-  unsigned NumDefs = TID.getNumDefs();
+  unsigned NumDefs = MCID.getNumDefs();
   unsigned OpIdx = 0;
   if (NumDefs)
     Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
@@ -1014,7 +1014,7 @@
     // Special handling for implicit use (e.g. PC).
     Binary |= (getARMRegisterNumbering(ImplicitRd) << ARMII::RegRdShift);
 
-  if (TID.Opcode == ARM::MOVi16) {
+  if (MCID.Opcode == ARM::MOVi16) {
       // Get immediate from MI.
       unsigned Lo16 = getMovi32Value(MI, MI.getOperand(OpIdx),
                       ARM::reloc_arm_movw);
@@ -1023,14 +1023,14 @@
       Binary |= ((Lo16 >> 12) & 0xF) << 16;
       emitWordLE(Binary);
       return;
-  } else if(TID.Opcode == ARM::MOVTi16) {
+  } else if(MCID.Opcode == ARM::MOVTi16) {
       unsigned Hi16 = (getMovi32Value(MI, MI.getOperand(OpIdx),
                        ARM::reloc_arm_movt) >> 16);
       Binary |= Hi16 & 0xFFF;
       Binary |= ((Hi16 >> 12) & 0xF) << 16;
       emitWordLE(Binary);
       return;
-  } else if ((TID.Opcode == ARM::BFC) || (TID.Opcode == ARM::BFI)) {
+  } else if ((MCID.Opcode == ARM::BFC) || (MCID.Opcode == ARM::BFI)) {
       uint32_t v = ~MI.getOperand(2).getImm();
       int32_t lsb = CountTrailingZeros_32(v);
       int32_t msb = (32 - CountLeadingZeros_32(v)) - 1;
@@ -1039,7 +1039,7 @@
       Binary |= (lsb & 0x1F) << 7;
       emitWordLE(Binary);
       return;
-  } else if ((TID.Opcode == ARM::UBFX) || (TID.Opcode == ARM::SBFX)) {
+  } else if ((MCID.Opcode == ARM::UBFX) || (MCID.Opcode == ARM::SBFX)) {
       // Encode Rn in Instr{0-3}
       Binary |= getMachineOpValue(MI, OpIdx++);
 
@@ -1054,11 +1054,11 @@
   }
 
   // If this is a two-address operand, skip it. e.g. MOVCCr operand 1.
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
 
   // Encode first non-shifter register operand if there is one.
-  bool isUnary = TID.TSFlags & ARMII::UnaryDP;
+  bool isUnary = MCID.TSFlags & ARMII::UnaryDP;
   if (!isUnary) {
     if (ImplicitRn)
       // Special handling for implicit use (e.g. PC).
@@ -1071,9 +1071,9 @@
 
   // Encode shifter operand.
   const MachineOperand &MO = MI.getOperand(OpIdx);
-  if ((TID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) {
+  if ((MCID.TSFlags & ARMII::FormMask) == ARMII::DPSoRegFrm) {
     // Encode SoReg.
-    emitWordLE(Binary | getMachineSoRegOpValue(MI, TID, MO, OpIdx));
+    emitWordLE(Binary | getMachineSoRegOpValue(MI, MCID, MO, OpIdx));
     return;
   }
 
@@ -1092,9 +1092,9 @@
 void ARMCodeEmitter::emitLoadStoreInstruction(const MachineInstr &MI,
                                               unsigned ImplicitRd,
                                               unsigned ImplicitRn) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  unsigned Form = TID.TSFlags & ARMII::FormMask;
-  bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0;
+  const MCInstrDesc &MCID = MI.getDesc();
+  unsigned Form = MCID.TSFlags & ARMII::FormMask;
+  bool IsPrePost = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1134,7 +1134,7 @@
     Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
 
   // If this is a two-address operand, skip it. e.g. LDR_PRE.
-  if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (!Skipped && MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
 
   const MachineOperand &MO2 = MI.getOperand(OpIdx);
@@ -1170,9 +1170,9 @@
 
 void ARMCodeEmitter::emitMiscLoadStoreInstruction(const MachineInstr &MI,
                                                   unsigned ImplicitRn) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  unsigned Form = TID.TSFlags & ARMII::FormMask;
-  bool IsPrePost = (TID.TSFlags & ARMII::IndexModeMask) != 0;
+  const MCInstrDesc &MCID = MI.getDesc();
+  unsigned Form = MCID.TSFlags & ARMII::FormMask;
+  bool IsPrePost = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1194,7 +1194,7 @@
   Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
 
   // Skip LDRD and STRD's second operand.
-  if (TID.Opcode == ARM::LDRD || TID.Opcode == ARM::STRD)
+  if (MCID.Opcode == ARM::LDRD || MCID.Opcode == ARM::STRD)
     ++OpIdx;
 
   // Set second operand
@@ -1205,7 +1205,7 @@
     Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRnShift;
 
   // If this is a two-address operand, skip it. e.g. LDRH_POST.
-  if (!Skipped && TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (!Skipped && MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
 
   const MachineOperand &MO2 = MI.getOperand(OpIdx);
@@ -1255,8 +1255,8 @@
 }
 
 void ARMCodeEmitter::emitLoadStoreMultipleInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  bool IsUpdating = (TID.TSFlags & ARMII::IndexModeMask) != 0;
+  const MCInstrDesc &MCID = MI.getDesc();
+  bool IsUpdating = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1295,7 +1295,7 @@
 }
 
 void ARMCodeEmitter::emitMulFrmInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1304,12 +1304,12 @@
   Binary |= II->getPredicate(&MI) << ARMII::CondShift;
 
   // Encode S bit if MI modifies CPSR.
-  Binary |= getAddrModeSBit(MI, TID);
+  Binary |= getAddrModeSBit(MI, MCID);
 
   // 32x32->64bit operations have two destination registers. The number
   // of register definitions will tell us if that's what we're dealing with.
   unsigned OpIdx = 0;
-  if (TID.getNumDefs() == 2)
+  if (MCID.getNumDefs() == 2)
     Binary |= getMachineOpValue (MI, OpIdx++) << ARMII::RegRdLoShift;
 
   // Encode Rd
@@ -1323,16 +1323,16 @@
 
   // Many multiple instructions (e.g. MLA) have three src operands. Encode
   // it as Rn (for multiply, that's in the same offset as RdLo.
-  if (TID.getNumOperands() > OpIdx &&
-      !TID.OpInfo[OpIdx].isPredicate() &&
-      !TID.OpInfo[OpIdx].isOptionalDef())
+  if (MCID.getNumOperands() > OpIdx &&
+      !MCID.OpInfo[OpIdx].isPredicate() &&
+      !MCID.OpInfo[OpIdx].isOptionalDef())
     Binary |= getMachineOpValue(MI, OpIdx) << ARMII::RegRdLoShift;
 
   emitWordLE(Binary);
 }
 
 void ARMCodeEmitter::emitExtendInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1361,15 +1361,15 @@
 
   // Encode rot imm (0, 8, 16, or 24) if it has a rotate immediate operand.
   if (MI.getOperand(OpIdx).isImm() &&
-      !TID.OpInfo[OpIdx].isPredicate() &&
-      !TID.OpInfo[OpIdx].isOptionalDef())
+      !MCID.OpInfo[OpIdx].isPredicate() &&
+      !MCID.OpInfo[OpIdx].isOptionalDef())
     Binary |= (getMachineOpValue(MI, OpIdx) / 8) << ARMII::ExtRotImmShift;
 
   emitWordLE(Binary);
 }
 
 void ARMCodeEmitter::emitMiscArithInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1378,7 +1378,7 @@
   Binary |= II->getPredicate(&MI) << ARMII::CondShift;
 
   // PKH instructions are finished at this point
-  if (TID.Opcode == ARM::PKHBT || TID.Opcode == ARM::PKHTB) {
+  if (MCID.Opcode == ARM::PKHBT || MCID.Opcode == ARM::PKHTB) {
     emitWordLE(Binary);
     return;
   }
@@ -1389,9 +1389,9 @@
   Binary |= getMachineOpValue(MI, OpIdx++) << ARMII::RegRdShift;
 
   const MachineOperand &MO = MI.getOperand(OpIdx++);
-  if (OpIdx == TID.getNumOperands() ||
-      TID.OpInfo[OpIdx].isPredicate() ||
-      TID.OpInfo[OpIdx].isOptionalDef()) {
+  if (OpIdx == MCID.getNumOperands() ||
+      MCID.OpInfo[OpIdx].isPredicate() ||
+      MCID.OpInfo[OpIdx].isOptionalDef()) {
     // Encode Rm and it's done.
     Binary |= getMachineOpValue(MI, MO);
     emitWordLE(Binary);
@@ -1406,7 +1406,7 @@
 
   // Encode shift_imm.
   unsigned ShiftAmt = MI.getOperand(OpIdx).getImm();
-  if (TID.Opcode == ARM::PKHTB) {
+  if (MCID.Opcode == ARM::PKHTB) {
     assert(ShiftAmt != 0 && "PKHTB shift_imm is 0!");
     if (ShiftAmt == 32)
       ShiftAmt = 0;
@@ -1418,7 +1418,7 @@
 }
 
 void ARMCodeEmitter::emitSaturateInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Part of binary is determined by TableGen.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1431,11 +1431,11 @@
 
   // Encode saturate bit position.
   unsigned Pos = MI.getOperand(1).getImm();
-  if (TID.Opcode == ARM::SSAT || TID.Opcode == ARM::SSAT16)
+  if (MCID.Opcode == ARM::SSAT || MCID.Opcode == ARM::SSAT16)
     Pos -= 1;
   assert((Pos < 16 || (Pos < 32 &&
-                       TID.Opcode != ARM::SSAT16 &&
-                       TID.Opcode != ARM::USAT16)) &&
+                       MCID.Opcode != ARM::SSAT16 &&
+                       MCID.Opcode != ARM::USAT16)) &&
          "saturate bit position out of range");
   Binary |= Pos << 16;
 
@@ -1443,7 +1443,7 @@
   Binary |= getMachineOpValue(MI, 2);
 
   // Encode shift_imm.
-  if (TID.getNumOperands() == 4) {
+  if (MCID.getNumOperands() == 4) {
     unsigned ShiftOp = MI.getOperand(3).getImm();
     ARM_AM::ShiftOpc Opc = ARM_AM::getSORegShOp(ShiftOp);
     if (Opc == ARM_AM::asr)
@@ -1459,9 +1459,9 @@
 }
 
 void ARMCodeEmitter::emitBranchInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
-  if (TID.Opcode == ARM::TPsoft) {
+  if (MCID.Opcode == ARM::TPsoft) {
     llvm_unreachable("ARM::TPsoft FIXME"); // FIXME
   }
 
@@ -1498,20 +1498,20 @@
 }
 
 void ARMCodeEmitter::emitMiscBranchInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Handle jump tables.
-  if (TID.Opcode == ARM::BR_JTr || TID.Opcode == ARM::BR_JTadd) {
+  if (MCID.Opcode == ARM::BR_JTr || MCID.Opcode == ARM::BR_JTadd) {
     // First emit a ldr pc, [] instruction.
     emitDataProcessingInstruction(MI, ARM::PC);
 
     // Then emit the inline jump table.
     unsigned JTIndex =
-      (TID.Opcode == ARM::BR_JTr)
+      (MCID.Opcode == ARM::BR_JTr)
       ? MI.getOperand(1).getIndex() : MI.getOperand(2).getIndex();
     emitInlineJumpTable(JTIndex);
     return;
-  } else if (TID.Opcode == ARM::BR_JTm) {
+  } else if (MCID.Opcode == ARM::BR_JTm) {
     // First emit a ldr pc, [] instruction.
     emitLoadStoreInstruction(MI, ARM::PC);
 
@@ -1526,7 +1526,7 @@
   // Set the conditional execution predicate
   Binary |= II->getPredicate(&MI) << ARMII::CondShift;
 
-  if (TID.Opcode == ARM::BX_RET || TID.Opcode == ARM::MOVPCLR)
+  if (MCID.Opcode == ARM::BX_RET || MCID.Opcode == ARM::MOVPCLR)
     // The return register is LR.
     Binary |= getARMRegisterNumbering(ARM::LR);
   else
@@ -1579,7 +1579,7 @@
 }
 
 void ARMCodeEmitter::emitVFPArithInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1596,16 +1596,16 @@
   Binary |= encodeVFPRd(MI, OpIdx++);
 
   // If this is a two-address operand, skip it, e.g. FMACD.
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
 
   // Encode Dn / Sn.
-  if ((TID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm)
+  if ((MCID.TSFlags & ARMII::FormMask) == ARMII::VFPBinaryFrm)
     Binary |= encodeVFPRn(MI, OpIdx++);
 
-  if (OpIdx == TID.getNumOperands() ||
-      TID.OpInfo[OpIdx].isPredicate() ||
-      TID.OpInfo[OpIdx].isOptionalDef()) {
+  if (OpIdx == MCID.getNumOperands() ||
+      MCID.OpInfo[OpIdx].isPredicate() ||
+      MCID.OpInfo[OpIdx].isOptionalDef()) {
     // FCMPEZD etc. has only one operand.
     emitWordLE(Binary);
     return;
@@ -1618,8 +1618,8 @@
 }
 
 void ARMCodeEmitter::emitVFPConversionInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  unsigned Form = TID.TSFlags & ARMII::FormMask;
+  const MCInstrDesc &MCID = MI.getDesc();
+  unsigned Form = MCID.TSFlags & ARMII::FormMask;
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1709,8 +1709,8 @@
 
 void
 ARMCodeEmitter::emitVFPLoadStoreMultipleInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
-  bool IsUpdating = (TID.TSFlags & ARMII::IndexModeMask) != 0;
+  const MCInstrDesc &MCID = MI.getDesc();
+  bool IsUpdating = (MCID.TSFlags & ARMII::IndexModeMask) != 0;
 
   // Part of binary is determined by TableGn.
   unsigned Binary = getBinaryCodeForInstr(MI);
@@ -1795,8 +1795,8 @@
   unsigned Binary = getBinaryCodeForInstr(MI);
 
   unsigned RegTOpIdx, RegNOpIdx, LnOpIdx;
-  const TargetInstrDesc &TID = MI.getDesc();
-  if ((TID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) {
+  const MCInstrDesc &MCID = MI.getDesc();
+  if ((MCID.TSFlags & ARMII::FormMask) == ARMII::NGetLnFrm) {
     RegTOpIdx = 0;
     RegNOpIdx = 1;
     LnOpIdx = 2;
@@ -1863,12 +1863,12 @@
 }
 
 void ARMCodeEmitter::emitNEON2RegInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   unsigned Binary = getBinaryCodeForInstr(MI);
   // Destination register is encoded in Dd; source register in Dm.
   unsigned OpIdx = 0;
   Binary |= encodeNEONRd(MI, OpIdx++);
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
   Binary |= encodeNEONRm(MI, OpIdx);
   if (IsThumb)
@@ -1878,15 +1878,15 @@
 }
 
 void ARMCodeEmitter::emitNEON3RegInstruction(const MachineInstr &MI) {
-  const TargetInstrDesc &TID = MI.getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   unsigned Binary = getBinaryCodeForInstr(MI);
   // Destination register is encoded in Dd; source registers in Dn and Dm.
   unsigned OpIdx = 0;
   Binary |= encodeNEONRd(MI, OpIdx++);
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
   Binary |= encodeNEONRn(MI, OpIdx++);
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)
     ++OpIdx;
   Binary |= encodeNEONRm(MI, OpIdx);
   if (IsThumb)

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMConstantIslandPass.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMConstantIslandPass.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMConstantIslandPass.cpp Sat Jul  2 22:28:07 2011
@@ -1692,9 +1692,9 @@
   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
   for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
     MachineInstr *MI = T2JumpTables[i];
-    const TargetInstrDesc &TID = MI->getDesc();
-    unsigned NumOps = TID.getNumOperands();
-    unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
+    const MCInstrDesc &MCID = MI->getDesc();
+    unsigned NumOps = MCID.getNumOperands();
+    unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
     MachineOperand JTOP = MI->getOperand(JTOpIdx);
     unsigned JTI = JTOP.getIndex();
     assert(JTI < JT.size());
@@ -1815,9 +1815,9 @@
   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
   for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
     MachineInstr *MI = T2JumpTables[i];
-    const TargetInstrDesc &TID = MI->getDesc();
-    unsigned NumOps = TID.getNumOperands();
-    unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
+    const MCInstrDesc &MCID = MI->getDesc();
+    unsigned NumOps = MCID.getNumOperands();
+    unsigned JTOpIdx = NumOps - (MCID.isPredicable() ? 3 : 2);
     MachineOperand JTOP = MI->getOperand(JTOpIdx);
     unsigned JTI = JTOP.getIndex();
     assert(JTI < JT.size());

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMExpandPseudoInsts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMExpandPseudoInsts.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMExpandPseudoInsts.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMExpandPseudoInsts.cpp Sat Jul  2 22:28:07 2011
@@ -68,7 +68,7 @@
 void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
                                      MachineInstrBuilder &UseMI,
                                      MachineInstrBuilder &DefMI) {
-  const TargetInstrDesc &Desc = OldMI.getDesc();
+  const MCInstrDesc &Desc = OldMI.getDesc();
   for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands();
        i != e; ++i) {
     const MachineOperand &MO = OldMI.getOperand(i);
@@ -727,8 +727,10 @@
       MI.eraseFromParent();
       return true;
     }
+    case ARM::t2MOVCCr:
     case ARM::MOVCCr: {
-      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVr),
+      unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVr : ARM::MOVr;
+      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
               MI.getOperand(1).getReg())
         .addReg(MI.getOperand(2).getReg(),
                 getKillRegState(MI.getOperand(2).isKill()))
@@ -764,8 +766,10 @@
       MI.eraseFromParent();
       return true;
     }
+    case ARM::t2MOVCCi:
     case ARM::MOVCCi: {
-      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi),
+      unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVi : ARM::MOVi;
+      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
               MI.getOperand(1).getReg())
         .addImm(MI.getOperand(2).getImm())
         .addImm(MI.getOperand(3).getImm()) // 'pred'
@@ -856,10 +860,11 @@
       MI.eraseFromParent();
       return true;
     }
+    case ARM::tTPsoft:
     case ARM::TPsoft: {
       MachineInstrBuilder MIB =
         BuildMI(MBB, MBBI, MI.getDebugLoc(),
-                TII->get(ARM::BL))
+                TII->get(Opcode == ARM::tTPsoft ? ARM::tBL : ARM::BL))
         .addExternalSymbol("__aeabi_read_tp", 0);
 
       MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFastISel.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFastISel.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFastISel.cpp Sat Jul  2 22:28:07 2011
@@ -219,8 +219,8 @@
 // we don't care about implicit defs here, just places we'll need to add a
 // default CCReg argument. Sets CPSR if we're setting CPSR instead of CCR.
 bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
-  const TargetInstrDesc &TID = MI->getDesc();
-  if (!TID.hasOptionalDef())
+  const MCInstrDesc &MCID = MI->getDesc();
+  if (!MCID.hasOptionalDef())
     return false;
 
   // Look to see if our OptionalDef is defining CPSR or CCR.
@@ -234,15 +234,15 @@
 }
 
 bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
-  const TargetInstrDesc &TID = MI->getDesc();
+  const MCInstrDesc &MCID = MI->getDesc();
 
   // If we're a thumb2 or not NEON function we were handled via isPredicable.
-  if ((TID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
+  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
        AFI->isThumb2Function())
     return false;
 
-  for (unsigned i = 0, e = TID.getNumOperands(); i != e; ++i)
-    if (TID.OpInfo[i].isPredicate())
+  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
+    if (MCID.OpInfo[i].isPredicate())
       return true;
 
   return false;
@@ -278,7 +278,7 @@
 unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                     const TargetRegisterClass* RC) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
   return ResultReg;
@@ -288,7 +288,7 @@
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -308,7 +308,7 @@
                                       unsigned Op0, bool Op0IsKill,
                                       unsigned Op1, bool Op1IsKill) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -331,7 +331,7 @@
                                        unsigned Op1, bool Op1IsKill,
                                        unsigned Op2, bool Op2IsKill) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -355,7 +355,7 @@
                                       unsigned Op0, bool Op0IsKill,
                                       uint64_t Imm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -377,7 +377,7 @@
                                       unsigned Op0, bool Op0IsKill,
                                       const ConstantFP *FPImm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -400,7 +400,7 @@
                                        unsigned Op1, bool Op1IsKill,
                                        uint64_t Imm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -423,7 +423,7 @@
                                      const TargetRegisterClass *RC,
                                      uint64_t Imm) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -442,7 +442,7 @@
                                       const TargetRegisterClass *RC,
                                       uint64_t Imm1, uint64_t Imm2) {
   unsigned ResultReg = createResultReg(RC);
-  const TargetInstrDesc &II = TII.get(MachineInstOpcode);
+  const MCInstrDesc &II = TII.get(MachineInstOpcode);
 
   if (II.getNumDefs() >= 1)
     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
@@ -1549,7 +1549,7 @@
   NumBytes = CCInfo.getNextStackOffset();
 
   // Issue CALLSEQ_START
-  unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode();
+  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(AdjStackDown))
                   .addImm(NumBytes));
@@ -1647,7 +1647,7 @@
                              const Instruction *I, CallingConv::ID CC,
                              unsigned &NumBytes) {
   // Issue CALLSEQ_END
-  unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode();
+  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                           TII.get(AdjStackUp))
                   .addImm(NumBytes).addImm(0));

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFrameLowering.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFrameLowering.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMFrameLowering.cpp Sat Jul  2 22:28:07 2011
@@ -268,14 +268,14 @@
       // bic r4, r4, MaxAlign
       // mov sp, r4
       // FIXME: It will be better just to find spare register here.
-      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
-        .addReg(ARM::SP, RegState::Kill);
+      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R4)
+        .addReg(ARM::SP, RegState::Kill));
       AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
                                           TII.get(ARM::t2BICri), ARM::R4)
                                   .addReg(ARM::R4, RegState::Kill)
                                   .addImm(MaxAlign-1)));
-      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
-        .addReg(ARM::R4, RegState::Kill);
+      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP)
+        .addReg(ARM::R4, RegState::Kill));
     }
 
     AFI->setShouldRestoreSPFromFP(true);
@@ -293,9 +293,9 @@
         .addReg(ARM::SP)
         .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
     else
-      BuildMI(MBB, MBBI, dl,
-              TII.get(ARM::tMOVgpr2gpr), RegInfo->getBaseRegister())
-        .addReg(ARM::SP);
+      AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
+                             RegInfo->getBaseRegister())
+        .addReg(ARM::SP));
   }
 
   // If the frame has variable sized objects then the epilogue must restore
@@ -364,8 +364,9 @@
                  "No scratch register to restore SP from FP!");
           emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::R4, FramePtr, -NumBytes,
                                  ARMCC::AL, 0, TII);
-          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
-            .addReg(ARM::R4);
+          AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
+                                 ARM::SP)
+            .addReg(ARM::R4));
         }
       } else {
         // Thumb2 or ARM.
@@ -373,8 +374,9 @@
           BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
             .addReg(FramePtr).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
         else
-          BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
-            .addReg(FramePtr);
+          AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr),
+                                 ARM::SP)
+            .addReg(FramePtr));
       }
     } else if (NumBytes)
       emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMGlobalMerge.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMGlobalMerge.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMGlobalMerge.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMGlobalMerge.cpp Sat Jul  2 22:28:07 2011
@@ -175,7 +175,9 @@
       continue;
 
     // Ignore fancy-aligned globals for now.
-    if (I->getAlignment() != 0)
+    unsigned Alignment = I->getAlignment();
+    unsigned AllocSize = TD->getTypeAllocSize(I->getType()->getElementType());
+    if (Alignment > AllocSize)
       continue;
 
     // Ignore all 'special' globals.
@@ -183,7 +185,7 @@
         I->getName().startswith(".llvm."))
       continue;
 
-    if (TD->getTypeAllocSize(I->getType()->getElementType()) < MaxOffset) {
+    if (AllocSize < MaxOffset) {
       const TargetLoweringObjectFile &TLOF = TLI->getObjFileLowering();
       if (TLOF.getKindForGlobal(I, TLI->getTargetMachine()).isBSSLocal())
         BSSGlobals.push_back(I);

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMHazardRecognizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMHazardRecognizer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMHazardRecognizer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMHazardRecognizer.cpp Sat Jul  2 22:28:07 2011
@@ -19,11 +19,11 @@
 static bool hasRAWHazard(MachineInstr *DefMI, MachineInstr *MI,
                          const TargetRegisterInfo &TRI) {
   // FIXME: Detect integer instructions properly.
-  const TargetInstrDesc &TID = MI->getDesc();
-  unsigned Domain = TID.TSFlags & ARMII::DomainMask;
-  if (TID.mayStore())
+  const MCInstrDesc &MCID = MI->getDesc();
+  unsigned Domain = MCID.TSFlags & ARMII::DomainMask;
+  if (MCID.mayStore())
     return false;
-  unsigned Opcode = TID.getOpcode();
+  unsigned Opcode = MCID.getOpcode();
   if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
     return false;
   if ((Domain & ARMII::DomainVFP) || (Domain & ARMII::DomainNEON))
@@ -43,15 +43,15 @@
 
     // Look for special VMLA / VMLS hazards. A VMUL / VADD / VSUB following
     // a VMLA / VMLS will cause 4 cycle stall.
-    const TargetInstrDesc &TID = MI->getDesc();
-    if (LastMI && (TID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
+    const MCInstrDesc &MCID = MI->getDesc();
+    if (LastMI && (MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainGeneral) {
       MachineInstr *DefMI = LastMI;
-      const TargetInstrDesc &LastTID = LastMI->getDesc();
+      const MCInstrDesc &LastMCID = LastMI->getDesc();
       // Skip over one non-VFP / NEON instruction.
-      if (!LastTID.isBarrier() &&
+      if (!LastMCID.isBarrier() &&
           // On A9, AGU and NEON/FPU are muxed.
-          !(STI.isCortexA9() && (LastTID.mayLoad() || LastTID.mayStore())) &&
-          (LastTID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
+          !(STI.isCortexA9() && (LastMCID.mayLoad() || LastMCID.mayStore())) &&
+          (LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) {
         MachineBasicBlock::iterator I = LastMI;
         if (I != LastMI->getParent()->begin()) {
           I = llvm::prior(I);

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelDAGToDAG.cpp Sat Jul  2 22:28:07 2011
@@ -329,10 +329,10 @@
   if (Use->getOpcode() == ISD::CopyToReg)
     return true;
   if (Use->isMachineOpcode()) {
-    const TargetInstrDesc &TID = TII->get(Use->getMachineOpcode());
-    if (TID.mayStore())
+    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
+    if (MCID.mayStore())
       return true;
-    unsigned Opcode = TID.getOpcode();
+    unsigned Opcode = MCID.getOpcode();
     if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
       return true;
     // vmlx feeding into another vmlx. We actually want to unfold

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.cpp Sat Jul  2 22:28:07 2011
@@ -506,6 +506,9 @@
     setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
     setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
     setTargetDAGCombine(ISD::STORE);
+    setTargetDAGCombine(ISD::FP_TO_SINT);
+    setTargetDAGCombine(ISD::FP_TO_UINT);
+    setTargetDAGCombine(ISD::FDIV);
   }
 
   computeRegisterProperties();
@@ -538,7 +541,8 @@
     setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
     setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
   }
-  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops())
+  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
+      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
     setOperationAction(ISD::MULHS, MVT::i32, Expand);
 
   setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
@@ -974,12 +978,12 @@
   // Load are scheduled for latency even if there instruction itinerary
   // is not available.
   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
-  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
+  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
 
-  if (TID.getNumDefs() == 0)
+  if (MCID.getNumDefs() == 0)
     return Sched::RegPressure;
   if (!Itins->isEmpty() &&
-      Itins->getOperandCycle(TID.getSchedClass(), 0) > 2)
+      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
     return Sched::Latency;
 
   return Sched::RegPressure;
@@ -5523,7 +5527,7 @@
   return SDValue();
 }
 
-// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction 
+// AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction
 // (only after legalization).
 static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                  TargetLowering::DAGCombinerInfo &DCI,
@@ -5554,25 +5558,25 @@
   SDNode *V = Vec.getNode();
   unsigned nextIndex = 0;
 
-  // For each operands to the ADD which are BUILD_VECTORs, 
+  // For each operands to the ADD which are BUILD_VECTORs,
   // check to see if each of their operands are an EXTRACT_VECTOR with
   // the same vector and appropriate index.
   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
-      
+
       SDValue ExtVec0 = N0->getOperand(i);
       SDValue ExtVec1 = N1->getOperand(i);
-      
+
       // First operand is the vector, verify its the same.
       if (V != ExtVec0->getOperand(0).getNode() ||
           V != ExtVec1->getOperand(0).getNode())
         return SDValue();
-      
+
       // Second is the constant, verify its correct.
       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
-      
+
       // For the constant, we want to see all the even or all the odd.
       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
           || C1->getZExtValue() != nextIndex+1)
@@ -5580,7 +5584,7 @@
 
       // Increment index.
       nextIndex+=2;
-    } else 
+    } else
       return SDValue();
   }
 
@@ -5595,7 +5599,7 @@
 
   // Input is the vector.
   Ops.push_back(Vec);
-  
+
   // Get widened type and narrowed type.
   MVT widenType;
   unsigned numElem = VT.getVectorNumElements();
@@ -5624,7 +5628,7 @@
   SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
   if (Result.getNode())
     return Result;
-  
+
   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
   if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
     SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
@@ -6479,7 +6483,105 @@
   return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
 }
 
-/// getVShiftImm - Check if this is a valid build_vector for the immediate
+// isConstVecPow2 - Return true if each vector element is a power of 2, all
+// elements are the same constant, C, and Log2(C) ranges from 1 to 32.
+static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C)
+{
+  integerPart cN;
+  integerPart c0 = 0;
+  for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements();
+       I != E; I++) {
+    ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I));
+    if (!C)
+      return false;
+
+    bool isExact;
+    APFloat APF = C->getValueAPF();
+    if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact)
+        != APFloat::opOK || !isExact)
+      return false;
+
+    c0 = (I == 0) ? cN : c0;
+    if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32)
+      return false;
+  }
+  C = c0;
+  return true;
+}
+
+/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
+/// can replace combinations of VMUL and VCVT (floating-point to integer)
+/// when the VMUL has a constant operand that is a power of 2.
+///
+/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
+///  vmul.f32        d16, d17, d16
+///  vcvt.s32.f32    d16, d16
+/// becomes:
+///  vcvt.s32.f32    d16, d16, #3
+static SDValue PerformVCVTCombine(SDNode *N,
+                                  TargetLowering::DAGCombinerInfo &DCI,
+                                  const ARMSubtarget *Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue Op = N->getOperand(0);
+
+  if (!Subtarget->hasNEON() || !Op.getValueType().isVector() ||
+      Op.getOpcode() != ISD::FMUL)
+    return SDValue();
+
+  uint64_t C;
+  SDValue N0 = Op->getOperand(0);
+  SDValue ConstVec = Op->getOperand(1);
+  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
+
+  if (ConstVec.getOpcode() != ISD::BUILD_VECTOR ||
+      !isConstVecPow2(ConstVec, isSigned, C))
+    return SDValue();
+
+  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
+    Intrinsic::arm_neon_vcvtfp2fxu;
+  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
+                     N->getValueType(0),
+                     DAG.getConstant(IntrinsicOpcode, MVT::i32), N0,
+                     DAG.getConstant(Log2_64(C), MVT::i32));
+}
+
+/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
+/// can replace combinations of VCVT (integer to floating-point) and VDIV
+/// when the VDIV has a constant operand that is a power of 2.
+///
+/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
+///  vcvt.f32.s32    d16, d16
+///  vdiv.f32        d16, d17, d16
+/// becomes:
+///  vcvt.f32.s32    d16, d16, #3
+static SDValue PerformVDIVCombine(SDNode *N,
+                                  TargetLowering::DAGCombinerInfo &DCI,
+                                  const ARMSubtarget *Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue Op = N->getOperand(0);
+  unsigned OpOpcode = Op.getNode()->getOpcode();
+
+  if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() ||
+      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
+    return SDValue();
+
+  uint64_t C;
+  SDValue ConstVec = N->getOperand(1);
+  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
+
+  if (ConstVec.getOpcode() != ISD::BUILD_VECTOR ||
+      !isConstVecPow2(ConstVec, isSigned, C))
+    return SDValue();
+
+  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
+    Intrinsic::arm_neon_vcvtfxu2fp;
+  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(),
+                     Op.getValueType(),
+                     DAG.getConstant(IntrinsicOpcode, MVT::i32),
+                     Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32));
+}
+
+/// getVShiftImm - Check if this is a valid build_vector for the immediate
 /// operand of a vector shift operation, where all the elements of the
 /// build_vector must have the same constant integer value.
 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
@@ -6868,6 +6970,9 @@
   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
+  case ISD::FP_TO_SINT:
+  case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget);
+  case ISD::FDIV:       return PerformVDIVCombine(N, DCI, Subtarget);
   case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
   case ISD::SHL:
   case ISD::SRA:
@@ -7378,10 +7483,17 @@
     default:  break;
     case 'l': return C_RegisterClass;
     case 'w': return C_RegisterClass;
+    case 'h': return C_RegisterClass;
+    case 'x': return C_RegisterClass;
+    case 't': return C_RegisterClass;
+    case 'j': return C_Other; // Constant for movw.
+    }
+  } else if (Constraint.size() == 2) {
+    switch (Constraint[0]) {
+    default: break;
+    // All 'U+' constraints are addresses.
+    case 'U': return C_Memory;
     }
-  } else {
-    if (Constraint == "Uv")
-      return C_Memory;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -7420,26 +7532,43 @@
   return weight;
 }
 
-std::pair<unsigned, const TargetRegisterClass*>
+typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
+RCPair
 ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                 EVT VT) const {
   if (Constraint.size() == 1) {
     // GCC ARM Constraint Letters
     switch (Constraint[0]) {
-    case 'l':
+    case 'l': // Low regs or general regs.
       if (Subtarget->isThumb())
-        return std::make_pair(0U, ARM::tGPRRegisterClass);
+        return RCPair(0U, ARM::tGPRRegisterClass);
       else
-        return std::make_pair(0U, ARM::GPRRegisterClass);
+        return RCPair(0U, ARM::GPRRegisterClass);
+    case 'h': // High regs or no regs.
+      if (Subtarget->isThumb())
+	return RCPair(0U, ARM::hGPRRegisterClass);
+      break;
     case 'r':
-      return std::make_pair(0U, ARM::GPRRegisterClass);
+      return RCPair(0U, ARM::GPRRegisterClass);
     case 'w':
       if (VT == MVT::f32)
-        return std::make_pair(0U, ARM::SPRRegisterClass);
+        return RCPair(0U, ARM::SPRRegisterClass);
+      if (VT.getSizeInBits() == 64)
+        return RCPair(0U, ARM::DPRRegisterClass);
+      if (VT.getSizeInBits() == 128)
+        return RCPair(0U, ARM::QPRRegisterClass);
+      break;
+    case 'x':
+      if (VT == MVT::f32)
+	return RCPair(0U, ARM::SPR_8RegisterClass);
       if (VT.getSizeInBits() == 64)
-        return std::make_pair(0U, ARM::DPRRegisterClass);
+	return RCPair(0U, ARM::DPR_8RegisterClass);
       if (VT.getSizeInBits() == 128)
-        return std::make_pair(0U, ARM::QPRRegisterClass);
+	return RCPair(0U, ARM::QPR_8RegisterClass);
+      break;
+    case 't':
+      if (VT == MVT::f32)
+	return RCPair(0U, ARM::SPRRegisterClass);
       break;
     }
   }
@@ -7449,47 +7578,6 @@
   return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
 }
 
-std::vector<unsigned> ARMTargetLowering::
-getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                  EVT VT) const {
-  if (Constraint.size() != 1)
-    return std::vector<unsigned>();
-
-  switch (Constraint[0]) {      // GCC ARM Constraint Letters
-  default: break;
-  case 'l':
-    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
-                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
-                                 0);
-  case 'r':
-    return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
-                                 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
-                                 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
-                                 ARM::R12, ARM::LR, 0);
-  case 'w':
-    if (VT == MVT::f32)
-      return make_vector<unsigned>(ARM::S0, ARM::S1, ARM::S2, ARM::S3,
-                                   ARM::S4, ARM::S5, ARM::S6, ARM::S7,
-                                   ARM::S8, ARM::S9, ARM::S10, ARM::S11,
-                                   ARM::S12,ARM::S13,ARM::S14,ARM::S15,
-                                   ARM::S16,ARM::S17,ARM::S18,ARM::S19,
-                                   ARM::S20,ARM::S21,ARM::S22,ARM::S23,
-                                   ARM::S24,ARM::S25,ARM::S26,ARM::S27,
-                                   ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0);
-    if (VT.getSizeInBits() == 64)
-      return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3,
-                                   ARM::D4, ARM::D5, ARM::D6, ARM::D7,
-                                   ARM::D8, ARM::D9, ARM::D10,ARM::D11,
-                                   ARM::D12,ARM::D13,ARM::D14,ARM::D15, 0);
-    if (VT.getSizeInBits() == 128)
-      return make_vector<unsigned>(ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
-                                   ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7, 0);
-      break;
-  }
-
-  return std::vector<unsigned>();
-}
-
 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
 /// vector.  If it is invalid, don't add anything to Ops.
 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
@@ -7504,6 +7592,7 @@
   char ConstraintLetter = Constraint[0];
   switch (ConstraintLetter) {
   default: break;
+  case 'j':
   case 'I': case 'J': case 'K': case 'L':
   case 'M': case 'N': case 'O':
     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
@@ -7518,6 +7607,13 @@
       return;
 
     switch (ConstraintLetter) {
+      case 'j':
+	// Constant suitable for movw, must be between 0 and
+	// 65535.
+	if (Subtarget->hasV6T2Ops())
+	  if (CVal >= 0 && CVal <= 65535)
+	    break;
+	return;
       case 'I':
         if (Subtarget->isThumb1Only()) {
           // This must be a constant between 0 and 255, for ADD

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMISelLowering.h Sat Jul  2 22:28:07 2011
@@ -306,9 +306,6 @@
     std::pair<unsigned, const TargetRegisterClass*>
       getRegForInlineAsmConstraint(const std::string &Constraint,
                                    EVT VT) const;
-    std::vector<unsigned>
-    getRegClassForInlineAsmConstraint(const std::string &Constraint,
-                                      EVT VT) const;
 
     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
     /// vector.  If it is invalid, don't add anything to Ops. If hasMemory is

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.cpp Sat Jul  2 22:28:07 2011
@@ -14,7 +14,6 @@
 #include "ARMInstrInfo.h"
 #include "ARM.h"
 #include "ARMAddressingModes.h"
-#include "ARMGenInstrInfo.inc"
 #include "ARMMachineFunctionInfo.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/LiveVariables.h"

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrInfo.td Sat Jul  2 22:28:07 2011
@@ -164,6 +164,8 @@
 def HasDivide        : Predicate<"Subtarget->hasDivide()">, AssemblerPredicate;
 def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">,
                                  AssemblerPredicate;
+def HasThumb2DSP     : Predicate<"Subtarget->hasThumb2DSP()">,
+                                 AssemblerPredicate;
 def HasDB            : Predicate<"Subtarget->hasDataBarrier()">,
                                  AssemblerPredicate;
 def HasMP            : Predicate<"Subtarget->hasMPExtension()">,
@@ -676,7 +678,7 @@
 /// binop that produces a value.
 multiclass AsI1_bin_irs<bits<4> opcod, string opc,
                      InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
-                        PatFrag opnode, bit Commutable = 0> {
+                        PatFrag opnode, string baseOpc, bit Commutable = 0> {
   // The register-immediate version is re-materializable. This is useful
   // in particular for taking the address of a local.
   let isReMaterializable = 1 in {
@@ -716,6 +718,24 @@
     let Inst{15-12} = Rd;
     let Inst{11-0} = shift;
   }
+
+  // Assembly aliases for optional destination operand when it's the same
+  // as the source operand.
+  def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
+     (!cast<Instruction>(!strconcat(baseOpc, "ri")) GPR:$Rdn, GPR:$Rdn,
+                                                    so_imm:$imm, pred:$p,
+                                                    cc_out:$s)>,
+     Requires<[IsARM]>;
+  def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $Rm"),
+     (!cast<Instruction>(!strconcat(baseOpc, "rr")) GPR:$Rdn, GPR:$Rdn,
+                                                    GPR:$Rm, pred:$p,
+                                                    cc_out:$s)>,
+     Requires<[IsARM]>;
+  def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $shift"),
+     (!cast<Instruction>(!strconcat(baseOpc, "rs")) GPR:$Rdn, GPR:$Rdn,
+                                                    so_reg:$shift, pred:$p,
+                                                    cc_out:$s)>,
+     Requires<[IsARM]>;
 }
 
 /// AI1_bin_s_irs - Similar to AsI1_bin_irs except it sets the 's' bit so the
@@ -1988,6 +2008,8 @@
 } // neverHasSideEffects
 
 // Load / Store Multiple Mnemonic Aliases
+def : MnemonicAlias<"ldmfd", "ldmia">;
+def : MnemonicAlias<"stmfd", "stmdb">;
 def : MnemonicAlias<"ldm", "ldmia">;
 def : MnemonicAlias<"stm", "stmia">;
 
@@ -2205,10 +2227,10 @@
 
 defm ADD  : AsI1_bin_irs<0b0100, "add",
                          IIC_iALUi, IIC_iALUr, IIC_iALUsr,
-                         BinOpFrag<(add  node:$LHS, node:$RHS)>, 1>;
+                         BinOpFrag<(add  node:$LHS, node:$RHS)>, "ADD", 1>;
 defm SUB  : AsI1_bin_irs<0b0010, "sub",
                          IIC_iALUi, IIC_iALUr, IIC_iALUsr,
-                         BinOpFrag<(sub  node:$LHS, node:$RHS)>>;
+                         BinOpFrag<(sub  node:$LHS, node:$RHS)>, "SUB">;
 
 // ADD and SUB with 's' bit set.
 defm ADDS : AI1_bin_s_irs<0b0100, "adds",
@@ -2531,16 +2553,16 @@
 
 defm AND   : AsI1_bin_irs<0b0000, "and",
                           IIC_iBITi, IIC_iBITr, IIC_iBITsr,
-                          BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
+                          BinOpFrag<(and node:$LHS, node:$RHS)>, "AND", 1>;
 defm ORR   : AsI1_bin_irs<0b1100, "orr",
                           IIC_iBITi, IIC_iBITr, IIC_iBITsr,
-                          BinOpFrag<(or  node:$LHS, node:$RHS)>, 1>;
+                          BinOpFrag<(or  node:$LHS, node:$RHS)>, "ORR", 1>;
 defm EOR   : AsI1_bin_irs<0b0001, "eor",
                           IIC_iBITi, IIC_iBITr, IIC_iBITsr,
-                          BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
+                          BinOpFrag<(xor node:$LHS, node:$RHS)>, "EOR", 1>;
 defm BIC   : AsI1_bin_irs<0b1110, "bic",
                           IIC_iBITi, IIC_iBITr, IIC_iBITsr,
-                          BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
+                          BinOpFrag<(and node:$LHS, (not node:$RHS))>, "BIC">;
 
 def BFC    : I<(outs GPR:$Rd), (ins GPR:$src, bf_inv_mask_imm:$imm),
                AddrMode1, Size4Bytes, IndexModeNone, DPFrm, IIC_iUNAsi,
@@ -3008,41 +3030,22 @@
               IIC_iUNAr, "rev", "\t$Rd, $Rm",
               [(set GPR:$Rd, (bswap GPR:$Rm))]>, Requires<[IsARM, HasV6]>;
 
+let AddedComplexity = 5 in
 def REV16 : AMiscA1I<0b01101011, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
                IIC_iUNAr, "rev16", "\t$Rd, $Rm",
-               [(set GPR:$Rd,
-                   (or (and (srl GPR:$Rm, (i32 8)), 0xFF),
-                       (or (and (shl GPR:$Rm, (i32 8)), 0xFF00),
-                           (or (and (srl GPR:$Rm, (i32 8)), 0xFF0000),
-                               (and (shl GPR:$Rm, (i32 8)), 0xFF000000)))))]>,
+               [(set GPR:$Rd, (rotr (bswap GPR:$Rm), (i32 16)))]>,
                Requires<[IsARM, HasV6]>;
 
-def : ARMV6Pat<(or (or (or (and (srl GPR:$Rm, (i32 8)), 0xFF0000),
-                           (and (shl GPR:$Rm, (i32 8)), 0xFF000000)),
-                       (and (srl GPR:$Rm, (i32 8)), 0xFF)),
-                   (and (shl GPR:$Rm, (i32 8)), 0xFF00)),
-               (REV16 GPR:$Rm)>;
-
+let AddedComplexity = 5 in
 def REVSH : AMiscA1I<0b01101111, 0b1011, (outs GPR:$Rd), (ins GPR:$Rm),
                IIC_iUNAr, "revsh", "\t$Rd, $Rm",
-               [(set GPR:$Rd,
-                  (sext_inreg
-                    (or (srl GPR:$Rm, (i32 8)),
-                        (shl GPR:$Rm, (i32 8))), i16))]>,
+               [(set GPR:$Rd, (sra (bswap GPR:$Rm), (i32 16)))]>,
                Requires<[IsARM, HasV6]>;
 
-def : ARMV6Pat<(sext_inreg (or (srl (and GPR:$Rm, 0xFF00), (i32 8)),
-                               (shl GPR:$Rm, (i32 8))), i16),
-               (REVSH GPR:$Rm)>;
-
 def : ARMV6Pat<(or (sra (shl GPR:$Rm, (i32 24)), (i32 16)),
                    (and (srl GPR:$Rm, (i32 8)), 0xFF)),
                (REVSH GPR:$Rm)>;
 
-// Need the AddedComplexity or else MOVs + REV would be chosen.
-let AddedComplexity = 5 in
-def : ARMV6Pat<(sra (bswap GPR:$Rm), (i32 16)), (REVSH GPR:$Rm)>;
-
 def lsl_shift_imm : SDNodeXForm<imm, [{
   unsigned Sh = ARM_AM::getSORegOpc(ARM_AM::lsl, N->getZExtValue());
   return CurDAG->getTargetConstant(Sh, MVT::i32);

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb.td (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb.td Sat Jul  2 22:28:07 2011
@@ -34,9 +34,10 @@
   return (uint32_t)-N->getZExtValue() < 8;
 }], imm_neg_XFORM>;
 
-def imm0_255 : ImmLeaf<i32, [{
-  return Imm >= 0 && Imm < 256;
-}]>;
+def imm0_255_asmoperand : AsmOperandClass { let Name = "Imm0_255"; }
+def imm0_255 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= 0 && Imm < 256; }]> {
+  let ParserMatchClass = imm0_255_asmoperand;
+}
 def imm0_255_comp : PatLeaf<(i32 imm), [{
   return ~((uint32_t)N->getZExtValue()) < 256;
 }]>;
@@ -407,15 +408,8 @@
 // FIXME: remove when we have a way to marking a MI with these properties.
 let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
     hasExtraDefRegAllocReq = 1 in
-def tPOP_RET : T1I<(outs), (ins pred:$p, reglist:$regs, variable_ops),
-                   IIC_iPop_Br,
-                   "pop${p}\t$regs", []>,
-               T1Misc<{1,1,0,?,?,?,?}> {
-  // A8.6.121
-  bits<16> regs;
-  let Inst{8}   = regs{15};     // registers = P:'0000000':register_list
-  let Inst{7-0} = regs{7-0};
-}
+def tPOP_RET : tPseudoInst<(outs), (ins pred:$p, reglist:$regs, variable_ops),
+                           Size2Bytes, IIC_iPop_Br, []>;
 
 // All calls clobber the non-callee saved registers. SP is marked as a use to
 // prevent stack-pointer assignments that appear immediately before calls from
@@ -685,19 +679,6 @@
   let Inst{7-0} = addr;
 }
 
-// Special instruction for restore. It cannot clobber condition register
-// when it's expanded by eliminateCallFramePseudoInstr().
-let canFoldAsLoad = 1, mayLoad = 1, neverHasSideEffects = 1 in
-// FIXME: Pseudo for tLDRspi
-def tRestore : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoad_i,
-                     "ldr", "\t$dst, $addr", []>,
-               T1LdStSP<{1,?,?}> {
-  bits<3> Rt;
-  bits<8> addr;
-  let Inst{10-8} = Rt;
-  let Inst{7-0} = addr;
-}
-
 // Load tconstpool
 // FIXME: Use ldr.n to work around a Darwin assembler bug.
 let canFoldAsLoad = 1, isReMaterializable = 1 in
@@ -754,19 +735,6 @@
   let Inst{7-0} = addr;
 }
 
-let mayStore = 1, neverHasSideEffects = 1 in
-// Special instruction for spill. It cannot clobber condition register when it's
-// expanded by eliminateCallFramePseudoInstr().
-// FIXME: Pseudo for tSTRspi
-def tSpill : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStore_i,
-                  "str", "\t$src, $addr", []>,
-             T1LdStSP<{0,?,?}> {
-  bits<3> Rt;
-  bits<8> addr;
-  let Inst{10-8} = Rt;
-  let Inst{7-0} = addr;
-}
-
 //===----------------------------------------------------------------------===//
 //  Load / store multiple Instructions.
 //
@@ -1072,7 +1040,7 @@
 
 // Move register
 let isMoveImm = 1 in
-def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins i32imm:$imm8), IIC_iMOVi,
+def tMOVi8 : T1sI<(outs tGPR:$Rd), (ins imm0_255:$imm8), IIC_iMOVi,
                   "mov", "\t$Rd, $imm8",
                   [(set tGPR:$Rd, imm0_255:$imm8)]>,
              T1General<{1,0,0,?,?}> {
@@ -1083,18 +1051,18 @@
   let Inst{7-0}  = imm8;
 }
 
-// TODO: A7-73: MOV(2) - mov setting flag.
+// A7-73: MOV(2) - mov setting flag.
 
 let neverHasSideEffects = 1 in {
-// FIXME: Make this predicable.
-def tMOVr       : T1I<(outs tGPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
-                      "mov\t$Rd, $Rm", []>,
-                  T1Special<0b1000> {
+def tMOVr : Thumb1pI<(outs GPR:$Rd), (ins GPR:$Rm), AddrModeNone,
+                      Size2Bytes, IIC_iMOVr,
+                      "mov", "\t$Rd, $Rm", "", []>,
+                  T1Special<{1,0,?,?}> {
   // A8.6.97
   bits<4> Rd;
   bits<4> Rm;
-  // Bits {7-6} are encoded by the T1Special value.
-  let Inst{5-3} = Rm{2-0};
+  let Inst{7}   = Rd{3};
+  let Inst{6-3} = Rm;
   let Inst{2-0} = Rd{2-0};
 }
 let Defs = [CPSR] in
@@ -1107,39 +1075,6 @@
   let Inst{5-3}  = Rm;
   let Inst{2-0}  = Rd;
 }
-
-// FIXME: Make these predicable.
-def tMOVgpr2tgpr : T1I<(outs tGPR:$Rd), (ins GPR:$Rm), IIC_iMOVr,
-                       "mov\t$Rd, $Rm", []>,
-                   T1Special<{1,0,0,?}> {
-  // A8.6.97
-  bits<4> Rd;
-  bits<4> Rm;
-  // Bit {7} is encoded by the T1Special value.
-  let Inst{6-3} = Rm;
-  let Inst{2-0} = Rd{2-0};
-}
-def tMOVtgpr2gpr : T1I<(outs GPR:$Rd), (ins tGPR:$Rm), IIC_iMOVr,
-                       "mov\t$Rd, $Rm", []>,
-                   T1Special<{1,0,?,0}> {
-  // A8.6.97
-  bits<4> Rd;
-  bits<4> Rm;
-  // Bit {6} is encoded by the T1Special value.
-  let Inst{7}   = Rd{3};
-  let Inst{5-3} = Rm{2-0};
-  let Inst{2-0} = Rd{2-0};
-}
-def tMOVgpr2gpr  : T1I<(outs GPR:$Rd), (ins GPR:$Rm), IIC_iMOVr,
-                       "mov\t$Rd, $Rm", []>,
-                   T1Special<{1,0,?,?}> {
-  // A8.6.97
-  bits<4> Rd;
-  bits<4> Rm;
-  let Inst{7}   = Rd{3};
-  let Inst{6-3} = Rm;
-  let Inst{2-0} = Rd{2-0};
-}
 } // neverHasSideEffects
 
 // Multiply register
@@ -1176,31 +1111,16 @@
   T1pIMiscEncode<{1,0,1,0,0,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
                  IIC_iUNAr,
                  "rev16", "\t$Rd, $Rm",
-             [(set tGPR:$Rd,
-                   (or (and (srl tGPR:$Rm, (i32 8)), 0xFF),
-                       (or (and (shl tGPR:$Rm, (i32 8)), 0xFF00),
-                           (or (and (srl tGPR:$Rm, (i32 8)), 0xFF0000),
-                               (and (shl tGPR:$Rm, (i32 8)), 0xFF000000)))))]>,
+             [(set tGPR:$Rd, (rotr (bswap tGPR:$Rm), (i32 16)))]>,
                 Requires<[IsThumb, IsThumb1Only, HasV6]>;
 
 def tREVSH :                    // A8.6.136
   T1pIMiscEncode<{1,0,1,0,1,1,?}, (outs tGPR:$Rd), (ins tGPR:$Rm),
                  IIC_iUNAr,
                  "revsh", "\t$Rd, $Rm",
-                 [(set tGPR:$Rd,
-                       (sext_inreg
-                         (or (srl tGPR:$Rm, (i32 8)),
-                             (shl tGPR:$Rm, (i32 8))), i16))]>,
+                 [(set tGPR:$Rd, (sra (bswap tGPR:$Rm), (i32 16)))]>,
                  Requires<[IsThumb, IsThumb1Only, HasV6]>;
 
-def : T1Pat<(sext_inreg (or (srl (and tGPR:$Rm, 0xFF00), (i32 8)),
-                            (shl tGPR:$Rm, (i32 8))), i16),
-            (tREVSH tGPR:$Rm)>,
-      Requires<[IsThumb, IsThumb1Only, HasV6]>;
-
-def : T1Pat<(sra (bswap tGPR:$Rm), (i32 16)), (tREVSH tGPR:$Rm)>,
-      Requires<[IsThumb, IsThumb1Only, HasV6]>;
-
 // Rotate right register
 def tROR :                      // A8.6.139
   T1sItDPEncode<0b0111, (outs tGPR:$Rdn), (ins tGPR:$Rn, tGPR:$Rm),
@@ -1295,31 +1215,6 @@
               NoItinerary,
              [/*(set tGPR:$dst, (ARMcmov tGPR:$false, tGPR:$true, imm:$cc))*/]>;
 
-
-// 16-bit movcc in IT blocks for Thumb2.
-let neverHasSideEffects = 1 in {
-def tMOVCCr : T1pIt<(outs GPR:$Rdn), (ins GPR:$Rn, GPR:$Rm), IIC_iCMOVr,
-                    "mov", "\t$Rdn, $Rm", []>,
-              T1Special<{1,0,?,?}> {
-  bits<4> Rdn;
-  bits<4> Rm;
-  let Inst{7}   = Rdn{3};
-  let Inst{6-3} = Rm;
-  let Inst{2-0} = Rdn{2-0};
-}
-
-let isMoveImm = 1 in
-def tMOVCCi : T1pIt<(outs tGPR:$Rdn), (ins tGPR:$Rn, i32imm:$Rm), IIC_iCMOVi,
-                    "mov", "\t$Rdn, $Rm", []>,
-              T1General<{1,0,0,?,?}> {
-  bits<3> Rdn;
-  bits<8> Rm;
-  let Inst{10-8} = Rdn;
-  let Inst{7-0}  = Rm;
-}
-
-} // neverHasSideEffects
-
 // tLEApcrel - Load a pc-relative address into a register without offending the
 // assembler.
 
@@ -1439,13 +1334,11 @@
 //
 
 // __aeabi_read_tp preserves the registers r1-r3.
-let isCall = 1, Defs = [R0, LR], Uses = [SP] in
-def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br,
-                   "bl\t__aeabi_read_tp",
-                   [(set R0, ARMthread_pointer)]> {
-  // Encoding is 0xf7fffffe.
-  let Inst = 0xf7fffffe;
-}
+// This is a pseudo inst so that we can get the encoding right,
+// complete with fixup for the aeabi_read_tp function.
+let isCall = 1, Defs = [R0, R12, LR, CPSR], Uses = [SP] in
+def tTPsoft : tPseudoInst<(outs), (ins), Size4Bytes, IIC_Br,
+                          [(set R0, ARMthread_pointer)]>;
 
 //===----------------------------------------------------------------------===//
 // SJLJ Exception handling intrinsics

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb2.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb2.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb2.td (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrThumb2.td Sat Jul  2 22:28:07 2011
@@ -44,9 +44,11 @@
 // t2_so_imm - Match a 32-bit immediate operand, which is an
 // 8-bit immediate rotated by an arbitrary number of bits, or an 8-bit
 // immediate splatted into multiple bytes of the word.
+def t2_so_imm_asmoperand : AsmOperandClass { let Name = "T2SOImm"; }
 def t2_so_imm : Operand<i32>, ImmLeaf<i32, [{
     return ARM_AM::getT2SOImmVal(Imm) != -1;
   }]> {
+  let ParserMatchClass = t2_so_imm_asmoperand;
   let EncoderMethod = "getT2SOImmOpValue";
 }
 
@@ -463,7 +465,8 @@
 /// changed to modify CPSR.
 multiclass T2I_bin_irs<bits<4> opcod, string opc,
                      InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
-                       PatFrag opnode, bit Commutable = 0, string wide = ""> {
+                       PatFrag opnode, string baseOpc, bit Commutable = 0,
+                       string wide = ""> {
    // shifted imm
    def ri : T2sTwoRegImm<
                 (outs rGPR:$Rd), (ins rGPR:$Rn, t2_so_imm:$imm), iii,
@@ -495,14 +498,31 @@
      let Inst{26-25} = 0b01;
      let Inst{24-21} = opcod;
    }
+  // Assembly aliases for optional destination operand when it's the same
+  // as the source operand.
+  def : InstAlias<!strconcat(opc, "${s}${p} $Rdn, $imm"),
+     (!cast<Instruction>(!strconcat(baseOpc, "ri")) rGPR:$Rdn, rGPR:$Rdn,
+                                                    t2_so_imm:$imm, pred:$p,
+                                                    cc_out:$s)>,
+     Requires<[IsThumb2]>;
+  def : InstAlias<!strconcat(opc, "${s}${p}", wide, " $Rdn, $Rm"),
+     (!cast<Instruction>(!strconcat(baseOpc, "rr")) rGPR:$Rdn, rGPR:$Rdn,
+                                                    rGPR:$Rm, pred:$p,
+                                                    cc_out:$s)>,
+     Requires<[IsThumb2]>;
+  def : InstAlias<!strconcat(opc, "${s}${p}", wide, " $Rdn, $shift"),
+     (!cast<Instruction>(!strconcat(baseOpc, "rs")) rGPR:$Rdn, rGPR:$Rdn,
+                                                    t2_so_reg:$shift, pred:$p,
+                                                    cc_out:$s)>,
+     Requires<[IsThumb2]>;
 }
 
 /// T2I_bin_w_irs - Same as T2I_bin_irs except these operations need
-//  the ".w" prefix to indicate that they are wide.
+//  the ".w" suffix to indicate that they are wide.
 multiclass T2I_bin_w_irs<bits<4> opcod, string opc,
                      InstrItinClass iii, InstrItinClass iir, InstrItinClass iis,
-                         PatFrag opnode, bit Commutable = 0> :
-    T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, Commutable, ".w">;
+                         PatFrag opnode, string baseOpc, bit Commutable = 0> :
+    T2I_bin_irs<opcod, opc, iii, iir, iis, opnode, baseOpc, Commutable, ".w">;
 
 /// T2I_rbin_is - Same as T2I_bin_irs except the order of operands are
 /// reversed.  The 'rr' form is only defined for the disassembler; for codegen
@@ -1018,7 +1038,8 @@
 // supported yet.
 multiclass T2I_ext_rrot_sxtb16<bits<3> opcod, string opc> {
   def r     : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iEXTr,
-                  opc, "\t$Rd, $Rm", []> {
+                  opc, "\t$Rd, $Rm", []>,
+          Requires<[IsThumb2, HasT2ExtractPack]> {
      let Inst{31-27} = 0b11111;
      let Inst{26-23} = 0b0100;
      let Inst{22-20} = opcod;
@@ -1028,7 +1049,8 @@
      let Inst{5-4} = 0b00; // rotate
    }
   def r_rot : T2TwoReg<(outs rGPR:$Rd), (ins rGPR:$Rm, i32imm:$rot), IIC_iEXTr,
-                  opc, "\t$Rd, $Rm, ror $rot", []> {
+                  opc, "\t$Rd, $Rm, ror $rot", []>,
+          Requires<[IsThumb2, HasT2ExtractPack]> {
      let Inst{31-27} = 0b11111;
      let Inst{26-23} = 0b0100;
      let Inst{22-20} = opcod;
@@ -1149,86 +1171,6 @@
                                 []>;
 
 
-// FIXME: None of these add/sub SP special instructions should be necessary
-// at all for thumb2 since they use the same encodings as the generic
-// add/sub instructions. In thumb1 we need them since they have dedicated
-// encodings. At the least, they should be pseudo instructions.
-// ADD r, sp, {so_imm|i12}
-let isCodeGenOnly = 1 in {
-def t2ADDrSPi   : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm),
-                        IIC_iALUi, "add", ".w\t$Rd, $Rn, $imm", []> {
-  let Inst{31-27} = 0b11110;
-  let Inst{25} = 0;
-  let Inst{24-21} = 0b1000;
-  let Inst{15} = 0;
-}
-def t2ADDrSPi12 : T2TwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm),
-                       IIC_iALUi, "addw", "\t$Rd, $Rn, $imm", []> {
-  let Inst{31-27} = 0b11110;
-  let Inst{25-20} = 0b100000;
-  let Inst{15} = 0;
-}
-
-// ADD r, sp, so_reg
-def t2ADDrSPs   : T2sTwoRegShiftedReg<
-                        (outs GPR:$Rd), (ins GPR:$Rn, t2_so_reg:$ShiftedRm),
-                        IIC_iALUsi, "add", ".w\t$Rd, $Rn, $ShiftedRm", []> {
-  let Inst{31-27} = 0b11101;
-  let Inst{26-25} = 0b01;
-  let Inst{24-21} = 0b1000;
-  let Inst{15} = 0;
-}
-
-// SUB r, sp, {so_imm|i12}
-def t2SUBrSPi   : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_imm:$imm),
-                        IIC_iALUi, "sub", ".w\t$Rd, $Rn, $imm", []> {
-  let Inst{31-27} = 0b11110;
-  let Inst{25} = 0;
-  let Inst{24-21} = 0b1101;
-  let Inst{15} = 0;
-}
-def t2SUBrSPi12 : T2TwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, imm0_4095:$imm),
-                       IIC_iALUi, "subw", "\t$Rd, $Rn, $imm", []> {
-  let Inst{31-27} = 0b11110;
-  let Inst{25-20} = 0b101010;
-  let Inst{15} = 0;
-}
-
-// SUB r, sp, so_reg
-def t2SUBrSPs   : T2sTwoRegImm<(outs GPR:$Rd), (ins GPR:$Rn, t2_so_reg:$imm),
-                       IIC_iALUsi,
-                       "sub", "\t$Rd, $Rn, $imm", []> {
-  let Inst{31-27} = 0b11101;
-  let Inst{26-25} = 0b01;
-  let Inst{24-21} = 0b1101;
-  let Inst{19-16} = 0b1101; // Rn = sp
-  let Inst{15} = 0;
-}
-} // end isCodeGenOnly = 1
-
-// Signed and unsigned division on v7-M
-def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
-                 "sdiv", "\t$Rd, $Rn, $Rm",
-                 [(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>,
-                 Requires<[HasDivide, IsThumb2]> {
-  let Inst{31-27} = 0b11111;
-  let Inst{26-21} = 0b011100;
-  let Inst{20} = 0b1;
-  let Inst{15-12} = 0b1111;
-  let Inst{7-4} = 0b1111;
-}
-
-def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
-                 "udiv", "\t$Rd, $Rn, $Rm",
-                 [(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>,
-                 Requires<[HasDivide, IsThumb2]> {
-  let Inst{31-27} = 0b11111;
-  let Inst{26-21} = 0b011101;
-  let Inst{20} = 0b1;
-  let Inst{15-12} = 0b1111;
-  let Inst{7-4} = 0b1111;
-}
-
 //===----------------------------------------------------------------------===//
 //  Load / store Instructions.
 //
@@ -1668,6 +1610,10 @@
   let Inst{15} = 0;
 }
 
+def : InstAlias<"mov${s}${p} $Rd, $imm", (t2MOVi rGPR:$Rd, t2_so_imm:$imm,
+                                                 pred:$p, cc_out:$s)>,
+                Requires<[IsThumb2]>;
+
 let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in
 def t2MOVi16 : T2I<(outs rGPR:$Rd), (ins i32imm_hilo16:$imm), IIC_iMOVi,
                    "movw", "\t$Rd, $imm",
@@ -1835,7 +1781,8 @@
 // Select Bytes -- for disassembly only
 
 def t2SEL : T2ThreeReg<(outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm),
-                NoItinerary, "sel", "\t$Rd, $Rn, $Rm", []> {
+                NoItinerary, "sel", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-24} = 0b010;
   let Inst{23} = 0b1;
@@ -1851,7 +1798,8 @@
               list<dag> pat = [/* For disassembly only; pattern left blank */],
               dag iops = (ins rGPR:$Rn, rGPR:$Rm),
               string asm = "\t$Rd, $Rn, $Rm">
-  : T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, pat> {
+  : T2I<(outs rGPR:$Rd), iops, NoItinerary, opc, asm, pat>,
+    Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0101;
   let Inst{22-20} = op22_20;
@@ -1949,12 +1897,14 @@
 
 def t2USAD8   : T2ThreeReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
                                            (ins rGPR:$Rn, rGPR:$Rm),
-                        NoItinerary, "usad8", "\t$Rd, $Rn, $Rm", []> {
+                        NoItinerary, "usad8", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{15-12} = 0b1111;
 }
 def t2USADA8  : T2FourReg_mac<0, 0b111, 0b0000, (outs rGPR:$Rd),
                        (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), NoItinerary,
-                        "usada8", "\t$Rd, $Rn, $Rm, $Ra", []>;
+                        "usada8", "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 
 // Signed/Unsigned saturate -- for disassembly only
 
@@ -1987,7 +1937,8 @@
 def t2SSAT16: T2SatI<
                 (outs rGPR:$Rd), (ins ssat_imm:$sat_imm, rGPR:$Rn), NoItinerary,
                 "ssat16", "\t$Rd, $sat_imm, $Rn",
-                [/* For disassembly only; pattern left blank */]> {
+                [/* For disassembly only; pattern left blank */]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11110;
   let Inst{25-22} = 0b1100;
   let Inst{20} = 0;
@@ -2010,7 +1961,8 @@
 def t2USAT16: T2SatI<(outs rGPR:$dst), (ins i32imm:$sat_imm, rGPR:$Rn),
                      NoItinerary,
                      "usat16", "\t$dst, $sat_imm, $Rn",
-                     [/* For disassembly only; pattern left blank */]> {
+                     [/* For disassembly only; pattern left blank */]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11110;
   let Inst{25-22} = 0b1110;
   let Inst{20} = 0;
@@ -2086,17 +2038,18 @@
 
 defm t2AND  : T2I_bin_w_irs<0b0000, "and",
                             IIC_iBITi, IIC_iBITr, IIC_iBITsi,
-                            BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
+                            BinOpFrag<(and node:$LHS, node:$RHS)>, "t2AND", 1>;
 defm t2ORR  : T2I_bin_w_irs<0b0010, "orr",
                             IIC_iBITi, IIC_iBITr, IIC_iBITsi,
-                            BinOpFrag<(or  node:$LHS, node:$RHS)>, 1>;
+                            BinOpFrag<(or  node:$LHS, node:$RHS)>, "t2ORR", 1>;
 defm t2EOR  : T2I_bin_w_irs<0b0100, "eor",
                             IIC_iBITi, IIC_iBITr, IIC_iBITsi,
-                            BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
+                            BinOpFrag<(xor node:$LHS, node:$RHS)>, "t2EOR", 1>;
 
 defm t2BIC  : T2I_bin_w_irs<0b0001, "bic",
                             IIC_iBITi, IIC_iBITr, IIC_iBITsi,
-                            BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
+                            BinOpFrag<(and node:$LHS, (not node:$RHS))>,
+                            "t2BIC">;
 
 class T2BitFI<dag oops, dag iops, InstrItinClass itin,
               string opc, string asm, list<dag> pattern>
@@ -2196,7 +2149,8 @@
 
 defm t2ORN  : T2I_bin_irs<0b0011, "orn",
                           IIC_iBITi, IIC_iBITr, IIC_iBITsi,
-                          BinOpFrag<(or  node:$LHS, (not node:$RHS))>, 0, "">;
+                          BinOpFrag<(or  node:$LHS, (not node:$RHS))>,
+                          "t2ORN", 0, "">;
 
 // Prefer this over t2EORri ra, rb, -1 because mvn has a 16-bit version
 let AddedComplexity = 1 in
@@ -2279,7 +2233,8 @@
 def t2UMAAL : T2MulLong<0b110, 0b0110,
                   (outs rGPR:$RdLo, rGPR:$RdHi),
                   (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64,
-                  "umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>;
+                  "umaal", "\t$RdLo, $RdHi, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 } // neverHasSideEffects
 
 // Rounding variants of the below included for disassembly only
@@ -2287,7 +2242,8 @@
 // Most significant word multiply
 def t2SMMUL : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32,
                   "smmul", "\t$Rd, $Rn, $Rm",
-                  [(set rGPR:$Rd, (mulhs rGPR:$Rn, rGPR:$Rm))]> {
+                  [(set rGPR:$Rd, (mulhs rGPR:$Rn, rGPR:$Rm))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0110;
   let Inst{22-20} = 0b101;
@@ -2296,7 +2252,8 @@
 }
 
 def t2SMMULR : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32,
-                  "smmulr", "\t$Rd, $Rn, $Rm", []> {
+                  "smmulr", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0110;
   let Inst{22-20} = 0b101;
@@ -2307,7 +2264,8 @@
 def t2SMMLA : T2FourReg<
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
                 "smmla", "\t$Rd, $Rn, $Rm, $Ra",
-                [(set rGPR:$Rd, (add (mulhs rGPR:$Rm, rGPR:$Rn), rGPR:$Ra))]> {
+                [(set rGPR:$Rd, (add (mulhs rGPR:$Rm, rGPR:$Rn), rGPR:$Ra))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0110;
   let Inst{22-20} = 0b101;
@@ -2316,7 +2274,8 @@
 
 def t2SMMLAR: T2FourReg<
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
-                  "smmlar", "\t$Rd, $Rn, $Rm, $Ra", []> {
+                  "smmlar", "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0110;
   let Inst{22-20} = 0b101;
@@ -2326,7 +2285,8 @@
 def t2SMMLS: T2FourReg<
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
                 "smmls", "\t$Rd, $Rn, $Rm, $Ra",
-                [(set rGPR:$Rd, (sub rGPR:$Ra, (mulhs rGPR:$Rn, rGPR:$Rm)))]> {
+                [(set rGPR:$Rd, (sub rGPR:$Ra, (mulhs rGPR:$Rn, rGPR:$Rm)))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0110;
   let Inst{22-20} = 0b110;
@@ -2335,7 +2295,8 @@
 
 def t2SMMLSR:T2FourReg<
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32,
-                "smmlsr", "\t$Rd, $Rn, $Rm, $Ra", []> {
+                "smmlsr", "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{31-27} = 0b11111;
   let Inst{26-23} = 0b0110;
   let Inst{22-20} = 0b110;
@@ -2346,7 +2307,8 @@
   def BB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
               !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm",
               [(set rGPR:$Rd, (opnode (sext_inreg rGPR:$Rn, i16),
-                                      (sext_inreg rGPR:$Rm, i16)))]> {
+                                      (sext_inreg rGPR:$Rm, i16)))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2358,7 +2320,8 @@
   def BT : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
               !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm",
               [(set rGPR:$Rd, (opnode (sext_inreg rGPR:$Rn, i16),
-                                      (sra rGPR:$Rm, (i32 16))))]> {
+                                      (sra rGPR:$Rm, (i32 16))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2370,7 +2333,8 @@
   def TB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
               !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm",
               [(set rGPR:$Rd, (opnode (sra rGPR:$Rn, (i32 16)),
-                                      (sext_inreg rGPR:$Rm, i16)))]> {
+                                      (sext_inreg rGPR:$Rm, i16)))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2382,7 +2346,8 @@
   def TT : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
               !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm",
               [(set rGPR:$Rd, (opnode (sra rGPR:$Rn, (i32 16)),
-                                      (sra rGPR:$Rm, (i32 16))))]> {
+                                      (sra rGPR:$Rm, (i32 16))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2394,7 +2359,8 @@
   def WB : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
               !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm",
               [(set rGPR:$Rd, (sra (opnode rGPR:$Rn,
-                                    (sext_inreg rGPR:$Rm, i16)), (i32 16)))]> {
+                                    (sext_inreg rGPR:$Rm, i16)), (i32 16)))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b011;
@@ -2406,7 +2372,8 @@
   def WT : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL16,
               !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm",
               [(set rGPR:$Rd, (sra (opnode rGPR:$Rn,
-                                    (sra rGPR:$Rm, (i32 16))), (i32 16)))]> {
+                                    (sra rGPR:$Rm, (i32 16))), (i32 16)))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b011;
@@ -2423,7 +2390,8 @@
               !strconcat(opc, "bb"), "\t$Rd, $Rn, $Rm, $Ra",
               [(set rGPR:$Rd, (add rGPR:$Ra,
                                (opnode (sext_inreg rGPR:$Rn, i16),
-                                       (sext_inreg rGPR:$Rm, i16))))]> {
+                                       (sext_inreg rGPR:$Rm, i16))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2435,7 +2403,8 @@
        (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
              !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra",
              [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sext_inreg rGPR:$Rn, i16),
-                                                 (sra rGPR:$Rm, (i32 16)))))]> {
+                                                 (sra rGPR:$Rm, (i32 16)))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2447,7 +2416,8 @@
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
               !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra",
               [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)),
-                                               (sext_inreg rGPR:$Rm, i16))))]> {
+                                               (sext_inreg rGPR:$Rm, i16))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2459,7 +2429,8 @@
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
               !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra",
              [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)),
-                                                 (sra rGPR:$Rm, (i32 16)))))]> {
+                                                 (sra rGPR:$Rm, (i32 16)))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b001;
@@ -2471,7 +2442,8 @@
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
               !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra",
               [(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn,
-                                    (sext_inreg rGPR:$Rm, i16)), (i32 16))))]> {
+                                    (sext_inreg rGPR:$Rm, i16)), (i32 16))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b011;
@@ -2483,7 +2455,8 @@
         (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC16,
               !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra",
               [(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn,
-                                      (sra rGPR:$Rm, (i32 16))), (i32 16))))]> {
+                                      (sra rGPR:$Rm, (i32 16))), (i32 16))))]>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
     let Inst{31-27} = 0b11111;
     let Inst{26-23} = 0b0110;
     let Inst{22-20} = 0b011;
@@ -2498,66 +2471,108 @@
 // Halfword multiple accumulate long: SMLAL<x><y> -- for disassembly only
 def t2SMLALBB : T2FourReg_mac<1, 0b100, 0b1000, (outs rGPR:$Ra,rGPR:$Rd),
          (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlalbb", "\t$Ra, $Rd, $Rn, $Rm",
-           [/* For disassembly only; pattern left blank */]>;
+           [/* For disassembly only; pattern left blank */]>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLALBT : T2FourReg_mac<1, 0b100, 0b1001, (outs rGPR:$Ra,rGPR:$Rd),
          (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlalbt", "\t$Ra, $Rd, $Rn, $Rm",
-           [/* For disassembly only; pattern left blank */]>;
+           [/* For disassembly only; pattern left blank */]>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLALTB : T2FourReg_mac<1, 0b100, 0b1010, (outs rGPR:$Ra,rGPR:$Rd),
          (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlaltb", "\t$Ra, $Rd, $Rn, $Rm",
-           [/* For disassembly only; pattern left blank */]>;
+           [/* For disassembly only; pattern left blank */]>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLALTT : T2FourReg_mac<1, 0b100, 0b1011, (outs rGPR:$Ra,rGPR:$Rd),
          (ins rGPR:$Rn,rGPR:$Rm), IIC_iMAC64, "smlaltt", "\t$Ra, $Rd, $Rn, $Rm",
-           [/* For disassembly only; pattern left blank */]>;
+           [/* For disassembly only; pattern left blank */]>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 
 // Dual halfword multiple: SMUAD, SMUSD, SMLAD, SMLSD, SMLALD, SMLSLD
 // These are for disassembly only.
 
 def t2SMUAD: T2ThreeReg_mac<
             0, 0b010, 0b0000, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
-            IIC_iMAC32, "smuad", "\t$Rd, $Rn, $Rm", []> {
+            IIC_iMAC32, "smuad", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{15-12} = 0b1111;
 }
 def t2SMUADX:T2ThreeReg_mac<
             0, 0b010, 0b0001, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
-            IIC_iMAC32, "smuadx", "\t$Rd, $Rn, $Rm", []> {
+            IIC_iMAC32, "smuadx", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{15-12} = 0b1111;
 }
 def t2SMUSD: T2ThreeReg_mac<
             0, 0b100, 0b0000, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
-            IIC_iMAC32, "smusd", "\t$Rd, $Rn, $Rm", []> {
+            IIC_iMAC32, "smusd", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{15-12} = 0b1111;
 }
 def t2SMUSDX:T2ThreeReg_mac<
             0, 0b100, 0b0001, (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm),
-            IIC_iMAC32, "smusdx", "\t$Rd, $Rn, $Rm", []> {
+            IIC_iMAC32, "smusdx", "\t$Rd, $Rn, $Rm", []>,
+          Requires<[IsThumb2, HasThumb2DSP]> {
   let Inst{15-12} = 0b1111;
 }
 def t2SMLAD   : T2ThreeReg_mac<
             0, 0b010, 0b0000, (outs rGPR:$Rd),
             (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smlad",
-            "\t$Rd, $Rn, $Rm, $Ra", []>;
+            "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLADX  : T2FourReg_mac<
             0, 0b010, 0b0001, (outs rGPR:$Rd),
             (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smladx",
-            "\t$Rd, $Rn, $Rm, $Ra", []>;
+            "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLSD   : T2FourReg_mac<0, 0b100, 0b0000, (outs rGPR:$Rd),
             (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smlsd",
-            "\t$Rd, $Rn, $Rm, $Ra", []>;
+            "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLSDX  : T2FourReg_mac<0, 0b100, 0b0001, (outs rGPR:$Rd),
             (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smlsdx",
-            "\t$Rd, $Rn, $Rm, $Ra", []>;
+            "\t$Rd, $Rn, $Rm, $Ra", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLALD  : T2FourReg_mac<1, 0b100, 0b1100, (outs rGPR:$Ra,rGPR:$Rd),
                         (ins rGPR:$Rm, rGPR:$Rn), IIC_iMAC64, "smlald",
-                        "\t$Ra, $Rd, $Rm, $Rn", []>;
+                        "\t$Ra, $Rd, $Rm, $Rn", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLALDX : T2FourReg_mac<1, 0b100, 0b1101, (outs rGPR:$Ra,rGPR:$Rd),
                         (ins rGPR:$Rm,rGPR:$Rn), IIC_iMAC64, "smlaldx",
-                        "\t$Ra, $Rd, $Rm, $Rn", []>;
+                        "\t$Ra, $Rd, $Rm, $Rn", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLSLD  : T2FourReg_mac<1, 0b101, 0b1100, (outs rGPR:$Ra,rGPR:$Rd),
                         (ins rGPR:$Rm,rGPR:$Rn), IIC_iMAC64, "smlsld",
-                        "\t$Ra, $Rd, $Rm, $Rn", []>;
+                        "\t$Ra, $Rd, $Rm, $Rn", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
 def t2SMLSLDX : T2FourReg_mac<1, 0b101, 0b1101, (outs rGPR:$Ra,rGPR:$Rd),
                         (ins rGPR:$Rm,rGPR:$Rn), IIC_iMAC64, "smlsldx",
-                        "\t$Ra, $Rd, $Rm, $Rn", []>;
+                        "\t$Ra, $Rd, $Rm, $Rn", []>,
+          Requires<[IsThumb2, HasThumb2DSP]>;
+
+//===----------------------------------------------------------------------===//
+//  Division Instructions.
+//  Signed and unsigned division on v7-M
+//
+def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
+                 "sdiv", "\t$Rd, $Rn, $Rm",
+                 [(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>,
+                 Requires<[HasDivide, IsThumb2]> {
+  let Inst{31-27} = 0b11111;
+  let Inst{26-21} = 0b011100;
+  let Inst{20} = 0b1;
+  let Inst{15-12} = 0b1111;
+  let Inst{7-4} = 0b1111;
+}
+
+def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi,
+                 "udiv", "\t$Rd, $Rn, $Rm",
+                 [(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>,
+                 Requires<[HasDivide, IsThumb2]> {
+  let Inst{31-27} = 0b11111;
+  let Inst{26-21} = 0b011101;
+  let Inst{20} = 0b1;
+  let Inst{15-12} = 0b1111;
+  let Inst{7-4} = 0b1111;
+}
 
 //===----------------------------------------------------------------------===//
 //  Misc. Arithmetic Instructions.
@@ -2587,35 +2602,16 @@
 
 def t2REV16 : T2I_misc<0b01, 0b01, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
                        "rev16", ".w\t$Rd, $Rm",
-                [(set rGPR:$Rd,
-                    (or (and (srl rGPR:$Rm, (i32 8)), 0xFF),
-                        (or (and (shl rGPR:$Rm, (i32 8)), 0xFF00),
-                            (or (and (srl rGPR:$Rm, (i32 8)), 0xFF0000),
-                               (and (shl rGPR:$Rm, (i32 8)), 0xFF000000)))))]>;
-
-def : T2Pat<(or (or (or (and (srl rGPR:$Rm, (i32 8)), 0xFF0000),
-                        (and (shl rGPR:$Rm, (i32 8)), 0xFF000000)),
-                    (and (srl rGPR:$Rm, (i32 8)), 0xFF)),
-                (and (shl rGPR:$Rm, (i32 8)), 0xFF00)),
-            (t2REV16 rGPR:$Rm)>;
+                [(set rGPR:$Rd, (rotr (bswap rGPR:$Rm), (i32 16)))]>;
 
 def t2REVSH : T2I_misc<0b01, 0b11, (outs rGPR:$Rd), (ins rGPR:$Rm), IIC_iUNAr,
                        "revsh", ".w\t$Rd, $Rm",
-                 [(set rGPR:$Rd,
-                    (sext_inreg
-                      (or (srl rGPR:$Rm, (i32 8)),
-                          (shl rGPR:$Rm, (i32 8))), i16))]>;
-
-def : T2Pat<(sext_inreg (or (srl (and rGPR:$Rm, 0xFF00), (i32 8)),
-                            (shl rGPR:$Rm, (i32 8))), i16),
-            (t2REVSH rGPR:$Rm)>;
+                 [(set rGPR:$Rd, (sra (bswap rGPR:$Rm), (i32 16)))]>;
 
 def : T2Pat<(or (sra (shl rGPR:$Rm, (i32 24)), (i32 16)),
-                   (and (srl rGPR:$Rm, (i32 8)), 0xFF)),
+                (and (srl rGPR:$Rm, (i32 8)), 0xFF)),
             (t2REVSH rGPR:$Rm)>;
 
-def : T2Pat<(sra (bswap rGPR:$Rm), (i32 16)), (t2REVSH rGPR:$Rm)>;
-
 def t2PKHBT : T2ThreeReg<
             (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, shift_imm:$sh),
                   IIC_iBITsi, "pkhbt", "\t$Rd, $Rn, $Rm$sh",
@@ -2711,33 +2707,21 @@
 // FIXME: should be able to write a pattern for ARMcmov, but can't use
 // a two-value operand where a dag node expects two operands. :(
 let neverHasSideEffects = 1 in {
-def t2MOVCCr : T2TwoReg<
-                   (outs rGPR:$Rd), (ins rGPR:$false, rGPR:$Rm), IIC_iCMOVr,
-                   "mov", ".w\t$Rd, $Rm",
+def t2MOVCCr : t2PseudoInst<(outs rGPR:$Rd),
+                            (ins rGPR:$false, rGPR:$Rm, pred:$p),
+                            Size4Bytes, IIC_iCMOVr,
    [/*(set rGPR:$Rd, (ARMcmov rGPR:$false, rGPR:$Rm, imm:$cc, CCR:$ccr))*/]>,
-                RegConstraint<"$false = $Rd"> {
-  let Inst{31-27} = 0b11101;
-  let Inst{26-25} = 0b01;
-  let Inst{24-21} = 0b0010;
-  let Inst{20} = 0; // The S bit.
-  let Inst{19-16} = 0b1111; // Rn
-  let Inst{14-12} = 0b000;
-  let Inst{7-4} = 0b0000;
-}
+                RegConstraint<"$false = $Rd">;
 
 let isMoveImm = 1 in
-def t2MOVCCi : T2OneRegImm<(outs rGPR:$Rd), (ins rGPR:$false, t2_so_imm:$imm),
-                   IIC_iCMOVi, "mov", ".w\t$Rd, $imm",
+def t2MOVCCi : t2PseudoInst<(outs rGPR:$Rd),
+                            (ins rGPR:$false, t2_so_imm:$imm, pred:$p),
+                   Size4Bytes, IIC_iCMOVi,
 [/*(set rGPR:$Rd,(ARMcmov rGPR:$false,t2_so_imm:$imm, imm:$cc, CCR:$ccr))*/]>,
-                   RegConstraint<"$false = $Rd"> {
-  let Inst{31-27} = 0b11110;
-  let Inst{25} = 0;
-  let Inst{24-21} = 0b0010;
-  let Inst{20} = 0; // The S bit.
-  let Inst{19-16} = 0b1111; // Rn
-  let Inst{15} = 0;
-}
+                   RegConstraint<"$false = $Rd">;
 
+// FIXME: Pseudo-ize these. For now, just mark codegen only.
+let isCodeGenOnly = 1 in {
 let isMoveImm = 1 in
 def t2MOVCCi16 : T2I<(outs rGPR:$Rd), (ins rGPR:$false, i32imm_hilo16:$imm),
                       IIC_iCMOVi,
@@ -2804,6 +2788,7 @@
                              (ins rGPR:$false, rGPR:$Rm, i32imm:$imm),
                              IIC_iCMOVsi, "ror", ".w\t$Rd, $Rm, $imm", []>,
                  RegConstraint<"$false = $Rd">;
+} // isCodeGenOnly = 1
 } // neverHasSideEffects
 
 //===----------------------------------------------------------------------===//
@@ -2953,22 +2938,6 @@
 }
 
 //===----------------------------------------------------------------------===//
-// TLS Instructions
-//
-
-// __aeabi_read_tp preserves the registers r1-r3.
-let isCall = 1,
-  Defs = [R0, R12, LR, CPSR], Uses = [SP] in {
-  def t2TPsoft : T2XI<(outs), (ins), IIC_Br,
-                     "bl\t__aeabi_read_tp",
-                     [(set R0, ARMthread_pointer)]> {
-    let Inst{31-27} = 0b11110;
-    let Inst{15-14} = 0b11;
-    let Inst{12} = 1;
-  }
-}
-
-//===----------------------------------------------------------------------===//
 // SJLJ Exception handling intrinsics
 //   eh_sjlj_setjmp() is an instruction sequence to store the return
 //   address and save #0 in R0 for the non-longjmp case.
@@ -3006,28 +2975,13 @@
 //
 
 // FIXME: remove when we have a way of marking an MI with these properties.
-// FIXME: $dst1 should be a def. But the extra ops must be in the end of the
-// operand list.
 // FIXME: Should pc be an implicit operand like PICADD, etc?
 let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1,
     hasExtraDefRegAllocReq = 1, isCodeGenOnly = 1 in
-def t2LDMIA_RET: T2XIt<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
-                                        reglist:$regs, variable_ops),
-                        IIC_iLoad_mBr,
-                        "ldmia${p}.w\t$Rn!, $regs",
-                        "$Rn = $wb", []> {
-  bits<4>  Rn;
-  bits<16> regs;
-
-  let Inst{31-27} = 0b11101;
-  let Inst{26-25} = 0b00;
-  let Inst{24-23} = 0b01;     // Increment After
-  let Inst{22}    = 0;
-  let Inst{21}    = 1;        // Writeback
-  let Inst{20}    = 1;
-  let Inst{19-16} = Rn;
-  let Inst{15-0}  = regs;
-}
+def t2LDMIA_RET: t2PseudoInst<(outs GPR:$wb), (ins GPR:$Rn, pred:$p,
+                                                   reglist:$regs, variable_ops),
+                              Size4Bytes, IIC_iLoad_mBr, []>,
+                         RegConstraint<"$Rn = $wb">;
 
 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
 let isPredicable = 1 in

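As the t2_so_imm comment in the hunk above says, that operand accepts a 32-bit constant that is either an 8-bit value rotated by some number of bits or an 8-bit value splatted into multiple bytes of the word. The C++ predicate below is a simplified sketch of that set of constants, for illustration only; the real test is ARM_AM::getT2SOImmVal, which also derives the canonical encoding and handles details this sketch ignores.

  #include <cstdint>

  // Simplified membership test for Thumb2 modified immediates: one of the
  // three byte-splat patterns, a plain 8-bit value, or an 8-bit value with
  // its top bit set rotated right by 8 to 31 bits.
  static bool isT2SOImmSketch(uint32_t V) {
    uint32_t Lo = V & 0xFF;
    uint32_t B1 = (V >> 8) & 0xFF;
    if (V == Lo ||                    // 0x000000XY
        V == Lo * 0x00010001u ||      // 0x00XY00XY
        V == B1 * 0x01000100u ||      // 0xXY00XY00
        V == Lo * 0x01010101u)        // 0xXYXYXYXY
      return true;
    for (unsigned R = 8; R < 32; ++R) {
      // Rotating left by R undoes a rotate-right by R.
      uint32_t C = (V << R) | (V >> (32 - R));
      if (C >= 0x80 && C <= 0xFF)
        return true;
    }
    return false;
  }
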
Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrVFP.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrVFP.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrVFP.td (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMInstrVFP.td Sat Jul  2 22:28:07 2011
@@ -166,6 +166,15 @@
 def : MnemonicAlias<"vldm", "vldmia">;
 def : MnemonicAlias<"vstm", "vstmia">;
 
+def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
+                Requires<[HasVFP2]>;
+def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
+                Requires<[HasVFP2]>;
+def : InstAlias<"vpop${p} $r",  (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
+                Requires<[HasVFP2]>;
+def : InstAlias<"vpop${p} $r",  (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
+                Requires<[HasVFP2]>;
+
 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
 
 //===----------------------------------------------------------------------===//

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMLoadStoreOptimizer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Sat Jul  2 22:28:07 2011
@@ -329,13 +329,9 @@
       if (NewBase == 0)
         return false;
     }
-    int BaseOpc = !isThumb2
-      ? ARM::ADDri
-      : ((Base == ARM::SP) ? ARM::t2ADDrSPi : ARM::t2ADDri);
+    int BaseOpc = !isThumb2 ? ARM::ADDri : ARM::t2ADDri;
     if (Offset < 0) {
-      BaseOpc = !isThumb2
-        ? ARM::SUBri
-        : ((Base == ARM::SP) ? ARM::t2SUBrSPi : ARM::t2SUBri);
+      BaseOpc = !isThumb2 ? ARM::SUBri : ARM::t2SUBri;
       Offset = - Offset;
     }
     int ImmedOffset = isThumb2
@@ -516,8 +512,6 @@
   if (!MI)
     return false;
   if (MI->getOpcode() != ARM::t2SUBri &&
-      MI->getOpcode() != ARM::t2SUBrSPi &&
-      MI->getOpcode() != ARM::t2SUBrSPi12 &&
       MI->getOpcode() != ARM::tSUBspi &&
       MI->getOpcode() != ARM::SUBri)
     return false;
@@ -541,8 +535,6 @@
   if (!MI)
     return false;
   if (MI->getOpcode() != ARM::t2ADDri &&
-      MI->getOpcode() != ARM::t2ADDrSPi &&
-      MI->getOpcode() != ARM::t2ADDrSPi12 &&
       MI->getOpcode() != ARM::tADDspi &&
       MI->getOpcode() != ARM::ADDri)
     return false;
@@ -1461,19 +1453,19 @@
   while (++I != E) {
     if (I->isDebugValue() || MemOps.count(&*I))
       continue;
-    const TargetInstrDesc &TID = I->getDesc();
-    if (TID.isCall() || TID.isTerminator() || I->hasUnmodeledSideEffects())
+    const MCInstrDesc &MCID = I->getDesc();
+    if (MCID.isCall() || MCID.isTerminator() || I->hasUnmodeledSideEffects())
       return false;
-    if (isLd && TID.mayStore())
+    if (isLd && MCID.mayStore())
       return false;
     if (!isLd) {
-      if (TID.mayLoad())
+      if (MCID.mayLoad())
         return false;
       // It's not safe to move the first 'str' down.
       // str r1, [r0]
       // strh r5, [r0]
       // str r4, [r0, #+4]
-      if (TID.mayStore())
+      if (MCID.mayStore())
         return false;
     }
     for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
@@ -1672,14 +1664,14 @@
           Ops.pop_back();
           Ops.pop_back();
 
-          const TargetInstrDesc &TID = TII->get(NewOpc);
-          const TargetRegisterClass *TRC = TID.OpInfo[0].getRegClass(TRI);
+          const MCInstrDesc &MCID = TII->get(NewOpc);
+          const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI);
           MRI->constrainRegClass(EvenReg, TRC);
           MRI->constrainRegClass(OddReg, TRC);
 
           // Form the pair instruction.
           if (isLd) {
-            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, TID)
+            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
               .addReg(EvenReg, RegState::Define)
               .addReg(OddReg, RegState::Define)
               .addReg(BaseReg);
@@ -1691,7 +1683,7 @@
             MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
             ++NumLDRDFormed;
           } else {
-            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, TID)
+            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
               .addReg(EvenReg)
               .addReg(OddReg)
               .addReg(BaseReg);
@@ -1742,8 +1734,8 @@
   while (MBBI != E) {
     for (; MBBI != E; ++MBBI) {
       MachineInstr *MI = MBBI;
-      const TargetInstrDesc &TID = MI->getDesc();
-      if (TID.isCall() || TID.isTerminator()) {
+      const MCInstrDesc &MCID = MI->getDesc();
+      if (MCID.isCall() || MCID.isTerminator()) {
         // Stop at barriers.
         ++MBBI;
         break;

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMMCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMMCCodeEmitter.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMMCCodeEmitter.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMMCCodeEmitter.cpp Sat Jul  2 22:28:07 2011
@@ -1274,7 +1274,7 @@
 EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                   SmallVectorImpl<MCFixup> &Fixups) const {
   // Pseudo instructions don't get encoded.
-  const TargetInstrDesc &Desc = TII.get(MI.getOpcode());
+  const MCInstrDesc &Desc = TII.get(MI.getOpcode());
   uint64_t TSFlags = Desc.TSFlags;
   if ((TSFlags & ARMII::FormMask) == ARMII::Pseudo)
     return;

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMRegisterInfo.td?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMRegisterInfo.td (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMRegisterInfo.td Sat Jul  2 22:28:07 2011
@@ -228,6 +228,9 @@
 // the general GPR register class above (MOV, e.g.)
 def tGPR : RegisterClass<"ARM", [i32], 32, (trunc GPR, 8)>;
 
+// The high registers in thumb mode, R8-R15.
+def hGPR : RegisterClass<"ARM", [i32], 32, (sub GPR, tGPR)>;
+
 // For tail calls, we can't use callee-saved registers, as they are restored
 // to the saved value before the tail call, which would clobber a call address.
 // Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.cpp Sat Jul  2 22:28:07 2011
@@ -7,17 +7,22 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements the ARM specific subclass of TargetSubtarget.
+// This file implements the ARM specific subclass of TargetSubtargetInfo.
 //
 //===----------------------------------------------------------------------===//
 
 #include "ARMSubtarget.h"
-#include "ARMGenSubtarget.inc"
 #include "ARMBaseRegisterInfo.h"
 #include "llvm/GlobalValue.h"
-#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/ADT/SmallVector.h"
+
+#define GET_SUBTARGETINFO_CTOR
+#define GET_SUBTARGETINFO_MC_DESC
+#define GET_SUBTARGETINFO_TARGET_DESC
+#include "ARMGenSubtargetInfo.inc"
+
 using namespace llvm;
 
 static cl::opt<bool>
@@ -31,9 +36,10 @@
 StrictAlign("arm-strict-align", cl::Hidden,
             cl::desc("Disallow all unaligned memory accesses"));
 
-ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS,
-                           bool isT)
-  : ARMArchVersion(V4)
+ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU,
+                           const std::string &FS, bool isT)
+  : ARMGenSubtargetInfo()
+  , ARMArchVersion(V4)
   , ARMProcFamily(Others)
   , ARMFPUType(None)
   , UseNEONForSinglePrecisionFP(false)
@@ -56,22 +62,21 @@
   , HasMPExtension(false)
   , FPOnlySP(false)
   , AllowsUnalignedMem(false)
+  , Thumb2DSP(false)
   , stackAlignment(4)
-  , CPUString("generic")
+  , CPUString(CPU)
   , TargetTriple(TT)
   , TargetABI(ARM_ABI_APCS) {
-  // Default to soft float ABI
-  if (FloatABIType == FloatABI::Default)
-    FloatABIType = FloatABI::Soft;
-
   // Determine default and user specified characteristics
 
   // When no arch is specified either by CPU or by attributes, make the default
   // ARMv4T.
   const char *ARMArchFeature = "";
+  if (CPUString.empty())
+    CPUString = "generic";
   if (CPUString == "generic" && (FS.empty() || FS == "generic")) {
     ARMArchVersion = V4T;
-    ARMArchFeature = ",+v4t";
+    ARMArchFeature = "+v4t";
   }
 
   // Set the boolean corresponding to the current target triple, or the default
@@ -90,29 +95,32 @@
     unsigned SubVer = TT[Idx];
     if (SubVer >= '7' && SubVer <= '9') {
       ARMArchVersion = V7A;
-      ARMArchFeature = ",+v7a";
+      ARMArchFeature = "+v7a";
       if (Len >= Idx+2 && TT[Idx+1] == 'm') {
         ARMArchVersion = V7M;
-        ARMArchFeature = ",+v7m";
+        ARMArchFeature = "+v7m";
+      } else if (Len >= Idx+3 && TT[Idx+1] == 'e' && TT[Idx+2] == 'm') {
+        ARMArchVersion = V7EM;
+        ARMArchFeature = "+v7em";
       }
     } else if (SubVer == '6') {
       ARMArchVersion = V6;
-      ARMArchFeature = ",+v6";
+      ARMArchFeature = "+v6";
       if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == '2') {
         ARMArchVersion = V6T2;
-        ARMArchFeature = ",+v6t2";
+        ARMArchFeature = "+v6t2";
       }
     } else if (SubVer == '5') {
       ARMArchVersion = V5T;
-      ARMArchFeature = ",+v5t";
+      ARMArchFeature = "+v5t";
       if (Len >= Idx+3 && TT[Idx+1] == 't' && TT[Idx+2] == 'e') {
         ARMArchVersion = V5TE;
-        ARMArchFeature = ",+v5te";
+        ARMArchFeature = "+v5te";
       }
     } else if (SubVer == '4') {
       if (Len >= Idx+2 && TT[Idx+1] == 't') {
         ARMArchVersion = V4T;
-        ARMArchFeature = ",+v4t";
+        ARMArchFeature = "+v4t";
       } else {
         ARMArchVersion = V4;
         ARMArchFeature = "";
@@ -123,18 +131,18 @@
   if (TT.find("eabi") != std::string::npos)
     TargetABI = ARM_ABI_AAPCS;
 
-  // Parse features string.  If the first entry in FS (the CPU) is missing,
-  // insert the architecture feature derived from the target triple.  This is
-  // important for setting features that are implied based on the architecture
-  // version.
-  std::string FSWithArch;
-  if (FS.empty())
-    FSWithArch = std::string(ARMArchFeature);
-  else if (FS.find(',') == 0)
-    FSWithArch = std::string(ARMArchFeature) + FS;
-  else
+  // Insert the architecture feature derived from the target triple into the
+  // feature string. This is important for setting features that are implied
+  // based on the architecture version.
+  std::string FSWithArch = std::string(ARMArchFeature);
+  if (FSWithArch.empty())
     FSWithArch = FS;
-  CPUString = ParseSubtargetFeatures(FSWithArch, CPUString);
+  else if (!FS.empty())
+    FSWithArch = FSWithArch + "," + FS;
+  ParseSubtargetFeatures(FSWithArch, CPUString);
+
+  // Initialize scheduling itinerary for the specified CPU.
+  InstrItins = getInstrItineraryForCPU(CPUString);
 
   // After parsing Itineraries, set ItinData.IssueWidth.
   computeIssueWidth();
@@ -247,9 +255,9 @@
 
 bool ARMSubtarget::enablePostRAScheduler(
            CodeGenOpt::Level OptLevel,
-           TargetSubtarget::AntiDepBreakMode& Mode,
+           TargetSubtargetInfo::AntiDepBreakMode& Mode,
            RegClassVector& CriticalPathRCs) const {
-  Mode = TargetSubtarget::ANTIDEP_CRITICAL;
+  Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
   CriticalPathRCs.clear();
   CriticalPathRCs.push_back(&ARM::GPRRegClass);
   return PostRAScheduler && OptLevel >= CodeGenOpt::Default;

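The constructor change above builds the string handed to ParseSubtargetFeatures by prepending the architecture feature implied by the target triple to the user-supplied features. A small C++ sketch of that composition step follows; the function name is made up for illustration, while the joining rule mirrors the code in the hunk.

  #include <string>

  // Join the triple-derived architecture feature (e.g. "+v7a") with the
  // user feature string (e.g. "+neon"), comma-separated, so that features
  // implied by the architecture version are always parsed as well.
  static std::string composeFeatureString(const std::string &ArchFeature,
                                          const std::string &FS) {
    if (ArchFeature.empty())
      return FS;
    if (FS.empty())
      return ArchFeature;
    return ArchFeature + "," + FS;   // e.g. "+v7a,+neon"
  }
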
Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMSubtarget.h Sat Jul  2 22:28:07 2011
@@ -7,26 +7,28 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file declares the ARM specific subclass of TargetSubtarget.
+// This file declares the ARM specific subclass of TargetSubtargetInfo.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef ARMSUBTARGET_H
 #define ARMSUBTARGET_H
 
-#include "llvm/Target/TargetInstrItineraries.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/ADT/Triple.h"
 #include <string>
 
+#define GET_SUBTARGETINFO_HEADER
+#include "ARMGenSubtargetInfo.inc"
+
 namespace llvm {
 class GlobalValue;
 
-class ARMSubtarget : public TargetSubtarget {
+class ARMSubtarget : public ARMGenSubtargetInfo {
 protected:
   enum ARMArchEnum {
-    V4, V4T, V5T, V5TE, V6, V6M, V6T2, V7A, V7M
+    V4, V4T, V5T, V5TE, V6, V6M, V6T2, V7A, V7M, V7EM
   };
 
   enum ARMProcFamilyEnum {
@@ -43,7 +45,7 @@
   };
 
   /// ARMArchVersion - ARM architecture version: V4, V4T (base), V5T, V5TE,
-  /// V6, V6T2, V7A, V7M.
+  /// V6, V6T2, V7A, V7M, V7EM.
   ARMArchEnum ARMArchVersion;
 
   /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others.
@@ -128,6 +130,10 @@
   /// ARMTargetLowering::allowsUnalignedMemoryAccesses().
   bool AllowsUnalignedMem;
 
+  /// Thumb2DSP - If true, the subtarget supports the v7 DSP (saturating arith
+  /// and such) instructions in Thumb2 code.
+  bool Thumb2DSP;
+
   /// stackAlignment - The minimum alignment known to hold of the stack frame on
   /// entry to the function and which must be maintained by every function.
   unsigned stackAlignment;
@@ -154,7 +160,8 @@
   /// This constructor initializes the data members to match that
   /// of the specified triple.
   ///
-  ARMSubtarget(const std::string &TT, const std::string &FS, bool isThumb);
+  ARMSubtarget(const std::string &TT, const std::string &CPU,
+               const std::string &FS, bool isThumb);
 
   /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
   /// that still makes it profitable to inline the call.
@@ -165,8 +172,7 @@
   }
   /// ParseSubtargetFeatures - Parses features string setting specified
   /// subtarget options.  Definition of function is auto generated by tblgen.
-  std::string ParseSubtargetFeatures(const std::string &FS,
-                                     const std::string &CPU);
+  void ParseSubtargetFeatures(const std::string &FS, const std::string &CPU);
 
   void computeIssueWidth();
 
@@ -197,6 +203,7 @@
   bool prefers32BitThumb() const { return Pref32BitThumb; }
   bool avoidCPSRPartialUpdate() const { return AvoidCPSRPartialUpdate; }
   bool hasMPExtension() const { return HasMPExtension; }
+  bool hasThumb2DSP() const { return Thumb2DSP; }
 
   bool hasFP16() const { return HasFP16; }
   bool hasD16() const { return HasD16; }
@@ -226,7 +233,7 @@
 
   /// enablePostRAScheduler - True at 'More' optimization.
   bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
-                             TargetSubtarget::AntiDepBreakMode& Mode,
+                             TargetSubtargetInfo::AntiDepBreakMode& Mode,
                              RegClassVector& CriticalPathRCs) const;
 
   /// getInstrItins - Return the instruction itineraries based on subtarget

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.cpp Sat Jul  2 22:28:07 2011
@@ -78,18 +78,24 @@
 ///
 ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T,
                                            const std::string &TT,
+                                           const std::string &CPU,
                                            const std::string &FS,
                                            bool isThumb)
   : LLVMTargetMachine(T, TT),
-    Subtarget(TT, FS, isThumb),
+    Subtarget(TT, CPU, FS, isThumb),
     JITInfo(),
     InstrItins(Subtarget.getInstrItineraryData()) {
   DefRelocModel = getRelocationModel();
+
+  // Default to soft float ABI
+  if (FloatABIType == FloatABI::Default)
+    FloatABIType = FloatABI::Soft;
 }
 
 ARMTargetMachine::ARMTargetMachine(const Target &T, const std::string &TT,
+                                   const std::string &CPU,
                                    const std::string &FS)
-  : ARMBaseTargetMachine(T, TT, FS, false), InstrInfo(Subtarget),
+  : ARMBaseTargetMachine(T, TT, CPU, FS, false), InstrInfo(Subtarget),
     DataLayout(Subtarget.isAPCS_ABI() ?
                std::string("e-p:32:32-f64:32:64-i64:32:64-"
                            "v128:32:128-v64:32:64-n32") :
@@ -105,8 +111,9 @@
 }
 
 ThumbTargetMachine::ThumbTargetMachine(const Target &T, const std::string &TT,
+                                       const std::string &CPU,
                                        const std::string &FS)
-  : ARMBaseTargetMachine(T, TT, FS, true),
+  : ARMBaseTargetMachine(T, TT, CPU, FS, true),
     InstrInfo(Subtarget.hasThumb2()
               ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget))
               : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))),
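
Besides forwarding the CPU string to the subtarget, the base constructor now picks a float ABI when none was requested. A minimal sketch of that fallback, reusing the FloatABIType global and FloatABI enum already referenced above; defaultARMFloatABI is an illustrative name only:

  #include "llvm/Target/TargetOptions.h"

  // When the command line leaves the float ABI unset, ARM target machines
  // now default to the soft-float ABI rather than leaving it undecided.
  static void defaultARMFloatABI() {
    using namespace llvm;
    if (FloatABIType == FloatABI::Default)
      FloatABIType = FloatABI::Soft;
  }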

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.h?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.h (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/ARMTargetMachine.h Sat Jul  2 22:28:07 2011
@@ -41,7 +41,8 @@
 
 public:
   ARMBaseTargetMachine(const Target &T, const std::string &TT,
-                       const std::string &FS, bool isThumb);
+                       const std::string &CPU, const std::string &FS,
+                       bool isThumb);
 
   virtual       ARMJITInfo       *getJITInfo()         { return &JITInfo; }
   virtual const ARMSubtarget  *getSubtargetImpl() const { return &Subtarget; }
@@ -70,7 +71,7 @@
   ARMFrameLowering    FrameLowering;
  public:
   ARMTargetMachine(const Target &T, const std::string &TT,
-                   const std::string &FS);
+                   const std::string &CPU, const std::string &FS);
 
   virtual const ARMRegisterInfo  *getRegisterInfo() const {
     return &InstrInfo.getRegisterInfo();
@@ -109,7 +110,7 @@
   OwningPtr<ARMFrameLowering> FrameLowering;
 public:
   ThumbTargetMachine(const Target &T, const std::string &TT,
-                     const std::string &FS);
+                     const std::string &CPU, const std::string &FS);
 
   /// returns either Thumb1RegisterInfo or Thumb2RegisterInfo
   virtual const ARMBaseRegisterInfo *getRegisterInfo() const {

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmLexer.cpp Sat Jul  2 22:28:07 2011
@@ -87,8 +87,9 @@
     : ARMBaseAsmLexer(T, MAI) {
     std::string tripleString("arm-unknown-unknown");
     std::string featureString;
+    std::string CPU;
     OwningPtr<const TargetMachine>
-      targetMachine(T.createTargetMachine(tripleString, featureString));
+      targetMachine(T.createTargetMachine(tripleString, CPU, featureString));
     InitRegisterMap(targetMachine->getRegisterInfo());
   }
 };
@@ -99,8 +100,9 @@
     : ARMBaseAsmLexer(T, MAI) {
     std::string tripleString("thumb-unknown-unknown");
     std::string featureString;
+    std::string CPU;
     OwningPtr<const TargetMachine>
-      targetMachine(T.createTargetMachine(tripleString, featureString));
+      targetMachine(T.createTargetMachine(tripleString, CPU, featureString));
     InitRegisterMap(targetMachine->getRegisterInfo());
   }
 };
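
The lexer updates reflect the new registry entry point: createTargetMachine now takes a CPU string between the triple and the feature string. A minimal sketch of a caller, assuming the llvm/Target/TargetRegistry.h header of this era; createGenericARM is an illustrative name:

  #include "llvm/Target/TargetMachine.h"
  #include "llvm/Target/TargetRegistry.h"
  #include <string>

  // Illustrative: create an ARM target machine with no particular CPU and no
  // extra feature flags; empty strings select the generic defaults.
  static llvm::TargetMachine *createGenericARM(const llvm::Target &T) {
    std::string Triple("arm-unknown-unknown");
    std::string CPU;       // empty: no specific CPU requested
    std::string Features;  // empty: no additional subtarget features
    return T.createTargetMachine(Triple, CPU, Features);
  }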

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmParser.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/AsmParser/ARMAsmParser.cpp Sat Jul  2 22:28:07 2011
@@ -350,6 +350,22 @@
   bool isCondCode() const { return Kind == CondCode; }
   bool isCCOut() const { return Kind == CCOut; }
   bool isImm() const { return Kind == Immediate; }
+  bool isImm0_255() const {
+    if (Kind != Immediate)
+      return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (!CE) return false;
+    int64_t Value = CE->getValue();
+    return Value >= 0 && Value < 256;
+  }
+  bool isT2SOImm() const {
+    if (Kind != Immediate)
+      return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (!CE) return false;
+    int64_t Value = CE->getValue();
+    return ARM_AM::getT2SOImmVal(Value) != -1;
+  }
   bool isReg() const { return Kind == Register; }
   bool isRegList() const { return Kind == RegisterList; }
   bool isDPRRegList() const { return Kind == DPRRegisterList; }
@@ -515,6 +531,16 @@
     addExpr(Inst, getImm());
   }
 
+  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    addExpr(Inst, getImm());
+  }
+
+  void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    addExpr(Inst, getImm());
+  }
+
   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
@@ -1761,7 +1787,7 @@
       Mnemonic == "vcle" ||
       (Mnemonic == "smlal" || Mnemonic == "umaal" || Mnemonic == "umlal" ||
        Mnemonic == "vabal" || Mnemonic == "vmlal" || Mnemonic == "vpadal" ||
-       Mnemonic == "vqdmlal"))
+       Mnemonic == "vqdmlal" || Mnemonic == "bics"))
     return Mnemonic;
 
   // First, split out any predication code.
@@ -1769,7 +1795,9 @@
     .Case("eq", ARMCC::EQ)
     .Case("ne", ARMCC::NE)
     .Case("hs", ARMCC::HS)
+    .Case("cs", ARMCC::HS)
     .Case("lo", ARMCC::LO)
+    .Case("cc", ARMCC::LO)
     .Case("mi", ARMCC::MI)
     .Case("pl", ARMCC::PL)
     .Case("vs", ARMCC::VS)
@@ -1824,6 +1852,7 @@
 void ARMAsmParser::
 GetMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                       bool &CanAcceptPredicationCode) {
+  bool isThumbOne = TM.getSubtarget<ARMSubtarget>().isThumb1Only();
   bool isThumb = TM.getSubtarget<ARMSubtarget>().isThumb();
 
   if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
@@ -1834,7 +1863,7 @@
       Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
       Mnemonic == "sbc" || Mnemonic == "mla" || Mnemonic == "umull" ||
       Mnemonic == "eor" || Mnemonic == "smlal" ||
-      (Mnemonic == "mov" && !isThumb)) {
+      (Mnemonic == "mov" && !isThumbOne)) {
     CanAcceptCarrySet = true;
   } else {
     CanAcceptCarrySet = false;
@@ -1853,8 +1882,7 @@
 
   if (isThumb)
     if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
-        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp" ||
-        Mnemonic == "mov")
+        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
       CanAcceptPredicationCode = false;
 }
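
The two new operand predicates above share one shape: fold the immediate to an MCConstantExpr and range-check its value. A standalone sketch of the imm0_255 check; the free function below is illustrative, the real predicate is a method on ARMOperand:

  #include "llvm/MC/MCExpr.h"
  #include "llvm/Support/Casting.h"

  // An immediate matches imm0_255 only if it folds to a compile-time constant
  // in the unsigned 8-bit range [0, 255].
  static bool isImm0_255(const llvm::MCExpr *Imm) {
    const llvm::MCConstantExpr *CE = llvm::dyn_cast<llvm::MCConstantExpr>(Imm);
    if (!CE)
      return false;  // symbolic expressions never match
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }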
 

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/CMakeLists.txt?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/CMakeLists.txt (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/CMakeLists.txt Sat Jul  2 22:28:07 2011
@@ -1,10 +1,7 @@
 set(LLVM_TARGET_DEFINITIONS ARM.td)
 
-tablegen(ARMGenRegisterInfo.h.inc -gen-register-desc-header)
-tablegen(ARMGenRegisterNames.inc -gen-register-enums)
-tablegen(ARMGenRegisterInfo.inc -gen-register-desc)
-tablegen(ARMGenInstrNames.inc -gen-instr-enums)
-tablegen(ARMGenInstrInfo.inc -gen-instr-desc)
+tablegen(ARMGenRegisterInfo.inc -gen-register-info)
+tablegen(ARMGenInstrInfo.inc -gen-instr-info)
 tablegen(ARMGenCodeEmitter.inc -gen-emitter)
 tablegen(ARMGenMCCodeEmitter.inc -gen-emitter -mc-emitter)
 tablegen(ARMGenAsmWriter.inc -gen-asm-writer)
@@ -12,7 +9,7 @@
 tablegen(ARMGenDAGISel.inc -gen-dag-isel)
 tablegen(ARMGenFastISel.inc -gen-fast-isel)
 tablegen(ARMGenCallingConv.inc -gen-callingconv)
-tablegen(ARMGenSubtarget.inc -gen-subtarget)
+tablegen(ARMGenSubtargetInfo.inc -gen-subtarget)
 tablegen(ARMGenEDInfo.inc -gen-enhanced-disassembly-info)
 tablegen(ARMGenDecoderTables.inc -gen-arm-decoder)
 
@@ -34,6 +31,7 @@
   ARMISelLowering.cpp
   ARMInstrInfo.cpp
   ARMJITInfo.cpp
+  ARMMachObjectWriter.cpp
   ARMMCCodeEmitter.cpp
   ARMMCExpr.cpp
   ARMLoadStoreOptimizer.cpp
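
The renamed .inc outputs are consumed through guard macros: each client defines the GET_* macro for the section it needs before including the generated file, as ARMSubtarget.h does above and ARMDisassemblerCore.cpp does below. A minimal sketch of the pattern, using only guard names that appear in this patch:

  // In a header: expand just the generated subtarget class declaration.
  #define GET_SUBTARGETINFO_HEADER
  #include "ARMGenSubtargetInfo.inc"

  // In an implementation file: expand the static MCInstrDesc table instead.
  #define GET_INSTRINFO_MC_DESC
  #include "ARMGenInstrInfo.inc"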

Modified: llvm/branches/type-system-rewrite/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/type-system-rewrite/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp?rev=134363&r1=134362&r2=134363&view=diff
==============================================================================
--- llvm/branches/type-system-rewrite/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp (original)
+++ llvm/branches/type-system-rewrite/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp Sat Jul  2 22:28:07 2011
@@ -24,8 +24,8 @@
 //#define DEBUG(X) do { X; } while (0)
 
 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
-/// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
-/// describing the operand info for each ARMInsts[i].
+/// MCInstrDesc ARMInsts[] definition and the MCOperandInfo[]'s describing the
+/// operand info for each ARMInsts[i].
 ///
 /// Together with an instruction's encoding format, we can take advantage of the
 /// NumOperands and the OpInfo fields of the target instruction description in
@@ -46,10 +46,10 @@
 ///   dag DefaultOps = (ops (i32 14), (i32 zero_reg));
 /// }
 ///
-/// which is manifested by the TargetOperandInfo[] of:
+/// which is manifested by the MCOperandInfo[] of:
 ///
-/// { 0, 0|(1<<TOI::Predicate), 0 },
-/// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
+/// { 0, 0|(1<<MCOI::Predicate), 0 },
+/// { ARM::CCRRegClassID, 0|(1<<MCOI::Predicate), 0 }
 ///
 /// So the first predicate MCOperand corresponds to the immediate part of the
 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
@@ -66,11 +66,12 @@
 ///   dag DefaultOps = (ops (i32 zero_reg));
 /// }
 ///
-/// which is manifested by the one TargetOperandInfo of:
+/// which is manifested by the one MCOperandInfo of:
 ///
-/// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
+/// { ARM::CCRRegClassID, 0|(1<<MCOI::OptionalDef), 0 }
 ///
 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
+#define GET_INSTRINFO_MC_DESC
 #include "ARMGenInstrInfo.inc"
 
 using namespace llvm;
@@ -588,9 +589,9 @@
 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  unsigned short NumDefs = TID.getNumDefs();
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  unsigned short NumDefs = MCID.getNumDefs();
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -739,9 +740,9 @@
     if (PW) {
       MI.addOperand(MCOperand::CreateReg(0));
       ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
-      const TargetInstrDesc &TID = ARMInsts[Opcode];
+      const MCInstrDesc &MCID = ARMInsts[Opcode];
       unsigned IndexMode =
-                  (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
+                 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
       unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
                                           ARM_AM::no_shift, IndexMode);
       MI.addOperand(MCOperand::CreateImm(Offset));
@@ -802,7 +803,7 @@
   if (CoprocessorOpcode(Opcode))
     return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   if (!OpInfo) return false;
 
   // MRS and MRSsys take one GPR reg Rd.
@@ -901,7 +902,7 @@
 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   if (!OpInfo) return false;
 
   unsigned &OpIdx = NumOpsAdded;
@@ -976,10 +977,10 @@
 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  unsigned short NumDefs = TID.getNumDefs();
-  bool isUnary = isUnaryDP(TID.TSFlags);
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  unsigned short NumDefs = MCID.getNumDefs();
+  bool isUnary = isUnaryDP(MCID.TSFlags);
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -1041,7 +1042,7 @@
   }
 
   // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
-  if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
+  if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
     MI.addOperand(MCOperand::CreateReg(0));
     ++OpIdx;
   }
@@ -1089,10 +1090,10 @@
 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  unsigned short NumDefs = TID.getNumDefs();
-  bool isUnary = isUnaryDP(TID.TSFlags);
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  unsigned short NumDefs = MCID.getNumDefs();
+  bool isUnary = isUnaryDP(MCID.TSFlags);
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -1118,7 +1119,7 @@
   }
 
   // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
-  if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
+  if (isUnary && (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1)) {
     MI.addOperand(MCOperand::CreateReg(0));
     ++OpIdx;
   }
@@ -1244,17 +1245,17 @@
 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  bool isPrePost = isPrePostLdSt(TID.TSFlags);
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  bool isPrePost = isPrePostLdSt(MCID.TSFlags);
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   if (!OpInfo) return false;
 
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
 
-  assert(((!isStore && TID.getNumDefs() > 0) ||
-          (isStore && (TID.getNumDefs() == 0 || isPrePost)))
+  assert(((!isStore && MCID.getNumDefs() > 0) ||
+          (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
          && "Invalid arguments");
 
   // Operand 0 of a pre- and post-indexed store is the address base writeback.
@@ -1291,7 +1292,7 @@
 
   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
          "Reg operand expected");
-  assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
+  assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
          && "Index mode or tied_to operand expected");
   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                      decodeRn(insn))));
@@ -1308,7 +1309,7 @@
 
   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
   unsigned IndexMode =
-               (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
+               (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
   if (getIBit(insn) == 0) {
     // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
     // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
@@ -1379,17 +1380,17 @@
 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  bool isPrePost = isPrePostLdSt(TID.TSFlags);
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  bool isPrePost = isPrePostLdSt(MCID.TSFlags);
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   if (!OpInfo) return false;
 
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
 
-  assert(((!isStore && TID.getNumDefs() > 0) ||
-          (isStore && (TID.getNumDefs() == 0 || isPrePost)))
+  assert(((!isStore && MCID.getNumDefs() > 0) ||
+          (isStore && (MCID.getNumDefs() == 0 || isPrePost)))
          && "Invalid arguments");
 
   // Operand 0 of a pre- and post-indexed store is the address base writeback.
@@ -1433,7 +1434,7 @@
 
   assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
          "Reg operand expected");
-  assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
+  assert((!isPrePost || (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1))
          && "Offset mode or tied_to operand expected");
   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
                                                      decodeRn(insn))));
@@ -1451,7 +1452,7 @@
 
   ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
   unsigned IndexMode =
-                  (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
+                 (MCID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
   if (getAM3IBit(insn) == 1) {
     MI.addOperand(MCOperand::CreateReg(0));
 
@@ -1539,7 +1540,7 @@
 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   if (!OpInfo) return false;
 
   unsigned &OpIdx = NumOpsAdded;
@@ -1591,7 +1592,7 @@
 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -1653,8 +1654,8 @@
   if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
     return false;
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  NumOpsAdded = MCID.getNumOperands() - 2; // ignore predicate operands
 
   // Disassemble register def.
   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@@ -1696,7 +1697,7 @@
   if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
     return false;
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -1802,7 +1803,7 @@
 
   assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -1842,8 +1843,8 @@
 
   assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -1858,7 +1859,7 @@
   ++OpIdx;
 
   // Skip tied_to operand constraint.
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
     assert(NumOps >= 4 && "Expect >=4 operands");
     MI.addOperand(MCOperand::CreateReg(0));
     ++OpIdx;
@@ -1886,8 +1887,8 @@
 
   assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   if (!OpInfo) return false;
 
   bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
@@ -1903,7 +1904,7 @@
                     getRegisterEnum(B, RegClassID,
                                     decodeVFPRd(insn, SP))));
 
-    assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
+    assert(MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
            "Tied to operand expected");
     MI.addOperand(MI.getOperand(0));
 
@@ -1961,7 +1962,7 @@
 
   assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@@ -2011,7 +2012,7 @@
 
   assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -2136,7 +2137,7 @@
 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned &OpIdx = NumOpsAdded;
 
   OpIdx = 0;
@@ -2402,8 +2403,8 @@
     unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
     unsigned alignment, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
 
   // At least one DPR register plus addressing mode #6.
   assert(NumOps >= 3 && "Expect >= 3 operands");
@@ -2507,7 +2508,7 @@
     }
 
     while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
-      assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
+      assert(MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1 &&
              "Tied to operand expected");
       MI.addOperand(MCOperand::CreateReg(0));
       ++OpIdx;
@@ -2757,8 +2758,8 @@
 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
     uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
 
   assert(NumOps >= 2 &&
          (OpInfo[0].RegClass == ARM::DPRRegClassID ||
@@ -2848,8 +2849,8 @@
 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opc];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opc];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
 
   assert(NumOps >= 2 &&
          (OpInfo[0].RegClass == ARM::DPRRegClassID ||
@@ -2878,7 +2879,7 @@
   ++OpIdx;
 
   // VPADAL...
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
     // TIED_TO operand.
     MI.addOperand(MCOperand::CreateReg(0));
     ++OpIdx;
@@ -2892,7 +2893,7 @@
   // VZIP and others have two TIED_TO reg operands.
   int Idx;
   while (OpIdx < NumOps &&
-         (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
+         (Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
     // Add TIED_TO operand.
     MI.addOperand(MI.getOperand(Idx));
     ++OpIdx;
@@ -2945,8 +2946,8 @@
 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
 
   assert(NumOps >= 3 &&
          (OpInfo[0].RegClass == ARM::DPRRegClassID ||
@@ -2964,7 +2965,7 @@
                                                      decodeNEONRd(insn))));
   ++OpIdx;
 
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
     // TIED_TO operand.
     MI.addOperand(MCOperand::CreateReg(0));
     ++OpIdx;
@@ -3044,8 +3045,8 @@
 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
 
   // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
   assert(NumOps >= 3 &&
@@ -3076,7 +3077,7 @@
   ++OpIdx;
 
   // VABA, VABAL, VBSLd, VBSLq, ...
-  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+  if (MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO) != -1) {
     // TIED_TO operand.
     MI.addOperand(MCOperand::CreateReg(0));
     ++OpIdx;
@@ -3163,8 +3164,8 @@
 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   if (!OpInfo) return false;
 
   assert(NumOps >= 3 &&
@@ -3192,7 +3193,7 @@
 
   // Process tied_to operand constraint.
   int Idx;
-  if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
+  if ((Idx = MCID.getOperandConstraint(OpIdx, MCOI::TIED_TO)) != -1) {
     MI.addOperand(MI.getOperand(Idx));
     ++OpIdx;
   }
@@ -3221,11 +3222,11 @@
 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   if (!OpInfo) return false;
 
-  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
+  assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
          OpInfo[0].RegClass == ARM::GPRRegClassID &&
          OpInfo[1].RegClass == ARM::DPRRegClassID &&
          OpInfo[2].RegClass < 0 &&
@@ -3255,14 +3256,14 @@
 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetInstrDesc &TID = ARMInsts[Opcode];
-  const TargetOperandInfo *OpInfo = TID.OpInfo;
+  const MCInstrDesc &MCID = ARMInsts[Opcode];
+  const MCOperandInfo *OpInfo = MCID.OpInfo;
   if (!OpInfo) return false;
 
-  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
+  assert(MCID.getNumDefs() == 1 && NumOps >= 3 &&
          OpInfo[0].RegClass == ARM::DPRRegClassID &&
          OpInfo[1].RegClass == ARM::DPRRegClassID &&
-         TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
+         MCID.getOperandConstraint(1, MCOI::TIED_TO) != -1 &&
          OpInfo[2].RegClass == ARM::GPRRegClassID &&
          OpInfo[3].RegClass < 0 &&
          "Expect >= 3 operands with one dst operand");
@@ -3294,7 +3295,7 @@
 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
     unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
 
   assert(NumOps >= 2 &&
          (OpInfo[0].RegClass == ARM::DPRRegClassID ||
@@ -3604,11 +3605,11 @@
 
   assert(NumOpsRemaining > 0 && "Invalid argument");
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   unsigned Idx = MI.getNumOperands();
 
   // First, we check whether this instr specifies the PredicateOperand through
-  // a pair of TargetOperandInfos with isPredicate() property.
+  // a pair of MCOperandInfos with isPredicate() property.
   if (NumOpsRemaining >= 2 &&
       OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
       OpInfo[Idx].RegClass < 0 &&
@@ -3636,13 +3637,13 @@
 
   assert(NumOpsRemaining > 0 && "Invalid argument");
 
-  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+  const MCOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
   const std::string &Name = ARMInsts[Opcode].Name;
   unsigned Idx = MI.getNumOperands();
   uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
 
   // First, we check whether this instr specifies the PredicateOperand through
-  // a pair of TargetOperandInfos with isPredicate() property.
+  // a pair of MCOperandInfos with isPredicate() property.
   if (NumOpsRemaining >= 2 &&
       OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
       OpInfo[Idx].RegClass < 0 &&

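The disassembler changes rename TargetInstrDesc/TargetOperandInfo/TOI to the MC-layer MCInstrDesc/MCOperandInfo/MCOI without changing the tied-operand logic. A minimal sketch of that recurring check in the new types, assuming the include/llvm/MC/MCInstrDesc.h header added by this merge; hasTiedOperand is an illustrative helper, the real code indexes the generated ARMInsts[] table directly:

  #include "llvm/MC/MCInstrDesc.h"

  // getOperandConstraint returns the index of the operand that operand OpIdx
  // is tied to, or -1 when it carries no TIED_TO constraint.
  static bool hasTiedOperand(const llvm::MCInstrDesc &MCID, unsigned OpIdx) {
    return MCID.getOperandConstraint(OpIdx, llvm::MCOI::TIED_TO) != -1;
  }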



