[llvm-branch-commits] [llvm-branch] r107464 - in /llvm/branches/wendling/eh: ./ cmake/modules/ docs/ docs/tutorial/ examples/ExceptionDemo/ examples/Kaleidoscope/Chapter4/ examples/Kaleidoscope/Chapter5/ examples/Kaleidoscope/Chapter6/ examples/Kaleidoscope/Chapter7/ include/llvm/ include/llvm/ADT/ include/llvm/Analysis/ include/llvm/CodeGen/ include/llvm/MC/ include/llvm/Support/ include/llvm/Target/ include/llvm/Transforms/Utils/ lib/Analysis/ lib/CodeGen/ lib/CodeGen/AsmPrinter/ lib/CodeGen/SelectionDAG/ lib/ExecutionEngin...
Bill Wendling
isanbard at gmail.com
Fri Jul 2 02:34:53 PDT 2010
Author: void
Date: Fri Jul 2 04:34:51 2010
New Revision: 107464
URL: http://llvm.org/viewvc/llvm-project?rev=107464&view=rev
Log:
Another merge to ToT.
Added:
llvm/branches/wendling/eh/include/llvm/CodeGen/PostRAHazardRecognizer.h
- copied unchanged from r106308, llvm/trunk/include/llvm/CodeGen/PostRAHazardRecognizer.h
llvm/branches/wendling/eh/include/llvm/MC/MCObjectStreamer.h
- copied unchanged from r106308, llvm/trunk/include/llvm/MC/MCObjectStreamer.h
llvm/branches/wendling/eh/lib/CodeGen/PostRAHazardRecognizer.cpp
- copied unchanged from r106308, llvm/trunk/lib/CodeGen/PostRAHazardRecognizer.cpp
llvm/branches/wendling/eh/lib/MC/MCObjectStreamer.cpp
- copied unchanged from r106308, llvm/trunk/lib/MC/MCObjectStreamer.cpp
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/call-tc.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/ARM/call-tc.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/crash-O0.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/ARM/crash-O0.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/ifcvt6-tc.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/ARM/ifcvt6-tc.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/insn-sched1-tc.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/ARM/insn-sched1-tc.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/ldm-tc.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/ARM/ldm-tc.ll
llvm/branches/wendling/eh/test/CodeGen/Generic/v-binop-widen.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/Generic/v-binop-widen.ll
llvm/branches/wendling/eh/test/CodeGen/Generic/v-binop-widen2.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/Generic/v-binop-widen2.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/crash.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/Thumb2/crash.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-call-tc.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/Thumb2/thumb2-call-tc.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-09-FastAllocRegisters.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/X86/2010-06-09-FastAllocRegisters.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
llvm/branches/wendling/eh/test/CodeGen/X86/hidden-vis-pic.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/X86/hidden-vis-pic.ll
llvm/branches/wendling/eh/test/CodeGen/X86/zext-sext.ll
- copied unchanged from r106308, llvm/trunk/test/CodeGen/X86/zext-sext.ll
llvm/branches/wendling/eh/test/FrontendC/2010-06-11-SaveExpr.c
- copied unchanged from r106308, llvm/trunk/test/FrontendC/2010-06-11-SaveExpr.c
llvm/branches/wendling/eh/test/FrontendC/2010-06-17-asmcrash.c
- copied unchanged from r106308, llvm/trunk/test/FrontendC/2010-06-17-asmcrash.c
llvm/branches/wendling/eh/test/Scripts/macho-dump.bat
- copied unchanged from r106308, llvm/trunk/test/Scripts/macho-dump.bat
llvm/branches/wendling/eh/test/TableGen/LetInsideMultiClasses.td
- copied unchanged from r106308, llvm/trunk/test/TableGen/LetInsideMultiClasses.td
llvm/branches/wendling/eh/test/TableGen/defmclass.td
- copied unchanged from r106308, llvm/trunk/test/TableGen/defmclass.td
llvm/branches/wendling/eh/test/TableGen/eqbit.td
- copied unchanged from r106308, llvm/trunk/test/TableGen/eqbit.td
llvm/branches/wendling/eh/test/TableGen/ifbit.td
- copied unchanged from r106308, llvm/trunk/test/TableGen/ifbit.td
llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/PR7357.ll
- copied unchanged from r106308, llvm/trunk/test/Transforms/SimplifyLibCalls/PR7357.ll
llvm/branches/wendling/eh/utils/TableGen/ClangAttrEmitter.cpp
- copied unchanged from r106308, llvm/trunk/utils/TableGen/ClangAttrEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/ClangAttrEmitter.h
- copied unchanged from r106308, llvm/trunk/utils/TableGen/ClangAttrEmitter.h
Removed:
llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.cpp
llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.h
llvm/branches/wendling/eh/lib/CodeGen/RegAllocLocal.cpp
llvm/branches/wendling/eh/lib/CodeGen/SimpleHazardRecognizer.h
llvm/branches/wendling/eh/test/CodeGen/X86/hidden-vis-5.ll
llvm/branches/wendling/eh/test/CodeGen/X86/local-liveness.ll
Modified:
llvm/branches/wendling/eh/ (props changed)
llvm/branches/wendling/eh/Makefile.rules
llvm/branches/wendling/eh/cmake/modules/TableGen.cmake
llvm/branches/wendling/eh/docs/CodeGenerator.html
llvm/branches/wendling/eh/docs/LangRef.html
llvm/branches/wendling/eh/docs/TableGenFundamentals.html
llvm/branches/wendling/eh/docs/tutorial/LangImpl3.html
llvm/branches/wendling/eh/docs/tutorial/LangImpl4.html
llvm/branches/wendling/eh/docs/tutorial/LangImpl5.html
llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html
llvm/branches/wendling/eh/docs/tutorial/LangImpl7.html
llvm/branches/wendling/eh/examples/ExceptionDemo/ExceptionDemo.cpp
llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter4/toy.cpp
llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp
llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp
llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp
llvm/branches/wendling/eh/include/llvm/ADT/DenseMap.h
llvm/branches/wendling/eh/include/llvm/ADT/FoldingSet.h
llvm/branches/wendling/eh/include/llvm/ADT/SmallVector.h
llvm/branches/wendling/eh/include/llvm/ADT/ilist.h
llvm/branches/wendling/eh/include/llvm/Analysis/DebugInfo.h
llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h
llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h
llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpressions.h
llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h
llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h
llvm/branches/wendling/eh/include/llvm/CodeGen/LinkAllCodegenComponents.h
llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h
llvm/branches/wendling/eh/include/llvm/CodeGen/MachineFrameInfo.h
llvm/branches/wendling/eh/include/llvm/CodeGen/MachineJumpTableInfo.h
llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h
llvm/branches/wendling/eh/include/llvm/CodeGen/Passes.h
llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h
llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h
llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGISel.h
llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h
llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h
llvm/branches/wendling/eh/include/llvm/MC/MCAssembler.h
llvm/branches/wendling/eh/include/llvm/Module.h
llvm/branches/wendling/eh/include/llvm/Support/Dwarf.h
llvm/branches/wendling/eh/include/llvm/Support/Timer.h
llvm/branches/wendling/eh/include/llvm/SymbolTableListTraits.h
llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h
llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h
llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
llvm/branches/wendling/eh/include/llvm/Target/TargetOpcodes.h
llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
llvm/branches/wendling/eh/include/llvm/Transforms/Utils/BuildLibCalls.h
llvm/branches/wendling/eh/include/llvm/Use.h
llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp
llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp
llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp
llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp
llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp
llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.h
llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp
llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt
llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp
llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.h
llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp
llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp
llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp
llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp
llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp
llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp
llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp
llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp
llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp
llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp
llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h
llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp
llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp
llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp
llvm/branches/wendling/eh/lib/ExecutionEngine/Interpreter/Execution.cpp
llvm/branches/wendling/eh/lib/MC/CMakeLists.txt
llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp
llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp
llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp
llvm/branches/wendling/eh/lib/Support/Dwarf.cpp
llvm/branches/wendling/eh/lib/Support/FileUtilities.cpp
llvm/branches/wendling/eh/lib/Support/Timer.cpp
llvm/branches/wendling/eh/lib/System/Disassembler.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARM.h
llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h
llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantPoolValue.h
llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h
llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td
llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td
llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td
llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp
llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp
llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp
llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h
llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.h
llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.h
llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp
llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.h
llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.h
llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.h
llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.h
llvm/branches/wendling/eh/lib/Target/PIC16/PIC16ISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.h
llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.h
llvm/branches/wendling/eh/lib/Target/README.txt
llvm/branches/wendling/eh/lib/Target/Sparc/SparcISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.h
llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.h
llvm/branches/wendling/eh/lib/Target/TargetInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/TargetLoweringObjectFile.cpp
llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
llvm/branches/wendling/eh/lib/Target/X86/README-X86-64.txt
llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td
llvm/branches/wendling/eh/lib/Target/X86/X86InstrFormats.td
llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h
llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp
llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp
llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.cpp
llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.h
llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp
llvm/branches/wendling/eh/lib/Transforms/Scalar/JumpThreading.cpp
llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyCFGPass.cpp
llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp
llvm/branches/wendling/eh/lib/Transforms/Utils/BuildLibCalls.cpp
llvm/branches/wendling/eh/lib/Transforms/Utils/DemoteRegToStack.cpp
llvm/branches/wendling/eh/lib/VMCore/Module.cpp
llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp
llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/2008-07-29-SMinExpr.ll
llvm/branches/wendling/eh/test/CMakeLists.txt
llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-01-CommuteBug.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-09-AllOnes.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-24-spill-align.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-10-27-double-align.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-11-01-NeonMoves.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-09-NeonSelect.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-13-v2f64SplitArg.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-14-SplitVector.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/arm-frameaddr.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/arm-returnaddr.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/armv4.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/indirectbr.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/trap.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/vdup.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll
llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll
llvm/branches/wendling/eh/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll
llvm/branches/wendling/eh/test/CodeGen/Generic/2010-ZeroSizedArg.ll
llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
llvm/branches/wendling/eh/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
llvm/branches/wendling/eh/test/CodeGen/PowerPC/cr_spilling.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/asmprinter-bug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/machine-licm.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/pop.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/push.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb/trap.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-05-24-rsbs.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless2.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/ifcvt-neon.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/pic-load.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/sign_extend_inreg.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cbnz.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll
llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-tbh.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-24.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-03-17-ISelBug.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-06-LocalInlineAsmClobber.ll
llvm/branches/wendling/eh/test/CodeGen/X86/MachineSink-CritEdge.ll
llvm/branches/wendling/eh/test/CodeGen/X86/fp-stack-O0-crash.ll
llvm/branches/wendling/eh/test/CodeGen/X86/liveness-local-regalloc.ll
llvm/branches/wendling/eh/test/CodeGen/X86/phys-reg-local-regalloc.ll
llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll
llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll
llvm/branches/wendling/eh/test/CodeGen/X86/vec-trunc-store.ll
llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll
llvm/branches/wendling/eh/test/FrontendC++/2010-02-17-DbgArtificialArg.cpp
llvm/branches/wendling/eh/test/FrontendC/pr5406.c
llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s
llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s
llvm/branches/wendling/eh/test/Other/2010-05-06-Printer.ll
llvm/branches/wendling/eh/test/Transforms/GVN/load-pre-align.ll
llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/indirectbr.ll
llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/single-element-range.ll
llvm/branches/wendling/eh/test/Transforms/InstCombine/call.ll
llvm/branches/wendling/eh/test/Transforms/InstCombine/crash.ll
llvm/branches/wendling/eh/test/Transforms/JumpThreading/crash.ll
llvm/branches/wendling/eh/test/Transforms/LoopUnswitch/preserve-analyses.ll
llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2010-01-18-SelfCopy.ll
llvm/branches/wendling/eh/test/Transforms/ScalarRepl/memcpy-align.ll
llvm/branches/wendling/eh/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrNCmp.ll
llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrStr.ll
llvm/branches/wendling/eh/test/lit.cfg
llvm/branches/wendling/eh/test/lit.site.cfg.in
llvm/branches/wendling/eh/tools/gold/gold-plugin.cpp
llvm/branches/wendling/eh/tools/llvm-mc/Makefile
llvm/branches/wendling/eh/utils/TableGen/ARMDecoderEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/CMakeLists.txt
llvm/branches/wendling/eh/utils/TableGen/DAGISelMatcherEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/InstrInfoEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.h
llvm/branches/wendling/eh/utils/TableGen/Record.cpp
llvm/branches/wendling/eh/utils/TableGen/RegisterInfoEmitter.cpp
llvm/branches/wendling/eh/utils/TableGen/TGParser.cpp
llvm/branches/wendling/eh/utils/TableGen/TGParser.h
llvm/branches/wendling/eh/utils/TableGen/TableGen.cpp
llvm/branches/wendling/eh/utils/TableGen/X86RecognizableInstr.cpp
llvm/branches/wendling/eh/utils/buildit/build_llvm
llvm/branches/wendling/eh/utils/lit/lit/TestRunner.py
Propchange: llvm/branches/wendling/eh/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jul 2 04:34:51 2010
@@ -1 +1 @@
-/llvm/trunk:104459-105731
+/llvm/trunk:104459-106308
Modified: llvm/branches/wendling/eh/Makefile.rules
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/Makefile.rules?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/Makefile.rules (original)
+++ llvm/branches/wendling/eh/Makefile.rules Fri Jul 2 04:34:51 2010
@@ -808,7 +808,7 @@
$(RecursiveTargets)::
$(Verb) for dir in $(DIRS); do \
if ([ ! -f $$dir/Makefile ] || \
- [ $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ]); then \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -831,7 +831,7 @@
$(RecursiveTargets)::
$(Verb) for dir in $(EXPERIMENTAL_DIRS); do \
if ([ ! -f $$dir/Makefile ] || \
- [ $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ]); then \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
@@ -866,8 +866,8 @@
$(ParallelTargets) :
$(Verb) if ([ ! -f $(@D)/Makefile ] || \
- [ $(@D)/Makefile -ot \
- $(PROJ_SRC_DIR)/$(@D)/Makefile ]); then \
+ command test $(@D)/Makefile -ot \
+ $(PROJ_SRC_DIR)/$(@D)/Makefile ); then \
$(MKDIR) $(@D); \
$(CP) $(PROJ_SRC_DIR)/$(@D)/Makefile $(@D)/Makefile; \
fi; \
@@ -887,7 +887,7 @@
$(Verb) for dir in $(OPTIONAL_DIRS); do \
if [ -d $(PROJ_SRC_DIR)/$$dir ]; then\
if ([ ! -f $$dir/Makefile ] || \
- [ $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ]); then \
+ command test $$dir/Makefile -ot $(PROJ_SRC_DIR)/$$dir/Makefile ); then \
$(MKDIR) $$dir; \
$(CP) $(PROJ_SRC_DIR)/$$dir/Makefile $$dir/Makefile; \
fi; \
Modified: llvm/branches/wendling/eh/cmake/modules/TableGen.cmake
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/cmake/modules/TableGen.cmake?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/cmake/modules/TableGen.cmake (original)
+++ llvm/branches/wendling/eh/cmake/modules/TableGen.cmake Fri Jul 2 04:34:51 2010
@@ -6,10 +6,16 @@
file(GLOB local_tds "*.td")
file(GLOB_RECURSE global_tds "${LLVM_MAIN_SRC_DIR}/include/llvm/*.td")
+ if (IS_ABSOLUTE ${LLVM_TARGET_DEFINITIONS})
+ set(LLVM_TARGET_DEFINITIONS_ABSOLUTE ${LLVM_TARGET_DEFINITIONS})
+ else()
+ set(LLVM_TARGET_DEFINITIONS_ABSOLUTE
+ ${CMAKE_CURRENT_SOURCE_DIR}/${LLVM_TARGET_DEFINITIONS})
+ endif()
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
COMMAND ${LLVM_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
-I ${LLVM_MAIN_SRC_DIR}/lib/Target -I ${LLVM_MAIN_INCLUDE_DIR}
- ${CMAKE_CURRENT_SOURCE_DIR}/${LLVM_TARGET_DEFINITIONS}
+ ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
-o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
DEPENDS tblgen ${local_tds} ${global_tds}
COMMENT "Building ${ofn}..."
Modified: llvm/branches/wendling/eh/docs/CodeGenerator.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/CodeGenerator.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/CodeGenerator.html (original)
+++ llvm/branches/wendling/eh/docs/CodeGenerator.html Fri Jul 2 04:34:51 2010
@@ -1594,22 +1594,22 @@
different register allocators:</p>
<ul>
- <li><i>Simple</i> — This is a very simple implementation that does not
- keep values in registers across instructions. This register allocator
- immediately spills every value right after it is computed, and reloads all
- used operands from memory to temporary registers before each
- instruction.</li>
-
- <li><i>Local</i> — This register allocator is an improvement on the
- <i>Simple</i> implementation. It allocates registers on a basic block
- level, attempting to keep values in registers and reusing registers as
- appropriate.</li>
-
<li><i>Linear Scan</i> — <i>The default allocator</i>. This is the
     well-known linear scan register allocator. Whereas the
<i>Simple</i> and <i>Local</i> algorithms use a direct mapping
implementation technique, the <i>Linear Scan</i> implementation
     uses a spiller in order to place loads and stores.</li>
+
+ <li><i>Fast</i> — This register allocator is the default for debug
+ builds. It allocates registers on a basic block level, attempting to keep
+ values in registers and reusing registers as appropriate.</li>
+
+ <li><i>PBQP</i> — A Partitioned Boolean Quadratic Programming (PBQP)
+ based register allocator. This allocator works by constructing a PBQP
+ problem representing the register allocation problem under consideration,
+ solving this using a PBQP solver, and mapping the solution back to a
+ register assignment.</li>
+
</ul>
<p>The type of register allocator used in <tt>llc</tt> can be chosen with the
@@ -1617,9 +1617,9 @@
<div class="doc_code">
<pre>
-$ llc -regalloc=simple file.bc -o sp.s;
-$ llc -regalloc=local file.bc -o lc.s;
$ llc -regalloc=linearscan file.bc -o ln.s;
+$ llc -regalloc=fast file.bc -o fa.s;
+$ llc -regalloc=pbqp file.bc -o pbqp.s;
</pre>
</div>
Modified: llvm/branches/wendling/eh/docs/LangRef.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/LangRef.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/LangRef.html (original)
+++ llvm/branches/wendling/eh/docs/LangRef.html Fri Jul 2 04:34:51 2010
@@ -1389,7 +1389,7 @@
<p>LLVM IR does not associate types with memory. The result type of a
<tt><a href="#i_load">load</a></tt> merely indicates the size and
alignment of the memory from which to load, as well as the
-interpretation of the value. The first operand of a
+interpretation of the value. The first operand type of a
<tt><a href="#i_store">store</a></tt> similarly only indicates the size
and alignment of the store.</p>
Modified: llvm/branches/wendling/eh/docs/TableGenFundamentals.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/TableGenFundamentals.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/TableGenFundamentals.html (original)
+++ llvm/branches/wendling/eh/docs/TableGenFundamentals.html Fri Jul 2 04:34:51 2010
@@ -422,11 +422,12 @@
<dt><tt>!null(a)</tt></dt>
<dd>An integer {0,1} indicating whether list 'a' is empty.</dd>
<dt><tt>!if(a,b,c)</tt></dt>
- <dd>'b' if the result of integer operator 'a' is nonzero, 'c' otherwise.</dd>
+ <dd>'b' if the result of 'int' or 'bit' operator 'a' is nonzero,
+ 'c' otherwise.</dd>
<dt><tt>!eq(a,b)</tt></dt>
<dd>Integer one if string a is equal to string b, zero otherwise. This
- only operates on string objects. Use !cast<string> to compare other
- types of objects.</dd>
+ only operates on string, int and bit objects. Use !cast<string> to
+ compare other types of objects.</dd>
</dl>
<p>Note that all of the values have rules specifying how they convert to values
@@ -731,6 +732,47 @@
</pre>
</div>
+<p>
+defm declarations can inherit from classes too. The
+rule to follow is that the class list must start after the
+last multiclass, and there must be at least one multiclass
+before them.
+</p>
+
+<div class="doc_code">
+<pre>
+<b>class</b> XD { bits<4> Prefix = 11; }
+<b>class</b> XS { bits<4> Prefix = 12; }
+
+<b>class</b> I<bits<4> op> {
+ bits<4> opcode = op;
+}
+
+<b>multiclass</b> R {
+ <b>def</b> rr : I<4>;
+ <b>def</b> rm : I<2>;
+}
+
+<b>multiclass</b> Y {
+ <b>defm</b> SS : R, XD;
+ <b>defm</b> SD : R, XS;
+}
+
+<b>defm</b> Instr : Y;
+
+<i>// Results</i>
+<b>def</b> InstrSDrm {
+ bits<4> opcode = { 0, 0, 1, 0 };
+ bits<4> Prefix = { 1, 1, 0, 0 };
+}
+...
+<b>def</b> InstrSSrr {
+ bits<4> opcode = { 0, 1, 0, 0 };
+ bits<4> Prefix = { 1, 0, 1, 1 };
+}
+</pre>
+</div>
+
</div>
<!-- ======================================================================= -->
@@ -798,6 +840,32 @@
need to be added to several records, and the records do not otherwise need to be
opened, as in the case with the <tt>CALL*</tt> instructions above.</p>
+<p>It's also possible to use "let" expressions inside multiclasses, providing
+more ways to factor out commonality from the records, especially when using
+several levels of multiclass instantiations. This also avoids the need to use
+"let" expressions within subsequent records inside a multiclass.</p>
+
+<div class="doc_code">
+<pre>
+<b>multiclass </b>basic_r<bits<4> opc> {
+ <b>let </b>Predicates = [HasSSE2] in {
+ <b>def </b>rr : Instruction<opc, "rr">;
+ <b>def </b>rm : Instruction<opc, "rm">;
+ }
+ <b>let </b>Predicates = [HasSSE3] in
+ <b>def </b>rx : Instruction<opc, "rx">;
+}
+
+<b>multiclass </b>basic_ss<bits<4> opc> {
+ <b>let </b>IsDouble = 0 in
+ <b>defm </b>SS : basic_r<opc>;
+
+ <b>let </b>IsDouble = 1 in
+ <b>defm </b>SD : basic_r<opc>;
+}
+
+<b>defm </b>ADD : basic_ss<0xf>;
+</pre>
</div>
<!-- *********************************************************************** -->
Modified: llvm/branches/wendling/eh/docs/tutorial/LangImpl3.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/LangImpl3.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/LangImpl3.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/LangImpl3.html Fri Jul 2 04:34:51 2010
@@ -200,9 +200,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
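
The same CreateAdd/CreateSub/CreateMul to CreateFAdd/CreateFSub/CreateFMul
substitution repeats in the remaining tutorial chapters and Kaleidoscope
examples below. A minimal sketch of the intent, assuming the tutorial's own
Builder, L and R names (all Kaleidoscope values are doubles):

    // Sketch only, using the tutorial's IRBuilder<> named Builder and the
    // double-typed operands L and R:
    llvm::Value *Sum = Builder.CreateFAdd(L, R, "addtmp");  // emits 'fadd double'
    // CreateAdd would build an integer 'add', which is no longer valid for
    // floating-point operands now that fadd/fsub/fmul are separate instructions.
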
Modified: llvm/branches/wendling/eh/docs/tutorial/LangImpl4.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/LangImpl4.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/LangImpl4.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/LangImpl4.html Fri Jul 2 04:34:51 2010
@@ -876,9 +876,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/docs/tutorial/LangImpl5.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/LangImpl5.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/LangImpl5.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/LangImpl5.html Fri Jul 2 04:34:51 2010
@@ -1377,9 +1377,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/LangImpl6.html Fri Jul 2 04:34:51 2010
@@ -277,9 +277,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
@@ -1392,9 +1392,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/docs/tutorial/LangImpl7.html
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/docs/tutorial/LangImpl7.html?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/docs/tutorial/LangImpl7.html (original)
+++ llvm/branches/wendling/eh/docs/tutorial/LangImpl7.html Fri Jul 2 04:34:51 2010
@@ -1672,9 +1672,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/examples/ExceptionDemo/ExceptionDemo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/ExceptionDemo/ExceptionDemo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/ExceptionDemo/ExceptionDemo.cpp (original)
+++ llvm/branches/wendling/eh/examples/ExceptionDemo/ExceptionDemo.cpp Fri Jul 2 04:34:51 2010
@@ -1574,7 +1574,7 @@
));
}
- ~OurCppRunException (void) throw () {};
+ ~OurCppRunException (void) throw () {}
};
Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter4/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter4/toy.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter4/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter4/toy.cpp Fri Jul 2 04:34:51 2010
@@ -374,9 +374,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter5/toy.cpp Fri Jul 2 04:34:51 2010
@@ -475,9 +475,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter6/toy.cpp Fri Jul 2 04:34:51 2010
@@ -571,9 +571,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp (original)
+++ llvm/branches/wendling/eh/examples/Kaleidoscope/Chapter7/toy.cpp Fri Jul 2 04:34:51 2010
@@ -667,9 +667,9 @@
if (L == 0 || R == 0) return 0;
switch (Op) {
- case '+': return Builder.CreateAdd(L, R, "addtmp");
- case '-': return Builder.CreateSub(L, R, "subtmp");
- case '*': return Builder.CreateMul(L, R, "multmp");
+ case '+': return Builder.CreateFAdd(L, R, "addtmp");
+ case '-': return Builder.CreateFSub(L, R, "subtmp");
+ case '*': return Builder.CreateFMul(L, R, "multmp");
case '<':
L = Builder.CreateFCmpULT(L, R, "cmptmp");
// Convert bool 0/1 to double 0.0 or 1.0
Modified: llvm/branches/wendling/eh/include/llvm/ADT/DenseMap.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/DenseMap.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/DenseMap.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/DenseMap.h Fri Jul 2 04:34:51 2010
@@ -22,6 +22,7 @@
#include <new>
#include <utility>
#include <cassert>
+#include <cstddef>
#include <cstring>
namespace llvm {
Modified: llvm/branches/wendling/eh/include/llvm/ADT/FoldingSet.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/FoldingSet.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/FoldingSet.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/FoldingSet.h Fri Jul 2 04:34:51 2010
@@ -196,6 +196,10 @@
template<typename T> struct FoldingSetTrait {
static inline void Profile(const T& X, FoldingSetNodeID& ID) { X.Profile(ID);}
static inline void Profile(T& X, FoldingSetNodeID& ID) { X.Profile(ID); }
+ template <typename Ctx>
+ static inline void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+ X.Profile(ID, Context);
+ }
};
//===--------------------------------------------------------------------===//
@@ -322,6 +326,77 @@
};
//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes. Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+/// void Profile(llvm::FoldingSetNodeID &, Ctx);
+template <class T, class Ctx>
+class ContextualFoldingSet : public FoldingSetImpl {
+ // Unfortunately, this can't derive from FoldingSet<T> because the
+ // construction vtable for FoldingSet<T> requires
+ // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+ // requires a single-argument T::Profile().
+
+private:
+ Ctx Context;
+
+ /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ virtual void GetNodeProfile(FoldingSetNodeID &ID,
+ FoldingSetImpl::Node *N) const {
+ T *TN = static_cast<T *>(N);
+
+ // We must use explicit template arguments in case Ctx is a
+ // reference type.
+ FoldingSetTrait<T>::template Profile<Ctx>(*TN, ID, Context);
+ }
+
+public:
+ explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+ : FoldingSetImpl(Log2InitSize), Context(Context)
+ {}
+
+ Ctx getContext() const { return Context; }
+
+
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N'
+ /// and return it instead.
+ T *GetOrInsertNode(Node *N) {
+ return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it
+ /// exists, return it. If not, return the insertion token that will
+ /// make insertion faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
+ }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
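
A minimal usage sketch of the ContextualFoldingSet added above; MyContext,
MyNode and their fields are made-up names for illustration, not part of this
patch:

    #include "llvm/ADT/FoldingSet.h"

    struct MyContext { unsigned Salt; };

    struct MyNode : llvm::FoldingSetNode {
      unsigned Value;
      explicit MyNode(unsigned V) : Value(V) {}
      // Two-argument Profile, reached through
      // FoldingSetTrait<MyNode>::Profile(X, ID, Ctx).
      void Profile(llvm::FoldingSetNodeID &ID, MyContext *Ctx) {
        ID.AddInteger(Ctx->Salt);
        ID.AddInteger(Value);
      }
    };

    void insertExample(MyContext *Ctx, MyNode *N) {
      // The context is fixed at construction time and passed to every
      // Profile call made while hashing or comparing nodes.
      llvm::ContextualFoldingSet<MyNode, MyContext *> Uniqued(Ctx);
      Uniqued.GetOrInsertNode(N);  // keyed on Ctx->Salt plus N->Value
    }
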
Modified: llvm/branches/wendling/eh/include/llvm/ADT/SmallVector.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/SmallVector.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/SmallVector.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/SmallVector.h Fri Jul 2 04:34:51 2010
@@ -17,6 +17,7 @@
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
+#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <memory>
@@ -268,6 +269,8 @@
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
+
+ SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
typedef typename SuperClass::size_type size_type;
Modified: llvm/branches/wendling/eh/include/llvm/ADT/ilist.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/ADT/ilist.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/ADT/ilist.h (original)
+++ llvm/branches/wendling/eh/include/llvm/ADT/ilist.h Fri Jul 2 04:34:51 2010
@@ -39,6 +39,7 @@
#define LLVM_ADT_ILIST_H
#include <cassert>
+#include <cstddef>
#include <iterator>
namespace llvm {
Modified: llvm/branches/wendling/eh/include/llvm/Analysis/DebugInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/DebugInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/DebugInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/DebugInfo.h Fri Jul 2 04:34:51 2010
@@ -56,6 +56,7 @@
}
GlobalVariable *getGlobalVariableField(unsigned Elt) const;
+ Function *getFunctionField(unsigned Elt) const;
public:
explicit DIDescriptor() : DbgNode(0) {}
@@ -409,6 +410,8 @@
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool describes(const Function *F);
+
+ Function *getFunction() const { return getFunctionField(16); }
};
/// DIGlobalVariable - This is a wrapper for a global variable.
@@ -658,7 +661,8 @@
unsigned VIndex = 0,
DIType = DIType(),
bool isArtificial = 0,
- bool isOptimized = false);
+ bool isOptimized = false,
+ Function *Fn = 0);
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
/// given declaration.
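
A short sketch of the new DISubprogram::getFunction accessor; SP is assumed to
be a valid descriptor obtained elsewhere (for example from a DebugInfoFinder
walk), and the printing is illustrative only:

    #include "llvm/Analysis/DebugInfo.h"
    #include "llvm/Function.h"
    #include "llvm/Support/raw_ostream.h"

    // Maps a debug-info subprogram descriptor back to the IR function it
    // describes, or does nothing if the new field is not set.
    static void printDescribedFunction(llvm::DISubprogram SP) {
      if (llvm::Function *F = SP.getFunction())
        llvm::errs() << "subprogram describes " << F->getName() << "\n";
    }
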
Modified: llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/Dominators.h Fri Jul 2 04:34:51 2010
@@ -704,7 +704,6 @@
}
~DominatorTree() {
- DT->releaseMemory();
delete DT;
}
Modified: llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolution.h Fri Jul 2 04:34:51 2010
@@ -54,10 +54,6 @@
/// The ScalarEvolution's BumpPtrAllocator holds the data.
FoldingSetNodeIDRef FastID;
- /// AllocationSequenceNumber - This is used as a deterministic tie
- /// breaker when sorting SCEVs.
- unsigned AllocationSequenceNumber;
-
// The SCEV baseclass this node corresponds to
const unsigned short SCEVType;
@@ -72,18 +68,11 @@
protected:
virtual ~SCEV();
public:
- explicit SCEV(const FoldingSetNodeIDRef ID, unsigned num, unsigned SCEVTy) :
- FastID(ID), AllocationSequenceNumber(num),
- SCEVType(SCEVTy), SubclassData(0) {}
+ explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
+ FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
unsigned getSCEVType() const { return SCEVType; }
- /// getAllocationSequenceNumber - Return an arbitrary value which can be
- /// used to deterministically order a sequence of SCEVs.
- unsigned getAllocationSequenceNumber() const {
- return AllocationSequenceNumber;
- }
-
/// Profile - FoldingSet support.
void Profile(FoldingSetNodeID& ID) { ID = FastID; }
@@ -541,10 +530,6 @@
/// widening.
const SCEV *getTruncateOrNoop(const SCEV *V, const Type *Ty);
- /// getIntegerSCEV - Given a SCEVable type, create a constant for the
- /// specified signed integer value and return a SCEV for the constant.
- const SCEV *getIntegerSCEV(int64_t Val, const Type *Ty);
-
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
@@ -678,7 +663,6 @@
private:
FoldingSet<SCEV> UniqueSCEVs;
BumpPtrAllocator SCEVAllocator;
- unsigned CurAllocationSequenceNumber;
};
}
Modified: llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpressions.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpressions.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpressions.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Analysis/ScalarEvolutionExpressions.h Fri Jul 2 04:34:51 2010
@@ -37,8 +37,8 @@
friend class ScalarEvolution;
ConstantInt *V;
- SCEVConstant(const FoldingSetNodeIDRef ID, unsigned Num, ConstantInt *v)
- : SCEV(ID, Num, scConstant), V(v) {}
+ SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
+ SCEV(ID, scConstant), V(v) {}
public:
ConstantInt *getValue() const { return V; }
@@ -81,7 +81,7 @@
const SCEV *Op;
const Type *Ty;
- SCEVCastExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty);
public:
@@ -120,7 +120,7 @@
class SCEVTruncateExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVTruncateExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty);
public:
@@ -140,7 +140,7 @@
class SCEVZeroExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty);
public:
@@ -160,7 +160,7 @@
class SCEVSignExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
- SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty);
public:
@@ -187,9 +187,9 @@
const SCEV *const *Operands;
size_t NumOperands;
- SCEVNAryExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID,
enum SCEVTypes T, const SCEV *const *O, size_t N)
- : SCEV(ID, Num, T), Operands(O), NumOperands(N) {}
+ : SCEV(ID, T), Operands(O), NumOperands(N) {}
public:
size_t getNumOperands() const { return NumOperands; }
@@ -262,9 +262,9 @@
///
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
- SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
enum SCEVTypes T, const SCEV *const *O, size_t N)
- : SCEVNAryExpr(ID, Num, T, O, N) {}
+ : SCEVNAryExpr(ID, T, O, N) {}
public:
virtual const char *getOperationStr() const = 0;
@@ -288,9 +288,9 @@
class SCEVAddExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVAddExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVAddExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
- : SCEVCommutativeExpr(ID, Num, scAddExpr, O, N) {
+ : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
}
public:
@@ -316,9 +316,9 @@
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVMulExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVMulExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
- : SCEVCommutativeExpr(ID, Num, scMulExpr, O, N) {
+ : SCEVCommutativeExpr(ID, scMulExpr, O, N) {
}
public:
@@ -340,9 +340,8 @@
const SCEV *LHS;
const SCEV *RHS;
- SCEVUDivExpr(const FoldingSetNodeIDRef ID, unsigned Num,
- const SCEV *lhs, const SCEV *rhs)
- : SCEV(ID, Num, scUDivExpr), LHS(lhs), RHS(rhs) {}
+ SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+ : SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}
public:
const SCEV *getLHS() const { return LHS; }
@@ -391,9 +390,9 @@
const Loop *L;
- SCEVAddRecExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N, const Loop *l)
- : SCEVNAryExpr(ID, Num, scAddRecExpr, O, N), L(l) {
+ : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {
for (size_t i = 0, e = NumOperands; i != e; ++i)
assert(Operands[i]->isLoopInvariant(l) &&
"Operands of AddRec must be loop-invariant!");
@@ -473,9 +472,9 @@
class SCEVSMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVSMaxExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
- : SCEVCommutativeExpr(ID, Num, scSMaxExpr, O, N) {
+ : SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
// Max never overflows.
setHasNoUnsignedWrap(true);
setHasNoSignedWrap(true);
@@ -498,9 +497,9 @@
class SCEVUMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
- SCEVUMaxExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID,
const SCEV *const *O, size_t N)
- : SCEVCommutativeExpr(ID, Num, scUMaxExpr, O, N) {
+ : SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
// Max never overflows.
setHasNoUnsignedWrap(true);
setHasNoSignedWrap(true);
@@ -525,8 +524,8 @@
friend class ScalarEvolution;
Value *V;
- SCEVUnknown(const FoldingSetNodeIDRef ID, unsigned Num, Value *v)
- : SCEV(ID, Num, scUnknown), V(v) {}
+ SCEVUnknown(const FoldingSetNodeIDRef ID, Value *v) :
+ SCEV(ID, scUnknown), V(v) {}
public:
Value *getValue() const { return V; }
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/FastISel.h Fri Jul 2 04:34:51 2010
@@ -286,7 +286,7 @@
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
- void FastEmitBranch(MachineBasicBlock *MBB);
+ void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
unsigned UpdateValueMap(const Value* I, unsigned Reg);
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/ISDOpcodes.h Fri Jul 2 04:34:51 2010
@@ -130,7 +130,7 @@
/// This node represents a target intrinsic function with no side effects.
/// The first operand is the ID number of the intrinsic from the
/// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
- /// node has returns the result of the intrinsic.
+ /// node returns the result of the intrinsic.
INTRINSIC_WO_CHAIN,
/// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/LinkAllCodegenComponents.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/LinkAllCodegenComponents.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/LinkAllCodegenComponents.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/LinkAllCodegenComponents.h Fri Jul 2 04:34:51 2010
@@ -33,7 +33,6 @@
(void) llvm::createDeadMachineInstructionElimPass();
- (void) llvm::createLocalRegisterAllocator();
(void) llvm::createFastRegisterAllocator();
(void) llvm::createLinearScanRegisterAllocator();
(void) llvm::createPBQPRegisterAllocator();
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/LiveInterval.h Fri Jul 2 04:34:51 2010
@@ -258,6 +258,8 @@
LiveRange(); // DO NOT IMPLEMENT
};
+ template <> struct isPodLike<LiveRange> { static const bool value = true; };
+
raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);
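The isPodLike specialization added above lets the ADT containers treat LiveRange as plain data (for example, relocating elements with a raw copy). A minimal standalone sketch of the trait idiom, with an illustrative Range type rather than the LLVM definitions:

    #include <cstdio>

    // Default: a type is not assumed to be POD-like.
    template <typename T> struct isPodLike { static const bool value = false; };

    // Opt a specific type in, the same way the diff does for LiveRange.
    struct Range { unsigned Start, End; };
    template <> struct isPodLike<Range> { static const bool value = true; };

    int main() {
      printf("Range is pod-like: %d\n", (int)isPodLike<Range>::value); // 1
      printf("int* is pod-like:  %d\n", (int)isPodLike<int*>::value);  // 0
    }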
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineFrameInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineFrameInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineFrameInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineFrameInfo.h Fri Jul 2 04:34:51 2010
@@ -36,7 +36,7 @@
int FrameIdx;
public:
- CalleeSavedInfo(unsigned R, int FI = 0)
+ explicit CalleeSavedInfo(unsigned R, int FI = 0)
: Reg(R), FrameIdx(FI) {}
// Accessors.
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineJumpTableInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineJumpTableInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineJumpTableInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineJumpTableInfo.h Fri Jul 2 04:34:51 2010
@@ -74,7 +74,7 @@
JTEntryKind EntryKind;
std::vector<MachineJumpTableEntry> JumpTables;
public:
- MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+ explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
JTEntryKind getEntryKind() const { return EntryKind; }
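This change and the CalleeSavedInfo change above both mark a single-argument constructor explicit, so a bare register number or enum value can no longer convert silently into one of these objects. A small standalone illustration of the effect; SavedReg here is a made-up stand-in, not the LLVM class:

    #include <vector>

    // Stand-in for CalleeSavedInfo: one meaningful unsigned argument.
    struct SavedReg {
      unsigned Reg;
      int FrameIdx;
      explicit SavedReg(unsigned R, int FI = 0) : Reg(R), FrameIdx(FI) {}
    };

    void record(const SavedReg &) {}

    int main() {
      record(SavedReg(42));        // OK: the conversion is spelled out
      // record(42);               // rejected now: would have silently built a
                                   //   SavedReg from a raw register number
      std::vector<SavedReg> CSI;
      CSI.push_back(SavedReg(7));  // CSI.push_back(7) is likewise rejected
      return 0;
    }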
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/MachineRegisterInfo.h Fri Jul 2 04:34:51 2010
@@ -35,7 +35,7 @@
/// RegClassVRegMap - This vector acts as a map from TargetRegisterClass to
/// virtual registers. For each target register class, it keeps a list of
/// virtual registers belonging to the class.
- std::vector<std::vector<unsigned> > RegClass2VRegMap;
+ std::vector<unsigned> *RegClass2VRegMap;
/// RegAllocHints - This vector records register allocation hints for virtual
/// registers. For each virtual register, it keeps a register and hint type
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/Passes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/Passes.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/Passes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/Passes.h Fri Jul 2 04:34:51 2010
@@ -90,12 +90,6 @@
///
FunctionPass *createRegisterAllocator(CodeGenOpt::Level OptLevel);
- /// LocalRegisterAllocation Pass - This pass register allocates the input code
- /// a basic block at a time, yielding code better than the simple register
- /// allocator, but not as good as a global allocator.
- ///
- FunctionPass *createLocalRegisterAllocator();
-
/// FastRegisterAllocation Pass - This pass register allocates as fast as
/// possible. It is best suited for debug code where live ranges are short.
///
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/RegisterCoalescer.h Fri Jul 2 04:34:51 2010
@@ -25,6 +25,9 @@
class RegallocQuery;
class AnalysisUsage;
class MachineInstr;
+ class TargetRegisterInfo;
+ class TargetRegisterClass;
+ class TargetInstrInfo;
/// An abstract interface for register coalescers. Coalescers must
/// implement this interface to be part of the coalescer analysis
@@ -141,6 +144,87 @@
return true;
}
};
+
+
+ /// CoalescerPair - A helper class for register coalescers. When deciding if
+ /// two registers can be coalesced, CoalescerPair can determine if a copy
+ /// instruction would become an identity copy after coalescing.
+ class CoalescerPair {
+ const TargetInstrInfo &tii_;
+ const TargetRegisterInfo &tri_;
+
+ /// dstReg_ - The register that will be left after coalescing. It can be a
+ /// virtual or physical register.
+ unsigned dstReg_;
+
+  /// srcReg_ - The virtual register that will be coalesced into dstReg_.
+ unsigned srcReg_;
+
+  /// subIdx_ - The subregister index of srcReg_ in dstReg_. It is possible to
+  /// coalesce srcReg_ into a subreg of the larger dstReg_ when dstReg_ is a
+  /// virtual register.
+ unsigned subIdx_;
+
+ /// partial_ - True when the original copy was a partial subregister copy.
+ bool partial_;
+
+  /// flipped_ - True when DstReg and SrcReg are reversed from the original
+  /// copy instruction.
+ bool flipped_;
+
+ /// newRC_ - The register class of the coalesced register, or NULL if dstReg_
+ /// is a physreg.
+ const TargetRegisterClass *newRC_;
+
+ /// compose - Compose subreg indices a and b, either may be 0.
+ unsigned compose(unsigned, unsigned) const;
+
+ /// isMoveInstr - Return true if MI is a move or subreg instruction.
+ bool isMoveInstr(const MachineInstr *MI, unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const;
+
+ public:
+ CoalescerPair(const TargetInstrInfo &tii, const TargetRegisterInfo &tri)
+ : tii_(tii), tri_(tri), dstReg_(0), srcReg_(0), subIdx_(0),
+ partial_(false), flipped_(false), newRC_(0) {}
+
+ /// setRegisters - set registers to match the copy instruction MI. Return
+ /// false if MI is not a coalescable copy instruction.
+ bool setRegisters(const MachineInstr*);
+
+ /// flip - Swap srcReg_ and dstReg_. Return false if swapping is impossible
+ /// because dstReg_ is a physical register, or subIdx_ is set.
+ bool flip();
+
+ /// isCoalescable - Return true if MI is a copy instruction that will become
+ /// an identity copy after coalescing.
+ bool isCoalescable(const MachineInstr*) const;
+
+ /// isPhys - Return true if DstReg is a physical register.
+ bool isPhys() const { return !newRC_; }
+
+ /// isPartial - Return true if the original copy instruction did not copy the
+ /// full register, but was a subreg operation.
+ bool isPartial() const { return partial_; }
+
+ /// isFlipped - Return true when getSrcReg is the register being defined by
+ /// the original copy instruction.
+ bool isFlipped() const { return flipped_; }
+
+ /// getDstReg - Return the register (virtual or physical) that will remain
+ /// after coalescing.
+ unsigned getDstReg() const { return dstReg_; }
+
+ /// getSrcReg - Return the virtual register that will be coalesced away.
+ unsigned getSrcReg() const { return srcReg_; }
+
+ /// getSubIdx - Return the subregister index in DstReg that SrcReg will be
+ /// coalesced into, or 0.
+ unsigned getSubIdx() const { return subIdx_; }
+
+ /// getNewRC - Return the register class of the coalesced register.
+ const TargetRegisterClass *getNewRC() const { return newRC_; }
+ };
}
// Because of the way .a files work, we must force the SimpleRC
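The CoalescerPair interface above is only declared in this header; its implementation lands elsewhere in the merge. As a rough standalone model of the intent, the setRegisters/isCoalescable pair behaves roughly like the sketch below. Copy and CoalescePlan are made-up illustrative types, not the LLVM API, and the real class additionally tracks sub-register indices, register classes, and physreg constraints:

    #include <cstdio>

    struct Copy {
      unsigned Dst;
      unsigned Src;
    };

    class CoalescePlan {
      unsigned DstReg = 0;   // register that will remain after coalescing
      unsigned SrcReg = 0;   // register that will be folded into DstReg

    public:
      // setRegisters-style step: remember the two sides of the copy we plan
      // to coalesce; reject degenerate copies.
      bool setRegisters(const Copy &C) {
        if (C.Dst == 0 || C.Src == 0 || C.Dst == C.Src)
          return false;
        DstReg = C.Dst;
        SrcReg = C.Src;
        return true;
      }

      // isCoalescable-style query: once SrcReg is merged into DstReg, a copy
      // between the two (in either direction) copies a register onto itself.
      bool isCoalescable(const Copy &C) const {
        return (C.Dst == DstReg && C.Src == SrcReg) ||
               (C.Dst == SrcReg && C.Src == DstReg);
      }
    };

    int main() {
      CoalescePlan P;
      if (P.setRegisters({/*Dst=*/5, /*Src=*/9})) {
        printf("copy 5<-9 identity after coalescing? %d\n", (int)P.isCoalescable({5, 9}));
        printf("copy 5<-7 identity after coalescing? %d\n", (int)P.isCoalescable({5, 7}));
      }
    }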
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAG.h Fri Jul 2 04:34:51 2010
@@ -678,15 +678,15 @@
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
- SDValue UpdateNodeOperands(SDValue N, SDValue Op);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4);
- SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5);
- SDValue UpdateNodeOperands(SDValue N,
+ SDNode *UpdateNodeOperands(SDNode *N,
const SDValue *Ops, unsigned NumOps);
/// SelectNodeTo - These are used for target selectors to *mutate* the
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGISel.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGISel.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGISel.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGISel.h Fri Jul 2 04:34:51 2010
@@ -292,7 +292,6 @@
MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB);
void LowerArguments(const BasicBlock *BB);
- void ShrinkDemandedOps();
void ComputeLiveOutVRegInfo();
/// Create the scheduler. If a specific scheduler was specified
Modified: llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/CodeGen/SelectionDAGNodes.h Fri Jul 2 04:34:51 2010
@@ -1082,6 +1082,7 @@
uint64_t getZExtValue() const { return Value->getZExtValue(); }
int64_t getSExtValue() const { return Value->getSExtValue(); }
+ bool isOne() const { return Value->isOne(); }
bool isNullValue() const { return Value->isNullValue(); }
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
@@ -1130,7 +1131,7 @@
}
bool isExactlyValue(const APFloat& V) const;
- bool isValueValidForType(EVT VT, const APFloat& Val);
+ static bool isValueValidForType(EVT VT, const APFloat& Val);
static bool classof(const ConstantFPSDNode *) { return true; }
static bool classof(const SDNode *N) {
Modified: llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h (original)
+++ llvm/branches/wendling/eh/include/llvm/IntrinsicInst.h Fri Jul 2 04:34:51 2010
@@ -136,7 +136,7 @@
return cast<ConstantInt>(const_cast<Value*>(getOperand(5)));
}
bool isVolatile() const {
- return getVolatileCst()->getZExtValue() != 0;
+ return !getVolatileCst()->isZero();
}
/// getDest - This is just like getRawDest, but it strips off any cast
Modified: llvm/branches/wendling/eh/include/llvm/MC/MCAssembler.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/MC/MCAssembler.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/MC/MCAssembler.h (original)
+++ llvm/branches/wendling/eh/include/llvm/MC/MCAssembler.h Fri Jul 2 04:34:51 2010
@@ -641,7 +641,7 @@
/// in the symbol table, or whether it can be discarded by the assembler. This
/// also affects whether the assembler treats the label as potentially
/// defining a separate atom.
- bool isSymbolLinkerVisible(const MCSymbolData *SD) const;
+ bool isSymbolLinkerVisible(const MCSymbol &SD) const;
/// Emit the section contents using the given object writer.
//
Modified: llvm/branches/wendling/eh/include/llvm/Module.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Module.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Module.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Module.h Fri Jul 2 04:34:51 2010
@@ -326,6 +326,7 @@
/// specified name. This method returns null if a NamedMDNode with the
/// specified name is not found.
NamedMDNode *getNamedMetadata(StringRef Name) const;
+ NamedMDNode *getNamedMetadataUsingTwine(Twine Name) const;
/// getOrInsertNamedMetadata - Return the first named MDNode in the module
/// with the specified name. This method returns a new NamedMDNode if a
Modified: llvm/branches/wendling/eh/include/llvm/Support/Dwarf.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/Dwarf.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/Dwarf.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/Dwarf.h Fri Jul 2 04:34:51 2010
@@ -300,12 +300,99 @@
DW_OP_ne = 0x2e,
DW_OP_lit0 = 0x30,
DW_OP_lit1 = 0x31,
+ DW_OP_lit2 = 0x32,
+ DW_OP_lit3 = 0x33,
+ DW_OP_lit4 = 0x34,
+ DW_OP_lit5 = 0x35,
+ DW_OP_lit6 = 0x36,
+ DW_OP_lit7 = 0x37,
+ DW_OP_lit8 = 0x38,
+ DW_OP_lit9 = 0x39,
+ DW_OP_lit10 = 0x3a,
+ DW_OP_lit11 = 0x3b,
+ DW_OP_lit12 = 0x3c,
+ DW_OP_lit13 = 0x3d,
+ DW_OP_lit14 = 0x3e,
+ DW_OP_lit15 = 0x3f,
+ DW_OP_lit16 = 0x40,
+ DW_OP_lit17 = 0x41,
+ DW_OP_lit18 = 0x42,
+ DW_OP_lit19 = 0x43,
+ DW_OP_lit20 = 0x44,
+ DW_OP_lit21 = 0x45,
+ DW_OP_lit22 = 0x46,
+ DW_OP_lit23 = 0x47,
+ DW_OP_lit24 = 0x48,
+ DW_OP_lit25 = 0x49,
+ DW_OP_lit26 = 0x4a,
+ DW_OP_lit27 = 0x4b,
+ DW_OP_lit28 = 0x4c,
+ DW_OP_lit29 = 0x4d,
+ DW_OP_lit30 = 0x4e,
DW_OP_lit31 = 0x4f,
DW_OP_reg0 = 0x50,
DW_OP_reg1 = 0x51,
+ DW_OP_reg2 = 0x52,
+ DW_OP_reg3 = 0x53,
+ DW_OP_reg4 = 0x54,
+ DW_OP_reg5 = 0x55,
+ DW_OP_reg6 = 0x56,
+ DW_OP_reg7 = 0x57,
+ DW_OP_reg8 = 0x58,
+ DW_OP_reg9 = 0x59,
+ DW_OP_reg10 = 0x5a,
+ DW_OP_reg11 = 0x5b,
+ DW_OP_reg12 = 0x5c,
+ DW_OP_reg13 = 0x5d,
+ DW_OP_reg14 = 0x5e,
+ DW_OP_reg15 = 0x5f,
+ DW_OP_reg16 = 0x60,
+ DW_OP_reg17 = 0x61,
+ DW_OP_reg18 = 0x62,
+ DW_OP_reg19 = 0x63,
+ DW_OP_reg20 = 0x64,
+ DW_OP_reg21 = 0x65,
+ DW_OP_reg22 = 0x66,
+ DW_OP_reg23 = 0x67,
+ DW_OP_reg24 = 0x68,
+ DW_OP_reg25 = 0x69,
+ DW_OP_reg26 = 0x6a,
+ DW_OP_reg27 = 0x6b,
+ DW_OP_reg28 = 0x6c,
+ DW_OP_reg29 = 0x6d,
+ DW_OP_reg30 = 0x6e,
DW_OP_reg31 = 0x6f,
DW_OP_breg0 = 0x70,
DW_OP_breg1 = 0x71,
+ DW_OP_breg2 = 0x72,
+ DW_OP_breg3 = 0x73,
+ DW_OP_breg4 = 0x74,
+ DW_OP_breg5 = 0x75,
+ DW_OP_breg6 = 0x76,
+ DW_OP_breg7 = 0x77,
+ DW_OP_breg8 = 0x78,
+ DW_OP_breg9 = 0x79,
+ DW_OP_breg10 = 0x7a,
+ DW_OP_breg11 = 0x7b,
+ DW_OP_breg12 = 0x7c,
+ DW_OP_breg13 = 0x7d,
+ DW_OP_breg14 = 0x7e,
+ DW_OP_breg15 = 0x7f,
+ DW_OP_breg16 = 0x80,
+ DW_OP_breg17 = 0x81,
+ DW_OP_breg18 = 0x82,
+ DW_OP_breg19 = 0x83,
+ DW_OP_breg20 = 0x84,
+ DW_OP_breg21 = 0x85,
+ DW_OP_breg22 = 0x86,
+ DW_OP_breg23 = 0x87,
+ DW_OP_breg24 = 0x88,
+ DW_OP_breg25 = 0x89,
+ DW_OP_breg26 = 0x8a,
+ DW_OP_breg27 = 0x8b,
+ DW_OP_breg28 = 0x8c,
+ DW_OP_breg29 = 0x8d,
+ DW_OP_breg30 = 0x8e,
DW_OP_breg31 = 0x8f,
DW_OP_regx = 0x90,
DW_OP_fbreg = 0x91,
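The new constants simply fill out the three contiguous DWARF opcode blocks (DW_OP_lit0..31 at 0x30-0x4f, DW_OP_reg0..31 at 0x50-0x6f, DW_OP_breg0..31 at 0x70-0x8f), so the opcode for a small literal, register, or base-register operand can be derived by adding the number to the block's base. A quick standalone check against a few of the values added above:

    #include <cstdio>

    enum : unsigned { DW_OP_lit0 = 0x30, DW_OP_reg0 = 0x50, DW_OP_breg0 = 0x70 };

    static unsigned litOp(unsigned N)  { return N < 32 ? DW_OP_lit0  + N : 0; }
    static unsigned regOp(unsigned N)  { return N < 32 ? DW_OP_reg0  + N : 0; }
    static unsigned bregOp(unsigned N) { return N < 32 ? DW_OP_breg0 + N : 0; }

    int main() {
      printf("DW_OP_lit13  = 0x%x\n", litOp(13));   // 0x3d
      printf("DW_OP_reg6   = 0x%x\n", regOp(6));    // 0x56
      printf("DW_OP_breg29 = 0x%x\n", bregOp(29));  // 0x8d
    }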
Modified: llvm/branches/wendling/eh/include/llvm/Support/Timer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Support/Timer.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Support/Timer.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Support/Timer.h Fri Jul 2 04:34:51 2010
@@ -150,8 +150,10 @@
/// is primarily used for debugging and for hunting performance problems.
///
struct NamedRegionTimer : public TimeRegion {
- explicit NamedRegionTimer(StringRef Name);
- explicit NamedRegionTimer(StringRef Name, StringRef GroupName);
+ explicit NamedRegionTimer(StringRef Name,
+ bool Enabled = true);
+ explicit NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled = true);
};
Modified: llvm/branches/wendling/eh/include/llvm/SymbolTableListTraits.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/SymbolTableListTraits.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/SymbolTableListTraits.h (original)
+++ llvm/branches/wendling/eh/include/llvm/SymbolTableListTraits.h Fri Jul 2 04:34:51 2010
@@ -47,9 +47,8 @@
/// of instructions, it returns the BasicBlock that owns them.
ItemParentClass *getListOwner() {
typedef iplist<ValueSubClass> ItemParentClass::*Sublist;
- Sublist Sub(ItemParentClass::
- getSublistAccess(static_cast<ValueSubClass*>(0)));
- size_t Offset(size_t(&((ItemParentClass*)0->*Sub)));
+ size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass::
+ getSublistAccess(static_cast<ValueSubClass*>(0)))));
iplist<ValueSubClass>* Anchor(static_cast<iplist<ValueSubClass>*>(this));
return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
Offset);
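The rewritten getListOwner still relies on the same trick: turn a pointer-to-member into a byte offset and subtract it from the embedded list's address to recover the owning object. A standalone sketch of the idea with made-up Owner/getListOwner types; LLVM derives the offset by applying the pointer-to-member to a null pointer rather than a probe object:

    #include <cstddef>
    #include <cstdio>
    #include <list>

    struct Owner {
      int Header;
      std::list<int> Sub;   // stands in for the embedded iplist<ValueSubClass>
    };

    static Owner *getListOwner(std::list<int> *Anchor) {
      std::list<int> Owner::*Member = &Owner::Sub;
      // Byte offset of the sublist member inside Owner.
      Owner Probe;
      size_t Offset = reinterpret_cast<char *>(&(Probe.*Member)) -
                      reinterpret_cast<char *>(&Probe);
      // Walk back from the member's address to the owner's address.
      return reinterpret_cast<Owner *>(reinterpret_cast<char *>(Anchor) - Offset);
    }

    int main() {
      Owner O;
      printf("recovered owner correctly: %d\n", (int)(getListOwner(&O.Sub) == &O)); // 1
    }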
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrDesc.h Fri Jul 2 04:34:51 2010
@@ -55,7 +55,7 @@
///
/// NOTE: This member should be considered to be private, all access should go
/// through "getRegClass(TRI)" below.
- unsigned short RegClass;
+ short RegClass;
/// Flags - These are flags from the TOI::OperandFlags enum.
unsigned short Flags;
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -20,12 +20,14 @@
namespace llvm {
class CalleeSavedInfo;
+class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MDNode;
class MCInst;
class SDNode;
+class ScheduleHazardRecognizer;
class SelectionDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
@@ -203,6 +205,14 @@
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const = 0;
+  /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+  /// two-address instruction inserted by the two-address pass.
+ virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
+ MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const {
+ // Do nothing.
+ }
+
/// duplicate - Create a duplicate of the Orig instruction in MF. This is like
/// MachineFunction::CloneMachineInstr(), but the target may update operands
/// that are required to be unique.
@@ -305,8 +315,9 @@
/// branch to analyze. At least this much must be implemented, else tail
/// merging needs to be disabled.
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
return 0;
}
@@ -567,6 +578,12 @@
/// length.
virtual unsigned getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const;
+
+  /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard recognizer
+ /// to use for this target when scheduling the machine instructions after
+ /// register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const = 0;
};
/// TargetInstrInfoImpl - This is the default implementation of
@@ -594,6 +611,9 @@
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1) const;
virtual unsigned GetFunctionSizeInBytes(const MachineFunction &MF) const;
+
+ virtual ScheduleHazardRecognizer *
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData&) const;
};
} // End llvm namespace
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetInstrItineraries.h Fri Jul 2 04:34:51 2010
@@ -106,7 +106,8 @@
/// Instruction itinerary Data - Itinerary data supplied by a subtarget to be
/// used by a target.
///
-struct InstrItineraryData {
+class InstrItineraryData {
+public:
const InstrStage *Stages; ///< Array of stages selected
const unsigned *OperandCycles; ///< Array of operand cycles selected
const InstrItinerary *Itineratries; ///< Array of itineraries selected
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetLowering.h Fri Jul 2 04:34:51 2010
@@ -766,14 +766,12 @@
SelectionDAG &DAG;
bool LegalTys;
bool LegalOps;
- bool ShrinkOps;
SDValue Old;
SDValue New;
explicit TargetLoweringOpt(SelectionDAG &InDAG,
- bool LT, bool LO,
- bool Shrink = false) :
- DAG(InDAG), LegalTys(LT), LegalOps(LO), ShrinkOps(Shrink) {}
+ bool LT, bool LO) :
+ DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
bool LegalTypes() const { return LegalTys; }
bool LegalOperations() const { return LegalOps; }
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetOpcodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetOpcodes.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetOpcodes.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetOpcodes.h Fri Jul 2 04:34:51 2010
@@ -36,22 +36,21 @@
/// truncation operations on target architectures which support it.
EXTRACT_SUBREG = 6,
- /// INSERT_SUBREG - This instruction takes three operands: a register
- /// that has subregisters, a register providing an insert value, and a
- /// subregister index. It returns the value of the first register with
- /// the value of the second register inserted. The first register is
- /// often defined by an IMPLICIT_DEF, as is commonly used to implement
+ /// INSERT_SUBREG - This instruction takes three operands: a register that
+ /// has subregisters, a register providing an insert value, and a
+ /// subregister index. It returns the value of the first register with the
+ /// value of the second register inserted. The first register is often
+ /// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
INSERT_SUBREG = 7,
/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
IMPLICIT_DEF = 8,
- /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except
- /// that the first operand is an immediate integer constant. This constant
- /// is often zero, as is commonly used to implement zext operations on
- /// target architectures which support it, such as with x86-64 (with
- /// zext from i32 to i64 via implicit zero-extension).
+ /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
+ /// the first operand is an immediate integer constant. This constant is
+ /// often zero, because it is commonly used to assert that the instruction
+ /// defining the register implicitly clears the high bits.
SUBREG_TO_REG = 9,
/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
Modified: llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Target/TargetRegisterInfo.h Fri Jul 2 04:34:51 2010
@@ -455,7 +455,7 @@
virtual unsigned getSubReg(unsigned RegNo, unsigned Index) const = 0;
/// getSubRegIndex - For a given register pair, return the sub-register index
- /// if the are second register is a sub-register of the first. Return zero
+ /// if the second register is a sub-register of the first. Return zero
/// otherwise.
virtual unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const = 0;
@@ -523,8 +523,8 @@
/// getRegClass - Returns the register class associated with the enumeration
/// value. See class TargetOperandInfo.
const TargetRegisterClass *getRegClass(unsigned i) const {
- assert(i <= getNumRegClasses() && "Register Class ID out of range");
- return i ? RegClassBegin[i - 1] : NULL;
+ assert(i < getNumRegClasses() && "Register Class ID out of range");
+ return RegClassBegin[i];
}
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
Modified: llvm/branches/wendling/eh/include/llvm/Transforms/Utils/BuildLibCalls.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Transforms/Utils/BuildLibCalls.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Transforms/Utils/BuildLibCalls.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Transforms/Utils/BuildLibCalls.h Fri Jul 2 04:34:51 2010
@@ -34,6 +34,10 @@
/// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD);
+ /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+ Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const TargetData *TD);
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
Modified: llvm/branches/wendling/eh/include/llvm/Use.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/include/llvm/Use.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/include/llvm/Use.h (original)
+++ llvm/branches/wendling/eh/include/llvm/Use.h Fri Jul 2 04:34:51 2010
@@ -27,6 +27,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/ADT/PointerIntPair.h"
+#include <cstddef>
#include <iterator>
namespace llvm {
Modified: llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ConstantFolding.cpp Fri Jul 2 04:34:51 2010
@@ -208,7 +208,7 @@
i != e; ++i, ++GTI) {
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
- if (CI->getZExtValue() == 0) continue; // Not adding anything.
+ if (CI->isZero()) continue; // Not adding anything.
if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Modified: llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/DebugInfo.cpp Fri Jul 2 04:34:51 2010
@@ -73,6 +73,15 @@
return 0;
}
+Function *DIDescriptor::getFunctionField(unsigned Elt) const {
+ if (DbgNode == 0)
+ return 0;
+
+ if (Elt < DbgNode->getNumOperands())
+ return dyn_cast_or_null<Function>(DbgNode->getOperand(Elt));
+ return 0;
+}
+
unsigned DIVariable::getNumAddrElements() const {
return DbgNode->getNumOperands()-6;
}
@@ -397,6 +406,8 @@
/// information for the function F.
bool DISubprogram::describes(const Function *F) {
assert(F && "Invalid function");
+ if (F == getFunction())
+ return true;
StringRef Name = getLinkageName();
if (Name.empty())
Name = getName();
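The describes() change gives the descriptor a fast path: compare the function pointer now stored directly in the subprogram node (via the new getFunctionField accessor) before falling back to name matching. A simplified standalone sketch of that control flow; Function and SubprogramDesc here are toy types, not the LLVM classes:

    #include <cstdio>
    #include <string>

    struct Function { std::string Name; };

    struct SubprogramDesc {
      const Function *Fn = nullptr;     // new direct field on the descriptor
      std::string LinkageName, Name;

      bool describes(const Function *F) const {
        if (F == Fn)                    // fast path: exact pointer match
          return true;
        const std::string &N = LinkageName.empty() ? Name : LinkageName;
        return N == F->Name;            // old path: compare by name
      }
    };

    int main() {
      Function F{"foo"};
      SubprogramDesc D;
      D.Fn = &F;
      D.Name = "foo";
      printf("%d\n", (int)D.describes(&F));   // 1, via the pointer fast path
    }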
@@ -938,7 +949,8 @@
unsigned VK, unsigned VIndex,
DIType ContainingType,
bool isArtificial,
- bool isOptimized) {
+ bool isOptimized,
+ Function *Fn) {
Value *Elts[] = {
GetTagConstant(dwarf::DW_TAG_subprogram),
@@ -956,9 +968,10 @@
ConstantInt::get(Type::getInt32Ty(VMContext), VIndex),
ContainingType,
ConstantInt::get(Type::getInt1Ty(VMContext), isArtificial),
- ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized)
+ ConstantInt::get(Type::getInt1Ty(VMContext), isOptimized),
+ Fn
};
- return DISubprogram(MDNode::get(VMContext, &Elts[0], 16));
+ return DISubprogram(MDNode::get(VMContext, &Elts[0], 17));
}
/// CreateSubprogramDefinition - Create new subprogram descriptor for the
@@ -1042,8 +1055,12 @@
// The optimizer may remove local variables. If there is an interest
// to preserve variable info in such a situation then stash it in a
// named mdnode.
- NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.lv");
- NMD->addOperand(Node);
+ DISubprogram Fn(getDISubprogram(Context));
+ const Twine FnLVName = Twine("llvm.dbg.lv.", Fn.getName());
+ NamedMDNode *FnLocals = M.getNamedMetadataUsingTwine(FnLVName);
+ if (!FnLocals)
+ FnLocals = NamedMDNode::Create(VMContext, FnLVName, NULL, 0, &M);
+ FnLocals->addOperand(Node);
}
return DIVariable(Node);
}
@@ -1110,18 +1127,6 @@
return DILocation(MDNode::get(VMContext, &Elts[0], 4));
}
-/// CreateLocation - Creates a debug info location.
-DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo,
- DIScope S, MDNode *OrigLoc) {
- Value *Elts[] = {
- ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),
- ConstantInt::get(Type::getInt32Ty(VMContext), ColumnNo),
- S,
- OrigLoc
- };
- return DILocation(MDNode::get(VMContext, &Elts[0], 4));
-}
-
//===----------------------------------------------------------------------===//
// DIFactory: Routines for inserting code into a function
//===----------------------------------------------------------------------===//
Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolution.cpp Fri Jul 2 04:34:51 2010
@@ -141,7 +141,7 @@
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
- SCEV(FoldingSetNodeIDRef(), 0, scCouldNotCompute) {}
+ SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
@@ -177,9 +177,7 @@
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
- V);
+ SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -200,9 +198,9 @@
WriteAsOperand(OS, V, false);
}
-SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
- : SCEV(ID, Num, SCEVTy), Op(op), Ty(ty) {}
+ : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
return Op->dominates(BB, DT);
@@ -212,9 +210,9 @@
return Op->properlyDominates(BB, DT);
}
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
- : SCEVCastExpr(ID, Num, scTruncate, op, ty) {
+ : SCEVCastExpr(ID, scTruncate, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate non-integer value!");
@@ -224,9 +222,9 @@
OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
- : SCEVCastExpr(ID, Num, scZeroExtend, op, ty) {
+ : SCEVCastExpr(ID, scZeroExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot zero extend non-integer value!");
@@ -236,9 +234,9 @@
OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, unsigned Num,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
- : SCEVCastExpr(ID, Num, scSignExtend, op, ty) {
+ : SCEVCastExpr(ID, scSignExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot sign extend non-integer value!");
@@ -507,14 +505,109 @@
return false;
// Primarily, sort the SCEVs by their getSCEVType().
- unsigned LST = LHS->getSCEVType();
- unsigned RST = RHS->getSCEVType();
- if (LST != RST)
- return LST < RST;
-
- // Then, pick an arbitrary deterministic sort.
- return LHS->getAllocationSequenceNumber() <
- RHS->getAllocationSequenceNumber();
+ if (LHS->getSCEVType() != RHS->getSCEVType())
+ return LHS->getSCEVType() < RHS->getSCEVType();
+
+ // Aside from the getSCEVType() ordering, the particular ordering
+ // isn't very important except that it's beneficial to be consistent,
+ // so that (a + b) and (b + a) don't end up as different expressions.
+
+ // Sort SCEVUnknown values with some loose heuristics. TODO: This is
+ // not as complete as it could be.
+ if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
+ const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
+
+ // Order pointer values after integer values. This helps SCEVExpander
+ // form GEPs.
+ if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
+ return false;
+ if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
+ return true;
+
+ // Compare getValueID values.
+ if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
+ return LU->getValue()->getValueID() < RU->getValue()->getValueID();
+
+ // Sort arguments by their position.
+ if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
+ const Argument *RA = cast<Argument>(RU->getValue());
+ return LA->getArgNo() < RA->getArgNo();
+ }
+
+ // For instructions, compare their loop depth, and their opcode.
+ // This is pretty loose.
+ if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
+ Instruction *RV = cast<Instruction>(RU->getValue());
+
+ // Compare loop depths.
+ if (LI->getLoopDepth(LV->getParent()) !=
+ LI->getLoopDepth(RV->getParent()))
+ return LI->getLoopDepth(LV->getParent()) <
+ LI->getLoopDepth(RV->getParent());
+
+ // Compare opcodes.
+ if (LV->getOpcode() != RV->getOpcode())
+ return LV->getOpcode() < RV->getOpcode();
+
+ // Compare the number of operands.
+ if (LV->getNumOperands() != RV->getNumOperands())
+ return LV->getNumOperands() < RV->getNumOperands();
+ }
+
+ return false;
+ }
+
+ // Compare constant values.
+ if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
+ const SCEVConstant *RC = cast<SCEVConstant>(RHS);
+ if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
+ return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
+ return LC->getValue()->getValue().ult(RC->getValue()->getValue());
+ }
+
+ // Compare addrec loop depths.
+ if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
+ if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
+ return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
+ }
+
+ // Lexicographically compare n-ary expressions.
+ if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
+ const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
+ for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
+ if (i >= RC->getNumOperands())
+ return false;
+ if (operator()(LC->getOperand(i), RC->getOperand(i)))
+ return true;
+ if (operator()(RC->getOperand(i), LC->getOperand(i)))
+ return false;
+ }
+ return LC->getNumOperands() < RC->getNumOperands();
+ }
+
+ // Lexicographically compare udiv expressions.
+ if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
+ const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
+ if (operator()(LC->getLHS(), RC->getLHS()))
+ return true;
+ if (operator()(RC->getLHS(), LC->getLHS()))
+ return false;
+ if (operator()(LC->getRHS(), RC->getRHS()))
+ return true;
+ if (operator()(RC->getRHS(), LC->getRHS()))
+ return false;
+ return false;
+ }
+
+ // Compare cast expressions by operand.
+ if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
+ const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
+ return operator()(LC->getOperand(), RC->getOperand());
+ }
+
+ llvm_unreachable("Unknown SCEV kind!");
+ return false;
}
};
}
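The net effect of this rewrite is that SCEVs are now ordered purely structurally (by type, then by operands, constant values, argument positions, or loop depth) instead of by allocation order, so the result no longer depends on object addresses or creation sequence. A toy standalone comparator in the same spirit; Expr and lessComplex are illustrative names only, not the LLVM types:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy expression node: a "kind" plus operands, loosely mirroring the shape
    // of the new SCEV ordering (kind first, then a structural tie-break).
    struct Expr {
      int Kind;                        // stands in for getSCEVType()
      long Value;                      // leaf payload
      std::vector<const Expr *> Ops;   // operands for n-ary nodes
    };

    // Deterministic ordering: kind first, then operands lexicographically,
    // then operand count, then the leaf payload. No allocation order involved.
    static bool lessComplex(const Expr *L, const Expr *R) {
      if (L->Kind != R->Kind)
        return L->Kind < R->Kind;
      for (size_t i = 0, e = L->Ops.size(); i != e; ++i) {
        if (i >= R->Ops.size()) return false;
        if (lessComplex(L->Ops[i], R->Ops[i])) return true;
        if (lessComplex(R->Ops[i], L->Ops[i])) return false;
      }
      if (L->Ops.size() != R->Ops.size())
        return L->Ops.size() < R->Ops.size();
      return L->Value < R->Value;
    }

    int main() {
      Expr A{0, 3, {}}, B{0, 1, {}};
      Expr C{1, 10, {&A, &B}}, D{1, 20, {&B, &A}};
      std::vector<const Expr *> Ops = {&C, &A, &D, &B};
      std::stable_sort(Ops.begin(), Ops.end(), lessComplex);
      for (const Expr *E : Ops)
        printf("kind=%d value=%ld\n", E->Kind, E->Value);
    }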
@@ -532,18 +625,36 @@
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
LoopInfo *LI) {
if (Ops.size() < 2) return; // Noop
-
- SCEVComplexityCompare Comp(LI);
-
if (Ops.size() == 2) {
// This is the common case, which also happens to be trivially simple.
// Special case it.
- if (Comp(Ops[1], Ops[0]))
+ if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
std::swap(Ops[0], Ops[1]);
return;
}
- std::stable_sort(Ops.begin(), Ops.end(), Comp);
+ // Do the rough sort by complexity.
+ std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
+
+ // Now that we are sorted by complexity, group elements of the same
+ // complexity. Note that this is, at worst, N^2, but the vector is likely to
+ // be extremely short in practice. Note that we take this approach because we
+ // do not want to depend on the addresses of the objects we are grouping.
+ for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
+ const SCEV *S = Ops[i];
+ unsigned Complexity = S->getSCEVType();
+
+ // If there are any objects of the same complexity and same value as this
+ // one, group them.
+ for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
+ if (Ops[j] == S) { // Found a duplicate.
+ // Move it to immediately after i'th element.
+ std::swap(Ops[i+1], Ops[j]);
+ ++i; // no need to rescan it.
+ if (i == e-2) return; // Done!
+ }
+ }
+ }
}
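GroupByComplexity now does the grouping explicitly: after the rough stable sort, pointer-equal SCEVs that landed in the same complexity run are pulled next to each other with the quadratic scan above. A toy standalone version of the same scan, operating on (complexity, identity) pairs instead of SCEV pointers:

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using Item = std::pair<int, int>;   // (complexity class, identity)

    // The sort only orders by the complexity class, so identical items may
    // still be separated inside a run of equal complexity; the inner scan
    // pulls duplicates next to each other without comparing addresses.
    static void groupByComplexity(std::vector<Item> &Ops) {
      if (Ops.size() < 2) return;
      std::stable_sort(Ops.begin(), Ops.end(),
                       [](const Item &L, const Item &R) { return L.first < R.first; });
      for (size_t i = 0, e = Ops.size(); i < e - 2; ++i) {
        const Item S = Ops[i];
        for (size_t j = i + 1; j != e && Ops[j].first == S.first; ++j)
          if (Ops[j] == S) {
            std::swap(Ops[i + 1], Ops[j]);  // move the duplicate right after i
            ++i;                            // no need to rescan it
            if (i == e - 2) return;         // nothing left to group
          }
      }
    }

    int main() {
      std::vector<Item> Ops = {{1, 7}, {0, 3}, {1, 9}, {1, 7}, {0, 3}};
      groupByComplexity(Ops);
      for (const Item &I : Ops)
        printf("(%d,%d) ", I.first, I.second);   // (0,3) (0,3) (1,7) (1,7) (1,9)
      printf("\n");
    }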
@@ -737,7 +848,6 @@
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
@@ -873,7 +983,6 @@
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
@@ -1009,7 +1118,6 @@
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
@@ -1511,7 +1619,6 @@
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
@@ -1646,17 +1753,9 @@
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
SmallVector<const SCEV *, 4> NewOps;
NewOps.reserve(AddRec->getNumOperands());
- if (LIOps.size() == 1) {
- const SCEV *Scale = LIOps[0];
- for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
- NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
- } else {
- for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
- SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
- MulOps.push_back(AddRec->getOperand(i));
- NewOps.push_back(getMulExpr(MulOps));
- }
- }
+ const SCEV *Scale = getMulExpr(LIOps);
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
+ NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
// It's tempting to propagate the NSW flag here, but nsw multiplication
// is not associative so this isn't necessarily safe.
@@ -1722,7 +1821,6 @@
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
@@ -1827,7 +1925,6 @@
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
return S;
@@ -1939,7 +2036,6 @@
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
std::uninitialized_copy(Operands.begin(), Operands.end(), O);
S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
}
@@ -2048,7 +2144,6 @@
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
@@ -2154,7 +2249,6 @@
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
@@ -2235,9 +2329,7 @@
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator),
- CurAllocationSequenceNumber++,
- V);
+ SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
@@ -2308,13 +2400,6 @@
return S;
}
-/// getIntegerSCEV - Given a SCEVable type, create a constant for the
-/// specified signed integer value and return a SCEV for the constant.
-const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
- const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
- return getConstant(ConstantInt::get(ITy, Val));
-}
-
/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
@@ -5597,7 +5682,7 @@
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution()
- : FunctionPass(&ID), CurAllocationSequenceNumber(0) {
+ : FunctionPass(&ID) {
}
bool ScalarEvolution::runOnFunction(Function &F) {
Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp Fri Jul 2 04:34:51 2010
@@ -12,7 +12,7 @@
//
// This differs from traditional loop dependence analysis in that it tests
// for dependencies within a single iteration of a loop, rather than
-// dependences between different iterations.
+// dependencies between different iterations.
//
// ScalarEvolution has a more complete understanding of pointer arithmetic
// than BasicAliasAnalysis' collection of ad-hoc analyses.
Modified: llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp (original)
+++ llvm/branches/wendling/eh/lib/Analysis/ScalarEvolutionExpander.cpp Fri Jul 2 04:34:51 2010
@@ -97,7 +97,7 @@
BasicBlock::iterator It = I; ++It;
if (isa<InvokeInst>(I))
It = cast<InvokeInst>(I)->getNormalDest()->begin();
- while (isa<PHINode>(It)) ++It;
+ while (isa<PHINode>(It) || isa<DbgInfoIntrinsic>(It)) ++It;
if (It != BasicBlock::iterator(CI)) {
// Recreate the cast after the user.
// The old cast is left in place in case it is being used
@@ -115,7 +115,7 @@
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
- while (isa<PHINode>(IP)) ++IP;
+ while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP)) ++IP;
Instruction *CI = CastInst::Create(Op, V, Ty, V->getName(), IP);
rememberInstruction(CI);
return CI;
@@ -1070,7 +1070,8 @@
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
BasicBlock::iterator NewInsertPt =
llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
- while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
+ while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt))
+ ++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
NewInsertPt);
restoreInsertPoint(SaveInsertBB, SaveInsertPt);
Modified: llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.cpp Fri Jul 2 04:34:51 2010
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -114,6 +115,7 @@
TargetSubtarget::RegClassVector& CriticalPathRCs) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
AllocatableSet(TRI->getAllocatableSet(MF)),
State(NULL) {
@@ -163,25 +165,27 @@
DefIndices[AliasReg] = ~0u;
}
}
- } else {
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ }
+
+ // In a non-return block, examine the live-in regs of all successors.
+ // Note a return block can have successors if the return instruction is
+ // predicated.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- State->UnionGroups(Reg, 0);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- State->UnionGroups(AliasReg, 0);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
- }
+ unsigned Reg = *I;
+ State->UnionGroups(Reg, 0);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ State->UnionGroups(AliasReg, 0);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
}
- }
+ }
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
@@ -390,7 +394,8 @@
// If MI's defs have a special allocation requirement, don't allow
// any def registers to be changed. Also assume all registers
// defined in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq()) {
+ if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ TII->isPredicated(MI)) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
@@ -443,6 +448,26 @@
std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>&
RegRefs = State->GetRegRefs();
+ // If MI's uses have a special allocation requirement, don't allow
+ // any use registers to be changed. Also assume all registers
+ // used in a call must not be changed (ABI).
+ // FIXME: The issue with predicated instructions is more complex. We are
+ // being conservative here because the kill markers cannot be trusted after
+ // if-conversion:
+ // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // ...
+ // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
+ // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
+ // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ //
+ // The first R6 kill is not really a kill since it's killed by a predicated
+ // instruction which may not be executed. The second R6 def may or may not
+ // re-define R6 so it's not safe to change it since the last R6 use cannot be
+ // changed.
+ bool Special = MI->getDesc().isCall() ||
+ MI->getDesc().hasExtraSrcRegAllocReq() ||
+ TII->isPredicated(MI);
+
// Scan the register uses for this instruction and update
// live-ranges, groups and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -459,10 +484,7 @@
// for the register.
HandleLastUse(Reg, Count, "(last-use)");
- // If MI's uses have special allocation requirement, don't allow
- // any use registers to be changed. Also assume all registers
- // used in a call must not be changed (ABI).
- if (MI->getDesc().isCall() || MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (Special) {
DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)");
State->UnionGroups(Reg, 0);
}
Modified: llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AggressiveAntiDepBreaker.h Fri Jul 2 04:34:51 2010
@@ -115,6 +115,7 @@
class AggressiveAntiDepBreaker : public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
+ const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
/// AllocatableSet - The set of allocatable registers.
Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/AsmPrinter.cpp Fri Jul 2 04:34:51 2010
@@ -330,7 +330,6 @@
else if (GVKind.isThreadData()) {
OutStreamer.SwitchSection(TheSection);
- EmitLinkage(GV->getLinkage(), MangSym);
EmitAlignment(AlignLog, GV);
OutStreamer.EmitLabel(MangSym);
@@ -428,20 +427,12 @@
// Emit pre-function debug and/or EH information.
if (DE) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName);
- DE->BeginFunction(MF);
- } else {
- DE->BeginFunction(MF);
- }
+ NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DE->BeginFunction(MF);
}
if (DD) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->beginFunction(MF);
- } else {
- DD->beginFunction(MF);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->beginFunction(MF);
}
}
@@ -611,12 +602,8 @@
}
if (ShouldPrintDebugScopes) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->beginScope(II);
- } else {
- DD->beginScope(II);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->beginScope(II);
}
if (isVerbose())
@@ -649,12 +636,8 @@
}
if (ShouldPrintDebugScopes) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->endScope(II);
- } else {
- DD->endScope(II);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->endScope(II);
}
}
}
@@ -692,20 +675,12 @@
// Emit post-function debug information.
if (DD) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->endFunction(MF);
- } else {
- DD->endFunction(MF);
- }
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DD->endFunction(MF);
}
if (DE) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName);
- DE->EndFunction();
- } else {
- DE->EndFunction();
- }
+ NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
+ DE->EndFunction();
}
MMI->EndFunction();
@@ -730,19 +705,15 @@
// Finalize debug and EH information.
if (DE) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(EHTimerName, DWARFGroupName);
- DE->EndModule();
- } else {
+ {
+ NamedRegionTimer T(EHTimerName, DWARFGroupName, TimePassesIsEnabled);
DE->EndModule();
}
delete DE; DE = 0;
}
if (DD) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- DD->endModule();
- } else {
+ {
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
DD->endModule();
}
delete DD; DD = 0;
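
(Illustration, not part of the diff: every hunk above replaces the duplicated
"if (TimePassesIsEnabled) { NamedRegionTimer T(...); X; } else { X; }" pattern
with a single NamedRegionTimer that takes the enable flag itself, so the timer
is a no-op when -time-passes is off. Below is a minimal standalone sketch of
that RAII idiom; ScopedRegionTimer and doWork are hypothetical stand-ins, not
LLVM's Timer classes.)

// Sketch of an RAII region timer that no-ops when timing is disabled.
#include <chrono>
#include <cstdio>

class ScopedRegionTimer {
  const char *Name;
  bool Enabled;
  std::chrono::steady_clock::time_point Start;
public:
  ScopedRegionTimer(const char *N, bool E) : Name(N), Enabled(E) {
    if (Enabled)
      Start = std::chrono::steady_clock::now();
  }
  ~ScopedRegionTimer() {
    if (!Enabled)
      return;                       // disabled: constructor/destructor do nothing
    long long US = std::chrono::duration_cast<std::chrono::microseconds>(
                       std::chrono::steady_clock::now() - Start).count();
    std::printf("%s: %lld us\n", Name, US);
  }
};

static void doWork() {
  volatile unsigned Sum = 0;
  for (unsigned i = 0; i != 1000000; ++i)
    Sum = Sum + i;
}

int main() {
  bool TimePassesIsEnabled = true;  // cf. the -time-passes flag
  {
    ScopedRegionTimer T("eh", TimePassesIsEnabled);
    doWork();                       // timed only when the flag is set
  }
  return 0;
}
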
Modified: llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/AsmPrinter/DwarfDebug.cpp Fri Jul 2 04:34:51 2010
@@ -322,11 +322,9 @@
DwarfStrSectionSym = TextSectionSym = 0;
DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0;
FunctionBeginSym = FunctionEndSym = 0;
- if (TimePassesIsEnabled) {
- NamedRegionTimer T(DbgTimerName, DWARFGroupName);
- beginModule(M);
- } else {
- beginModule(M);
+ {
+ NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled);
+ beginModule(M);
}
}
DwarfDebug::~DwarfDebug() {
@@ -866,6 +864,10 @@
} else if (Context.isNameSpace()) {
DIE *ContextDIE = getOrCreateNameSpace(DINameSpace(Context));
ContextDIE->addChild(Die);
+ } else if (Context.isSubprogram()) {
+ DIE *ContextDIE = createSubprogramDIE(DISubprogram(Context),
+ /*MakeDecl=*/false);
+ ContextDIE->addChild(Die);
} else if (DIE *ContextDIE = getCompileUnit(Context)->getDIE(Context))
ContextDIE->addChild(Die);
else
@@ -1055,6 +1057,10 @@
if (DIDescriptor(ContainingType).isCompositeType())
addDIEEntry(&Buffer, dwarf::DW_AT_containing_type, dwarf::DW_FORM_ref4,
getOrCreateTypeDIE(DIType(ContainingType)));
+ else {
+ DIDescriptor Context = CTy.getContext();
+ addToContextOwner(&Buffer, Context);
+ }
break;
}
default:
@@ -1329,6 +1335,9 @@
// DW_TAG_inlined_subroutine may refer to this DIE.
SPCU->insertDIE(SP, SPDie);
+ // Add to context owner.
+ addToContextOwner(SPDie, SP.getContext());
+
return SPDie;
}
@@ -2248,8 +2257,9 @@
}
// Collect info for variables that were optimized out.
- if (NamedMDNode *NMD =
- MF->getFunction()->getParent()->getNamedMetadata("llvm.dbg.lv")) {
+ const Twine FnLVName = Twine("llvm.dbg.lv.", MF->getFunction()->getName());
+ if (NamedMDNode *NMD =
+ MF->getFunction()->getParent()->getNamedMetadataUsingTwine(FnLVName)) {
for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
DIVariable DV(cast_or_null<MDNode>(NMD->getOperand(i)));
if (!DV || !Processed.insert(DV))
@@ -2342,7 +2352,13 @@
if (!WScope->getParent()) {
StringRef SPName = DISubprogram(Scope).getLinkageName();
- if (SPName == Asm->MF->getFunction()->getName())
+ // We used to check only for a linkage name, but that fails
+ // since we began omitting the linkage name for private
+ // functions. The new way is to check for the name in metadata,
+ // but that's not supported in old .ll test cases. Ergo, we
+ // check both.
+ if (SPName == Asm->MF->getFunction()->getName() ||
+ DISubprogram(Scope).getFunction() == Asm->MF->getFunction())
CurrentFnDbgScope = WScope;
}
@@ -2803,6 +2819,16 @@
Src = GetOrCreateSourceID(Dir, Fn);
}
+#if 0
+ if (!Lines.empty()) {
+ SrcLineInfo lastSrcLineInfo = Lines.back();
+ // Emitting sequential line records with the same line number (but
+ // different addresses) seems to confuse GDB. Avoid this.
+ if (lastSrcLineInfo.getLine() == Line)
+ return NULL;
+ }
+#endif
+
MCSymbol *Label = MMI->getContext().CreateTempSymbol();
Lines.push_back(SrcLineInfo(Line, Col, Src, Label));
Modified: llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/BranchFolding.cpp Fri Jul 2 04:34:51 2010
@@ -373,7 +373,8 @@
// If OldBB isn't immediately before OldBB, insert a branch to it.
if (++MachineFunction::iterator(OldBB) != MachineFunction::iterator(NewDest))
- TII->InsertBranch(*OldBB, NewDest, 0, SmallVector<MachineOperand, 0>());
+ TII->InsertBranch(*OldBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
+ OldInst->getDebugLoc());
OldBB->addSuccessor(NewDest);
++NumTailMerge;
}
@@ -443,18 +444,20 @@
MachineFunction::iterator I = llvm::next(MachineFunction::iterator(CurMBB));
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
+ DebugLoc dl; // FIXME: this is nowhere
if (I != MF->end() &&
!TII->AnalyzeBranch(*CurMBB, TBB, FBB, Cond, true)) {
MachineBasicBlock *NextBB = I;
if (TBB == NextBB && !Cond.empty() && !FBB) {
if (!TII->ReverseBranchCondition(Cond)) {
TII->RemoveBranch(*CurMBB);
- TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond);
+ TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond, dl);
return;
}
}
}
- TII->InsertBranch(*CurMBB, SuccBB, NULL, SmallVector<MachineOperand, 0>());
+ TII->InsertBranch(*CurMBB, SuccBB, NULL,
+ SmallVector<MachineOperand, 0>(), dl);
}
bool
@@ -874,10 +877,11 @@
}
// Remove the unconditional branch at the end, if any.
if (TBB && (Cond.empty() || FBB)) {
+ DebugLoc dl; // FIXME: this is nowhere
TII->RemoveBranch(*PBB);
if (!Cond.empty())
// reinsert conditional branch only, for now
- TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond);
+ TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
}
MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
}
@@ -976,6 +980,7 @@
bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
bool MadeChange = false;
MachineFunction &MF = *MBB->getParent();
+ DebugLoc dl; // FIXME: this is nowhere
ReoptimizeBlock:
MachineFunction::iterator FallThrough = MBB;
@@ -1027,7 +1032,7 @@
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1066,7 +1071,7 @@
// the condition is false, remove the uncond second branch.
if (PriorFBB == MBB) {
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1079,7 +1084,7 @@
SmallVector<MachineOperand, 4> NewPriorCond(PriorCond);
if (!TII->ReverseBranchCondition(NewPriorCond)) {
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond);
+ TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1116,7 +1121,7 @@
<< "To make fallthrough to: " << *PriorTBB << "\n");
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond);
+ TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond, dl);
// Move this block to the end of the function.
MBB->moveAfter(--MF.end());
@@ -1145,7 +1150,7 @@
SmallVector<MachineOperand, 4> NewCond(CurCond);
if (!TII->ReverseBranchCondition(NewCond)) {
TII->RemoveBranch(*MBB);
- TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond);
+ TII->InsertBranch(*MBB, CurFBB, CurTBB, NewCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@@ -1200,7 +1205,7 @@
PriorFBB = MBB;
}
TII->RemoveBranch(PrevBB);
- TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond);
+ TII->InsertBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, dl);
}
// Iterate through all the predecessors, revectoring each in-turn.
@@ -1226,7 +1231,7 @@
if (!NewCurUnAnalyzable && NewCurTBB && NewCurTBB == NewCurFBB) {
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
- TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond);
+ TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, dl);
MadeChange = true;
++NumBranchOpts;
PMBB->CorrectExtraCFGEdges(NewCurTBB, 0, false);
@@ -1246,7 +1251,7 @@
}
// Add the branch back if the block is more than just an uncond branch.
- TII->InsertBranch(*MBB, CurTBB, 0, CurCond);
+ TII->InsertBranch(*MBB, CurTBB, 0, CurCond, dl);
}
}
@@ -1286,7 +1291,7 @@
if (CurFallsThru) {
MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
CurCond.clear();
- TII->InsertBranch(*MBB, NextBB, 0, CurCond);
+ TII->InsertBranch(*MBB, NextBB, 0, CurCond, dl);
}
MBB->moveAfter(PredBB);
MadeChange = true;
Modified: llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CMakeLists.txt Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
add_llvm_library(LLVMCodeGen
- Analysis.cpp
AggressiveAntiDepBreaker.cpp
+ Analysis.cpp
BranchFolding.cpp
CalcSpillWeights.cpp
CodePlacementOpt.cpp
@@ -9,7 +9,6 @@
DwarfEHPrepare.cpp
ELFCodeEmitter.cpp
ELFWriter.cpp
- ExactHazardRecognizer.cpp
GCMetadata.cpp
GCMetadataPrinter.cpp
GCStrategy.cpp
@@ -45,6 +44,7 @@
OptimizePHIs.cpp
PHIElimination.cpp
Passes.cpp
+ PostRAHazardRecognizer.cpp
PostRASchedulerList.cpp
PreAllocSplitting.cpp
ProcessImplicitDefs.cpp
@@ -52,7 +52,6 @@
PseudoSourceValue.cpp
RegAllocFast.cpp
RegAllocLinearScan.cpp
- RegAllocLocal.cpp
RegAllocPBQP.cpp
RegisterCoalescer.cpp
RegisterScavenging.cpp
Modified: llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.cpp Fri Jul 2 04:34:51 2010
@@ -18,6 +18,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -29,6 +30,7 @@
CriticalAntiDepBreaker(MachineFunction& MFi) :
AntiDepBreaker(), MF(MFi),
MRI(MF.getRegInfo()),
+ TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
AllocatableSet(TRI->getAllocatableSet(MF))
{
@@ -71,25 +73,27 @@
DefIndices[AliasReg] = ~0u;
}
}
- } else {
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ }
+
+ // In a non-return block, examine the live-in regs of all successors.
+ // Note a return block can have successors if the return instruction is
+ // predicated.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
- }
+ unsigned Reg = *I;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
}
- }
+ }
// Mark live-out callee-saved registers. In a return block this is
// all callee-saved registers. In non-return this is any
@@ -164,6 +168,26 @@
}
void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
+ // It's not safe to change register allocation for source operands
+ // that have special allocation requirements. Also assume all registers
+ // used in a call must not be changed (ABI).
+ // FIXME: The issue with predicated instructions is more complex. We are
+ // being conservative here because the kill markers cannot be trusted after
+ // if-conversion:
+ // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // ...
+ // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
+ // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
+ // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ //
+ // The first R6 kill is not really a kill since it's killed by a predicated
+ // instruction which may not be executed. The second R6 def may or may not
+ // re-define R6 so it's not safe to change it since the last R6 use cannot be
+ // changed.
+ bool Special = MI->getDesc().isCall() ||
+ MI->getDesc().hasExtraSrcRegAllocReq() ||
+ TII->isPredicated(MI);
+
// Scan the register operands for this instruction and update
// Classes and RegRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -199,9 +223,7 @@
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
- // It's not safe to change register allocation for source operands of
- // that have special allocation requirements.
- if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (MO.isUse() && Special) {
if (KeepRegs.insert(Reg)) {
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg)
@@ -216,38 +238,43 @@
// Update liveness.
// Proceding upwards, registers that are defed but not used in this
// instruction are now dead.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isDef()) continue;
- // Ignore two-addr defs.
- if (MI->isRegTiedToUseOperand(i)) continue;
-
- DefIndices[Reg] = Count;
- KillIndices[Reg] = ~0u;
- assert(((KillIndices[Reg] == ~0u) !=
- (DefIndices[Reg] == ~0u)) &&
- "Kill and Def maps aren't consistent for Reg!");
- KeepRegs.erase(Reg);
- Classes[Reg] = 0;
- RegRefs.erase(Reg);
- // Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
- DefIndices[SubregReg] = Count;
- KillIndices[SubregReg] = ~0u;
- KeepRegs.erase(SubregReg);
- Classes[SubregReg] = 0;
- RegRefs.erase(SubregReg);
- }
- // Conservatively mark super-registers as unusable.
- for (const unsigned *Super = TRI->getSuperRegisters(Reg);
- *Super; ++Super) {
- unsigned SuperReg = *Super;
- Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+
+ if (!TII->isPredicated(MI)) {
+ // Predicated defs are modeled as read + write, i.e. similar to two
+ // address updates.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs.
+ if (MI->isRegTiedToUseOperand(i)) continue;
+
+ DefIndices[Reg] = Count;
+ KillIndices[Reg] = ~0u;
+ assert(((KillIndices[Reg] == ~0u) !=
+ (DefIndices[Reg] == ~0u)) &&
+ "Kill and Def maps aren't consistent for Reg!");
+ KeepRegs.erase(Reg);
+ Classes[Reg] = 0;
+ RegRefs.erase(Reg);
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ unsigned SubregReg = *Subreg;
+ DefIndices[SubregReg] = Count;
+ KillIndices[SubregReg] = ~0u;
+ KeepRegs.erase(SubregReg);
+ Classes[SubregReg] = 0;
+ RegRefs.erase(SubregReg);
+ }
+ // Conservatively mark super-registers as unusable.
+ for (const unsigned *Super = TRI->getSuperRegisters(Reg);
+ *Super; ++Super) {
+ unsigned SuperReg = *Super;
+ Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ }
}
}
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -478,7 +505,11 @@
PrescanInstruction(MI);
- if (MI->getDesc().hasExtraDefRegAllocReq())
+ // If MI's defs have a special allocation requirement, don't allow
+ // any def registers to be changed. Also assume all registers
+ // defined in a call must not be changed (ABI).
+ if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq() ||
+ TII->isPredicated(MI))
// If this instruction's defs have special allocation requirement, don't
// break this anti-dependency.
AntiDepReg = 0;
@@ -490,7 +521,7 @@
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- if (MO.isUse() && AntiDepReg == Reg) {
+ if (MO.isUse() && TRI->regsOverlap(AntiDepReg, Reg)) {
AntiDepReg = 0;
break;
}
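
(Illustration, not part of the diff: the last hunk above replaces the equality
test "AntiDepReg == Reg" with TRI->regsOverlap(AntiDepReg, Reg). On targets
with register aliasing, a use of a sub- or super-register of AntiDepReg must
also stop the anti-dependency from being broken. A standalone sketch of the
idea with a made-up alias relation; regsOverlap below is a toy stand-in for
TargetRegisterInfo::regsOverlap, and the register numbers are invented.)

// Toy model: D0 overlaps S0 and S1 (as on ARM VFP); equality misses that.
#include <cassert>

enum ToyReg { S0 = 1, S1 = 2, D0 = 3 };

// Returns true if the two registers share any hardware bits in the toy model.
static bool regsOverlap(unsigned A, unsigned B) {
  if (A == B)
    return true;
  bool AliasAB = (A == D0 && (B == S0 || B == S1));
  bool AliasBA = (B == D0 && (A == S0 || A == S1));
  return AliasAB || AliasBA;
}

int main() {
  unsigned AntiDepReg = D0;  // register we would like to rename
  unsigned UsedReg    = S0;  // register actually used by the instruction

  // The old equality check would have allowed the rename...
  assert(UsedReg != AntiDepReg);
  // ...but the overlap check correctly refuses to break the anti-dependency.
  assert(regsOverlap(AntiDepReg, UsedReg));
  return 0;
}
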
Modified: llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/CriticalAntiDepBreaker.h Fri Jul 2 04:34:51 2010
@@ -22,15 +22,18 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include <map>
namespace llvm {
+class TargetInstrInfo;
+class TargetRegisterInfo;
+
class CriticalAntiDepBreaker : public AntiDepBreaker {
MachineFunction& MF;
MachineRegisterInfo &MRI;
+ const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
/// AllocatableSet - The set of allocatable registers.
Modified: llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/DwarfEHPrepare.cpp Fri Jul 2 04:34:51 2010
@@ -89,6 +89,8 @@
/// initializer instead.
bool CleanupSelectors();
+ bool IsACleanupSelector(IntrinsicInst *);
+
/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);
@@ -186,25 +188,49 @@
return new DwarfEHPrepare(tm, fast);
}
+/// IsACleanupSelector - Return true if the intrinsic instruction is a clean-up
+/// selector instruction.
+bool DwarfEHPrepare::IsACleanupSelector(IntrinsicInst *II) {
+ unsigned NumOps = II->getNumOperands();
+ bool IsCleanUp = (NumOps == 3);
+
+ if (IsCleanUp)
+ return true;
+
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(3))) {
+ unsigned Val = CI->getZExtValue();
+
+ if (Val == 0 || Val + 3 == NumOps) {
+ // If the value is 0 or the selector has only filters in it, then it's
+ // a cleanup.
+ return true;
+ } else {
+ assert(Val + 3 < NumOps && "Ill-formed eh.selector!");
+
+ if (Val + 4 == NumOps) {
+ if (ConstantInt *FinalVal =
+ dyn_cast<ConstantInt>(II->getOperand(NumOps - 1)))
+ return FinalVal->isZero();
+ }
+ }
+ }
+
+ return false;
+}
+
/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
void DwarfEHPrepare::
FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
for (Value::use_iterator
I = SelectorIntrinsic->use_begin(),
E = SelectorIntrinsic->use_end(); I != E; ++I) {
- IntrinsicInst *SI = cast<IntrinsicInst>(I);
- if (!SI || SI->getParent()->getParent() != F) continue;
+ IntrinsicInst *II = cast<IntrinsicInst>(I);
- unsigned NumOps = SI->getNumOperands();
- if (NumOps > 4) continue;
- bool IsCleanUp = (NumOps == 3);
-
- if (!IsCleanUp)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getOperand(3)))
- IsCleanUp = (CI->getZExtValue() == 0);
+ if (II->getParent()->getParent() != F)
+ continue;
- if (IsCleanUp)
- Sels.insert(SI);
+ if (IsACleanupSelector(II))
+ Sels.insert(II);
}
}
@@ -360,21 +386,9 @@
// an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
// need to convert it to a 'catch-all'.
for (SmallPtrSet<IntrinsicInst*, 8>::iterator
- SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI) {
- IntrinsicInst *II = *SI;
- unsigned NumOps = II->getNumOperands();
-
- if (NumOps <= 4) {
- bool IsCleanUp = (NumOps == 3);
-
- if (!IsCleanUp)
- if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(3)))
- IsCleanUp = (CI->getZExtValue() == 0);
-
- if (IsCleanUp)
- SelsToConvert.insert(II);
- }
- }
+ SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
+ if (IsACleanupSelector(*SI))
+ SelsToConvert.insert(*SI);
}
}
}
@@ -394,6 +408,15 @@
// from the original selector.
Args.push_back(II->getOperand(1)); // Exception object pointer.
Args.push_back(II->getOperand(2)); // Personality function.
+
+ unsigned I = 3;
+ unsigned E = II->getNumOperands() -
+ (isa<ConstantInt>(II->getOperand(II->getNumOperands() - 1)) ? 1 : 0);
+
+ // Add in any filter IDs.
+ for (; I < E; ++I)
+ Args.push_back(II->getOperand(I));
+
Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
CallInst *NewSelector =
Removed: llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.cpp?rev=107463&view=auto
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.cpp (removed)
@@ -1,180 +0,0 @@
-//===----- ExactHazardRecognizer.cpp - hazard recognizer -------- ---------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements a hazard recognizer using the instructions itineraries
-// defined for the current target.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "post-RA-sched"
-#include "ExactHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrItineraries.h"
-
-using namespace llvm;
-
-ExactHazardRecognizer::
-ExactHazardRecognizer(const InstrItineraryData &LItinData) :
- ScheduleHazardRecognizer(), ItinData(LItinData)
-{
- // Determine the maximum depth of any itinerary. This determines the
- // depth of the scoreboard. We always make the scoreboard at least 1
- // cycle deep to avoid dealing with the boundary condition.
- unsigned ScoreboardDepth = 1;
- if (!ItinData.isEmpty()) {
- for (unsigned idx = 0; ; ++idx) {
- if (ItinData.isEndMarker(idx))
- break;
-
- const InstrStage *IS = ItinData.beginStage(idx);
- const InstrStage *E = ItinData.endStage(idx);
- unsigned ItinDepth = 0;
- for (; IS != E; ++IS)
- ItinDepth += IS->getCycles();
-
- ScoreboardDepth = std::max(ScoreboardDepth, ItinDepth);
- }
- }
-
- ReservedScoreboard.reset(ScoreboardDepth);
- RequiredScoreboard.reset(ScoreboardDepth);
-
- DEBUG(dbgs() << "Using exact hazard recognizer: ScoreboardDepth = "
- << ScoreboardDepth << '\n');
-}
-
-void ExactHazardRecognizer::Reset() {
- RequiredScoreboard.reset();
- ReservedScoreboard.reset();
-}
-
-void ExactHazardRecognizer::ScoreBoard::dump() const {
- dbgs() << "Scoreboard:\n";
-
- unsigned last = Depth - 1;
- while ((last > 0) && ((*this)[last] == 0))
- last--;
-
- for (unsigned i = 0; i <= last; i++) {
- unsigned FUs = (*this)[i];
- dbgs() << "\t";
- for (int j = 31; j >= 0; j--)
- dbgs() << ((FUs & (1 << j)) ? '1' : '0');
- dbgs() << '\n';
- }
-}
-
-ExactHazardRecognizer::HazardType ExactHazardRecognizer::getHazardType(SUnit *SU) {
- if (ItinData.isEmpty())
- return NoHazard;
-
- unsigned cycle = 0;
-
- // Use the itinerary for the underlying instruction to check for
- // free FU's in the scoreboard at the appropriate future cycles.
- unsigned idx = SU->getInstr()->getDesc().getSchedClass();
- for (const InstrStage *IS = ItinData.beginStage(idx),
- *E = ItinData.endStage(idx); IS != E; ++IS) {
- // We must find one of the stage's units free for every cycle the
- // stage is occupied. FIXME it would be more accurate to find the
- // same unit free in all the cycles.
- for (unsigned int i = 0; i < IS->getCycles(); ++i) {
- assert(((cycle + i) < RequiredScoreboard.getDepth()) &&
- "Scoreboard depth exceeded!");
-
- unsigned freeUnits = IS->getUnits();
- switch (IS->getReservationKind()) {
- default:
- assert(0 && "Invalid FU reservation");
- case InstrStage::Required:
- // Required FUs conflict with both reserved and required ones
- freeUnits &= ~ReservedScoreboard[cycle + i];
- // FALLTHROUGH
- case InstrStage::Reserved:
- // Reserved FUs can conflict only with required ones.
- freeUnits &= ~RequiredScoreboard[cycle + i];
- break;
- }
-
- if (!freeUnits) {
- DEBUG(dbgs() << "*** Hazard in cycle " << (cycle + i) << ", ");
- DEBUG(dbgs() << "SU(" << SU->NodeNum << "): ");
- DEBUG(SU->getInstr()->dump());
- return Hazard;
- }
- }
-
- // Advance the cycle to the next stage.
- cycle += IS->getNextCycles();
- }
-
- return NoHazard;
-}
-
-void ExactHazardRecognizer::EmitInstruction(SUnit *SU) {
- if (ItinData.isEmpty())
- return;
-
- unsigned cycle = 0;
-
- // Use the itinerary for the underlying instruction to reserve FU's
- // in the scoreboard at the appropriate future cycles.
- unsigned idx = SU->getInstr()->getDesc().getSchedClass();
- for (const InstrStage *IS = ItinData.beginStage(idx),
- *E = ItinData.endStage(idx); IS != E; ++IS) {
- // We must reserve one of the stage's units for every cycle the
- // stage is occupied. FIXME it would be more accurate to reserve
- // the same unit free in all the cycles.
- for (unsigned int i = 0; i < IS->getCycles(); ++i) {
- assert(((cycle + i) < RequiredScoreboard.getDepth()) &&
- "Scoreboard depth exceeded!");
-
- unsigned freeUnits = IS->getUnits();
- switch (IS->getReservationKind()) {
- default:
- assert(0 && "Invalid FU reservation");
- case InstrStage::Required:
- // Required FUs conflict with both reserved and required ones
- freeUnits &= ~ReservedScoreboard[cycle + i];
- // FALLTHROUGH
- case InstrStage::Reserved:
- // Reserved FUs can conflict only with required ones.
- freeUnits &= ~RequiredScoreboard[cycle + i];
- break;
- }
-
- // reduce to a single unit
- unsigned freeUnit = 0;
- do {
- freeUnit = freeUnits;
- freeUnits = freeUnit & (freeUnit - 1);
- } while (freeUnits);
-
- assert(freeUnit && "No function unit available!");
- if (IS->getReservationKind() == InstrStage::Required)
- RequiredScoreboard[cycle + i] |= freeUnit;
- else
- ReservedScoreboard[cycle + i] |= freeUnit;
- }
-
- // Advance the cycle to the next stage.
- cycle += IS->getNextCycles();
- }
-
- DEBUG(ReservedScoreboard.dump());
- DEBUG(RequiredScoreboard.dump());
-}
-
-void ExactHazardRecognizer::AdvanceCycle() {
- ReservedScoreboard[0] = 0; ReservedScoreboard.advance();
- RequiredScoreboard[0] = 0; RequiredScoreboard.advance();
-}
Removed: llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.h?rev=107463&view=auto
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/ExactHazardRecognizer.h (removed)
@@ -1,86 +0,0 @@
-//=- llvm/CodeGen/ExactHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the ExactHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_EXACTHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetInstrItineraries.h"
-
-namespace llvm {
- class ExactHazardRecognizer : public ScheduleHazardRecognizer {
- // ScoreBoard to track function unit usage. ScoreBoard[0] is a
- // mask of the FUs in use in the cycle currently being
- // schedule. ScoreBoard[1] is a mask for the next cycle. The
- // ScoreBoard is used as a circular buffer with the current cycle
- // indicated by Head.
- class ScoreBoard {
- unsigned *Data;
-
- // The maximum number of cycles monitored by the Scoreboard. This
- // value is determined based on the target itineraries to ensure
- // that all hazards can be tracked.
- size_t Depth;
- // Indices into the Scoreboard that represent the current cycle.
- size_t Head;
- public:
- ScoreBoard():Data(NULL), Depth(0), Head(0) { }
- ~ScoreBoard() {
- delete[] Data;
- }
-
- size_t getDepth() const { return Depth; }
- unsigned& operator[](size_t idx) const {
- assert(Depth && "ScoreBoard was not initialized properly!");
-
- return Data[(Head + idx) % Depth];
- }
-
- void reset(size_t d = 1) {
- if (Data == NULL) {
- Depth = d;
- Data = new unsigned[Depth];
- }
-
- memset(Data, 0, Depth * sizeof(Data[0]));
- Head = 0;
- }
-
- void advance() {
- Head = (Head + 1) % Depth;
- }
-
- // Print the scoreboard.
- void dump() const;
- };
-
- // Itinerary data for the target.
- const InstrItineraryData &ItinData;
-
- ScoreBoard ReservedScoreboard;
- ScoreBoard RequiredScoreboard;
-
- public:
- ExactHazardRecognizer(const InstrItineraryData &ItinData);
-
- virtual HazardType getHazardType(SUnit *SU);
- virtual void Reset();
- virtual void EmitInstruction(SUnit *SU);
- virtual void AdvanceCycle();
- };
-}
-
-#endif
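
(Illustration, not part of the diff: in the CMakeLists hunk above this file is
replaced by PostRAHazardRecognizer.cpp, and its central data structure is the
ScoreBoard shown here -- a circular buffer of per-cycle FU masks indexed
relative to Head via Data[(Head + idx) % Depth], where AdvanceCycle() clears
the current slot and rotates the window. A toy standalone sketch of that
indexing; the class below is not the LLVM one.)

// Toy circular scoreboard: index 0 is always "the current cycle";
// advance() clears the slot being left and rotates the window.
#include <cassert>
#include <vector>

class ToyScoreBoard {
  std::vector<unsigned> Data;
  size_t Head;
public:
  explicit ToyScoreBoard(size_t Depth) : Data(Depth, 0), Head(0) {}

  unsigned &operator[](size_t Idx) {
    return Data[(Head + Idx) % Data.size()];  // Idx cycles into the future
  }

  void advance() {
    (*this)[0] = 0;                  // retire the cycle we are leaving
    Head = (Head + 1) % Data.size();
  }
};

int main() {
  ToyScoreBoard SB(4);
  SB[0] |= 0x1;   // FU 0 is busy this cycle
  SB[2] |= 0x2;   // FU 1 reserved two cycles from now
  SB.advance();   // one cycle passes
  assert(SB[1] == 0x2);  // the reservation is now one cycle away
  assert(SB[3] == 0x0);  // the slot that was "now" has been cleared
  return 0;
}
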
Modified: llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/IfConversion.cpp Fri Jul 2 04:34:51 2010
@@ -20,6 +20,7 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -33,20 +34,22 @@
static cl::opt<int> IfCvtFnStart("ifcvt-fn-start", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtFnStop("ifcvt-fn-stop", cl::init(-1), cl::Hidden);
static cl::opt<int> IfCvtLimit("ifcvt-limit", cl::init(-1), cl::Hidden);
-static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
+static cl::opt<bool> DisableSimple("disable-ifcvt-simple",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
+static cl::opt<bool> DisableSimpleF("disable-ifcvt-simple-false",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
+static cl::opt<bool> DisableTriangle("disable-ifcvt-triangle",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
+static cl::opt<bool> DisableTriangleR("disable-ifcvt-triangle-rev",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
+static cl::opt<bool> DisableTriangleF("disable-ifcvt-triangle-false",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
+static cl::opt<bool> DisableTriangleFR("disable-ifcvt-triangle-false-rev",
cl::init(false), cl::Hidden);
-static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
+static cl::opt<bool> DisableDiamond("disable-ifcvt-diamond",
cl::init(false), cl::Hidden);
+static cl::opt<bool> IfCvtBranchFold("ifcvt-branch-fold",
+ cl::init(true), cl::Hidden);
STATISTIC(NumSimple, "Number of simple if-conversions performed");
STATISTIC(NumSimpleFalse, "Number of simple (F) if-conversions performed");
@@ -115,7 +118,7 @@
BB(0), TrueBB(0), FalseBB(0) {}
};
- /// IfcvtToken - Record information about pending if-conversions to attemp:
+ /// IfcvtToken - Record information about pending if-conversions to attempt:
/// BBI - Corresponding BBInfo.
/// Kind - Type of block. See IfcvtKind.
/// NeedSubsumption - True if the to-be-predicated BB has already been
@@ -146,6 +149,7 @@
const TargetLowering *TLI;
const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
bool MadeChange;
int FnNum;
public:
@@ -167,8 +171,7 @@
std::vector<IfcvtToken*> &Tokens);
bool FeasibilityAnalysis(BBInfo &BBI, SmallVectorImpl<MachineOperand> &Cond,
bool isTriangle = false, bool RevBranch = false);
- bool AnalyzeBlocks(MachineFunction &MF,
- std::vector<IfcvtToken*> &Tokens);
+ void AnalyzeBlocks(MachineFunction &MF, std::vector<IfcvtToken*> &Tokens);
void InvalidatePreds(MachineBasicBlock *BB);
void RemoveExtraEdges(BBInfo &BBI);
bool IfConvertSimple(BBInfo &BBI, IfcvtKind Kind);
@@ -177,9 +180,11 @@
unsigned NumDups1, unsigned NumDups2);
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
- SmallVectorImpl<MachineOperand> &Cond);
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs,
bool IgnoreBr = false);
void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI);
@@ -227,6 +232,7 @@
bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
TLI = MF.getTarget().getTargetLowering();
TII = MF.getTarget().getInstrInfo();
+ TRI = MF.getTarget().getRegisterInfo();
if (!TII) return false;
DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'"
@@ -253,7 +259,8 @@
while (IfCvtLimit == -1 || (int)NumIfCvts < IfCvtLimit) {
// Do an initial analysis for each basic block and find all the potential
// candidates to perform if-conversion.
- bool Change = AnalyzeBlocks(MF, Tokens);
+ bool Change = false;
+ AnalyzeBlocks(MF, Tokens);
while (!Tokens.empty()) {
IfcvtToken *Token = Tokens.back();
Tokens.pop_back();
@@ -281,7 +288,8 @@
case ICSimpleFalse: {
bool isFalse = Kind == ICSimpleFalse;
if ((isFalse && DisableSimpleF) || (!isFalse && DisableSimple)) break;
- DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ? " false" :"")
+ DEBUG(dbgs() << "Ifcvt (Simple" << (Kind == ICSimpleFalse ?
+ " false" : "")
<< "): BB#" << BBI.BB->getNumber() << " ("
<< ((Kind == ICSimpleFalse)
? BBI.FalseBB->getNumber()
@@ -361,7 +369,7 @@
Roots.clear();
BBAnalysis.clear();
- if (MadeChange) {
+ if (MadeChange && !IfCvtBranchFold) {
BranchFolder BF(false);
BF.OptimizeFunction(MF, TII,
MF.getTarget().getRegisterInfo(),
@@ -387,9 +395,10 @@
/// ReverseBranchCondition - Reverse the condition of the end of the block
/// branch. Swap block's 'true' and 'false' successors.
bool IfConverter::ReverseBranchCondition(BBInfo &BBI) {
+ DebugLoc dl; // FIXME: this is nowhere
if (!TII->ReverseBranchCondition(BBI.BrCond)) {
TII->RemoveBranch(*BBI.BB);
- TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond);
+ TII->InsertBranch(*BBI.BB, BBI.FalseBB, BBI.TrueBB, BBI.BrCond, dl);
std::swap(BBI.TrueBB, BBI.FalseBB);
return true;
}
@@ -431,7 +440,7 @@
/// ValidTriangle - Returns true if the 'true' and 'false' blocks (along
/// with their common predecessor) forms a valid triangle shape for ifcvt.
/// If 'FalseBranch' is true, it checks if 'true' block's false branch
-/// branches to the false branch rather than the other way around. It also
+/// branches to the 'false' block rather than the other way around. It also
/// returns the number of instructions that the ifcvt would need to duplicate
/// if performed in 'Dups'.
bool IfConverter::ValidTriangle(BBInfo &TrueBBI, BBInfo &FalseBBI,
@@ -570,7 +579,7 @@
// No false branch. This BB must end with a conditional branch and a
// fallthrough.
if (!BBI.FalseBB)
- BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
+ BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
if (!BBI.FalseBB) {
// Malformed bcc? True and false blocks are the same?
BBI.IsUnpredicable = true;
@@ -749,7 +758,7 @@
Tokens.push_back(new IfcvtToken(BBI, ICTriangle, TNeedSub, Dups));
Enqueued = true;
}
-
+
if (ValidTriangle(TrueBBI, FalseBBI, true, Dups) &&
MeetIfcvtSizeLimit(TrueBBI.NonPredSize) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
@@ -765,7 +774,7 @@
// | \_
// | |
// | TBB---> exit
- // |
+ // |
// FBB
Tokens.push_back(new IfcvtToken(BBI, ICSimple, TNeedSub, Dups));
Enqueued = true;
@@ -802,11 +811,9 @@
}
/// AnalyzeBlocks - Analyze all blocks and find entries for all if-conversion
-/// candidates. It returns true if any CFG restructuring is done to expose more
-/// if-conversion opportunities.
-bool IfConverter::AnalyzeBlocks(MachineFunction &MF,
+/// candidates.
+void IfConverter::AnalyzeBlocks(MachineFunction &MF,
std::vector<IfcvtToken*> &Tokens) {
- bool Change = false;
std::set<MachineBasicBlock*> Visited;
for (unsigned i = 0, e = Roots.size(); i != e; ++i) {
for (idf_ext_iterator<MachineBasicBlock*> I=idf_ext_begin(Roots[i],Visited),
@@ -818,20 +825,23 @@
// Sort to favor more complex ifcvt scheme.
std::stable_sort(Tokens.begin(), Tokens.end(), IfcvtTokenCmp);
-
- return Change;
}
/// canFallThroughTo - Returns true either if ToBB is the next block after BB or
/// that all the intervening blocks are empty (given BB can fall through to its
/// next block).
static bool canFallThroughTo(MachineBasicBlock *BB, MachineBasicBlock *ToBB) {
- MachineFunction::iterator I = BB;
+ MachineFunction::iterator PI = BB;
+ MachineFunction::iterator I = llvm::next(PI);
MachineFunction::iterator TI = ToBB;
MachineFunction::iterator E = BB->getParent()->end();
- while (++I != TI)
- if (I == E || !I->empty())
+ while (I != TI) {
+ // Check isSuccessor to avoid the case where the next block is empty, but
+ // it's not a successor.
+ if (I == E || !I->empty() || !PI->isSuccessor(I))
return false;
+ PI = I++;
+ }
return true;
}
@@ -853,8 +863,9 @@
///
static void InsertUncondBranch(MachineBasicBlock *BB, MachineBasicBlock *ToBB,
const TargetInstrInfo *TII) {
+ DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 0> NoCond;
- TII->InsertBranch(*BB, ToBB, NULL, NoCond);
+ TII->InsertBranch(*BB, ToBB, NULL, NoCond, dl);
}
/// RemoveExtraEdges - Remove true / false edges if either / both are no longer
@@ -866,6 +877,66 @@
BBI.BB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
}
+/// InitPredRedefs / UpdatePredRedefs - Defs by predicated instructions are
+/// modeled as read + write (sort of like two-address instructions). These
+/// routines track register liveness and add implicit uses to if-converted
+/// instructions to conform to the model.
+static void InitPredRedefs(MachineBasicBlock *BB, SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI) {
+ for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
+ E = BB->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ Redefs.insert(Reg);
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ Redefs.insert(*Subreg);
+ }
+}
+
+static void UpdatePredRedefs(MachineInstr *MI, SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI,
+ bool AddImpUse = false) {
+ SmallVector<unsigned, 4> Defs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (MO.isDef())
+ Defs.push_back(Reg);
+ else if (MO.isKill()) {
+ Redefs.erase(Reg);
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ Redefs.erase(*SR);
+ }
+ }
+ for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+ unsigned Reg = Defs[i];
+ if (Redefs.count(Reg)) {
+ if (AddImpUse)
+ // Treat predicated update as read + write.
+ MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
+ true/*IsImp*/,false/*IsKill*/));
+ } else {
+ Redefs.insert(Reg);
+ for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR)
+ Redefs.insert(*SR);
+ }
+ }
+}
+
+static void UpdatePredRedefs(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator E,
+ SmallSet<unsigned,4> &Redefs,
+ const TargetRegisterInfo *TRI) {
+ while (I != E) {
+ UpdatePredRedefs(I, Redefs, TRI);
+ ++I;
+ }
+}
+
/// IfConvertSimple - If convert a simple (split, no rejoin) sub-CFG.
///
bool IfConverter::IfConvertSimple(BBInfo &BBI, IfcvtKind Kind) {
@@ -890,13 +961,19 @@
if (TII->ReverseBranchCondition(Cond))
assert(false && "Unable to reverse branch condition!");
+ // Initialize liveins to the first BB. These are potentially re-defined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(CvtBBI->BB, Redefs, TRI);
+ InitPredRedefs(NextBBI->BB, Redefs, TRI);
+
if (CvtBBI->BB->pred_size() > 1) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
- CopyAndPredicateBlock(BBI, *CvtBBI, Cond);
+ CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs);
} else {
- PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
+ PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs);
// Merge converted block into entry block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
@@ -939,6 +1016,7 @@
BBInfo &FalseBBI = BBAnalysis[BBI.FalseBB->getNumber()];
BBInfo *CvtBBI = &TrueBBI;
BBInfo *NextBBI = &FalseBBI;
+ DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 4> Cond(BBI.BrCond.begin(), BBI.BrCond.end());
if (Kind == ICTriangleFalse || Kind == ICTriangleFRev)
@@ -974,17 +1052,23 @@
}
}
+ // Initialize liveins to the first BB. These are potentially re-defined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(CvtBBI->BB, Redefs, TRI);
+ InitPredRedefs(NextBBI->BB, Redefs, TRI);
+
bool HasEarlyExit = CvtBBI->FalseBB != NULL;
bool DupBB = CvtBBI->BB->pred_size() > 1;
if (DupBB) {
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
// Copy instructions in the true block, predicate them, and add them to
// the entry block.
- CopyAndPredicateBlock(BBI, *CvtBBI, Cond, true);
+ CopyAndPredicateBlock(BBI, *CvtBBI, Cond, Redefs, true);
} else {
// Predicate the 'true' block after removing its branch.
CvtBBI->NonPredSize -= TII->RemoveBranch(*CvtBBI->BB);
- PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond);
+ PredicateBlock(*CvtBBI, CvtBBI->BB->end(), Cond, Redefs);
// Now merge the entry of the triangle with the true block.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
@@ -997,7 +1081,7 @@
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
assert(false && "Unable to reverse branch condition!");
- TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond);
+ TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
}
@@ -1026,7 +1110,7 @@
RemoveExtraEdges(BBI);
// Update block info. BB can be iteratively if-converted.
- if (!IterIfcvt)
+ if (!IterIfcvt)
BBI.IsDone = true;
InvalidatePreds(BBI.BB);
CvtBBI->IsDone = true;
@@ -1088,9 +1172,21 @@
// Remove the conditional branch from entry to the blocks.
BBI.NonPredSize -= TII->RemoveBranch(*BBI.BB);
+ // Initialize liveins to the first BB. These are potentially re-defined by
+ // predicated instructions.
+ SmallSet<unsigned, 4> Redefs;
+ InitPredRedefs(BBI1->BB, Redefs, TRI);
+
// Remove the duplicated instructions at the beginnings of both paths.
MachineBasicBlock::iterator DI1 = BBI1->BB->begin();
MachineBasicBlock::iterator DI2 = BBI2->BB->begin();
+ MachineBasicBlock::iterator DIE1 = BBI1->BB->end();
+ MachineBasicBlock::iterator DIE2 = BBI2->BB->end();
+ // Skip dbg_value instructions
+ while (DI1 != DIE1 && DI1->isDebugValue())
+ ++DI1;
+ while (DI2 != DIE2 && DI2->isDebugValue())
+ ++DI2;
BBI1->NonPredSize -= NumDups1;
BBI2->NonPredSize -= NumDups1;
while (NumDups1 != 0) {
@@ -1098,25 +1194,39 @@
++DI2;
--NumDups1;
}
+
+ UpdatePredRedefs(BBI1->BB->begin(), DI1, Redefs, TRI);
BBI.BB->splice(BBI.BB->end(), BBI1->BB, BBI1->BB->begin(), DI1);
BBI2->BB->erase(BBI2->BB->begin(), DI2);
// Predicate the 'true' block after removing its branch.
BBI1->NonPredSize -= TII->RemoveBranch(*BBI1->BB);
DI1 = BBI1->BB->end();
- for (unsigned i = 0; i != NumDups2; ++i)
+ for (unsigned i = 0; i != NumDups2; ) {
+ // NumDups2 only counted non-dbg_value instructions, so this won't
+ // run off the head of the list.
+ assert (DI1 != BBI1->BB->begin());
--DI1;
+ // skip dbg_value instructions
+ if (!DI1->isDebugValue())
+ ++i;
+ }
BBI1->BB->erase(DI1, BBI1->BB->end());
- PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1);
+ PredicateBlock(*BBI1, BBI1->BB->end(), *Cond1, Redefs);
// Predicate the 'false' block.
BBI2->NonPredSize -= TII->RemoveBranch(*BBI2->BB);
DI2 = BBI2->BB->end();
while (NumDups2 != 0) {
+ // NumDups2 only counted non-dbg_value instructions, so this won't
+ // run off the head of the list.
+ assert (DI2 != BBI2->BB->begin());
--DI2;
- --NumDups2;
+ // skip dbg_value instructions
+ if (!DI2->isDebugValue())
+ --NumDups2;
}
- PredicateBlock(*BBI2, DI2, *Cond2);
+ PredicateBlock(*BBI2, DI2, *Cond2, Redefs);
// Merge the true block into the entry of the diamond.
MergeBlocks(BBI, *BBI1);
@@ -1152,7 +1262,8 @@
/// specified end with the specified condition.
void IfConverter::PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
- SmallVectorImpl<MachineOperand> &Cond) {
+ SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs) {
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
if (I->isDebugValue() || TII->isPredicated(I))
continue;
@@ -1162,6 +1273,10 @@
#endif
llvm_unreachable(0);
}
+
+ // If the predicated instruction now re-defines a register as the result of
+ // if-conversion, add an implicit use so the incoming value stays live.
+ UpdatePredRedefs(I, Redefs, TRI, true);
}
std::copy(Cond.begin(), Cond.end(), std::back_inserter(BBI.Predicate));
@@ -1176,28 +1291,33 @@
/// the destination block. Skip end of block branches if IgnoreBr is true.
void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
+ SmallSet<unsigned, 4> &Redefs,
bool IgnoreBr) {
MachineFunction &MF = *ToBBI.BB->getParent();
for (MachineBasicBlock::iterator I = FromBBI.BB->begin(),
E = FromBBI.BB->end(); I != E; ++I) {
const TargetInstrDesc &TID = I->getDesc();
- bool isPredicated = TII->isPredicated(I);
// Do not copy the end of the block branches.
- if (IgnoreBr && !isPredicated && TID.isBranch())
+ if (IgnoreBr && TID.isBranch())
break;
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
- if (!isPredicated && !MI->isDebugValue())
+ if (!TII->isPredicated(I) && !MI->isDebugValue()) {
if (!TII->PredicateInstruction(MI, Cond)) {
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(0);
}
+ }
+
+ // If the predicated instruction now re-defines a register as the result of
+ // if-conversion, add an implicit use so the incoming value stays live.
+ UpdatePredRedefs(MI, Redefs, TRI, true);
}
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
@@ -1238,7 +1358,7 @@
continue;
Pred->ReplaceUsesOfBlockWith(FromBBI.BB, ToBBI.BB);
}
-
+
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
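
(Illustration, not part of the diff: the InitPredRedefs/UpdatePredRedefs
helpers added above model a predicated def as read + write -- if the register
is already live when the instruction is predicated, the instruction gets an
implicit use so the incoming value is kept alive. A standalone sketch of that
bookkeeping over a toy instruction record; the types below are invented, not
LLVM's MachineInstr/MachineOperand.)

// Toy model of the Redefs bookkeeping: a def of an already-live register
// records an implicit use; a kill drops the register from the live set.
#include <cassert>
#include <set>
#include <vector>

struct ToyInst {
  std::vector<unsigned> Defs, Kills, ImplicitUses;
};

static void updatePredRedefs(ToyInst &MI, std::set<unsigned> &Redefs) {
  for (unsigned Reg : MI.Kills)
    Redefs.erase(Reg);                  // last use: no longer live
  for (unsigned Reg : MI.Defs) {
    if (Redefs.count(Reg))
      MI.ImplicitUses.push_back(Reg);   // predicated redef reads the old value
    else
      Redefs.insert(Reg);               // first def: the register becomes live
  }
}

int main() {
  std::set<unsigned> Redefs;
  Redefs.insert(6);                     // say R6 is live into the block

  ToyInst PredicatedDef;                // "R6 = ... if cond" after if-conversion
  PredicatedDef.Defs.push_back(6);
  updatePredRedefs(PredicatedDef, Redefs);
  assert(PredicatedDef.ImplicitUses.size() == 1);  // keeps incoming R6 alive

  ToyInst FreshDef;                     // defines a register not live before
  FreshDef.Defs.push_back(7);
  updatePredRedefs(FreshDef, Redefs);
  assert(FreshDef.ImplicitUses.empty());
  assert(Redefs.count(7) == 1);
  return 0;
}
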
Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveIntervalAnalysis.cpp Fri Jul 2 04:34:51 2010
@@ -434,11 +434,6 @@
// are actually two values in the live interval. Because of this we
// need to take the LiveRegion that defines this register and split it
// into two values.
- // Two-address vregs should always only be redefined once. This means
- // that at this point, there should be exactly one value number in it.
- assert((PartReDef || interval.containsOneValue()) &&
- "Unexpected 2-addr liveint!");
- SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
SlotIndex RedefIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
RedefIndex = MIIdx.getUseIndex();
@@ -446,8 +441,9 @@
const LiveRange *OldLR =
interval.getLiveRangeContaining(RedefIndex.getUseIndex());
VNInfo *OldValNo = OldLR->valno;
+ SlotIndex DefIndex = OldValNo->def.getDefIndex();
- // Delete the initial value, which should be short and continuous,
+ // Delete the previous value, which should be short and continuous,
// because the 2-addr copy must be in the same MBB as the redef.
interval.removeRange(DefIndex, RedefIndex);
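
A toy illustration of the change above (simplified data structures, not the real LiveInterval API): instead of assuming the interval holds a single value and taking value number 0, look up the live range that actually contains the redefinition point and split that value's range.

#include <cassert>
#include <iostream>
#include <vector>

struct ToyRange { int Start, End, DefIdx; };   // half-open [Start, End)

static const ToyRange *rangeContaining(const std::vector<ToyRange> &Interval,
                                       int Idx) {
  for (const ToyRange &R : Interval)
    if (R.Start <= Idx && Idx < R.End)
      return &R;
  return nullptr;
}

int main() {
  // Two values: an unrelated earlier def at 0 and the def being redefined at 4.
  std::vector<ToyRange> Interval = {{0, 2, 0}, {4, 10, 4}};
  int RedefIdx = 8;                            // the 2-addr redefinition
  const ToyRange *Old = rangeContaining(Interval, RedefIdx);
  assert(Old && "redef must lie inside an existing range");
  // The range to delete runs from the *containing* value's def, not from
  // whatever value happens to be numbered 0.
  std::cout << "erase [" << Old->DefIdx << ", " << RedefIdx << ")\n";
}
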
Modified: llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/LiveVariables.cpp Fri Jul 2 04:34:51 2010
@@ -286,7 +286,7 @@
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
- return false;
+ return 0;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineBasicBlock.cpp Fri Jul 2 04:34:51 2010
@@ -245,6 +245,7 @@
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
+ DebugLoc dl; // FIXME: this is nowhere
bool B = TII->AnalyzeBranch(*this, TBB, FBB, Cond);
(void) B;
assert(!B && "UpdateTerminators requires analyzable predecessors!");
@@ -259,7 +260,7 @@
// its layout successor, insert a branch.
TBB = *succ_begin();
if (!isLayoutSuccessor(TBB))
- TII->InsertBranch(*this, TBB, 0, Cond);
+ TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
if (FBB) {
@@ -270,10 +271,10 @@
if (TII->ReverseBranchCondition(Cond))
return;
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, FBB, 0, Cond);
+ TII->InsertBranch(*this, FBB, 0, Cond, dl);
} else if (isLayoutSuccessor(FBB)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, 0, Cond);
+ TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
// The block has a fallthrough conditional branch.
@@ -284,14 +285,14 @@
if (TII->ReverseBranchCondition(Cond)) {
// We can't reverse the condition, add an unconditional branch.
Cond.clear();
- TII->InsertBranch(*this, MBBA, 0, Cond);
+ TII->InsertBranch(*this, MBBA, 0, Cond, dl);
return;
}
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, MBBA, 0, Cond);
+ TII->InsertBranch(*this, MBBA, 0, Cond, dl);
} else if (!isLayoutSuccessor(MBBA)) {
TII->RemoveBranch(*this);
- TII->InsertBranch(*this, TBB, MBBA, Cond);
+ TII->InsertBranch(*this, TBB, MBBA, Cond, dl);
}
}
}
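
A small sketch of the pattern used above (toy types, not the real TargetInstrInfo interface): branch insertion now takes an explicit debug-location argument, so synthesized branches can carry source-line information instead of silently dropping it.

#include <iostream>
#include <string>
#include <vector>

struct ToyDebugLoc { int Line = 0; };          // default: "unknown location"

static void insertBranch(std::vector<std::string> &Block,
                         const std::string &Target, ToyDebugLoc DL) {
  Block.push_back("br " + Target + "  ; line " + std::to_string(DL.Line));
}

int main() {
  std::vector<std::string> Block;
  ToyDebugLoc dl;                              // unknown, as in the FIXME above
  insertBranch(Block, "bb.exit", dl);
  for (const std::string &S : Block) std::cout << S << '\n';
}
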
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineDominators.cpp Fri Jul 2 04:34:51 2010
@@ -46,7 +46,6 @@
}
MachineDominatorTree::~MachineDominatorTree() {
- DT->releaseMemory();
delete DT;
}
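
A toy sketch of why the explicit releaseMemory() call can go away (illustrative class, not DominatorTreeBase): when the destructor already tears the structure down, releasing immediately before delete does the same work twice.

#include <iostream>
#include <vector>

struct ToyTree {
  std::vector<int> Nodes{1, 2, 3};
  void releaseMemory() { Nodes.clear(); }
  ~ToyTree() { releaseMemory(); std::cout << "released in dtor\n"; }
};

int main() {
  ToyTree *DT = new ToyTree();
  delete DT;   // the destructor releases; no separate call needed first
}
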
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineRegisterInfo.cpp Fri Jul 2 04:34:51 2010
@@ -20,7 +20,7 @@
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
- RegClass2VRegMap.resize(TRI.getNumRegClasses()+1); // RC ID starts at 1.
+ RegClass2VRegMap = new std::vector<unsigned>[TRI.getNumRegClasses()];
UsedPhysRegs.resize(TRI.getNumRegs());
// Create the physreg use/def lists.
@@ -37,6 +37,7 @@
"PhysRegUseDefLists has entries after all instructions are deleted");
#endif
delete [] PhysRegUseDefLists;
+ delete [] RegClass2VRegMap;
}
/// setRegClass - Set the register class of the specified virtual register.
@@ -52,7 +53,7 @@
// Remove from old register class's vregs list. This may be slow but
// fortunately this operation is rarely needed.
std::vector<unsigned> &VRegs = RegClass2VRegMap[OldRC->getID()];
- std::vector<unsigned>::iterator I=std::find(VRegs.begin(), VRegs.end(), VR);
+ std::vector<unsigned>::iterator I = std::find(VRegs.begin(), VRegs.end(), VR);
VRegs.erase(I);
// Add to new register class's vregs list.
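
A toy version of the container change above (simplified, not the real MachineRegisterInfo): one std::vector<unsigned> per register class, allocated as a plain array indexed by class ID and freed in the destructor.

#include <iostream>
#include <vector>

struct ToyRegInfo {
  std::vector<unsigned> *Class2VRegs;   // one entry per register class
  explicit ToyRegInfo(unsigned NumClasses)
      : Class2VRegs(new std::vector<unsigned>[NumClasses]) {}
  ~ToyRegInfo() { delete[] Class2VRegs; }
};

int main() {
  ToyRegInfo MRI(4);                    // pretend there are 4 register classes
  MRI.Class2VRegs[2].push_back(1024);   // vreg 1024 belongs to class 2
  std::cout << MRI.Class2VRegs[2].size() << " vreg(s) in class 2\n";
}
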
Modified: llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/MachineSink.cpp Fri Jul 2 04:34:51 2010
@@ -25,7 +25,6 @@
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
@@ -62,7 +61,6 @@
bool ProcessBlock(MachineBasicBlock &MBB);
bool SinkInstruction(MachineInstr *MI, bool &SawStore);
bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB) const;
- bool LiveOutOfBasicBlock(const MachineInstr *MI, unsigned Reg) const;
};
} // end anonymous namespace
@@ -168,44 +166,6 @@
return MadeChange;
}
-/// LiveOutOfBasicBlock - Determine if the physical register, defined and dead
-/// in MI, is live on exit from the basic block.
-bool MachineSinking::LiveOutOfBasicBlock(const MachineInstr *MI,
- unsigned Reg) const {
- assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
- "Only want to determine if a physical register is live out of a BB!");
-
- const MachineBasicBlock *MBB = MI->getParent();
- SmallSet<unsigned, 8> KilledRegs;
- MachineBasicBlock::const_iterator I = MBB->end();
- MachineBasicBlock::const_iterator E = MBB->begin();
- assert(I != E && "How can there be an empty block at this point?!");
-
- // Loop through the instructions bottom-up. If we see a kill of the preg
- // first, then it's not live out of the BB. If we see a use or def first, then
- // we assume that it is live out of the BB.
- do {
- const MachineInstr &CurMI = *--I;
-
- for (unsigned i = 0, e = CurMI.getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = CurMI.getOperand(i);
- if (!MO.isReg()) continue; // Ignore non-register operands.
-
- unsigned MOReg = MO.getReg();
- if (MOReg == 0) continue;
-
- if (MOReg == Reg) {
- if (MO.isKill())
- return false;
- if (MO.isUse() || MO.isDef())
- return true;
- }
- }
- } while (I != E);
-
- return false;
-}
-
/// SinkInstruction - Determine whether it is safe to sink the specified machine
/// instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
@@ -228,7 +188,6 @@
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
MachineBasicBlock *SuccToSinkTo = 0;
- SmallVector<unsigned, 4> PhysRegs;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
@@ -257,12 +216,9 @@
if (AllocatableSet.test(AliasReg))
return false;
}
- } else {
- if (!MO.isDead())
- // A def that isn't dead. We can't move it.
- return false;
- else
- PhysRegs.push_back(Reg);
+ } else if (!MO.isDead()) {
+ // A def that isn't dead. We can't move it.
+ return false;
}
} else {
// Virtual register uses are always safe to sink.
@@ -326,13 +282,18 @@
if (MI->getParent() == SuccToSinkTo)
return false;
- // If the instruction to move defines a dead physical register which is live
- // when leaving the basic block, don't move it because it could turn into a
- // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
- for (SmallVectorImpl<unsigned>::const_iterator
- I = PhysRegs.begin(), E = PhysRegs.end(); I != E; ++I)
- if (LiveOutOfBasicBlock(MI, *I))
+ // If the instruction to move defines or uses a dead physical register which
+ // is live when leaving the basic block, don't move it because it could turn
+ // into a zombie define or misuse of that preg. E.g., EFLAGS.
+ // (<rdar://problem/8030636>)
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
+ if (SuccToSinkTo->isLiveIn(Reg))
return false;
+ }
DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);
Modified: llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/OptimizeExts.cpp Fri Jul 2 04:34:51 2010
@@ -118,6 +118,26 @@
continue;
}
+ // It's an error to translate this:
+ //
+ // %reg1025 = <sext> %reg1024
+ // ...
+ // %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
+ //
+ // into this:
+ //
+ // %reg1025 = <sext> %reg1024
+ // ...
+ // %reg1027 = EXTRACT_SUBREG %reg1025, 4
+ // %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
+ //
+ // The problem here is that SUBREG_TO_REG is there to assert that an
+ // implicit zext occurs. It doesn't insert a zext instruction. If we allow
+ // the EXTRACT_SUBREG here, it will give us the value after the <sext>,
+ // not the original value of %reg1024 before <sext>.
+ if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
+ continue;
+
MachineBasicBlock *UseMBB = UseMI->getParent();
if (UseMBB == MBB) {
// Local uses that come after the extension.
Modified: llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PHIElimination.cpp Fri Jul 2 04:34:51 2010
@@ -402,6 +402,7 @@
assert(A && B && "Missing MBB end point");
MachineFunction *MF = A->getParent();
+ DebugLoc dl; // FIXME: this is nowhere
// We may need to update A's terminator, but we can't do that if AnalyzeBranch
// fails. If A uses a jump table, we won't touch it.
@@ -427,7 +428,7 @@
NMBB->addSuccessor(B);
if (!NMBB->isLayoutSuccessor(B)) {
Cond.clear();
- MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, B, NULL, Cond);
+ MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, B, NULL, Cond, dl);
}
// Fix PHI nodes in B so they refer to NMBB instead of A
Modified: llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/PostRASchedulerList.cpp Fri Jul 2 04:34:51 2010
@@ -22,8 +22,6 @@
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
-#include "ExactHazardRecognizer.h"
-#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
@@ -65,10 +63,6 @@
cl::desc("Break post-RA scheduling anti-dependencies: "
"\"critical\", \"all\", or \"none\""),
cl::init("none"), cl::Hidden);
-static cl::opt<bool>
-EnablePostRAHazardAvoidance("avoid-hazards",
- cl::desc("Enable exact hazard avoidance"),
- cl::init(true), cl::Hidden);
// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
@@ -237,10 +231,10 @@
const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
- const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
- ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
- (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
- (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
+ const TargetMachine &TM = Fn.getTarget();
+ const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+ ScheduleHazardRecognizer *HR =
+ TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
AntiDepBreaker *ADB =
((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
(AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
@@ -680,15 +674,6 @@
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
CycleHasInsts = true;
-
- // If we are using the target-specific hazards, then don't
- // advance the cycle time just because we schedule a node. If
- // the target allows it we can schedule multiple nodes in the
- // same cycle.
- if (!EnablePostRAHazardAvoidance) {
- if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
- }
} else {
if (CycleHasInsts) {
DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
Modified: llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocFast.cpp Fri Jul 2 04:34:51 2010
@@ -708,7 +708,8 @@
if (MO.isUse()) {
usePhysReg(MO);
} else if (MO.isEarlyClobber()) {
- definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
+ definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ?
+ regFree : regReserved);
PhysECs.push_back(Reg);
}
}
@@ -731,8 +732,11 @@
// Note: defineVirtReg may invalidate MO.
LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0);
unsigned PhysReg = LRI->second.PhysReg;
- setPhysReg(MI, i, PhysReg);
+ if (setPhysReg(MI, i, PhysReg))
+ VirtDead.push_back(Reg);
PhysECs.push_back(PhysReg);
+ // Don't attempt coalescing when earlyclobbers are present.
+ CopyDst = 0;
}
}
@@ -767,7 +771,8 @@
// Allocate defs and collect dead defs.
for (unsigned i = 0; i != DefOpEnd; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg()) continue;
+ if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
+ continue;
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
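
A toy sketch of the earlyclobber tweak above (illustrative enum and operand struct, not RegAllocFast itself): implicit or dead earlyclobber defs only need the register marked free, while a real earlyclobber def must keep it reserved until the instruction completes; earlyclobber operands are then skipped by the ordinary def-allocation loop.

#include <iostream>

enum ToyRegState { regFree, regReserved };

struct ToyOperand { bool IsImplicit, IsDead, IsEarlyClobber; };

static ToyRegState earlyClobberState(const ToyOperand &MO) {
  return (MO.IsImplicit || MO.IsDead) ? regFree : regReserved;
}

int main() {
  ToyOperand ImplicitEC = {true, false, true};
  ToyOperand RealEC = {false, false, true};
  std::cout << (earlyClobberState(ImplicitEC) == regFree) << ' '
            << (earlyClobberState(RealEC) == regReserved) << '\n';
}
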
Removed: llvm/branches/wendling/eh/lib/CodeGen/RegAllocLocal.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegAllocLocal.cpp?rev=107463&view=auto
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegAllocLocal.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegAllocLocal.cpp (removed)
@@ -1,1254 +0,0 @@
-//===-- RegAllocLocal.cpp - A BasicBlock generic register allocator -------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This register allocator allocates registers to a basic block at a time,
-// attempting to keep values in registers and reusing registers as appropriate.
-//
-//===----------------------------------------------------------------------===//
-
-#define DEBUG_TYPE "regalloc"
-#include "llvm/BasicBlock.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/RegAllocRegistry.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
-#include <algorithm>
-using namespace llvm;
-
-STATISTIC(NumStores, "Number of stores added");
-STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumCopies, "Number of copies coalesced");
-
-static RegisterRegAlloc
- localRegAlloc("local", "local register allocator",
- createLocalRegisterAllocator);
-
-namespace {
- class RALocal : public MachineFunctionPass {
- public:
- static char ID;
- RALocal() : MachineFunctionPass(&ID), StackSlotForVirtReg(-1) {}
- private:
- const TargetMachine *TM;
- MachineFunction *MF;
- MachineRegisterInfo *MRI;
- const TargetRegisterInfo *TRI;
- const TargetInstrInfo *TII;
-
- // StackSlotForVirtReg - Maps virtual regs to the frame index where these
- // values are spilled.
- IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;
-
- // Virt2PhysRegMap - This map contains entries for each virtual register
- // that is currently available in a physical register.
- IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysRegMap;
-
- unsigned &getVirt2PhysRegMapSlot(unsigned VirtReg) {
- return Virt2PhysRegMap[VirtReg];
- }
-
- // PhysRegsUsed - This array is effectively a map, containing entries for
- // each physical register that currently has a value (ie, it is in
- // Virt2PhysRegMap). The value mapped to is the virtual register
- // corresponding to the physical register (the inverse of the
- // Virt2PhysRegMap), or 0. The value is set to 0 if this register is pinned
- // because it is used by a future instruction, and to -2 if it is not
- // allocatable. If the entry for a physical register is -1, then the
- // physical register is "not in the map".
- //
- std::vector<int> PhysRegsUsed;
-
- // PhysRegsUseOrder - This contains a list of the physical registers that
- // currently have a virtual register value in them. This list provides an
- // ordering of registers, imposing a reallocation order. This list is only
- // used if all registers are allocated and we have to spill one, in which
- // case we spill the least recently used register. Entries at the front of
- // the list are the least recently used registers, entries at the back are
- // the most recently used.
- //
- std::vector<unsigned> PhysRegsUseOrder;
-
- // Virt2LastUseMap - This maps each virtual register to its last use
- // (MachineInstr*, operand index pair).
- IndexedMap<std::pair<MachineInstr*, unsigned>, VirtReg2IndexFunctor>
- Virt2LastUseMap;
-
- std::pair<MachineInstr*,unsigned>& getVirtRegLastUse(unsigned Reg) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- return Virt2LastUseMap[Reg];
- }
-
- // VirtRegModified - This bitset contains information about which virtual
- // registers need to be spilled back to memory when their registers are
- // scavenged. If a virtual register has simply been rematerialized, there
- // is no reason to spill it to memory when we need the register back.
- //
- BitVector VirtRegModified;
-
- // UsedInMultipleBlocks - Tracks whether a particular register is used in
- // more than one block.
- BitVector UsedInMultipleBlocks;
-
- void markVirtRegModified(unsigned Reg, bool Val = true) {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- Reg -= TargetRegisterInfo::FirstVirtualRegister;
- if (Val)
- VirtRegModified.set(Reg);
- else
- VirtRegModified.reset(Reg);
- }
-
- bool isVirtRegModified(unsigned Reg) const {
- assert(TargetRegisterInfo::isVirtualRegister(Reg) && "Illegal VirtReg!");
- assert(Reg - TargetRegisterInfo::FirstVirtualRegister <
- VirtRegModified.size() && "Illegal virtual register!");
- return VirtRegModified[Reg - TargetRegisterInfo::FirstVirtualRegister];
- }
-
- void AddToPhysRegsUseOrder(unsigned Reg) {
- std::vector<unsigned>::iterator It =
- std::find(PhysRegsUseOrder.begin(), PhysRegsUseOrder.end(), Reg);
- if (It != PhysRegsUseOrder.end())
- PhysRegsUseOrder.erase(It);
- PhysRegsUseOrder.push_back(Reg);
- }
-
- void MarkPhysRegRecentlyUsed(unsigned Reg) {
- if (PhysRegsUseOrder.empty() ||
- PhysRegsUseOrder.back() == Reg) return; // Already most recently used
-
- for (unsigned i = PhysRegsUseOrder.size(); i != 0; --i) {
- unsigned RegMatch = PhysRegsUseOrder[i-1]; // remove from middle
- if (!areRegsEqual(Reg, RegMatch)) continue;
-
- PhysRegsUseOrder.erase(PhysRegsUseOrder.begin()+i-1);
- // Add it to the end of the list
- PhysRegsUseOrder.push_back(RegMatch);
- if (RegMatch == Reg)
- return; // Found an exact match, exit early
- }
- }
-
- public:
- virtual const char *getPassName() const {
- return "Local Register Allocator";
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequiredID(PHIEliminationID);
- AU.addRequiredID(TwoAddressInstructionPassID);
- MachineFunctionPass::getAnalysisUsage(AU);
- }
-
- private:
- /// runOnMachineFunction - Register allocate the whole function
- bool runOnMachineFunction(MachineFunction &Fn);
-
- /// AllocateBasicBlock - Register allocate the specified basic block.
- void AllocateBasicBlock(MachineBasicBlock &MBB);
-
-
- /// areRegsEqual - This method returns true if the specified registers are
- /// related to each other. To do this, it checks to see if they are equal
- /// or if the first register is in the alias set of the second register.
- ///
- bool areRegsEqual(unsigned R1, unsigned R2) const {
- if (R1 == R2) return true;
- for (const unsigned *AliasSet = TRI->getAliasSet(R2);
- *AliasSet; ++AliasSet) {
- if (*AliasSet == R1) return true;
- }
- return false;
- }
-
- /// getStackSpaceFor - This returns the frame index of the specified virtual
- /// register on the stack, allocating space if necessary.
- int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC);
-
- /// removePhysReg - This method marks the specified physical register as no
- /// longer being in use.
- ///
- void removePhysReg(unsigned PhysReg);
-
- void storeVirtReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg, bool isKill);
-
- /// spillVirtReg - This method spills the value specified by PhysReg into
- /// the virtual register slot specified by VirtReg. It then updates the RA
- /// data structures to indicate the fact that PhysReg is now available.
- ///
- void spillVirtReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
- unsigned VirtReg, unsigned PhysReg);
-
- /// spillPhysReg - This method spills the specified physical register into
- /// the virtual register slot associated with it. If OnlyVirtRegs is set to
- /// true, then the request is ignored if the physical register does not
- /// contain a virtual register.
- ///
- void spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned PhysReg, bool OnlyVirtRegs = false);
-
- /// assignVirtToPhysReg - This method updates local state so that we know
- /// that PhysReg is the proper container for VirtReg now. The physical
- /// register must not be used for anything else when this is called.
- ///
- void assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg);
-
- /// isPhysRegAvailable - Return true if the specified physical register is
- /// free and available for use. This also includes checking to see if
- /// aliased registers are all free...
- ///
- bool isPhysRegAvailable(unsigned PhysReg) const;
-
- /// getFreeReg - Look to see if there is a free register available in the
- /// specified register class. If not, return 0.
- ///
- unsigned getFreeReg(const TargetRegisterClass *RC);
-
- /// getReg - Find a physical register to hold the specified virtual
- /// register. If all compatible physical registers are used, this method
- /// spills the last used virtual register to the stack, and uses that
- /// register. If NoFree is true, that means the caller knows there isn't
- /// a free register, do not call getFreeReg().
- unsigned getReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned VirtReg, bool NoFree = false);
-
- /// reloadVirtReg - This method transforms the specified virtual
- /// register use to refer to a physical register. This method may do this
- /// in one of several ways: if the register is available in a physical
- /// register already, it uses that physical register. If the value is not
- /// in a physical register, and if there are physical registers available,
- /// it loads it into a register: PhysReg if that is an available physical
- /// register, otherwise any physical register of the right class.
- /// If register pressure is high, and it is possible, it tries to fold the
- /// load of the virtual register into the instruction itself. It avoids
- /// doing this if register pressure is low to improve the chance that
- /// subsequent instructions can use the reloaded value. This method
- /// returns the modified instruction.
- ///
- MachineInstr *reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum, SmallSet<unsigned, 4> &RRegs,
- unsigned PhysReg);
-
- /// ComputeLocalLiveness - Computes liveness of registers within a basic
- /// block, setting the killed/dead flags as appropriate.
- void ComputeLocalLiveness(MachineBasicBlock& MBB);
-
- void reloadPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I,
- unsigned PhysReg);
- };
- char RALocal::ID = 0;
-}
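
A toy illustration of the bookkeeping scheme the deleted allocator described above (illustrative code only, not RALocal): PhysRegsUsed encodes state with sentinels (-1 = not in the map, -2 = not allocatable, 0 = pinned, >0 = the virtual register currently held), and PhysRegsUseOrder keeps a least-recently-used ordering for choosing a spill candidate.

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> PhysRegsUsed(8, -1);       // nothing mapped yet
  std::vector<unsigned> UseOrder;             // front = least recently used
  PhysRegsUsed[7] = -2;                       // e.g. the stack pointer
  PhysRegsUsed[3] = 1024;                     // r3 holds %reg1024
  UseOrder.push_back(3);
  PhysRegsUsed[5] = 1025;                     // r5 holds %reg1025
  UseOrder.push_back(5);

  // Touch r3 again: move it to the back (most recently used).
  UseOrder.erase(std::find(UseOrder.begin(), UseOrder.end(), 3u));
  UseOrder.push_back(3);

  std::cout << "spill candidate: r" << UseOrder.front() << '\n';  // r5
}
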
-
-/// getStackSpaceFor - This allocates space for the specified virtual register
-/// to be held on the stack.
-int RALocal::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) {
- // Find the location Reg would belong...
- int SS = StackSlotForVirtReg[VirtReg];
- if (SS != -1)
- return SS; // Already has space allocated?
-
- // Allocate a new stack object for this spill location...
- int FrameIdx = MF->getFrameInfo()->CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
-
- // Assign the slot.
- StackSlotForVirtReg[VirtReg] = FrameIdx;
- return FrameIdx;
-}
-
-
-/// removePhysReg - This method marks the specified physical register as no
-/// longer being in use.
-///
-void RALocal::removePhysReg(unsigned PhysReg) {
- PhysRegsUsed[PhysReg] = -1; // PhyReg no longer used
-
- std::vector<unsigned>::iterator It =
- std::find(PhysRegsUseOrder.begin(), PhysRegsUseOrder.end(), PhysReg);
- if (It != PhysRegsUseOrder.end())
- PhysRegsUseOrder.erase(It);
-}
-
-/// storeVirtReg - Store a virtual register to its assigned stack slot.
-void RALocal::storeVirtReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg,
- bool isKill) {
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
- DEBUG(dbgs() << " to stack slot #" << FrameIndex);
- TII->storeRegToStackSlot(MBB, I, PhysReg, isKill, FrameIndex, RC, TRI);
- ++NumStores; // Update statistics
-
- // Mark the spill instruction as last use if we're not killing the register.
- if (!isKill) {
- MachineInstr *Spill = llvm::prior(I);
- int OpNum = Spill->findRegisterUseOperandIdx(PhysReg);
- if (OpNum < 0)
- getVirtRegLastUse(VirtReg) = std::make_pair((MachineInstr*)0, 0);
- else
- getVirtRegLastUse(VirtReg) = std::make_pair(Spill, OpNum);
- }
-}
-
-/// spillVirtReg - This method spills the value specified by PhysReg into the
-/// virtual register slot specified by VirtReg. It then updates the RA data
-/// structures to indicate the fact that PhysReg is now available.
-///
-void RALocal::spillVirtReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned VirtReg, unsigned PhysReg) {
- assert(VirtReg && "Spilling a physical register is illegal!"
- " Must not have appropriate kill for the register or use exists beyond"
- " the intended one.");
- DEBUG(dbgs() << " Spilling register " << TRI->getName(PhysReg)
- << " containing %reg" << VirtReg);
-
- if (!isVirtRegModified(VirtReg)) {
- DEBUG(dbgs() << " which has not been modified, so no store necessary!");
- std::pair<MachineInstr*, unsigned> &LastUse = getVirtRegLastUse(VirtReg);
- if (LastUse.first)
- LastUse.first->getOperand(LastUse.second).setIsKill();
- } else {
- // Otherwise, there is a virtual register corresponding to this physical
- // register. We only need to spill it into its stack slot if it has been
- // modified.
- // If the instruction reads the register that's spilled, (e.g. this can
- // happen if it is a move to a physical register), then the spill
- // instruction is not a kill.
- bool isKill = !(I != MBB.end() && I->readsRegister(PhysReg));
- storeVirtReg(MBB, I, VirtReg, PhysReg, isKill);
- }
-
- getVirt2PhysRegMapSlot(VirtReg) = 0; // VirtReg no longer available
-
- DEBUG(dbgs() << '\n');
- removePhysReg(PhysReg);
-}
-
-
-/// spillPhysReg - This method spills the specified physical register into the
-/// virtual register slot associated with it. If OnlyVirtRegs is set to true,
-/// then the request is ignored if the physical register does not contain a
-/// virtual register.
-///
-void RALocal::spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned PhysReg, bool OnlyVirtRegs) {
- if (PhysRegsUsed[PhysReg] != -1) { // Only spill it if it's used!
- assert(PhysRegsUsed[PhysReg] != -2 && "Non allocable reg used!");
- if (PhysRegsUsed[PhysReg] || !OnlyVirtRegs)
- spillVirtReg(MBB, I, PhysRegsUsed[PhysReg], PhysReg);
- return;
- }
-
- // If the selected register aliases any other registers, we must make
- // sure that one of the aliases isn't alive.
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] == -1 || // Spill aliased register.
- PhysRegsUsed[*AliasSet] == -2) // If allocatable.
- continue;
-
- if (PhysRegsUsed[*AliasSet])
- spillVirtReg(MBB, I, PhysRegsUsed[*AliasSet], *AliasSet);
- }
-}
-
-
-/// assignVirtToPhysReg - This method updates local state so that we know
-/// that PhysReg is the proper container for VirtReg now. The physical
-/// register must not be used for anything else when this is called.
-///
-void RALocal::assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg) {
- assert(PhysRegsUsed[PhysReg] == -1 && "Phys reg already assigned!");
- // Update information to note the fact that this register was just used, and
- // it holds VirtReg.
- PhysRegsUsed[PhysReg] = VirtReg;
- getVirt2PhysRegMapSlot(VirtReg) = PhysReg;
- AddToPhysRegsUseOrder(PhysReg); // New use of PhysReg
-}
-
-
-/// isPhysRegAvailable - Return true if the specified physical register is free
-/// and available for use. This also includes checking to see if aliased
-/// registers are all free...
-///
-bool RALocal::isPhysRegAvailable(unsigned PhysReg) const {
- if (PhysRegsUsed[PhysReg] != -1) return false;
-
- // If the selected register aliases any other allocated registers, it is
- // not free!
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet)
- if (PhysRegsUsed[*AliasSet] >= 0) // Aliased register in use?
- return false; // Can't use this reg then.
- return true;
-}
-
-
-/// getFreeReg - Look to see if there is a free register available in the
-/// specified register class. If not, return 0.
-///
-unsigned RALocal::getFreeReg(const TargetRegisterClass *RC) {
- // Get iterators defining the range of registers that are valid to allocate in
- // this class, which also specifies the preferred allocation order.
- TargetRegisterClass::iterator RI = RC->allocation_order_begin(*MF);
- TargetRegisterClass::iterator RE = RC->allocation_order_end(*MF);
-
- for (; RI != RE; ++RI)
- if (isPhysRegAvailable(*RI)) { // Is reg unused?
- assert(*RI != 0 && "Cannot use register!");
- return *RI; // Found an unused register!
- }
- return 0;
-}
-
-
-/// getReg - Find a physical register to hold the specified virtual
-/// register. If all compatible physical registers are used, this method spills
-/// the last used virtual register to the stack, and uses that register.
-///
-unsigned RALocal::getReg(MachineBasicBlock &MBB, MachineInstr *I,
- unsigned VirtReg, bool NoFree) {
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
-
- // First check to see if we have a free register of the requested type...
- unsigned PhysReg = NoFree ? 0 : getFreeReg(RC);
-
- if (PhysReg != 0) {
- // Assign the register.
- assignVirtToPhysReg(VirtReg, PhysReg);
- return PhysReg;
- }
-
- // If we didn't find an unused register, scavenge one now!
- assert(!PhysRegsUseOrder.empty() && "No allocated registers??");
-
- // Loop over all of the preallocated registers from the least recently used
- // to the most recently used. When we find one that is capable of holding
- // our register, use it.
- for (unsigned i = 0; PhysReg == 0; ++i) {
- assert(i != PhysRegsUseOrder.size() &&
- "Couldn't find a register of the appropriate class!");
-
- unsigned R = PhysRegsUseOrder[i];
-
- // We can only use this register if it holds a virtual register (ie, it
- // can be spilled). Do not use it if it is an explicitly allocated
- // physical register!
- assert(PhysRegsUsed[R] != -1 &&
- "PhysReg in PhysRegsUseOrder, but is not allocated?");
- if (PhysRegsUsed[R] && PhysRegsUsed[R] != -2) {
- // If the current register is compatible, use it.
- if (RC->contains(R)) {
- PhysReg = R;
- break;
- }
-
- // If one of the registers aliased to the current register is
- // compatible, use it.
- for (const unsigned *AliasIt = TRI->getAliasSet(R);
- *AliasIt; ++AliasIt) {
- if (!RC->contains(*AliasIt)) continue;
-
- // If this is pinned down for some reason, don't use it. For
- // example, if CL is pinned, and we run across CH, don't use
- // CH as justification for scavenging ECX (which will
- // fail).
- if (PhysRegsUsed[*AliasIt] == 0) continue;
-
- // Make sure the register is allocatable. Don't allocate SIL on
- // x86-32.
- if (PhysRegsUsed[*AliasIt] == -2) continue;
-
- PhysReg = *AliasIt; // Take an aliased register
- break;
- }
- }
- }
-
- assert(PhysReg && "Physical register not assigned!?!?");
-
- // At this point PhysRegsUseOrder[i] is the least recently used register of
- // compatible register class. Spill it to memory and reap its remains.
- spillPhysReg(MBB, I, PhysReg);
-
- // Now that we know which register we need to assign this to, do it now!
- assignVirtToPhysReg(VirtReg, PhysReg);
- return PhysReg;
-}
-
-
-/// reloadVirtReg - This method transforms the specified virtual
-/// register use to refer to a physical register. This method may do this in
-/// one of several ways: if the register is available in a physical register
-/// already, it uses that physical register. If the value is not in a physical
-/// register, and if there are physical registers available, it loads it into a
-/// register: PhysReg if that is an available physical register, otherwise any
-/// register. If register pressure is high, and it is possible, it tries to
-/// fold the load of the virtual register into the instruction itself. It
-/// avoids doing this if register pressure is low to improve the chance that
-/// subsequent instructions can use the reloaded value. This method returns
-/// the modified instruction.
-///
-MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
- unsigned OpNum,
- SmallSet<unsigned, 4> &ReloadedRegs,
- unsigned PhysReg) {
- unsigned VirtReg = MI->getOperand(OpNum).getReg();
- unsigned SubIdx = MI->getOperand(OpNum).getSubReg();
-
- // If the virtual register is already available, just update the instruction
- // and return.
- if (unsigned PR = getVirt2PhysRegMapSlot(VirtReg)) {
- if (SubIdx) {
- PR = TRI->getSubReg(PR, SubIdx);
- MI->getOperand(OpNum).setSubReg(0);
- }
- MI->getOperand(OpNum).setReg(PR); // Assign the input register
- if (!MI->isDebugValue()) {
- // Do not do these for DBG_VALUE as they can affect codegen.
- MarkPhysRegRecentlyUsed(PR); // Already have this value available!
- getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
- }
- return MI;
- }
-
- // Otherwise, we need to fold it into the current instruction, or reload it.
- // If we have registers available to hold the value, use them.
- const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
- // If we already have a PhysReg (this happens when the instruction is a
- // reg-to-reg copy with a PhysReg destination) use that.
- if (!PhysReg || !TargetRegisterInfo::isPhysicalRegister(PhysReg) ||
- !isPhysRegAvailable(PhysReg))
- PhysReg = getFreeReg(RC);
- int FrameIndex = getStackSpaceFor(VirtReg, RC);
-
- if (PhysReg) { // Register is available, allocate it!
- assignVirtToPhysReg(VirtReg, PhysReg);
- } else { // No registers available.
- // Force some poor hapless value out of the register file to
- // make room for the new register, and reload it.
- PhysReg = getReg(MBB, MI, VirtReg, true);
- }
-
- markVirtRegModified(VirtReg, false); // Note that this reg was just reloaded
-
- DEBUG(dbgs() << " Reloading %reg" << VirtReg << " into "
- << TRI->getName(PhysReg) << "\n");
-
- // Add move instruction(s)
- TII->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC, TRI);
- ++NumLoads; // Update statistics
-
- MF->getRegInfo().setPhysRegUsed(PhysReg);
- // Assign the input register.
- if (SubIdx) {
- MI->getOperand(OpNum).setSubReg(0);
- MI->getOperand(OpNum).setReg(TRI->getSubReg(PhysReg, SubIdx));
- } else
- MI->getOperand(OpNum).setReg(PhysReg); // Assign the input register
- getVirtRegLastUse(VirtReg) = std::make_pair(MI, OpNum);
-
- if (!ReloadedRegs.insert(PhysReg)) {
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, TM);
- }
- report_fatal_error(Msg.str());
- }
- for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
- *SubRegs; ++SubRegs) {
- if (ReloadedRegs.insert(*SubRegs)) continue;
-
- std::string msg;
- raw_string_ostream Msg(msg);
- Msg << "Ran out of registers during register allocation!";
- if (MI->isInlineAsm()) {
- Msg << "\nPlease check your inline asm statement for invalid "
- << "constraints:\n";
- MI->print(Msg, TM);
- }
- report_fatal_error(Msg.str());
- }
-
- return MI;
-}
-
-/// isReadModWriteImplicitKill - True if this is an implicit kill for a
-/// read/mod/write register, i.e. update partial register.
-static bool isReadModWriteImplicitKill(MachineInstr *MI, unsigned Reg) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg && MO.isImplicit() &&
- MO.isDef() && !MO.isDead())
- return true;
- }
- return false;
-}
-
-/// isReadModWriteImplicitDef - True if this is an implicit def for a
-/// read/mod/write register, i.e. update partial register.
-static bool isReadModWriteImplicitDef(MachineInstr *MI, unsigned Reg) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == Reg && MO.isImplicit() &&
- !MO.isDef() && MO.isKill())
- return true;
- }
- return false;
-}
-
-// precedes - Helper function to determine whether MachineInstr A
-// precedes MachineInstr B within the same MBB.
-static bool precedes(MachineBasicBlock::iterator A,
- MachineBasicBlock::iterator B) {
- if (A == B)
- return false;
-
- MachineBasicBlock::iterator I = A->getParent()->begin();
- while (I != A->getParent()->end()) {
- if (I == A)
- return true;
- else if (I == B)
- return false;
-
- ++I;
- }
-
- return false;
-}
-
-/// ComputeLocalLiveness - Computes liveness of registers within a basic
-/// block, setting the killed/dead flags as appropriate.
-void RALocal::ComputeLocalLiveness(MachineBasicBlock& MBB) {
- // Keep track of the most recently seen previous use or def of each reg,
- // so that we can update them with dead/kill markers.
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> > LastUseDef;
- for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
- I != E; ++I) {
- if (I->isDebugValue())
- continue;
-
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = I->getOperand(i);
- // Uses don't trigger any flags, but we need to save
- // them for later. Also, we have to process these
- // _before_ processing the defs, since an instr
- // uses regs before it defs them.
- if (!MO.isReg() || !MO.getReg() || !MO.isUse())
- continue;
-
- // Ignore helpful kill flags from earlier passes.
- MO.setIsKill(false);
-
- LastUseDef[MO.getReg()] = std::make_pair(I, i);
-
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) continue;
-
- const unsigned *Aliases = TRI->getAliasSet(MO.getReg());
- if (Aliases == 0)
- continue;
-
- while (*Aliases) {
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- alias = LastUseDef.find(*Aliases);
-
- if (alias != LastUseDef.end() && alias->second.first != I)
- LastUseDef[*Aliases] = std::make_pair(I, i);
-
- ++Aliases;
- }
- }
-
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = I->getOperand(i);
- // Defs other than 2-addr redefs _do_ trigger flag changes:
- // - A def followed by a def is dead
- // - A use followed by a def is a kill
- if (!MO.isReg() || !MO.getReg() || !MO.isDef()) continue;
-
- unsigned SubIdx = MO.getSubReg();
- DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- last = LastUseDef.find(MO.getReg());
- if (last != LastUseDef.end()) {
- // Check if this is a two address instruction. If so, then
- // the def does not kill the use.
- if (last->second.first == I && I->isRegTiedToUseOperand(i))
- continue;
-
- MachineOperand &lastUD =
- last->second.first->getOperand(last->second.second);
- if (SubIdx && lastUD.getSubReg() != SubIdx)
- // Partial re-def, the last def is not dead.
- // %reg1024:5<def> =
- // %reg1024:6<def> =
- // or
- // %reg1024:5<def> = op %reg1024, 5
- continue;
-
- if (lastUD.isDef())
- lastUD.setIsDead(true);
- else
- lastUD.setIsKill(true);
- }
-
- LastUseDef[MO.getReg()] = std::make_pair(I, i);
- }
- }
-
- // Live-out (of the function) registers contain return values of the function,
- // so we need to make sure they are alive at return time.
- MachineBasicBlock::iterator Ret = MBB.getFirstTerminator();
- bool BBEndsInReturn = (Ret != MBB.end() && Ret->getDesc().isReturn());
-
- if (BBEndsInReturn)
- for (MachineRegisterInfo::liveout_iterator
- I = MF->getRegInfo().liveout_begin(),
- E = MF->getRegInfo().liveout_end(); I != E; ++I)
- if (!Ret->readsRegister(*I)) {
- Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
- LastUseDef[*I] = std::make_pair(Ret, Ret->getNumOperands()-1);
- }
-
- // Finally, loop over the final use/def of each reg
- // in the block and determine if it is dead.
- for (DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
- I = LastUseDef.begin(), E = LastUseDef.end(); I != E; ++I) {
- MachineInstr *MI = I->second.first;
- unsigned idx = I->second.second;
- MachineOperand &MO = MI->getOperand(idx);
-
- bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(MO.getReg());
-
- // A crude approximation of "live-out" calculation
- bool usedOutsideBlock = isPhysReg ? false :
- UsedInMultipleBlocks.test(MO.getReg() -
- TargetRegisterInfo::FirstVirtualRegister);
-
- // If the machine BB ends in a return instruction, then the value isn't used
- // outside of the BB.
- if (!isPhysReg && (!usedOutsideBlock || BBEndsInReturn)) {
- // DBG_VALUE complicates this: if the only refs of a register outside
- // this block are DBG_VALUE, we can't keep the reg live just for that,
- // as it will cause the reg to be spilled at the end of this block when
- // it wouldn't have been otherwise. Nullify the DBG_VALUEs when that
- // happens.
- bool UsedByDebugValueOnly = false;
- for (MachineRegisterInfo::reg_iterator UI = MRI->reg_begin(MO.getReg()),
- UE = MRI->reg_end(); UI != UE; ++UI) {
- // Two cases:
- // - used in another block
- // - used in the same block before it is defined (loop)
- if (UI->getParent() == &MBB &&
- !(MO.isDef() && UI.getOperand().isUse() && precedes(&*UI, MI)))
- continue;
-
- if (UI->isDebugValue()) {
- UsedByDebugValueOnly = true;
- continue;
- }
-
- // A non-DBG_VALUE use means we can leave DBG_VALUE uses alone.
- UsedInMultipleBlocks.set(MO.getReg() -
- TargetRegisterInfo::FirstVirtualRegister);
- usedOutsideBlock = true;
- UsedByDebugValueOnly = false;
- break;
- }
-
- if (UsedByDebugValueOnly)
- for (MachineRegisterInfo::reg_iterator UI = MRI->reg_begin(MO.getReg()),
- UE = MRI->reg_end(); UI != UE; ++UI)
- if (UI->isDebugValue() &&
- (UI->getParent() != &MBB ||
- (MO.isDef() && precedes(&*UI, MI))))
- UI.getOperand().setReg(0U);
- }
-
- // Physical registers and those that are not live-out of the block are
- // killed/dead at their last use/def within this block.
- if (isPhysReg || !usedOutsideBlock || BBEndsInReturn) {
- if (MO.isUse()) {
- // Don't mark uses that are tied to defs as kills.
- if (!MI->isRegTiedToDefOperand(idx))
- MO.setIsKill(true);
- } else {
- MO.setIsDead(true);
- }
- }
- }
-}
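
A toy sketch of the local-liveness idea implemented above (simplified, not the real pass): remember the last use or def of each register in the block, and once the block has been scanned, mark that last use as a kill or that last def as dead.

#include <iostream>
#include <map>
#include <vector>

struct ToyRef { unsigned Reg; bool IsDef; bool IsKill = false, IsDead = false; };

int main() {
  // A tiny block: def r1, use r1, def r2.
  std::vector<ToyRef> Block = {{1, true}, {1, false}, {2, true}};
  std::map<unsigned, ToyRef *> LastRef;
  for (ToyRef &R : Block)
    LastRef[R.Reg] = &R;                     // later refs overwrite earlier ones
  for (auto &Entry : LastRef) {
    if (Entry.second->IsDef) Entry.second->IsDead = true;   // dead def
    else                     Entry.second->IsKill = true;   // last use kills
  }
  for (const ToyRef &R : Block)
    std::cout << 'r' << R.Reg << (R.IsDef ? " def" : " use")
              << (R.IsDead ? " <dead>" : "") << (R.IsKill ? " <kill>" : "")
              << '\n';
}
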
-
-void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) {
- // loop over each instruction
- MachineBasicBlock::iterator MII = MBB.begin();
-
- DEBUG({
- const BasicBlock *LBB = MBB.getBasicBlock();
- if (LBB)
- dbgs() << "\nStarting RegAlloc of BB: " << LBB->getName();
- });
-
- // Add live-in registers as active.
- for (MachineBasicBlock::livein_iterator I = MBB.livein_begin(),
- E = MBB.livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- MF->getRegInfo().setPhysRegUsed(Reg);
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
-
- AddToPhysRegsUseOrder(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- }
- }
-
- ComputeLocalLiveness(MBB);
-
- // Otherwise, sequentially allocate each instruction in the MBB.
- while (MII != MBB.end()) {
- MachineInstr *MI = MII++;
- const TargetInstrDesc &TID = MI->getDesc();
- DEBUG({
- dbgs() << "\nStarting RegAlloc of: " << *MI;
- dbgs() << " Regs have values: ";
- for (unsigned i = 0; i != TRI->getNumRegs(); ++i)
- if (PhysRegsUsed[i] != -1 && PhysRegsUsed[i] != -2) {
- if (PhysRegsUsed[i] && isVirtRegModified(PhysRegsUsed[i]))
- dbgs() << "*";
- dbgs() << "[" << TRI->getName(i)
- << ",%reg" << PhysRegsUsed[i] << "] ";
- }
- dbgs() << '\n';
- });
-
- // Determine whether this is a copy instruction. The cases where the
- // source or destination are phys regs are handled specially.
- unsigned SrcCopyReg, DstCopyReg, SrcCopySubReg, DstCopySubReg;
- unsigned SrcCopyPhysReg = 0U;
- bool isCopy = TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg,
- SrcCopySubReg, DstCopySubReg) &&
- SrcCopySubReg == DstCopySubReg;
- if (isCopy && TargetRegisterInfo::isVirtualRegister(SrcCopyReg))
- SrcCopyPhysReg = getVirt2PhysRegMapSlot(SrcCopyReg);
-
- // Loop over the implicit uses, making sure that they are at the head of the
- // use order list, so they don't get reallocated.
- if (TID.ImplicitUses) {
- for (const unsigned *ImplicitUses = TID.ImplicitUses;
- *ImplicitUses; ++ImplicitUses)
- MarkPhysRegRecentlyUsed(*ImplicitUses);
- }
-
- SmallVector<unsigned, 8> Kills;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isKill()) continue;
-
- if (!MO.isImplicit())
- Kills.push_back(MO.getReg());
- else if (!isReadModWriteImplicitKill(MI, MO.getReg()))
- // These are extra physical register kills when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- Kills.push_back(MO.getReg());
- }
-
- // If any physical regs are earlyclobber, spill any value they might
- // have in them, then mark them unallocatable.
- // If any virtual regs are earlyclobber, allocate them now (before
- // freeing inputs that are killed).
- if (MI->isInlineAsm()) {
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.isEarlyClobber() ||
- !MO.getReg())
- continue;
-
- if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
- unsigned DestVirtReg = MO.getReg();
- unsigned DestPhysReg;
-
- // If DestVirtReg already has a value, use it.
- if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg)))
- DestPhysReg = getReg(MBB, MI, DestVirtReg);
- MF->getRegInfo().setPhysRegUsed(DestPhysReg);
- markVirtRegModified(DestVirtReg);
- getVirtRegLastUse(DestVirtReg) =
- std::make_pair((MachineInstr*)0, 0);
- DEBUG(dbgs() << " Assigning " << TRI->getName(DestPhysReg)
- << " to %reg" << DestVirtReg << "\n");
- if (unsigned DestSubIdx = MO.getSubReg()) {
- MO.setSubReg(0);
- DestPhysReg = TRI->getSubReg(DestPhysReg, DestSubIdx);
- }
- MO.setReg(DestPhysReg); // Assign the earlyclobber register
- } else {
- unsigned Reg = MO.getReg();
- if (PhysRegsUsed[Reg] == -2) continue; // Something like ESP.
- // These are extra physical register defs when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- if (isReadModWriteImplicitDef(MI, MO.getReg())) continue;
-
- MF->getRegInfo().setPhysRegUsed(Reg);
- spillPhysReg(MBB, MI, Reg, true); // Spill any existing value in reg
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
-
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*SubRegs);
- }
- }
- }
- }
-
- // If a DBG_VALUE says something is located in a spilled register,
- // change the DBG_VALUE to be undef, which prevents the register
- // from being reloaded here. Doing that would change the generated
- // code, unless another use immediately follows this instruction.
- if (MI->isDebugValue() &&
- MI->getNumOperands()==3 && MI->getOperand(0).isReg()) {
- unsigned VirtReg = MI->getOperand(0).getReg();
- if (VirtReg && TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- !getVirt2PhysRegMapSlot(VirtReg))
- MI->getOperand(0).setReg(0U);
- }
-
- // Get the used operands into registers. This has the potential to spill
- // incoming values if we are out of registers. Note that we completely
- // ignore physical register uses here. We assume that if an explicit
- // physical register is referenced by the instruction, it is guaranteed
- // to be live-in, or the input is badly hosed.
- //
- SmallSet<unsigned, 4> ReloadedRegs;
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand &MO = MI->getOperand(i);
- // here we are looking for only used operands (never def&use)
- if (MO.isReg() && !MO.isDef() && MO.getReg() && !MO.isImplicit() &&
- TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- MI = reloadVirtReg(MBB, MI, i, ReloadedRegs,
- isCopy ? DstCopyReg : 0);
- }
-
- // If this instruction is the last user of this register, kill the
- // value, freeing the register being used, so it doesn't need to be
- // spilled to memory.
- //
- for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
- unsigned VirtReg = Kills[i];
- unsigned PhysReg = VirtReg;
- if (TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- // If the virtual register was never materialized into a register, it
- // might not be in the map, but it won't hurt to zero it out anyway.
- unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
- PhysReg = PhysRegSlot;
- PhysRegSlot = 0;
- } else if (PhysRegsUsed[PhysReg] == -2) {
- // Unallocatable register dead, ignore.
- continue;
- } else {
- assert((!PhysRegsUsed[PhysReg] || PhysRegsUsed[PhysReg] == -1) &&
- "Silently clearing a virtual register?");
- }
-
- if (!PhysReg) continue;
-
- DEBUG(dbgs() << " Last use of " << TRI->getName(PhysReg)
- << "[%reg" << VirtReg <<"], removing it from live set\n");
- removePhysReg(PhysReg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(PhysReg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] != -2) {
- DEBUG(dbgs() << " Last use of "
- << TRI->getName(*SubRegs) << "[%reg" << VirtReg
- <<"], removing it from live set\n");
- removePhysReg(*SubRegs);
- }
- }
- }
-
- // Loop over all of the operands of the instruction, spilling registers that
- // are defined, and marking explicit destinations in the PhysRegsUsed map.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || MO.isImplicit() || !MO.getReg() ||
- MO.isEarlyClobber() ||
- !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
- continue;
-
- unsigned Reg = MO.getReg();
- if (PhysRegsUsed[Reg] == -2) continue; // Something like ESP.
- // These are extra physical register defs when a sub-register
- // is defined (def of a sub-register is a read/mod/write of the
- // larger registers). Ignore.
- if (isReadModWriteImplicitDef(MI, MO.getReg())) continue;
-
- MF->getRegInfo().setPhysRegUsed(Reg);
- spillPhysReg(MBB, MI, Reg, true); // Spill any existing value in reg
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(Reg);
-
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
-
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- AddToPhysRegsUseOrder(*SubRegs);
- }
- }
-
- // Loop over the implicit defs, spilling them as well.
- if (TID.ImplicitDefs) {
- for (const unsigned *ImplicitDefs = TID.ImplicitDefs;
- *ImplicitDefs; ++ImplicitDefs) {
- unsigned Reg = *ImplicitDefs;
- if (PhysRegsUsed[Reg] != -2) {
- spillPhysReg(MBB, MI, Reg, true);
- AddToPhysRegsUseOrder(Reg);
- PhysRegsUsed[Reg] = 0; // It is free and reserved now
- }
- MF->getRegInfo().setPhysRegUsed(Reg);
- for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
- *SubRegs; ++SubRegs) {
- if (PhysRegsUsed[*SubRegs] == -2) continue;
-
- AddToPhysRegsUseOrder(*SubRegs);
- PhysRegsUsed[*SubRegs] = 0; // It is free and reserved now
- MF->getRegInfo().setPhysRegUsed(*SubRegs);
- }
- }
- }
-
- SmallVector<unsigned, 8> DeadDefs;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.isDead())
- DeadDefs.push_back(MO.getReg());
- }
-
- // Okay, we have allocated all of the source operands and spilled any values
- // that would be destroyed by defs of this instruction. Loop over the
- // explicit defs and assign them to a register, spilling incoming values if
- // we need to scavenge a register.
- //
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef() || !MO.getReg() ||
- MO.isEarlyClobber() ||
- !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
- continue;
-
- unsigned DestVirtReg = MO.getReg();
- unsigned DestPhysReg;
-
- // If DestVirtReg already has a value, use it.
- if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg))) {
- // If this is a copy try to reuse the input as the output;
- // that will make the copy go away.
- // If this is a copy, the source reg is a phys reg, and
- // that reg is available, use that phys reg for DestPhysReg.
- // If this is a copy, the source reg is a virtual reg, and
- // the phys reg that was assigned to that virtual reg is now
- // available, use that phys reg for DestPhysReg. (If it's now
- // available that means this was the last use of the source.)
- if (isCopy &&
- TargetRegisterInfo::isPhysicalRegister(SrcCopyReg) &&
- isPhysRegAvailable(SrcCopyReg)) {
- DestPhysReg = SrcCopyReg;
- assignVirtToPhysReg(DestVirtReg, DestPhysReg);
- } else if (isCopy &&
- TargetRegisterInfo::isVirtualRegister(SrcCopyReg) &&
- SrcCopyPhysReg && isPhysRegAvailable(SrcCopyPhysReg) &&
- MF->getRegInfo().getRegClass(DestVirtReg)->
- contains(SrcCopyPhysReg)) {
- DestPhysReg = SrcCopyPhysReg;
- assignVirtToPhysReg(DestVirtReg, DestPhysReg);
- } else
- DestPhysReg = getReg(MBB, MI, DestVirtReg);
- }
- MF->getRegInfo().setPhysRegUsed(DestPhysReg);
- markVirtRegModified(DestVirtReg);
- getVirtRegLastUse(DestVirtReg) = std::make_pair((MachineInstr*)0, 0);
- DEBUG(dbgs() << " Assigning " << TRI->getName(DestPhysReg)
- << " to %reg" << DestVirtReg << "\n");
-
- if (unsigned DestSubIdx = MO.getSubReg()) {
- MO.setSubReg(0);
- DestPhysReg = TRI->getSubReg(DestPhysReg, DestSubIdx);
- }
- MO.setReg(DestPhysReg); // Assign the output register
- }
-
- // If this instruction defines any registers that are immediately dead,
- // kill them now.
- //
- for (unsigned i = 0, e = DeadDefs.size(); i != e; ++i) {
- unsigned VirtReg = DeadDefs[i];
- unsigned PhysReg = VirtReg;
- if (TargetRegisterInfo::isVirtualRegister(VirtReg)) {
- unsigned &PhysRegSlot = getVirt2PhysRegMapSlot(VirtReg);
- PhysReg = PhysRegSlot;
- assert(PhysReg != 0);
- PhysRegSlot = 0;
- } else if (PhysRegsUsed[PhysReg] == -2) {
- // Unallocatable register dead, ignore.
- continue;
- } else if (!PhysReg)
- continue;
-
- DEBUG(dbgs() << " Register " << TRI->getName(PhysReg)
- << " [%reg" << VirtReg
- << "] is never used, removing it from live set\n");
- removePhysReg(PhysReg);
- for (const unsigned *AliasSet = TRI->getAliasSet(PhysReg);
- *AliasSet; ++AliasSet) {
- if (PhysRegsUsed[*AliasSet] != -2) {
- DEBUG(dbgs() << " Register " << TRI->getName(*AliasSet)
- << " [%reg" << *AliasSet
- << "] is never used, removing it from live set\n");
- removePhysReg(*AliasSet);
- }
- }
- }
-
- // If this instruction is a call, make sure there are no dirty registers. The
- // call might throw an exception, and the landing pad expects to find all
- // registers in stack slots.
- if (TID.isCall())
- for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
- if (PhysRegsUsed[i] <= 0) continue;
- unsigned VirtReg = PhysRegsUsed[i];
- if (!isVirtRegModified(VirtReg)) continue;
- DEBUG(dbgs() << " Storing dirty %reg" << VirtReg);
- storeVirtReg(MBB, MI, VirtReg, i, false);
- markVirtRegModified(VirtReg, false);
- DEBUG(dbgs() << " because the call might throw\n");
- }
-
- // Finally, if this is a noop copy instruction, zap it. (Except that if
- // the copy is dead, it must be kept to avoid messing up liveness info for
- // the register scavenger. See pr4100.)
- if (TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg,
- SrcCopySubReg, DstCopySubReg) &&
- SrcCopyReg == DstCopyReg && SrcCopySubReg == DstCopySubReg &&
- DeadDefs.empty()) {
- ++NumCopies;
- MBB.erase(MI);
- }
- }
-
- MachineBasicBlock::iterator MI = MBB.getFirstTerminator();
-
- // Spill all physical registers holding virtual registers now.
- for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i)
- if (PhysRegsUsed[i] != -1 && PhysRegsUsed[i] != -2) {
- if (unsigned VirtReg = PhysRegsUsed[i])
- spillVirtReg(MBB, MI, VirtReg, i);
- else
- removePhysReg(i);
- }
-
-#if 0
- // This checking code is very expensive.
- bool AllOk = true;
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
- e = MF->getRegInfo().getLastVirtReg(); i <= e; ++i)
- if (unsigned PR = Virt2PhysRegMap[i]) {
- cerr << "Register still mapped: " << i << " -> " << PR << "\n";
- AllOk = false;
- }
- assert(AllOk && "Virtual registers still in phys regs?");
-#endif
-
- // Clear any physical register which appear live at the end of the basic
- // block, but which do not hold any virtual registers. e.g., the stack
- // pointer.
- PhysRegsUseOrder.clear();
-}
-
-/// runOnMachineFunction - Register allocate the whole function
-///
-bool RALocal::runOnMachineFunction(MachineFunction &Fn) {
- DEBUG(dbgs() << "Machine Function\n");
- MF = &Fn;
- MRI = &Fn.getRegInfo();
- TM = &Fn.getTarget();
- TRI = TM->getRegisterInfo();
- TII = TM->getInstrInfo();
-
- PhysRegsUsed.assign(TRI->getNumRegs(), -1);
-
- // At various places we want to efficiently check to see whether a register
- // is allocatable. To handle this, we mark all unallocatable registers as
- // being pinned down, permanently.
- {
- BitVector Allocable = TRI->getAllocatableSet(Fn);
- for (unsigned i = 0, e = Allocable.size(); i != e; ++i)
- if (!Allocable[i])
- PhysRegsUsed[i] = -2; // Mark the reg unallocable.
- }
-
- // initialize the virtual->physical register map to have a 'null'
- // mapping for all virtual registers
- unsigned LastVirtReg = MF->getRegInfo().getLastVirtReg();
- StackSlotForVirtReg.grow(LastVirtReg);
- Virt2PhysRegMap.grow(LastVirtReg);
- Virt2LastUseMap.grow(LastVirtReg);
- VirtRegModified.resize(LastVirtReg+1 -
- TargetRegisterInfo::FirstVirtualRegister);
- UsedInMultipleBlocks.resize(LastVirtReg+1 -
- TargetRegisterInfo::FirstVirtualRegister);
-
- // Loop over all of the basic blocks, eliminating virtual register references
- for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
- MBB != MBBe; ++MBB)
- AllocateBasicBlock(*MBB);
-
- StackSlotForVirtReg.clear();
- PhysRegsUsed.clear();
- VirtRegModified.clear();
- UsedInMultipleBlocks.clear();
- Virt2PhysRegMap.clear();
- Virt2LastUseMap.clear();
- return true;
-}
-
-FunctionPass *llvm::createLocalRegisterAllocator() {
- return new RALocal();
-}
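
For reference, the call-handling loop above amounts to: any physical register currently holding a modified virtual register is written back to that register's stack slot before the call, so an exception edge to the landing pad still finds the value in memory. A toy, self-contained sketch of that bookkeeping (toy types and maps, not the real allocator state):

  #include <map>
  #include <cstdio>

  struct ToyAllocator {
    std::map<int, int>  PhysToVirt; // physreg -> vreg currently held in it
    std::map<int, bool> Modified;   // vreg -> dirty bit
    std::map<int, long> StackSlot;  // vreg -> spilled value (stand-in)
    std::map<int, long> RegValue;   // physreg -> current value (stand-in)

    void spillDirtyBeforeCall() {
      for (auto &P : PhysToVirt) {
        int VReg = P.second;
        if (!Modified[VReg]) continue;       // clean copies need no store
        StackSlot[VReg] = RegValue[P.first]; // store the register to its slot
        Modified[VReg] = false;              // no longer dirty
      }
    }
  };

  int main() {
    ToyAllocator RA;
    RA.PhysToVirt[1] = 1024; RA.RegValue[1] = 42; RA.Modified[1024] = true;
    RA.spillDirtyBeforeCall();
    std::printf("vreg1024 slot = %ld\n", RA.StackSlot[1024]); // prints 42
  }
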
Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterCoalescer.cpp Fri Jul 2 04:34:51 2010
@@ -16,6 +16,8 @@
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Pass.h"
@@ -33,6 +35,151 @@
//
RegisterCoalescer::~RegisterCoalescer() {}
+unsigned CoalescerPair::compose(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return tri_.composeSubRegIndices(a, b);
+}
+
+bool CoalescerPair::isMoveInstr(const MachineInstr *MI,
+ unsigned &Src, unsigned &Dst,
+ unsigned &SrcSub, unsigned &DstSub) const {
+ if (MI->isExtractSubreg()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = MI->getOperand(0).getSubReg();
+ Src = MI->getOperand(1).getReg();
+ SrcSub = compose(MI->getOperand(1).getSubReg(), MI->getOperand(2).getImm());
+ } else if (MI->isInsertSubreg() || MI->isSubregToReg()) {
+ Dst = MI->getOperand(0).getReg();
+ DstSub = compose(MI->getOperand(0).getSubReg(), MI->getOperand(3).getImm());
+ Src = MI->getOperand(2).getReg();
+ SrcSub = MI->getOperand(2).getSubReg();
+ } else if (!tii_.isMoveInstr(*MI, Src, Dst, SrcSub, DstSub)) {
+ return false;
+ }
+ return true;
+}
+
+bool CoalescerPair::setRegisters(const MachineInstr *MI) {
+ srcReg_ = dstReg_ = subIdx_ = 0;
+ newRC_ = 0;
+ flipped_ = false;
+
+ unsigned Src, Dst, SrcSub, DstSub;
+ if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+ return false;
+ partial_ = SrcSub || DstSub;
+
+ // If one register is a physreg, it must be Dst.
+ if (TargetRegisterInfo::isPhysicalRegister(Src)) {
+ if (TargetRegisterInfo::isPhysicalRegister(Dst))
+ return false;
+ std::swap(Src, Dst);
+ std::swap(SrcSub, DstSub);
+ flipped_ = true;
+ }
+
+ const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+
+ if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
+ // Eliminate DstSub on a physreg.
+ if (DstSub) {
+ Dst = tri_.getSubReg(Dst, DstSub);
+ if (!Dst) return false;
+ DstSub = 0;
+ }
+
+ // Eliminate SrcSub by picking a corresponding Dst superregister.
+ if (SrcSub) {
+ Dst = tri_.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
+ if (!Dst) return false;
+ SrcSub = 0;
+ } else if (!MRI.getRegClass(Src)->contains(Dst)) {
+ return false;
+ }
+ } else {
+ // Both registers are virtual.
+
+ // Identical sub to sub.
+ if (SrcSub == DstSub)
+ SrcSub = DstSub = 0;
+ else if (SrcSub && DstSub)
+ return false; // FIXME: Qreg:ssub_3 + Dreg:ssub_1 => QReg:dsub_1 + Dreg.
+
+ // There can be no SrcSub.
+ if (SrcSub) {
+ std::swap(Src, Dst);
+ DstSub = SrcSub;
+ SrcSub = 0;
+ assert(!flipped_ && "Unexpected flip");
+ flipped_ = true;
+ }
+
+ // Find the new register class.
+ const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
+ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
+ if (DstSub)
+ newRC_ = tri_.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
+ else
+ newRC_ = getCommonSubClass(DstRC, SrcRC);
+ if (!newRC_)
+ return false;
+ }
+ // Check our invariants
+ assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
+ assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
+ "Cannot have a physical SubIdx");
+ srcReg_ = Src;
+ dstReg_ = Dst;
+ subIdx_ = DstSub;
+ return true;
+}
+
+bool CoalescerPair::flip() {
+ if (subIdx_ || TargetRegisterInfo::isPhysicalRegister(dstReg_))
+ return false;
+ std::swap(srcReg_, dstReg_);
+ flipped_ = !flipped_;
+ return true;
+}
+
+bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
+ if (!MI)
+ return false;
+ unsigned Src, Dst, SrcSub, DstSub;
+ if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
+ return false;
+
+ // Find the virtual register that is srcReg_.
+ if (Dst == srcReg_) {
+ std::swap(Src, Dst);
+ std::swap(SrcSub, DstSub);
+ } else if (Src != srcReg_) {
+ return false;
+ }
+
+ // Now check that Dst matches dstReg_.
+ if (TargetRegisterInfo::isPhysicalRegister(dstReg_)) {
+ if (!TargetRegisterInfo::isPhysicalRegister(Dst))
+ return false;
+ assert(!subIdx_ && "Inconsistent CoalescerPair state.");
+ // DstSub could be set for a physreg from INSERT_SUBREG.
+ if (DstSub)
+ Dst = tri_.getSubReg(Dst, DstSub);
+ // Full copy of Src.
+ if (!SrcSub)
+ return dstReg_ == Dst;
+ // This is a partial register copy. Check that the parts match.
+ return tri_.getSubReg(dstReg_, SrcSub) == Dst;
+ } else {
+ // dstReg_ is virtual.
+ if (dstReg_ != Dst)
+ return false;
+ // Registers match, do the subregisters line up?
+ return compose(subIdx_, SrcSub) == DstSub;
+ }
+}
+
// Because of the way .a files work, we must force the SimpleRC
// implementation to be pulled in if the RegisterCoalescer classes are
// pulled in. Otherwise we run the risk of RegisterCoalescer being
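
As an aside, the convention behind the new compose() helper is that a sub-register index of 0 means "no sub-register": composing with 0 is a no-op, and only two real indices reach the target hook. A standalone sketch of just that convention, with composeSubRegIndices stubbed out so the example runs on its own:

  #include <cassert>

  static unsigned targetCompose(unsigned a, unsigned b) {
    // Stand-in for TargetRegisterInfo::composeSubRegIndices.
    return a * 100 + b;
  }

  static unsigned compose(unsigned a, unsigned b) {
    if (!a) return b;              // no outer index: keep the inner one
    if (!b) return a;              // no inner index: keep the outer one
    return targetCompose(a, b);    // both present: defer to the target
  }

  int main() {
    assert(compose(0, 3) == 3);
    assert(compose(2, 0) == 2);
    assert(compose(2, 3) == 203);
    return 0;
  }
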
Modified: llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/RegisterScavenging.cpp Fri Jul 2 04:34:51 2010
@@ -141,6 +141,10 @@
// Find out which registers are early clobbered, killed, defined, and marked
// def-dead in this instruction.
+ // FIXME: The scavenger is not predication aware. If the instruction is
+ // predicated, conservatively assume "kill" markers do not actually kill the
+ // register. Similarly, ignore "dead" markers.
+ bool isPred = TII->isPredicated(MI);
BitVector EarlyClobberRegs(NumPhysRegs);
BitVector KillRegs(NumPhysRegs);
BitVector DefRegs(NumPhysRegs);
@@ -155,11 +159,11 @@
if (MO.isUse()) {
// Two-address operands implicitly kill.
- if (MO.isKill() || MI->isRegTiedToDefOperand(i))
+ if (!isPred && (MO.isKill() || MI->isRegTiedToDefOperand(i)))
addRegWithSubRegs(KillRegs, Reg);
} else {
assert(MO.isDef());
- if (MO.isDead())
+ if (!isPred && MO.isDead())
addRegWithSubRegs(DeadRegs, Reg);
else
addRegWithSubRegs(DefRegs, Reg);
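
The guard added here is simply "do not trust kill/dead markers on a predicated instruction". A minimal standalone model of that decision (tied two-address operands omitted for brevity; none of these names are the real MachineOperand API):

  #include <cassert>

  struct OperandInfo { bool IsUse, IsKill, IsDead; };

  static bool treatAsKill(const OperandInfo &MO, bool IsPredicated) {
    return MO.IsUse && MO.IsKill && !IsPredicated;
  }
  static bool treatAsDead(const OperandInfo &MO, bool IsPredicated) {
    return !MO.IsUse && MO.IsDead && !IsPredicated;
  }

  int main() {
    OperandInfo KilledUse = { true, true, false };
    assert(treatAsKill(KilledUse, /*IsPredicated=*/false));
    assert(!treatAsKill(KilledUse, /*IsPredicated=*/true)); // kept live
    OperandInfo DeadDef = { false, false, true };
    assert(treatAsDead(DeadDef, /*IsPredicated=*/false));
    assert(!treatAsDead(DeadDef, /*IsPredicated=*/true));   // kept live
    return 0;
  }
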
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Jul 2 04:34:51 2010
@@ -2028,7 +2028,7 @@
// fold (OP (zext x), (zext y)) -> (zext (OP x, y))
// fold (OP (sext x), (sext y)) -> (sext (OP x, y))
// fold (OP (aext x), (aext y)) -> (aext (OP x, y))
- // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y))
+ // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
//
// do not sink logical op inside of a vector extend, since it may combine
// into a vsetcc.
@@ -2038,7 +2038,10 @@
// Avoid infinite looping with PromoteIntBinOp.
(N0.getOpcode() == ISD::ANY_EXTEND &&
(!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
- (N0.getOpcode() == ISD::TRUNCATE && TLI.isTypeLegal(Op0VT))) &&
+ (N0.getOpcode() == ISD::TRUNCATE &&
+ (!TLI.isZExtFree(VT, Op0VT) ||
+ !TLI.isTruncateFree(Op0VT, VT)) &&
+ TLI.isTypeLegal(Op0VT))) &&
!VT.isVector() &&
Op0VT == N1.getOperand(0).getValueType() &&
(!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
@@ -2425,6 +2428,11 @@
if (SDNode *Rot = MatchRotate(N0, N1, N->getDebugLoc()))
return SDValue(Rot, 0);
+ // Simplify the operands using demanded-bits information.
+ if (!VT.isVector() &&
+ SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
return SDValue();
}
@@ -3158,6 +3166,11 @@
return NewSRL;
}
+ // Attempt to convert a srl of a load into a narrower zero-extending load.
+ SDValue NarrowLoad = ReduceLoadWidth(N);
+ if (NarrowLoad.getNode())
+ return NarrowLoad;
+
// Here is a common situation. We want to optimize:
//
// %a = ...
@@ -3635,10 +3648,7 @@
// fold (zext (truncate x)) -> (and x, mask)
if (N0.getOpcode() == ISD::TRUNCATE &&
- (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) &&
- (!TLI.isTruncateFree(N0.getOperand(0).getValueType(),
- N0.getValueType()) ||
- !TLI.isZExtFree(N0.getValueType(), VT))) {
+ (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
SDValue Op = N0.getOperand(0);
if (Op.getValueType().bitsLT(VT)) {
Op = DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), VT, Op);
@@ -4024,6 +4034,7 @@
/// extended, also fold the extension to form a extending load.
SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
unsigned Opc = N->getOpcode();
+
ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -4040,6 +4051,15 @@
ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
if (LegalOperations && !TLI.isLoadExtLegal(ISD::SEXTLOAD, ExtVT))
return SDValue();
+ } else if (Opc == ISD::SRL) {
+ // Another special case: SRL is basically zero-extending a narrower
+ // value.
+ ExtType = ISD::ZEXTLOAD;
+ N0 = SDValue(N, 0);
+ ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (!N01) return SDValue();
+ ExtVT = EVT::getIntegerVT(*DAG.getContext(),
+ VT.getSizeInBits() - N01->getZExtValue());
}
unsigned EVTBits = ExtVT.getSizeInBits();
@@ -4243,8 +4263,17 @@
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
- if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT))
- return ReduceLoadWidth(N);
+ if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
+ SDValue Reduced = ReduceLoadWidth(N);
+ if (Reduced.getNode())
+ return Reduced;
+ }
+
+ // Simplify the operands using demanded-bits information.
+ if (!VT.isVector() &&
+ SimplifyDemandedBits(SDValue(N, 0)))
+ return SDValue(N, 0);
+
return SDValue();
}
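
The SRL special case in ReduceLoadWidth treats (srl (load x), C) as a zero-extending load of a (BitWidth - C)-bit value, which is exactly what the ExtVT computation above does. A standalone model of that width calculation and why it is safe:

  #include <cassert>
  #include <cstdint>

  static unsigned narrowedLoadBits(unsigned ValueBits, unsigned ShiftAmt) {
    assert(ShiftAmt < ValueBits && "shift must leave at least one bit");
    return ValueBits - ShiftAmt;
  }

  int main() {
    // An i32 value shifted right by 24: only the top 8 bits survive, so the
    // combine can use an 8-bit zero-extending load of the high byte.
    assert(narrowedLoadBits(32, 24) == 8);
    uint32_t X = 0xAABBCCDDu;
    assert((X >> 24) == (uint32_t)(uint8_t)(X >> 24)); // result fits in 8 bits
    return 0;
  }
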
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/FastISel.cpp Fri Jul 2 04:34:51 2010
@@ -345,7 +345,7 @@
// If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
- if (CI->getZExtValue() == 0) continue;
+ if (CI->isZero()) continue;
uint64_t Offs =
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
@@ -655,12 +655,12 @@
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
-FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
+FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
if (MBB->isLayoutSuccessor(MSucc)) {
// The unconditional fall-through case, which needs no instructions.
} else {
// The unconditional branch case.
- TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
+ TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>(), DL);
}
MBB->addSuccessor(MSucc);
}
@@ -763,7 +763,7 @@
if (BI->isUnconditional()) {
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
- FastEmitBranch(MSucc);
+ FastEmitBranch(MSucc, BI->getDebugLoc());
return true;
}
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/InstrEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -821,14 +821,18 @@
case InlineAsm::Kind_RegDef:
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
- MI->addOperand(MachineOperand::CreateReg(Reg, true));
+ // FIXME: Add dead flags for physical and virtual registers defined.
+ // For now, mark physical register defs as implicit to help fast
+ // regalloc. This makes inline asm look a lot like calls.
+ MI->addOperand(MachineOperand::CreateReg(Reg, true,
+ /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg)));
}
break;
case InlineAsm::Kind_RegDefEarlyClobber:
for (; NumVals; --NumVals, ++i) {
unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/ true,
- /*isImp=*/ false,
+ /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg),
/*isKill=*/ false,
/*isDead=*/ false,
/*isUndef=*/false,
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Fri Jul 2 04:34:51 2010
@@ -357,7 +357,7 @@
EVT SVT = VT;
while (SVT != MVT::f32) {
SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
- if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
+ if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
@@ -927,8 +927,8 @@
break;
}
- Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
- Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
+ Ops.size()), 0);
switch (Action) {
case TargetLowering::Legal:
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
@@ -1018,7 +1018,8 @@
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0], Ops.size()),
+ Result.getResNo());
}
// Remember that the CALLSEQ_START is legalized.
@@ -1060,7 +1061,9 @@
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ &Ops[0], Ops.size()),
+ Result.getResNo());
}
} else {
Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
@@ -1069,7 +1072,9 @@
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
Ops.back() = Tmp2;
- Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ &Ops[0], Ops.size()),
+ Result.getResNo());
}
}
assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
@@ -1089,7 +1094,9 @@
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType == ISD::NON_EXTLOAD) {
EVT VT = Node->getValueType(0);
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
Tmp3 = Result.getValue(0);
Tmp4 = Result.getValue(1);
@@ -1269,7 +1276,9 @@
isCustom = true;
// FALLTHROUGH
case TargetLowering::Legal:
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp2, LD->getOffset()),
+ Result.getResNo());
Tmp1 = Result.getValue(0);
Tmp2 = Result.getValue(1);
@@ -1357,8 +1366,10 @@
{
Tmp3 = LegalizeOp(ST->getValue());
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp3, Tmp2,
+ ST->getOffset()),
+ Result.getResNo());
EVT VT = Tmp3.getValueType();
switch (TLI.getOperationAction(ISD::STORE, VT)) {
@@ -1461,8 +1472,10 @@
} else {
if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
Tmp2 != ST->getBasePtr())
- Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
- ST->getOffset());
+ Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
+ Tmp1, Tmp3, Tmp2,
+ ST->getOffset()),
+ Result.getResNo());
switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
default: assert(0 && "This action is not supported yet!");
@@ -2359,10 +2372,42 @@
case ISD::EH_RETURN:
case ISD::EH_LABEL:
case ISD::PREFETCH:
- case ISD::MEMBARRIER:
case ISD::VAEND:
Results.push_back(Node->getOperand(0));
break;
+ case ISD::MEMBARRIER: {
+ // If the target didn't lower this, lower it to a '__sync_synchronize()' call
+ TargetLowering::ArgListTy Args;
+ std::pair<SDValue, SDValue> CallResult =
+ TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
+ false, false, false, false, 0, CallingConv::C, false,
+ /*isReturnValueUsed=*/true,
+ DAG.getExternalSymbol("__sync_synchronize",
+ TLI.getPointerTy()),
+ Args, DAG, dl);
+ Results.push_back(CallResult.second);
+ break;
+ }
+ // By default, atomic intrinsics are marked Legal and lowered. Targets
+ // which don't support them directly, however, may want libcalls, in which
+ // case they mark them Expand, and we get here.
+ // FIXME: Unimplemented for now. Add libcalls.
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_CMP_SWAP: {
+ assert (0 && "atomic intrinsic not lowered!");
+ Results.push_back(Node->getOperand(0));
+ break;
+ }
case ISD::DYNAMIC_STACKALLOC:
ExpandDYNAMIC_STACKALLOC(Node, Results);
break;
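
From the programmer's point of view, the expanded MEMBARRIER is just a call to the __sync_synchronize builtin, i.e. a full memory barrier (assuming a GCC-compatible compiler that provides the builtin):

  #include <cstdio>

  int Flag = 0;
  int Data = 0;

  void publish(int V) {
    Data = V;
    __sync_synchronize(); // full barrier; no memory access moves across it
    Flag = 1;
  }

  int main() {
    publish(42);
    std::printf("%d %d\n", Flag, Data);
    return 0;
  }
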
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp Fri Jul 2 04:34:51 2010
@@ -698,9 +698,10 @@
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) {
@@ -739,9 +740,10 @@
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) {
@@ -757,8 +759,9 @@
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)),
+ 0);
}
SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
@@ -1294,9 +1297,9 @@
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
@@ -1375,9 +1378,9 @@
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) {
@@ -1393,8 +1396,8 @@
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) {
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp Fri Jul 2 04:34:51 2010
@@ -725,8 +725,9 @@
// The chain (Op#0), CC (#1) and basic block destination (Op#4) are always
// legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- N->getOperand(1), LHS, RHS, N->getOperand(4));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ N->getOperand(1), LHS, RHS, N->getOperand(4)),
+ 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
@@ -737,8 +738,8 @@
SDValue Cond = PromoteTargetBoolean(N->getOperand(1), SVT);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Cond,
- N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Cond,
+ N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) {
@@ -773,7 +774,7 @@
for (unsigned i = 0; i < NumElts; ++i)
NewOps.push_back(GetPromotedInteger(N->getOperand(i)));
- return DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0], NumElts);
+ return SDValue(DAG.UpdateNodeOperands(N, &NewOps[0], NumElts), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONVERT_RNDSAT(SDNode *N) {
@@ -798,17 +799,18 @@
assert(N->getOperand(1).getValueType().getSizeInBits() >=
N->getValueType(0).getVectorElementType().getSizeInBits() &&
"Type of inserted value narrower than vector element type!");
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
GetPromotedInteger(N->getOperand(1)),
- N->getOperand(2));
+ N->getOperand(2)),
+ 0);
}
assert(OpNo == 2 && "Different operand and result vector types?");
// Promote the index.
SDValue Idx = ZExtPromotedInteger(N->getOperand(2));
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- N->getOperand(1), Idx);
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ N->getOperand(1), Idx), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
@@ -819,15 +821,14 @@
SDValue Flag = GetPromotedInteger(N->getOperand(i));
NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, MVT::i1);
}
- return DAG.UpdateNodeOperands(SDValue (N, 0), NewOps,
- array_lengthof(NewOps));
+ return SDValue(DAG.UpdateNodeOperands(N, NewOps, array_lengthof(NewOps)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SCALAR_TO_VECTOR(SDNode *N) {
// Integer SCALAR_TO_VECTOR operands are implicitly truncated, so just promote
// the operand in place.
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- GetPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ GetPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) {
@@ -837,8 +838,8 @@
EVT SVT = TLI.getSetCCResultType(N->getOperand(1).getValueType());
SDValue Cond = PromoteTargetBoolean(N->getOperand(0), SVT);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Cond,
- N->getOperand(1), N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, Cond,
+ N->getOperand(1), N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) {
@@ -849,8 +850,8 @@
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(4))->get());
// The CC (#4) and the possible return values (#2 and #3) have legal types.
- return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2),
- N->getOperand(3), N->getOperand(4));
+ return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2),
+ N->getOperand(3), N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) {
@@ -861,12 +862,12 @@
PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get());
// The CC (#2) is always legal.
- return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(N, LHS, RHS, N->getOperand(2)), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_Shift(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
- ZExtPromotedInteger(N->getOperand(1)));
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
+ ZExtPromotedInteger(N->getOperand(1))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) {
@@ -878,8 +879,8 @@
}
SDValue DAGTypeLegalizer::PromoteIntOp_SINT_TO_FP(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- SExtPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ SExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){
@@ -905,8 +906,8 @@
}
SDValue DAGTypeLegalizer::PromoteIntOp_UINT_TO_FP(SDNode *N) {
- return DAG.UpdateNodeOperands(SDValue(N, 0),
- ZExtPromotedInteger(N->getOperand(0)));
+ return SDValue(DAG.UpdateNodeOperands(N,
+ ZExtPromotedInteger(N->getOperand(0))), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
@@ -2224,9 +2225,9 @@
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
DAG.getCondCode(CCCode), NewLHS, NewRHS,
- N->getOperand(4));
+ N->getOperand(4)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) {
@@ -2242,9 +2243,9 @@
}
// Update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
N->getOperand(2), N->getOperand(3),
- DAG.getCondCode(CCCode));
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) {
@@ -2260,8 +2261,8 @@
}
// Otherwise, update N to have the operands specified.
- return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS,
- DAG.getCondCode(CCCode));
+ return SDValue(DAG.UpdateNodeOperands(N, NewLHS, NewRHS,
+ DAG.getCondCode(CCCode)), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_Shift(SDNode *N) {
@@ -2270,7 +2271,7 @@
// upper half of the shift amount is zero. Just use the lower half.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(1), Lo, Hi);
- return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Lo);
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_RETURNADDR(SDNode *N) {
@@ -2279,7 +2280,7 @@
// constant to valid type.
SDValue Lo, Hi;
GetExpandedInteger(N->getOperand(0), Lo, Hi);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Lo);
+ return SDValue(DAG.UpdateNodeOperands(N, Lo), 0);
}
SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) {
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp Fri Jul 2 04:34:51 2010
@@ -492,8 +492,7 @@
// Some operands changed - update the node.
if (!NewOps.empty()) {
- SDNode *M = DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0],
- NewOps.size()).getNode();
+ SDNode *M = DAG.UpdateNodeOperands(N, &NewOps[0], NewOps.size());
if (M != N) {
// The node morphed into a different node. Normally for this to happen
// the original node would have to be marked NewNode. However this can
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp Fri Jul 2 04:34:51 2010
@@ -116,7 +116,7 @@
Ops.push_back(LegalizeOp(Node->getOperand(i)));
SDValue Result =
- DAG.UpdateNodeOperands(Op.getValue(0), Ops.data(), Ops.size());
+ SDValue(DAG.UpdateNodeOperands(Op.getNode(), Ops.data(), Ops.size()), 0);
bool HasVectorValue = false;
for (SDNode::value_iterator J = Node->value_begin(), E = Node->value_end();
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp Fri Jul 2 04:34:51 2010
@@ -1087,10 +1087,11 @@
uint64_t LoElts = Lo.getValueType().getVectorNumElements();
if (IdxVal < LoElts)
- return DAG.UpdateNodeOperands(SDValue(N, 0), Lo, Idx);
- return DAG.UpdateNodeOperands(SDValue(N, 0), Hi,
+ return SDValue(DAG.UpdateNodeOperands(N, Lo, Idx), 0);
+ return SDValue(DAG.UpdateNodeOperands(N, Hi,
DAG.getConstant(IdxVal - LoElts,
- Idx.getValueType()));
+ Idx.getValueType())),
+ 0);
}
// Store the vector to the stack.
@@ -1271,7 +1272,7 @@
EVT WidenEltVT = WidenVT.getVectorElementType();
EVT VT = WidenVT;
unsigned NumElts = VT.getVectorNumElements();
- while (!TLI.isTypeLegal(VT) && NumElts != 1) {
+ while (!TLI.isTypeSynthesizable(VT) && NumElts != 1) {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
}
@@ -1286,13 +1287,20 @@
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
} else {
// Since the operation can trap, apply operation on the original vector.
+ EVT MaxVT = VT;
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
unsigned CurNumElts = N->getValueType(0).getVectorNumElements();
SmallVector<SDValue, 16> ConcatOps(CurNumElts);
unsigned ConcatEnd = 0; // Current ConcatOps index.
- unsigned Idx = 0; // Current Idx into input vectors.
+ int Idx = 0; // Current Idx into input vectors.
+
+ // NumElts := greatest synthesizable vector size (at most WidenVT)
+ // while (orig. vector has unhandled elements) {
+ // take munches of size NumElts from the beginning and add to ConcatOps
+ // NumElts := next smaller supported vector size or 1
+ // }
while (CurNumElts != 0) {
while (CurNumElts >= NumElts) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, InOp1,
@@ -1303,26 +1311,21 @@
Idx += NumElts;
CurNumElts -= NumElts;
}
- EVT PrevVecVT = VT;
do {
NumElts = NumElts / 2;
VT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NumElts);
- } while (!TLI.isTypeLegal(VT) && NumElts != 1);
+ } while (!TLI.isTypeSynthesizable(VT) && NumElts != 1);
if (NumElts == 1) {
- // Since we are using concat vector, build a vector from the scalar ops.
- SDValue VecOp = DAG.getUNDEF(PrevVecVT);
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp1, DAG.getIntPtrConstant(Idx));
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp2, DAG.getIntPtrConstant(Idx));
- VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, PrevVecVT, VecOp,
- DAG.getNode(Opcode, dl, WidenEltVT, EOp1, EOp2),
- DAG.getIntPtrConstant(i));
+ ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
+ EOp1, EOp2);
}
CurNumElts = 0;
- ConcatOps[ConcatEnd++] = VecOp;
}
}
@@ -1333,23 +1336,65 @@
return ConcatOps[0];
}
- // Rebuild vector to one with the widen type
- Idx = ConcatEnd - 1;
- while (Idx != 0) {
+ // while (Some element of ConcatOps is not of type MaxVT) {
+ // From the end of ConcatOps, collect elements of the same type and put
+ // them into an op of the next larger supported type
+ // }
+ while (ConcatOps[ConcatEnd-1].getValueType() != MaxVT) {
+ Idx = ConcatEnd - 1;
VT = ConcatOps[Idx--].getValueType();
- while (Idx != 0 && ConcatOps[Idx].getValueType() == VT)
- --Idx;
- if (Idx != 0) {
- VT = ConcatOps[Idx].getValueType();
- ConcatOps[Idx+1] = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
- &ConcatOps[Idx+1], ConcatEnd - Idx - 1);
+ while (Idx >= 0 && ConcatOps[Idx].getValueType() == VT)
+ Idx--;
+
+ int NextSize = VT.isVector() ? VT.getVectorNumElements() : 1;
+ EVT NextVT;
+ do {
+ NextSize *= 2;
+ NextVT = EVT::getVectorVT(*DAG.getContext(), WidenEltVT, NextSize);
+ } while (!TLI.isTypeSynthesizable(NextVT));
+
+ if (!VT.isVector()) {
+ // Scalar type, create an INSERT_VECTOR_ELT of type NextVT
+ SDValue VecOp = DAG.getUNDEF(NextVT);
+ unsigned NumToInsert = ConcatEnd - Idx - 1;
+ for (unsigned i = 0, OpIdx = Idx+1; i < NumToInsert; i++, OpIdx++) {
+ VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NextVT, VecOp,
+ ConcatOps[OpIdx], DAG.getIntPtrConstant(i));
+ }
+ ConcatOps[Idx+1] = VecOp;
ConcatEnd = Idx + 2;
+ }
+ else {
+ // Vector type, create a CONCAT_VECTORS of type NextVT
+ SDValue undefVec = DAG.getUNDEF(VT);
+ unsigned OpsToConcat = NextSize/VT.getVectorNumElements();
+ SmallVector<SDValue, 16> SubConcatOps(OpsToConcat);
+ unsigned RealVals = ConcatEnd - Idx - 1;
+ unsigned SubConcatEnd = 0;
+ unsigned SubConcatIdx = Idx + 1;
+ while (SubConcatEnd < RealVals)
+ SubConcatOps[SubConcatEnd++] = ConcatOps[++Idx];
+ while (SubConcatEnd < OpsToConcat)
+ SubConcatOps[SubConcatEnd++] = undefVec;
+ ConcatOps[SubConcatIdx] = DAG.getNode(ISD::CONCAT_VECTORS, dl,
+ NextVT, &SubConcatOps[0],
+ OpsToConcat);
+ ConcatEnd = SubConcatIdx + 1;
}
}
+
+ // Check to see if we have a single operation with the widen type.
+ if (ConcatEnd == 1) {
+ VT = ConcatOps[0].getValueType();
+ if (VT == WidenVT)
+ return ConcatOps[0];
+ }
- unsigned NumOps = WidenVT.getVectorNumElements()/VT.getVectorNumElements();
+ // add undefs of size MaxVT until ConcatOps grows to length of WidenVT
+ unsigned NumOps =
+ WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
if (NumOps != ConcatEnd ) {
- SDValue UndefVal = DAG.getUNDEF(VT);
+ SDValue UndefVal = DAG.getUNDEF(MaxVT);
for (unsigned j = ConcatEnd; j < NumOps; ++j)
ConcatOps[j] = UndefVal;
}
@@ -1379,7 +1424,7 @@
return DAG.getNode(Opcode, dl, WidenVT, InOp);
}
- if (TLI.isTypeLegal(InWidenVT)) {
+ if (TLI.isTypeSynthesizable(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1521,7 +1566,7 @@
NewInVT = EVT::getVectorVT(*DAG.getContext(), InVT, NewNumElts);
}
- if (TLI.isTypeLegal(NewInVT)) {
+ if (TLI.isTypeSynthesizable(NewInVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1662,7 +1707,7 @@
SatOp, CvtCode);
}
- if (TLI.isTypeLegal(InWidenVT)) {
+ if (TLI.isTypeSynthesizable(InWidenVT)) {
// Because the result and the input are different vector types, widening
// the result could create a legal type but widening the input might make
// it an illegal type that might lead to repeatedly splitting the input
@@ -1988,7 +2033,7 @@
if (InWidenSize % Size == 0 && !VT.isVector()) {
unsigned NewNumElts = InWidenSize / Size;
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
- if (TLI.isTypeLegal(NewVT)) {
+ if (TLI.isTypeSynthesizable(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
DAG.getIntPtrConstant(0));
@@ -2086,7 +2131,7 @@
unsigned MemVTWidth = MemVT.getSizeInBits();
if (MemVT.getSizeInBits() <= WidenEltWidth)
break;
- if (TLI.isTypeLegal(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
+ if (TLI.isTypeSynthesizable(MemVT) && (WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
RetVT = MemVT;
@@ -2100,7 +2145,7 @@
VT >= (unsigned)MVT::FIRST_VECTOR_VALUETYPE; --VT) {
EVT MemVT = (MVT::SimpleValueType) VT;
unsigned MemVTWidth = MemVT.getSizeInBits();
- if (TLI.isTypeLegal(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
+ if (TLI.isTypeSynthesizable(MemVT) && WidenEltVT == MemVT.getVectorElementType() &&
(WidenWidth % MemVTWidth) == 0 &&
(MemVTWidth <= Width ||
(Align!=0 && MemVTWidth<=AlignInBits && MemVTWidth<=Width+WidenEx))) {
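
The "munching" loop sketched in the new comments above can be modelled on its own: peel off chunks of the largest synthesizable vector size, halving the chunk size whenever the remainder is too small, and fall back to scalars at size 1. The set of synthesizable sizes below is made up purely for the example:

  #include <cstdio>

  static bool isSynthesizable(unsigned NumElts) {
    return NumElts == 1 || NumElts == 2 || NumElts == 4; // toy target
  }

  int main() {
    unsigned CurNumElts = 7; // elements left to handle
    unsigned NumElts = 4;    // largest synthesizable chunk
    unsigned Idx = 0;
    while (CurNumElts != 0) {
      while (CurNumElts >= NumElts) {
        std::printf("vector op on elements [%u, %u)\n", Idx, Idx + NumElts);
        Idx += NumElts;
        CurNumElts -= NumElts;
      }
      do
        NumElts /= 2;
      while (NumElts > 1 && !isSynthesizable(NumElts));
      if (NumElts == 1)
        for (; CurNumElts; --CurNumElts, ++Idx)
          std::printf("scalar op on element %u\n", Idx);
    }
    return 0;
  }
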
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp Fri Jul 2 04:34:51 2010
@@ -128,98 +128,99 @@
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
-void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
+void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
+ SDNode *Chain = 0;
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
+ Chain = Node->getOperand(NumOps-1).getNode();
+ if (!Chain)
+ return;
+
+ // Look for other loads of the same chain. Find loads that are loading from
+ // the same base pointer and different offsets.
SmallPtrSet<SDNode*, 16> Visited;
SmallVector<int64_t, 4> Offsets;
DenseMap<long long, SDNode*> O2SMap; // Map from offset to SDNode.
- for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
- E = DAG->allnodes_end(); NI != E; ++NI) {
- SDNode *Node = &*NI;
- if (!Node || !Node->isMachineOpcode())
+ bool Cluster = false;
+ SDNode *Base = Node;
+ int64_t BaseOffset;
+ for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
+ I != E; ++I) {
+ SDNode *User = *I;
+ if (User == Node || !Visited.insert(User))
continue;
-
- unsigned Opc = Node->getMachineOpcode();
- const TargetInstrDesc &TID = TII->get(Opc);
- if (!TID.mayLoad())
+ int64_t Offset1, Offset2;
+ if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
+ Offset1 == Offset2)
+ // FIXME: Should be ok if the addresses are identical. But earlier
+ // optimizations really should have eliminated one of the loads.
continue;
+ if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
+ Offsets.push_back(Offset1);
+ O2SMap.insert(std::make_pair(Offset2, User));
+ Offsets.push_back(Offset2);
+ if (Offset2 < Offset1) {
+ Base = User;
+ BaseOffset = Offset2;
+ } else {
+ BaseOffset = Offset1;
+ }
+ Cluster = true;
+ }
- SDNode *Chain = 0;
- unsigned NumOps = Node->getNumOperands();
- if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
- Chain = Node->getOperand(NumOps-1).getNode();
- if (!Chain)
- continue;
+ if (!Cluster)
+ return;
- // Look for other loads of the same chain. Find loads that are loading from
- // the same base pointer and different offsets.
- Visited.clear();
- Offsets.clear();
- O2SMap.clear();
- bool Cluster = false;
- SDNode *Base = Node;
- int64_t BaseOffset;
- for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
- I != E; ++I) {
- SDNode *User = *I;
- if (User == Node || !Visited.insert(User))
- continue;
- int64_t Offset1, Offset2;
- if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
- Offset1 == Offset2)
- // FIXME: Should be ok if they addresses are identical. But earlier
- // optimizations really should have eliminated one of the loads.
- continue;
- if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
- Offsets.push_back(Offset1);
- O2SMap.insert(std::make_pair(Offset2, User));
- Offsets.push_back(Offset2);
- if (Offset2 < Offset1) {
- Base = User;
- BaseOffset = Offset2;
- } else {
- BaseOffset = Offset1;
- }
- Cluster = true;
- }
+ // Sort them in increasing order.
+ std::sort(Offsets.begin(), Offsets.end());
- if (!Cluster)
- continue;
+ // Check if the loads are close enough.
+ SmallVector<SDNode*, 4> Loads;
+ unsigned NumLoads = 0;
+ int64_t BaseOff = Offsets[0];
+ SDNode *BaseLoad = O2SMap[BaseOff];
+ Loads.push_back(BaseLoad);
+ for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
+ int64_t Offset = Offsets[i];
+ SDNode *Load = O2SMap[Offset];
+ if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,NumLoads))
+ break; // Stop right here. Ignore loads that are further away.
+ Loads.push_back(Load);
+ ++NumLoads;
+ }
- // Sort them in increasing order.
- std::sort(Offsets.begin(), Offsets.end());
+ if (NumLoads == 0)
+ return;
- // Check if the loads are close enough.
- SmallVector<SDNode*, 4> Loads;
- unsigned NumLoads = 0;
- int64_t BaseOff = Offsets[0];
- SDNode *BaseLoad = O2SMap[BaseOff];
- Loads.push_back(BaseLoad);
- for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
- int64_t Offset = Offsets[i];
- SDNode *Load = O2SMap[Offset];
- if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
- NumLoads))
- break; // Stop right here. Ignore loads that are further away.
- Loads.push_back(Load);
- ++NumLoads;
- }
+ // Cluster loads by adding MVT::Flag outputs and inputs. This also
+ // ensures they are scheduled in order of increasing addresses.
+ SDNode *Lead = Loads[0];
+ AddFlags(Lead, SDValue(0,0), true, DAG);
+ SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
+ for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
+ bool OutFlag = i < e-1;
+ SDNode *Load = Loads[i];
+ AddFlags(Load, InFlag, OutFlag, DAG);
+ if (OutFlag)
+ InFlag = SDValue(Load, Load->getNumValues()-1);
+ ++LoadsClustered;
+ }
+}
- if (NumLoads == 0)
+/// ClusterNodes - Cluster certain nodes which should be scheduled together.
+///
+void ScheduleDAGSDNodes::ClusterNodes() {
+ for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
+ E = DAG->allnodes_end(); NI != E; ++NI) {
+ SDNode *Node = &*NI;
+ if (!Node || !Node->isMachineOpcode())
continue;
- // Cluster loads by adding MVT::Flag outputs and inputs. This also
- // ensure they are scheduled in order of increasing addresses.
- SDNode *Lead = Loads[0];
- AddFlags(Lead, SDValue(0,0), true, DAG);
- SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
- for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
- bool OutFlag = i < e-1;
- SDNode *Load = Loads[i];
- AddFlags(Load, InFlag, OutFlag, DAG);
- if (OutFlag)
- InFlag = SDValue(Load, Load->getNumValues()-1);
- ++LoadsClustered;
- }
+ unsigned Opc = Node->getMachineOpcode();
+ const TargetInstrDesc &TID = TII->get(Opc);
+ if (TID.mayLoad())
+ // Cluster loads from "near" addresses into combined SUnits.
+ ClusterNeighboringLoads(Node);
}
}
@@ -388,8 +389,8 @@
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
- // Cluster loads from "near" addresses into combined SUnits.
- ClusterNeighboringLoads();
+ // Cluster certain nodes which should be scheduled together.
+ ClusterNodes();
// Populate the SUnits array.
BuildSchedUnits();
// Compute all the scheduling dependencies between nodes.
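
The clustering decision itself is easy to see in isolation: sort the offsets found on the shared chain and accept loads only while they stay close to the base load. The 64-byte limit below is an arbitrary stand-in for whatever shouldScheduleLoadsNear decides on a real target:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<long long> Offsets = { 16, 0, 8, 256 };
    std::sort(Offsets.begin(), Offsets.end());
    long long BaseOff = Offsets[0];
    std::vector<long long> Clustered;
    for (long long Off : Offsets) {
      if (Off - BaseOff > 64)
        break;                    // too far away; stop right here
      Clustered.push_back(Off);
    }
    for (long long Off : Clustered)
      std::printf("cluster load at base+%lld\n", Off); // 0, 8, 16
    return 0;
  }
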
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h Fri Jul 2 04:34:51 2010
@@ -108,7 +108,10 @@
private:
/// ClusterNeighboringLoads - Cluster loads from "near" addresses into
/// combined SUnits.
- void ClusterNeighboringLoads();
+ void ClusterNeighboringLoads(SDNode *Node);
+ /// ClusterNodes - Cluster certain nodes which should be scheduled together.
+ ///
+ void ClusterNodes();
/// BuildSchedUnits, AddSchedEdges - Helper functions for BuildSchedGraph.
void BuildSchedUnits();
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAG.cpp Fri Jul 2 04:34:51 2010
@@ -807,7 +807,6 @@
SelectionDAG::~SelectionDAG() {
allnodes_clear();
delete Ordering;
- DbgInfo->clear();
delete DbgInfo;
}
@@ -834,11 +833,8 @@
EntryNode.UseList = 0;
AllNodes.push_back(&EntryNode);
Root = getEntryNode();
- delete Ordering;
- Ordering = new SDNodeOrdering();
+ Ordering->clear();
DbgInfo->clear();
- delete DbgInfo;
- DbgInfo = new SDDbgInfo();
}
SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
@@ -2474,9 +2470,18 @@
VT.getVectorNumElements() ==
Operand.getValueType().getVectorNumElements()) &&
"Vector element count mismatch!");
- if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND)
+
+ if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
+ OpOpcode == ISD::ANY_EXTEND)
// (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
+
+ // (ext (trunc x)) -> x
+ if (OpOpcode == ISD::TRUNCATE) {
+ SDValue OpOp = Operand.getNode()->getOperand(0);
+ if (OpOp.getValueType() == VT)
+ return OpOp;
+ }
break;
case ISD::TRUNCATE:
assert(VT.isInteger() && Operand.getValueType().isInteger() &&
@@ -4430,17 +4435,16 @@
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
-SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
// Check to see if there is no change.
- if (Op == N->getOperand(0)) return InN;
+ if (Op == N->getOperand(0)) return N;
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4452,22 +4456,20 @@
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
// Check to see if there is no change.
if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
- return InN; // No operands changed, just return the input node.
+ return N; // No operands changed, just return the input node.
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4482,32 +4484,31 @@
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, SDValue Op3) {
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
SDValue Ops[] = { Op1, Op2, Op3 };
return UpdateNodeOperands(N, Ops, 3);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4) {
SDValue Ops[] = { Op1, Op2, Op3, Op4 };
return UpdateNodeOperands(N, Ops, 4);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
SDValue Op3, SDValue Op4, SDValue Op5) {
SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
return UpdateNodeOperands(N, Ops, 5);
}
-SDValue SelectionDAG::
-UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) {
- SDNode *N = InN.getNode();
+SDNode *SelectionDAG::
+UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
assert(N->getNumOperands() == NumOps &&
"Update with wrong number of operands");
@@ -4521,12 +4522,12 @@
}
// No operands changed, just return the input node.
- if (!AnyChange) return InN;
+ if (!AnyChange) return N;
// See if the modified node already exists.
void *InsertPos = 0;
if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
- return SDValue(Existing, InN.getResNo());
+ return Existing;
// Nope it doesn't. Remove the node from its current place in the maps.
if (InsertPos)
@@ -4540,7 +4541,7 @@
// If this gets put into a CSE map, add it.
if (InsertPos) CSEMap.InsertNode(N, InsertPos);
- return InN;
+ return N;
}
/// DropOperands - Release the operands and set this node to have
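
The interface change here in a nutshell: UpdateNodeOperands used to hand back an SDValue (node plus result number) and now hands back the node itself, so callers pick the result number explicitly when they need a value. A toy analogue, not the real SelectionDAG types:

  #include <cassert>

  struct Node  { int Id; };
  struct Value { Node *N; unsigned ResNo; };

  static Node *updateOperands(Node *N) {
    // Stand-in for the real CSE-aware update; it may return a different node.
    return N;
  }

  int main() {
    Node N = { 7 };
    // Old style: a Value came back with the result number chosen for you.
    // New style: wrap the returned node and say which result you mean.
    Value V = { updateOperands(&N), 0 };
    assert(V.N->Id == 7 && V.ResNo == 0);
    return 0;
  }
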
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Fri Jul 2 04:34:51 2010
@@ -1602,7 +1602,7 @@
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
- const Value *Callee(I.getCalledValue());
+ const Value *Callee = I.getCalledValue();
if (isa<InlineAsm>(Callee))
visitInlineAsm(&I);
else
@@ -2734,7 +2734,7 @@
// If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
- if (CI->getZExtValue() == 0) continue;
+ if (CI->isZero()) continue;
uint64_t Offs =
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
@@ -4118,8 +4118,7 @@
assert(FuncInfo.MBBMap[I.getParent()]->isLandingPad() &&
"Call to eh.exception not in landing pad!");
SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
- SDValue Ops[1];
- Ops[0] = DAG.getRoot();
+ SDValue Ops[1] = { DAG.getRoot() };
SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
setValue(&I, Op);
DAG.setRoot(Op.getValue(1));
@@ -4367,7 +4366,7 @@
SDValue Arg = getValue(I.getOperand(0));
EVT Ty = Arg.getValueType();
- if (CI->getZExtValue() == 0)
+ if (CI->isZero())
Res = DAG.getConstant(-1ULL, Ty);
else
Res = DAG.getConstant(0, Ty);
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Fri Jul 2 04:34:51 2010
@@ -372,102 +372,6 @@
};
}
-/// TrivialTruncElim - Eliminate some trivial nops that can result from
-/// ShrinkDemandedOps: (trunc (ext n)) -> n.
-static bool TrivialTruncElim(SDValue Op,
- TargetLowering::TargetLoweringOpt &TLO) {
- SDValue N0 = Op.getOperand(0);
- EVT VT = Op.getValueType();
- if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
- N0.getOpcode() == ISD::SIGN_EXTEND ||
- N0.getOpcode() == ISD::ANY_EXTEND) &&
- N0.getOperand(0).getValueType() == VT) {
- return TLO.CombineTo(Op, N0.getOperand(0));
- }
- return false;
-}
-
-/// ShrinkDemandedOps - A late transformation pass that shrink expressions
-/// using TargetLowering::TargetLoweringOpt::ShrinkDemandedOp. It converts
-/// x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
-void SelectionDAGISel::ShrinkDemandedOps() {
- SmallVector<SDNode*, 128> Worklist;
- SmallPtrSet<SDNode*, 128> InWorklist;
-
- // Add all the dag nodes to the worklist.
- Worklist.reserve(CurDAG->allnodes_size());
- for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
- E = CurDAG->allnodes_end(); I != E; ++I) {
- Worklist.push_back(I);
- InWorklist.insert(I);
- }
-
- TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true, true);
- while (!Worklist.empty()) {
- SDNode *N = Worklist.pop_back_val();
- InWorklist.erase(N);
-
- if (N->use_empty() && N != CurDAG->getRoot().getNode()) {
- // Deleting this node may make its operands dead, add them to the worklist
- // if they aren't already there.
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (InWorklist.insert(N->getOperand(i).getNode()))
- Worklist.push_back(N->getOperand(i).getNode());
-
- CurDAG->DeleteNode(N);
- continue;
- }
-
- // Run ShrinkDemandedOp on scalar binary operations.
- if (N->getNumValues() != 1 ||
- !N->getValueType(0).isSimple() || !N->getValueType(0).isInteger())
- continue;
-
- unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
- APInt Demanded = APInt::getAllOnesValue(BitWidth);
- APInt KnownZero, KnownOne;
- if (!TLI.SimplifyDemandedBits(SDValue(N, 0), Demanded,
- KnownZero, KnownOne, TLO) &&
- (N->getOpcode() != ISD::TRUNCATE ||
- !TrivialTruncElim(SDValue(N, 0), TLO)))
- continue;
-
- // Revisit the node.
- assert(!InWorklist.count(N) && "Already in worklist");
- Worklist.push_back(N);
- InWorklist.insert(N);
-
- // Replace the old value with the new one.
- DEBUG(errs() << "\nShrinkDemandedOps replacing ";
- TLO.Old.getNode()->dump(CurDAG);
- errs() << "\nWith: ";
- TLO.New.getNode()->dump(CurDAG);
- errs() << '\n');
-
- if (InWorklist.insert(TLO.New.getNode()))
- Worklist.push_back(TLO.New.getNode());
-
- SDOPsWorkListRemover DeadNodes(Worklist, InWorklist);
- CurDAG->ReplaceAllUsesOfValueWith(TLO.Old, TLO.New, &DeadNodes);
-
- if (!TLO.Old.getNode()->use_empty()) continue;
-
- for (unsigned i = 0, e = TLO.Old.getNode()->getNumOperands();
- i != e; ++i) {
- SDNode *OpNode = TLO.Old.getNode()->getOperand(i).getNode();
- if (OpNode->hasOneUse()) {
- // Add OpNode to the end of the list to revisit.
- DeadNodes.RemoveFromWorklist(OpNode);
- Worklist.push_back(OpNode);
- InWorklist.insert(OpNode);
- }
- }
-
- DeadNodes.RemoveFromWorklist(TLO.Old.getNode());
- CurDAG->DeleteNode(TLO.Old.getNode());
- }
-}
-
void SelectionDAGISel::ComputeLiveOutVRegInfo() {
SmallPtrSet<SDNode*, 128> VisitedNodes;
SmallVector<SDNode*, 128> Worklist;
@@ -533,21 +437,17 @@
BlockName = MF->getFunction()->getNameStr() + ":" +
BB->getBasicBlock()->getNameStr();
- DEBUG(dbgs() << "Initial selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump());
if (ViewDAGCombine1) CurDAG->viewGraph("dag-combine1 input for " + BlockName);
// Run the DAG combiner in pre-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining 1", GroupName);
- CurDAG->Combine(Unrestricted, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining 1", GroupName, TimePassesIsEnabled);
CurDAG->Combine(Unrestricted, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized lowered selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized lowered selection DAG:\n"; CurDAG->dump());
// Second step, hack on the DAG until it only uses operations and types that
// the target supports.
@@ -555,44 +455,36 @@
BlockName);
bool Changed;
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Type Legalization", GroupName);
- Changed = CurDAG->LegalizeTypes();
- } else {
+ {
+ NamedRegionTimer T("Type Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeTypes();
}
- DEBUG(dbgs() << "Type-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Type-legalized selection DAG:\n"; CurDAG->dump());
if (Changed) {
if (ViewDAGCombineLT)
CurDAG->viewGraph("dag-combine-lt input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining after legalize types", GroupName);
- CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining after legalize types", GroupName,
+ TimePassesIsEnabled);
CurDAG->Combine(NoIllegalTypes, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized type-legalized selection DAG:\n";
+ CurDAG->dump());
}
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Vector Legalization", GroupName);
- Changed = CurDAG->LegalizeVectors();
- } else {
+ {
+ NamedRegionTimer T("Vector Legalization", GroupName, TimePassesIsEnabled);
Changed = CurDAG->LegalizeVectors();
}
if (Changed) {
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Type Legalization 2", GroupName);
- CurDAG->LegalizeTypes();
- } else {
+ {
+ NamedRegionTimer T("Type Legalization 2", GroupName, TimePassesIsEnabled);
CurDAG->LegalizeTypes();
}
@@ -600,69 +492,56 @@
CurDAG->viewGraph("dag-combine-lv input for " + BlockName);
// Run the DAG combiner in post-type-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining after legalize vectors", GroupName);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining after legalize vectors", GroupName,
+ TimePassesIsEnabled);
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized vector-legalized selection DAG:\n";
+ CurDAG->dump());
}
if (ViewLegalizeDAGs) CurDAG->viewGraph("legalize input for " + BlockName);
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Legalization", GroupName);
- CurDAG->Legalize(OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Legalization", GroupName, TimePassesIsEnabled);
CurDAG->Legalize(OptLevel);
}
- DEBUG(dbgs() << "Legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Legalized selection DAG:\n"; CurDAG->dump());
if (ViewDAGCombine2) CurDAG->viewGraph("dag-combine2 input for " + BlockName);
// Run the DAG combiner in post-legalize mode.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("DAG Combining 2", GroupName);
- CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
- } else {
+ {
+ NamedRegionTimer T("DAG Combining 2", GroupName, TimePassesIsEnabled);
CurDAG->Combine(NoIllegalOperations, *AA, OptLevel);
}
- DEBUG(dbgs() << "Optimized legalized selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Optimized legalized selection DAG:\n"; CurDAG->dump());
- if (OptLevel != CodeGenOpt::None) {
- ShrinkDemandedOps();
+ if (OptLevel != CodeGenOpt::None)
ComputeLiveOutVRegInfo();
- }
if (ViewISelDAGs) CurDAG->viewGraph("isel input for " + BlockName);
// Third, instruction select all of the operations to machine code, adding the
// code to the MachineBasicBlock.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Selection", GroupName);
- DoInstructionSelection();
- } else {
+ {
+ NamedRegionTimer T("Instruction Selection", GroupName, TimePassesIsEnabled);
DoInstructionSelection();
}
- DEBUG(dbgs() << "Selected selection DAG:\n");
- DEBUG(CurDAG->dump());
+ DEBUG(dbgs() << "Selected selection DAG:\n"; CurDAG->dump());
if (ViewSchedDAGs) CurDAG->viewGraph("scheduler input for " + BlockName);
// Schedule machine code.
ScheduleDAGSDNodes *Scheduler = CreateScheduler();
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Scheduling", GroupName);
- Scheduler->Run(CurDAG, BB, BB->end());
- } else {
+ {
+ NamedRegionTimer T("Instruction Scheduling", GroupName,
+ TimePassesIsEnabled);
Scheduler->Run(CurDAG, BB, BB->end());
}
@@ -670,18 +549,15 @@
// Emit machine code to BB. This can change 'BB' to the last block being
// inserted into.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Creation", GroupName);
- BB = Scheduler->EmitSchedule();
- } else {
+ {
+ NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled);
BB = Scheduler->EmitSchedule();
}
// Free the scheduler state.
- if (TimePassesIsEnabled) {
- NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName);
- delete Scheduler;
- } else {
+ {
+ NamedRegionTimer T("Instruction Scheduling Cleanup", GroupName,
+ TimePassesIsEnabled);
delete Scheduler;
}
@@ -902,8 +778,8 @@
SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) {
DEBUG(dbgs() << "Total amount of phi nodes to update: "
- << FuncInfo->PHINodesToUpdate.size() << "\n");
- DEBUG(for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
+ << FuncInfo->PHINodesToUpdate.size() << "\n";
+ for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i)
dbgs() << "Node " << i << " : ("
<< FuncInfo->PHINodesToUpdate[i].first
<< ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
Modified: llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SelectionDAG/TargetLowering.cpp Fri Jul 2 04:34:51 2010
@@ -1042,7 +1042,7 @@
if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
return true;
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-1 bits are only known if set in both the LHS & RHS.
@@ -1076,7 +1076,7 @@
if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// Output known-0 bits are only known if clear in both the LHS & RHS.
@@ -1101,7 +1101,7 @@
if ((KnownZero2 & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(1));
// If the operation can be done in a smaller type, do so.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
// If all of the unknown bits are known to be zero on one side or the other
@@ -1548,7 +1548,7 @@
KnownOne2, TLO, Depth+1))
return true;
// See if the operation should be performed at a smaller bit width.
- if (TLO.ShrinkOps && TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
+ if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl))
return true;
}
// FALL THROUGH
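
The TLO.ShrinkOps guard removed in the four hunks above only existed for the late ShrinkDemandedOps pass deleted earlier in this patch; with that pass gone, ShrinkDemandedOp is queried unconditionally. As a rough, standalone illustration of when "the operation can be done in a smaller type" (simplified: the real check also requires the narrower type to be legal and the zero-extend back to be free):

    #include <cstdint>

    // Simplified sketch: if every demanded bit of a BitWidth-wide operation
    // lies in the low half, the operation could be performed at half width
    // and the result zero-extended back.
    bool demandedBitsFitInHalfWidth(uint64_t DemandedMask, unsigned BitWidth) {
      unsigned HalfWidth = BitWidth / 2;
      uint64_t LowHalfMask =
          (HalfWidth >= 64) ? ~0ULL : ((1ULL << HalfWidth) - 1);
      return (DemandedMask & ~LowHalfMask) == 0;
    }
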
Removed: llvm/branches/wendling/eh/lib/CodeGen/SimpleHazardRecognizer.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SimpleHazardRecognizer.h?rev=107463&view=auto
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SimpleHazardRecognizer.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SimpleHazardRecognizer.h (removed)
@@ -1,89 +0,0 @@
-//=- llvm/CodeGen/SimpleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements the SimpleHazardRecognizer class, which
-// implements hazard-avoidance heuristics for scheduling, based on the
-// scheduling itineraries specified for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CODEGEN_SIMPLEHAZARDRECOGNIZER_H
-#define LLVM_CODEGEN_SIMPLEHAZARDRECOGNIZER_H
-
-#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
-#include "llvm/CodeGen/ScheduleDAG.h"
-#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetInstrInfo.h"
-
-namespace llvm {
- /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
- /// a coarse classification and attempts to avoid that instructions of
- /// a given class aren't grouped too densely together.
- class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
- /// Class - A simple classification for SUnits.
- enum Class {
- Other, Load, Store
- };
-
- /// Window - The Class values of the most recently issued
- /// instructions.
- Class Window[8];
-
- /// getClass - Classify the given SUnit.
- Class getClass(const SUnit *SU) {
- const MachineInstr *MI = SU->getInstr();
- const TargetInstrDesc &TID = MI->getDesc();
- if (TID.mayLoad())
- return Load;
- if (TID.mayStore())
- return Store;
- return Other;
- }
-
- /// Step - Rotate the existing entries in Window and insert the
- /// given class value in position as the most recent.
- void Step(Class C) {
- std::copy(Window+1, array_endof(Window), Window);
- Window[array_lengthof(Window)-1] = C;
- }
-
- public:
- SimpleHazardRecognizer() : Window() {
- Reset();
- }
-
- virtual HazardType getHazardType(SUnit *SU) {
- Class C = getClass(SU);
- if (C == Other)
- return NoHazard;
- unsigned Score = 0;
- for (unsigned i = 0; i != array_lengthof(Window); ++i)
- if (Window[i] == C)
- Score += i + 1;
- if (Score > array_lengthof(Window) * 2)
- return Hazard;
- return NoHazard;
- }
-
- virtual void Reset() {
- for (unsigned i = 0; i != array_lengthof(Window); ++i)
- Window[i] = Other;
- }
-
- virtual void EmitInstruction(SUnit *SU) {
- Step(getClass(SU));
- }
-
- virtual void AdvanceCycle() {
- Step(Other);
- }
- };
-}
-
-#endif
Modified: llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.cpp Fri Jul 2 04:34:51 2010
@@ -1395,6 +1395,12 @@
return false; // Not coalescable.
}
+ CoalescerPair CP(*tii_, *tri_);
+ if (!CP.setRegisters(CopyMI)) {
+ DEBUG(dbgs() << "\tNot coalescable.\n");
+ return false;
+ }
+
bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
@@ -1722,7 +1728,7 @@
DEBUG(dbgs() << "\tNot profitable!\n");
return false;
}
- } else if (!JoinIntervals(DstInt, SrcInt, Swapped)) {
+ } else if (!JoinIntervals(DstInt, SrcInt, Swapped, CP)) {
// Coalescing failed.
// If definition of source is defined by trivial computation, try
@@ -1919,33 +1925,13 @@
return std::find(V.begin(), V.end(), Val) != V.end();
}
-static bool isValNoDefMove(const MachineInstr *MI, unsigned DR, unsigned SR,
- const TargetInstrInfo *TII,
- const TargetRegisterInfo *TRI) {
- unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
- if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
- ;
- else if (MI->isExtractSubreg()) {
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(1).getReg();
- } else if (MI->isSubregToReg() ||
- MI->isInsertSubreg()) {
- DstReg = MI->getOperand(0).getReg();
- SrcReg = MI->getOperand(2).getReg();
- } else
- return false;
- return (SrcReg == SR || TRI->isSuperRegister(SR, SrcReg)) &&
- (DstReg == DR || TRI->isSuperRegister(DR, DstReg));
-}
-
/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
/// the specified live interval is defined by a copy from the specified
/// register.
-bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
- LiveRange *LR,
- unsigned Reg) {
- unsigned SrcReg = li_->getVNInfoSourceReg(LR->valno);
- if (SrcReg == Reg)
+bool SimpleRegisterCoalescing::RangeIsDefinedByCopy(LiveInterval &li,
+ LiveRange *LR,
+ CoalescerPair &CP) {
+ if (CP.isCoalescable(LR->valno->getCopy()))
return true;
// FIXME: Do isPHIDef and isDefAccurate both need to be tested?
if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) &&
@@ -1954,7 +1940,7 @@
// It's a sub-register live interval, we may not have precise information.
// Re-compute it.
MachineInstr *DefMI = li_->getInstructionFromIndex(LR->start);
- if (DefMI && isValNoDefMove(DefMI, li.reg, Reg, tii_, tri_)) {
+ if (CP.isCoalescable(DefMI)) {
// Cache computed info.
LR->valno->def = LR->start;
LR->valno->setCopy(DefMI);
@@ -1986,7 +1972,8 @@
/// value number and that the RHS is not defined by a copy from this
/// interval. This returns false if the intervals are not joinable, or it
/// joins them and returns true.
-bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
+bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS,
+ CoalescerPair &CP) {
assert(RHS.containsOneValue());
// Some number (potentially more than one) value numbers in the current
@@ -2028,7 +2015,7 @@
if (LHSIt->valno->hasRedefByEC())
return false;
// Copy from the RHS?
- if (!RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg))
+ if (!RangeIsDefinedByCopy(LHS, LHSIt, CP))
return false; // Nope, bail out.
if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
@@ -2072,7 +2059,7 @@
return false;
// Otherwise, if this is a copy from the RHS, mark it as being merged
// in.
- if (RangeIsDefinedByCopyFromReg(LHS, LHSIt, RHS.reg)) {
+ if (RangeIsDefinedByCopy(LHS, LHSIt, CP)) {
if (ValueLiveAt(LHSIt, LHS.end(), RHSIt->valno->def))
// Here is an interesting situation:
// BB1:
@@ -2171,7 +2158,7 @@
/// below to update aliases.
bool
SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
- bool &Swapped) {
+ bool &Swapped, CoalescerPair &CP) {
// Compute the final value assignment, assuming that the live ranges can be
// coalesced.
SmallVector<int, 16> LHSValNoAssignments;
@@ -2252,7 +2239,7 @@
// faster checks to see if the live ranges are coalescable. This joiner
// can't swap the LHS/RHS intervals though.
if (!TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
- return SimpleJoin(LHS, RHS);
+ return SimpleJoin(LHS, RHS, CP);
} else {
RHSValNoInfo = RHSValNoInfo0;
}
@@ -2318,12 +2305,13 @@
// DstReg is known to be a register in the LHS interval. If the src is
// from the RHS interval, we can use its value #.
- if (li_->getVNInfoSourceReg(VNI) != RHS.reg)
+ if (!CP.isCoalescable(VNI->getCopy()))
continue;
// Figure out the value # from the RHS.
LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
- assert(lr && "Cannot find live range");
+ // The copy could be to an aliased physreg.
+ if (!lr) continue;
LHSValsDefinedFromRHS[VNI] = lr->valno;
}
@@ -2337,12 +2325,13 @@
// DstReg is known to be a register in the RHS interval. If the src is
// from the LHS interval, we can use its value #.
- if (li_->getVNInfoSourceReg(VNI) != LHS.reg)
+ if (!CP.isCoalescable(VNI->getCopy()))
continue;
// Figure out the value # from the LHS.
LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
- assert(lr && "Cannot find live range");
+ // The copy could be to an aliased physreg.
+ if (!lr) continue;
RHSValsDefinedFromLHS[VNI] = lr->valno;
}
Modified: llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SimpleRegisterCoalescing.h Fri Jul 2 04:34:51 2010
@@ -111,14 +111,15 @@
/// physreg, this method always canonicalizes DestInt to be it. The output
/// "SrcInt" will not have been modified, so we can use this information
/// below to update aliases.
- bool JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, bool &Swapped);
+ bool JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, bool &Swapped,
+ CoalescerPair &CP);
/// SimpleJoin - Attempt to join the specified interval into this one. The
/// caller of this method must guarantee that the RHS only contains a single
/// value number and that the RHS is not defined by a copy from this
/// interval. This returns false if the intervals are not joinable, or it
/// joins them and returns true.
- bool SimpleJoin(LiveInterval &LHS, LiveInterval &RHS);
+ bool SimpleJoin(LiveInterval &LHS, LiveInterval &RHS, CoalescerPair &CP);
/// Return true if the two specified registers belong to different register
/// classes. The registers may be either phys or virt regs.
@@ -210,11 +211,10 @@
bool ValueLiveAt(LiveInterval::iterator LRItr, LiveInterval::iterator LREnd,
SlotIndex defPoint) const;
- /// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
- /// the specified live interval is defined by a copy from the specified
- /// register.
- bool RangeIsDefinedByCopyFromReg(LiveInterval &li, LiveRange *LR,
- unsigned Reg);
+ /// RangeIsDefinedByCopy - Return true if the specified live range of the
+ /// specified live interval is defined by a coalescable copy.
+ bool RangeIsDefinedByCopy(LiveInterval &li, LiveRange *LR,
+ CoalescerPair &CP);
/// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
Modified: llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/SjLjEHPrepare.cpp Fri Jul 2 04:34:51 2010
@@ -71,7 +71,7 @@
void insertCallSiteStore(Instruction *I, int Number, Value *CallSite);
void markInvokeCallSite(InvokeInst *II, int InvokeNo, Value *CallSite,
SwitchInst *CatchSwitch);
- void splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
+ void splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes);
bool insertSjLjEHSupport(Function &F);
};
} // end anonymous namespace
@@ -182,7 +182,7 @@
/// FIXME: Move this function to a common utility file (Local.cpp?) so
/// both SjLj and LowerInvoke can use it.
void SjLjEHPass::
-splitLiveRangesLiveAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
+splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
// First step, split all critical edges from invoke instructions.
for (unsigned i = 0, e = Invokes.size(); i != e; ++i) {
InvokeInst *II = Invokes[i];
@@ -289,6 +289,9 @@
}
// If we decided we need a spill, do it.
+ // FIXME: Spilling this way is overkill, as it forces all uses of
+ // the value to be reloaded from the stack slot, even those that aren't
+ // in the unwind blocks. We should be more selective.
if (NeedsSpill) {
++NumSpilled;
DemoteRegToStack(*Inst, true);
@@ -364,7 +367,7 @@
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
- splitLiveRangesLiveAcrossInvokes(Invokes);
+ splitLiveRangesAcrossInvokes(Invokes);
BasicBlock *EntryBB = F.begin();
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
Modified: llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TargetInstrInfoImpl.cpp Fri Jul 2 04:34:51 2010
@@ -21,6 +21,7 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PostRAHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -232,11 +233,9 @@
return NewMI;
}
-bool
-TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(const MachineInstr *
- MI,
- AliasAnalysis *
- AA) const {
+bool TargetInstrInfo::
+isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
+ AliasAnalysis *AA) const {
const MachineFunction &MF = *MI->getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetMachine &TM = MF.getTarget();
@@ -316,3 +315,9 @@
// Everything checked out.
return true;
}
+
+// Default implementation of CreateTargetPostRAHazardRecognizer.
+ScheduleHazardRecognizer *TargetInstrInfoImpl::
+CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
+ return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
+}
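
With this default in place, a target only overrides the hook when it wants something other than the itinerary-driven recognizer. A hypothetical override for illustration (MyTargetInstrInfo is a made-up class; the constructor call mirrors the default above):

    ScheduleHazardRecognizer *MyTargetInstrInfo::
    CreateTargetPostRAHazardRecognizer(const InstrItineraryData &II) const {
      // A target could build its own recognizer here; reusing the
      // itinerary-driven default is the simplest choice.
      return new PostRAHazardRecognizer(II);
    }
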
Modified: llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/CodeGen/TwoAddressInstructionPass.cpp Fri Jul 2 04:34:51 2010
@@ -33,6 +33,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -1104,7 +1105,12 @@
}
}
}
-
+
+ // Schedule the source copy / remat inserted to form two-address
+ // instruction. FIXME: Does it matter the distance map may not be
+ // accurate after it's scheduled?
+ TII->scheduleTwoAddrSource(prior(mi), mi, *TRI);
+
MadeChange = true;
DEBUG(dbgs() << "\t\trewrite to:\t" << *mi);
@@ -1173,60 +1179,110 @@
// If there are no other uses than extract_subreg which feed into
// the reg_sequence, then we might be able to coalesce them.
bool CanCoalesce = true;
- SmallVector<unsigned, 4> SubIndices;
+ SmallVector<unsigned, 4> SrcSubIndices, DstSubIndices;
for (MachineRegisterInfo::use_nodbg_iterator
UI = MRI->use_nodbg_begin(SrcReg),
UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
MachineInstr *UseMI = &*UI;
- // FIXME: For now require that the destination subregs match the subregs
- // being extracted.
if (!UseMI->isExtractSubreg() ||
UseMI->getOperand(0).getReg() != DstReg ||
- UseMI->getOperand(0).getSubReg() != UseMI->getOperand(2).getImm() ||
UseMI->getOperand(1).getSubReg() != 0) {
CanCoalesce = false;
break;
}
- SubIndices.push_back(UseMI->getOperand(2).getImm());
+ SrcSubIndices.push_back(UseMI->getOperand(2).getImm());
+ DstSubIndices.push_back(UseMI->getOperand(0).getSubReg());
}
- if (!CanCoalesce || SubIndices.size() < 2)
+ if (!CanCoalesce || SrcSubIndices.size() < 2)
continue;
- // FIXME: For now require that the src and dst registers are in the
- // same regclass.
- if (MRI->getRegClass(SrcReg) != MRI->getRegClass(DstReg))
+ // Check that the source subregisters can be combined.
+ std::sort(SrcSubIndices.begin(), SrcSubIndices.end());
+ unsigned NewSrcSubIdx = 0;
+ if (!TRI->canCombineSubRegIndices(MRI->getRegClass(SrcReg), SrcSubIndices,
+ NewSrcSubIdx))
continue;
- std::sort(SubIndices.begin(), SubIndices.end());
- unsigned NewSubIdx = 0;
- if (TRI->canCombineSubRegIndices(MRI->getRegClass(SrcReg), SubIndices,
- NewSubIdx)) {
- bool Proceed = true;
- if (NewSubIdx)
- for (MachineRegisterInfo::reg_nodbg_iterator
- RI = MRI->reg_nodbg_begin(SrcReg), RE = MRI->reg_nodbg_end();
- RI != RE; ) {
- MachineOperand &MO = RI.getOperand();
- ++RI;
- // FIXME: If the sub-registers do not combine to the whole
- // super-register, i.e. NewSubIdx != 0, and any of the use has a
- // sub-register index, then abort the coalescing attempt.
- if (MO.getSubReg()) {
- Proceed = false;
- break;
- }
- }
- if (Proceed)
- for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
- RE = MRI->reg_end(); RI != RE; ) {
- MachineOperand &MO = RI.getOperand();
- ++RI;
- MO.setReg(DstReg);
- if (NewSubIdx)
- MO.setSubReg(NewSubIdx);
- }
+ // Check that the destination subregisters can also be combined.
+ std::sort(DstSubIndices.begin(), DstSubIndices.end());
+ unsigned NewDstSubIdx = 0;
+ if (!TRI->canCombineSubRegIndices(MRI->getRegClass(DstReg), DstSubIndices,
+ NewDstSubIdx))
+ continue;
+
+ // If neither source nor destination can be combined to the full register,
+ // just give up. This could be improved if it ever matters.
+ if (NewSrcSubIdx != 0 && NewDstSubIdx != 0)
+ continue;
+
+ // Now that we know that all the uses are extract_subregs and that those
+ // subregs can somehow be combined, scan all the extract_subregs again to
+ // make sure the subregs are in the right order and can be composed.
+ MachineInstr *SomeMI = 0;
+ CanCoalesce = true;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ++UI) {
+ MachineInstr *UseMI = &*UI;
+ assert(UseMI->isExtractSubreg());
+ unsigned DstSubIdx = UseMI->getOperand(0).getSubReg();
+ unsigned SrcSubIdx = UseMI->getOperand(2).getImm();
+ assert(DstSubIdx != 0 && "missing subreg from RegSequence elimination");
+ if ((NewDstSubIdx == 0 &&
+ TRI->composeSubRegIndices(NewSrcSubIdx, DstSubIdx) != SrcSubIdx) ||
+ (NewSrcSubIdx == 0 &&
+ TRI->composeSubRegIndices(NewDstSubIdx, SrcSubIdx) != DstSubIdx)) {
+ CanCoalesce = false;
+ break;
}
+ // Keep track of one of the uses.
+ SomeMI = UseMI;
+ }
+ if (!CanCoalesce)
+ continue;
+
+ // Insert a copy or an extract to replace the original extracts.
+ MachineBasicBlock::iterator InsertLoc = SomeMI;
+ if (NewSrcSubIdx) {
+ // Insert an extract subreg.
+ BuildMI(*SomeMI->getParent(), InsertLoc, SomeMI->getDebugLoc(),
+ TII->get(TargetOpcode::EXTRACT_SUBREG), DstReg)
+ .addReg(SrcReg).addImm(NewSrcSubIdx);
+ } else if (NewDstSubIdx) {
+ // Do a subreg insertion.
+ BuildMI(*SomeMI->getParent(), InsertLoc, SomeMI->getDebugLoc(),
+ TII->get(TargetOpcode::INSERT_SUBREG), DstReg)
+ .addReg(DstReg).addReg(SrcReg).addImm(NewDstSubIdx);
+ } else {
+ // Insert a copy.
+ bool Emitted =
+ TII->copyRegToReg(*SomeMI->getParent(), InsertLoc, DstReg, SrcReg,
+ MRI->getRegClass(DstReg), MRI->getRegClass(SrcReg),
+ SomeMI->getDebugLoc());
+ (void)Emitted;
+ }
+ MachineBasicBlock::iterator CopyMI = prior(InsertLoc);
+
+ // Remove all the old extract instructions.
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI->use_nodbg_begin(SrcReg),
+ UE = MRI->use_nodbg_end(); UI != UE; ) {
+ MachineInstr *UseMI = &*UI;
+ ++UI;
+ if (UseMI == CopyMI)
+ continue;
+ assert(UseMI->isExtractSubreg());
+ // Move any kills to the new copy or extract instruction.
+ if (UseMI->getOperand(1).isKill()) {
+ MachineOperand *KillMO = CopyMI->findRegisterUseOperand(SrcReg);
+ KillMO->setIsKill();
+ if (LV)
+ // Update live variables
+ LV->replaceKillInstruction(SrcReg, UseMI, &*CopyMI);
+ }
+ UseMI->eraseFromParent();
+ }
}
}
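
To restate the three-way replacement decision added above: once both index sets are known to be combinable, at most one side may still carry a sub-register index, and that picks the instruction that replaces the extracts. A self-contained restatement of just that decision (the enum and function are illustrative, not LLVM code):

    // Which instruction replaces the original extract_subregs, given the
    // combined sub-register indices computed for each side (0 = full register).
    enum RewriteKind { FullCopy, ExtractSub, InsertSub, GiveUp };

    RewriteKind pickRewrite(unsigned NewSrcSubIdx, unsigned NewDstSubIdx) {
      if (NewSrcSubIdx && NewDstSubIdx)
        return GiveUp;       // neither side combines to the full register
      if (NewSrcSubIdx)
        return ExtractSub;   // read a sub-register of the source
      if (NewDstSubIdx)
        return InsertSub;    // write into a sub-register of the destination
      return FullCopy;       // both sides cover their full registers
    }
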
Modified: llvm/branches/wendling/eh/lib/ExecutionEngine/Interpreter/Execution.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/ExecutionEngine/Interpreter/Execution.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/ExecutionEngine/Interpreter/Execution.cpp (original)
+++ llvm/branches/wendling/eh/lib/ExecutionEngine/Interpreter/Execution.cpp Fri Jul 2 04:34:51 2010
@@ -591,7 +591,7 @@
ECStack.pop_back();
if (ECStack.empty()) { // Finished main. Put result into exit code...
- if (RetTy && RetTy->isIntegerTy()) { // Nonvoid return type?
+ if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
ExitValue = Result; // Capture the exit value of the program
} else {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
Modified: llvm/branches/wendling/eh/lib/MC/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/CMakeLists.txt?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/lib/MC/CMakeLists.txt Fri Jul 2 04:34:51 2010
@@ -14,6 +14,7 @@
MCLoggingStreamer.cpp
MCMachOStreamer.cpp
MCNullStreamer.cpp
+ MCObjectStreamer.cpp
MCObjectWriter.cpp
MCSection.cpp
MCSectionCOFF.cpp
Modified: llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCAssembler.cpp Fri Jul 2 04:34:51 2010
@@ -308,24 +308,23 @@
return !B_Base && BaseSymbol == A_Base;
}
-bool MCAssembler::isSymbolLinkerVisible(const MCSymbolData *SD) const {
+bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
// Non-temporary labels should always be visible to the linker.
- if (!SD->getSymbol().isTemporary())
+ if (!Symbol.isTemporary())
return true;
// Absolute temporary labels are never visible.
- if (!SD->getFragment())
+ if (!Symbol.isInSection())
return false;
// Otherwise, check if the section requires symbols even for temporary labels.
- return getBackend().doesSectionRequireSymbols(
- SD->getFragment()->getParent()->getSection());
+ return getBackend().doesSectionRequireSymbols(Symbol.getSection());
}
const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout,
const MCSymbolData *SD) const {
// Linker visible symbols define atoms.
- if (isSymbolLinkerVisible(SD))
+ if (isSymbolLinkerVisible(SD->getSymbol()))
return SD;
// Absolute and undefined symbols have no defining atom.
Modified: llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MCMachOStreamer.cpp Fri Jul 2 04:34:51 2010
@@ -14,6 +14,7 @@
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCMachOSymbolFlags.h"
@@ -25,21 +26,13 @@
namespace {
-class MCMachOStreamer : public MCStreamer {
-
-private:
- MCAssembler Assembler;
- MCSectionData *CurSectionData;
-
- /// Track the current atom for each section.
- DenseMap<const MCSectionData*, MCSymbolData*> CurrentAtomMap;
-
+class MCMachOStreamer : public MCObjectStreamer {
private:
MCFragment *getCurrentFragment() const {
- assert(CurSectionData && "No current section!");
+ assert(getCurrentSectionData() && "No current section!");
- if (!CurSectionData->empty())
- return &CurSectionData->getFragmentList().back();
+ if (!getCurrentSectionData()->empty())
+ return &getCurrentSectionData()->getFragmentList().back();
return 0;
}
@@ -49,28 +42,17 @@
MCDataFragment *getOrCreateDataFragment() const {
MCDataFragment *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment());
if (!F)
- F = createDataFragment();
+ F = new MCDataFragment(getCurrentSectionData());
return F;
}
- /// Create a new data fragment in the current section.
- MCDataFragment *createDataFragment() const {
- MCDataFragment *DF = new MCDataFragment(CurSectionData);
- DF->setAtom(CurrentAtomMap.lookup(CurSectionData));
- return DF;
- }
-
void EmitInstToFragment(const MCInst &Inst);
void EmitInstToData(const MCInst &Inst);
public:
MCMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
- raw_ostream &_OS, MCCodeEmitter *_Emitter)
- : MCStreamer(Context), Assembler(Context, TAB, *_Emitter, _OS),
- CurSectionData(0) {}
- ~MCMachOStreamer() {}
-
- MCAssembler &getAssembler() { return Assembler; }
+ raw_ostream &OS, MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter) {}
const MCExpr *AddValueSymbols(const MCExpr *Value) {
switch (Value->getKind()) {
@@ -86,7 +68,7 @@
}
case MCExpr::SymbolRef:
- Assembler.getOrCreateSymbolData(
+ getAssembler().getOrCreateSymbolData(
cast<MCSymbolRefExpr>(Value)->getSymbol());
break;
@@ -101,7 +83,6 @@
/// @name MCStreamer Interface
/// @{
- virtual void SwitchSection(const MCSection *Section);
virtual void EmitLabel(MCSymbol *Symbol);
virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
@@ -152,6 +133,7 @@
}
virtual void EmitInstruction(const MCInst &Inst);
+
virtual void Finish();
/// @}
@@ -159,38 +141,25 @@
} // end anonymous namespace.
-void MCMachOStreamer::SwitchSection(const MCSection *Section) {
- assert(Section && "Cannot switch to a null section!");
-
- // If already in this section, then this is a noop.
- if (Section == CurSection) return;
-
- CurSection = Section;
- CurSectionData = &Assembler.getOrCreateSectionData(*Section);
-}
-
void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) {
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
assert(!Symbol->isVariable() && "Cannot emit a variable symbol!");
assert(CurSection && "Cannot emit before setting section!");
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ Symbol->setSection(*CurSection);
- // Update the current atom map, if necessary.
- bool MustCreateFragment = false;
- if (Assembler.isSymbolLinkerVisible(&SD)) {
- CurrentAtomMap[CurSectionData] = &SD;
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
- // We have to create a new fragment, fragments cannot span atoms.
- MustCreateFragment = true;
- }
+ // We have to create a new fragment if this is an atom defining symbol,
+ // fragments cannot span atoms.
+ if (getAssembler().isSymbolLinkerVisible(SD.getSymbol()))
+ new MCDataFragment(getCurrentSectionData());
// FIXME: This is wasteful, we don't necessarily need to create a data
// fragment. Instead, we should mark the symbol as pointing into the data
// fragment if it exists, otherwise we should just queue the label and set its
// fragment pointer when we emit the next fragment.
- MCDataFragment *F =
- MustCreateFragment ? createDataFragment() : getOrCreateDataFragment();
+ MCDataFragment *F = getOrCreateDataFragment();
assert(!SD.getFragment() && "Unexpected fragment on symbol data!");
SD.setFragment(F);
SD.setOffset(F->getContents().size());
@@ -203,14 +172,12 @@
// FIXME: Cleanup this code, these bits should be emitted based on semantic
// properties, not on the order of definition, etc.
SD.setFlags(SD.getFlags() & ~SF_ReferenceTypeMask);
-
- Symbol->setSection(*CurSection);
}
void MCMachOStreamer::EmitAssemblerFlag(MCAssemblerFlag Flag) {
switch (Flag) {
case MCAF_SubsectionsViaSymbols:
- Assembler.setSubsectionsViaSymbols(true);
+ getAssembler().setSubsectionsViaSymbols(true);
return;
}
@@ -219,7 +186,7 @@
void MCMachOStreamer::EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) {
// FIXME: Lift context changes into super class.
- Assembler.getOrCreateSymbolData(*Symbol);
+ getAssembler().getOrCreateSymbolData(*Symbol);
Symbol->setVariableValue(AddValueSymbols(Value));
}
@@ -232,15 +199,15 @@
// important for matching the string table that 'as' generates.
IndirectSymbolData ISD;
ISD.Symbol = Symbol;
- ISD.SectionData = CurSectionData;
- Assembler.getIndirectSymbols().push_back(ISD);
+ ISD.SectionData = getCurrentSectionData();
+ getAssembler().getIndirectSymbols().push_back(ISD);
return;
}
// Adding a symbol attribute always introduces the symbol, note that an
// important side effect of calling getOrCreateSymbolData here is to register
// the symbol with the assembler.
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
// The implementation of symbol attributes is designed to match 'as', but it
// leaves much to be desired. It doesn't really make sense to arbitrarily add and
@@ -313,7 +280,8 @@
// Encode the 'desc' value into the lowest implementation defined bits.
assert(DescValue == (DescValue & SF_DescFlagsMask) &&
"Invalid .desc value!");
- Assembler.getOrCreateSymbolData(*Symbol).setFlags(DescValue&SF_DescFlagsMask);
+ getAssembler().getOrCreateSymbolData(*Symbol).setFlags(
+ DescValue & SF_DescFlagsMask);
}
void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
@@ -321,14 +289,14 @@
// FIXME: Darwin 'as' does appear to allow redef of a .comm by itself.
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
SD.setExternal(true);
SD.setCommon(Size, ByteAlignment);
}
void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol,
unsigned Size, unsigned ByteAlignment) {
- MCSectionData &SectData = Assembler.getOrCreateSectionData(*Section);
+ MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section);
// The symbol may not be present, which only creates the section.
if (!Symbol)
@@ -338,7 +306,7 @@
assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
- MCSymbolData &SD = Assembler.getOrCreateSymbolData(*Symbol);
+ MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol);
// Emit an align fragment if necessary.
if (ByteAlignment != 1)
@@ -346,8 +314,6 @@
MCFragment *F = new MCFillFragment(0, 0, Size, &SectData);
SD.setFragment(F);
- if (Assembler.isSymbolLinkerVisible(&SD))
- F->setAtom(&SD);
Symbol->setSection(*Section);
@@ -391,13 +357,12 @@
unsigned MaxBytesToEmit) {
if (MaxBytesToEmit == 0)
MaxBytesToEmit = ByteAlignment;
- MCFragment *F = new MCAlignFragment(ByteAlignment, Value, ValueSize,
- MaxBytesToEmit, CurSectionData);
- F->setAtom(CurrentAtomMap.lookup(CurSectionData));
+ new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit,
+ getCurrentSectionData());
// Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > CurSectionData->getAlignment())
- CurSectionData->setAlignment(ByteAlignment);
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
}
void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment,
@@ -405,24 +370,21 @@
if (MaxBytesToEmit == 0)
MaxBytesToEmit = ByteAlignment;
MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit,
- CurSectionData);
+ getCurrentSectionData());
F->setEmitNops(true);
- F->setAtom(CurrentAtomMap.lookup(CurSectionData));
// Update the maximum alignment on the current section if necessary.
- if (ByteAlignment > CurSectionData->getAlignment())
- CurSectionData->setAlignment(ByteAlignment);
+ if (ByteAlignment > getCurrentSectionData()->getAlignment())
+ getCurrentSectionData()->setAlignment(ByteAlignment);
}
void MCMachOStreamer::EmitValueToOffset(const MCExpr *Offset,
unsigned char Value) {
- MCFragment *F = new MCOrgFragment(*Offset, Value, CurSectionData);
- F->setAtom(CurrentAtomMap.lookup(CurSectionData));
+ new MCOrgFragment(*Offset, Value, getCurrentSectionData());
}
void MCMachOStreamer::EmitInstToFragment(const MCInst &Inst) {
- MCInstFragment *IF = new MCInstFragment(Inst, CurSectionData);
- IF->setAtom(CurrentAtomMap.lookup(CurSectionData));
+ MCInstFragment *IF = new MCInstFragment(Inst, getCurrentSectionData());
// Add the fixups and data.
//
@@ -431,7 +393,7 @@
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- Assembler.getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
VecOS.flush();
IF->getCode() = Code;
@@ -444,7 +406,7 @@
SmallVector<MCFixup, 4> Fixups;
SmallString<256> Code;
raw_svector_ostream VecOS(Code);
- Assembler.getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
+ getAssembler().getEmitter().EncodeInstruction(Inst, VecOS, Fixups);
VecOS.flush();
// Add the fixups and data.
@@ -461,21 +423,21 @@
if (Inst.getOperand(i).isExpr())
AddValueSymbols(Inst.getOperand(i).getExpr());
- CurSectionData->setHasInstructions(true);
+ getCurrentSectionData()->setHasInstructions(true);
// If this instruction doesn't need relaxation, just emit it as data.
- if (!Assembler.getBackend().MayNeedRelaxation(Inst)) {
+ if (!getAssembler().getBackend().MayNeedRelaxation(Inst)) {
EmitInstToData(Inst);
return;
}
// Otherwise, if we are relaxing everything, relax the instruction as much as
// possible and emit it as data.
- if (Assembler.getRelaxAll()) {
+ if (getAssembler().getRelaxAll()) {
MCInst Relaxed;
- Assembler.getBackend().RelaxInstruction(Inst, Relaxed);
- while (Assembler.getBackend().MayNeedRelaxation(Relaxed))
- Assembler.getBackend().RelaxInstruction(Relaxed, Relaxed);
+ getAssembler().getBackend().RelaxInstruction(Inst, Relaxed);
+ while (getAssembler().getBackend().MayNeedRelaxation(Relaxed))
+ getAssembler().getBackend().RelaxInstruction(Relaxed, Relaxed);
EmitInstToData(Relaxed);
return;
}
@@ -485,7 +447,36 @@
}
void MCMachOStreamer::Finish() {
- Assembler.Finish();
+ // We have to set the fragment atom associations so we can relax properly for
+ // Mach-O.
+
+ // First, scan the symbol table to build a lookup table from fragments to
+ // defining symbols.
+ DenseMap<const MCFragment*, MCSymbolData*> DefiningSymbolMap;
+ for (MCAssembler::symbol_iterator it = getAssembler().symbol_begin(),
+ ie = getAssembler().symbol_end(); it != ie; ++it) {
+ if (getAssembler().isSymbolLinkerVisible(it->getSymbol()) &&
+ it->getFragment()) {
+ // An atom defining symbol should never be internal to a fragment.
+ assert(it->getOffset() == 0 && "Invalid offset in atom defining symbol!");
+ DefiningSymbolMap[it->getFragment()] = it;
+ }
+ }
+
+ // Set the fragment atom associations by tracking the last seen atom defining
+ // symbol.
+ for (MCAssembler::iterator it = getAssembler().begin(),
+ ie = getAssembler().end(); it != ie; ++it) {
+ MCSymbolData *CurrentAtom = 0;
+ for (MCSectionData::iterator it2 = it->begin(),
+ ie2 = it->end(); it2 != ie2; ++it2) {
+ if (MCSymbolData *SD = DefiningSymbolMap.lookup(it2))
+ CurrentAtom = SD;
+ it2->setAtom(CurrentAtom);
+ }
+ }
+
+ this->MCObjectStreamer::Finish();
}
MCStreamer *llvm::createMachOStreamer(MCContext &Context, TargetAsmBackend &TAB,
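
The new Finish() above assigns atoms in two passes: first map each fragment to its linker-visible defining symbol, then walk every section's fragments carrying the most recently seen atom forward. A self-contained sketch of that sweep over toy types (the struct names are illustrative, not the MC classes):

    #include <map>
    #include <string>
    #include <vector>

    struct Fragment { std::string Atom; };   // stand-in for MCFragment
    typedef std::vector<Fragment *> Section; // stand-in for MCSectionData

    // Carry the last seen atom-defining symbol forward across a section's
    // fragments, mirroring the loop added to MCMachOStreamer::Finish().
    void assignAtoms(Section &Sec,
                     const std::map<Fragment *, std::string> &DefiningSymbol) {
      std::string CurrentAtom;                // empty = no atom seen yet
      for (size_t i = 0, e = Sec.size(); i != e; ++i) {
        std::map<Fragment *, std::string>::const_iterator It =
            DefiningSymbol.find(Sec[i]);
        if (It != DefiningSymbol.end())
          CurrentAtom = It->second;           // this fragment starts a new atom
        Sec[i]->Atom = CurrentAtom;           // later fragments belong to it
      }
    }
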
Modified: llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp (original)
+++ llvm/branches/wendling/eh/lib/MC/MachObjectWriter.cpp Fri Jul 2 04:34:51 2010
@@ -738,6 +738,45 @@
Relocations[Fragment->getParent()].push_back(MRE);
}
+ void RecordTLVPRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) {
+ assert(Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP &&
+ !Is64Bit &&
+ "Should only be called with a 32-bit TLVP relocation!");
+
+ // If this is a subtraction then we're pcrel.
+ unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+ uint32_t Value = Layout.getFragmentOffset(Fragment)+Fixup.getOffset();
+ unsigned IsPCRel = 0;
+
+ // Get the symbol data.
+ MCSymbolData *SD_A = &Asm.getSymbolData(Target.getSymA()->getSymbol());
+ unsigned Index = SD_A->getIndex();
+
+ // We're only going to have a second symbol in pic mode and it'll be a
+ // subtraction from the picbase. For 32-bit pic the addend is the difference
+ // between the picbase and the next address. For 32-bit static the addend
+ // is zero.
+ if (Target.getSymB()) {
+ IsPCRel = 1;
+ } else {
+ FixedValue = 0;
+ }
+
+ // struct relocation_info (8 bytes)
+ MachRelocationEntry MRE;
+ MRE.Word0 = Value;
+ MRE.Word1 = ((Index << 0) |
+ (IsPCRel << 24) |
+ (Log2Size << 25) |
+ (1 << 27) | // Extern
+ (RIT_TLV << 28)); // Type
+ Relocations[Fragment->getParent()].push_back(MRE);
+ }
+
void RecordRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
const MCFragment *Fragment, const MCFixup &Fixup,
MCValue Target, uint64_t &FixedValue) {
@@ -749,6 +788,12 @@
unsigned IsPCRel = isFixupKindPCRel(Fixup.getKind());
unsigned Log2Size = getFixupKindLog2Size(Fixup.getKind());
+ // If this is a 32-bit TLVP reloc it's handled a bit differently.
+ if (Target.getSymA()->getKind() == MCSymbolRefExpr::VK_TLVP) {
+ RecordTLVPRelocation(Asm, Layout, Fragment, Fixup, Target, FixedValue);
+ return;
+ }
+
// If this is a difference or a defined symbol plus an offset, then we need
// a scattered relocation entry.
// Differences always require scattered relocations.
@@ -898,7 +943,7 @@
const MCSymbol &Symbol = it->getSymbol();
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(it))
+ if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
continue;
if (!it->isExternal() && !Symbol.isUndefined())
@@ -934,7 +979,7 @@
const MCSymbol &Symbol = it->getSymbol();
// Ignore non-linker visible symbols.
- if (!Asm.isSymbolLinkerVisible(it))
+ if (!Asm.isSymbolLinkerVisible(it->getSymbol()))
continue;
if (it->isExternal() || Symbol.isUndefined())
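
The TLVP path above packs the second word of a Mach-O relocation_info entry with the same field layout the existing paths use. A standalone sketch of that packing (field widths follow the shifts in the hunk; the function name is made up):

    #include <cstdint>

    // Mach-O relocation_info word 1 layout:
    //   bits  0-23  symbol/section index
    //   bit     24  pc-relative flag
    //   bits 25-26  log2 of the relocation size
    //   bit     27  extern flag
    //   bits 28-31  relocation type (e.g. RIT_TLV for TLV pointer relocs)
    uint32_t packRelocWord1(uint32_t Index, bool IsPCRel, unsigned Log2Size,
                            bool Extern, unsigned Type) {
      return (Index & 0xFFFFFFu) |
             ((IsPCRel ? 1u : 0u) << 24) |
             ((Log2Size & 0x3u) << 25) |
             ((Extern ? 1u : 0u) << 27) |
             ((Type & 0xFu) << 28);
    }
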
Modified: llvm/branches/wendling/eh/lib/Support/Dwarf.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/Dwarf.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/Dwarf.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/Dwarf.cpp Fri Jul 2 04:34:51 2010
@@ -86,8 +86,8 @@
///
const char *llvm::dwarf::ChildrenString(unsigned Children) {
switch (Children) {
- case DW_CHILDREN_no: return "CHILDREN_no";
- case DW_CHILDREN_yes: return "CHILDREN_yes";
+ case DW_CHILDREN_no: return "DW_CHILDREN_no";
+ case DW_CHILDREN_yes: return "DW_CHILDREN_yes";
}
return 0;
}
@@ -207,27 +207,27 @@
///
const char *llvm::dwarf::FormEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_FORM_addr: return "FORM_addr";
- case DW_FORM_block2: return "FORM_block2";
- case DW_FORM_block4: return "FORM_block4";
- case DW_FORM_data2: return "FORM_data2";
- case DW_FORM_data4: return "FORM_data4";
- case DW_FORM_data8: return "FORM_data8";
- case DW_FORM_string: return "FORM_string";
- case DW_FORM_block: return "FORM_block";
- case DW_FORM_block1: return "FORM_block1";
- case DW_FORM_data1: return "FORM_data1";
- case DW_FORM_flag: return "FORM_flag";
- case DW_FORM_sdata: return "FORM_sdata";
- case DW_FORM_strp: return "FORM_strp";
- case DW_FORM_udata: return "FORM_udata";
- case DW_FORM_ref_addr: return "FORM_ref_addr";
- case DW_FORM_ref1: return "FORM_ref1";
- case DW_FORM_ref2: return "FORM_ref2";
- case DW_FORM_ref4: return "FORM_ref4";
- case DW_FORM_ref8: return "FORM_ref8";
- case DW_FORM_ref_udata: return "FORM_ref_udata";
- case DW_FORM_indirect: return "FORM_indirect";
+ case DW_FORM_addr: return "DW_FORM_addr";
+ case DW_FORM_block2: return "DW_FORM_block2";
+ case DW_FORM_block4: return "DW_FORM_block4";
+ case DW_FORM_data2: return "DW_FORM_data2";
+ case DW_FORM_data4: return "DW_FORM_data4";
+ case DW_FORM_data8: return "DW_FORM_data8";
+ case DW_FORM_string: return "DW_FORM_string";
+ case DW_FORM_block: return "DW_FORM_block";
+ case DW_FORM_block1: return "DW_FORM_block1";
+ case DW_FORM_data1: return "DW_FORM_data1";
+ case DW_FORM_flag: return "DW_FORM_flag";
+ case DW_FORM_sdata: return "DW_FORM_sdata";
+ case DW_FORM_strp: return "DW_FORM_strp";
+ case DW_FORM_udata: return "DW_FORM_udata";
+ case DW_FORM_ref_addr: return "DW_FORM_ref_addr";
+ case DW_FORM_ref1: return "DW_FORM_ref1";
+ case DW_FORM_ref2: return "DW_FORM_ref2";
+ case DW_FORM_ref4: return "DW_FORM_ref4";
+ case DW_FORM_ref8: return "DW_FORM_ref8";
+ case DW_FORM_ref_udata: return "DW_FORM_ref_udata";
+ case DW_FORM_indirect: return "DW_FORM_indirect";
}
return 0;
}
@@ -236,72 +236,159 @@
/// encoding.
const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_OP_addr: return "OP_addr";
- case DW_OP_deref: return "OP_deref";
- case DW_OP_const1u: return "OP_const1u";
- case DW_OP_const1s: return "OP_const1s";
- case DW_OP_const2u: return "OP_const2u";
- case DW_OP_const2s: return "OP_const2s";
- case DW_OP_const4u: return "OP_const4u";
- case DW_OP_const4s: return "OP_const4s";
- case DW_OP_const8u: return "OP_const8u";
- case DW_OP_const8s: return "OP_const8s";
- case DW_OP_constu: return "OP_constu";
- case DW_OP_consts: return "OP_consts";
- case DW_OP_dup: return "OP_dup";
- case DW_OP_drop: return "OP_drop";
- case DW_OP_over: return "OP_over";
- case DW_OP_pick: return "OP_pick";
- case DW_OP_swap: return "OP_swap";
- case DW_OP_rot: return "OP_rot";
- case DW_OP_xderef: return "OP_xderef";
- case DW_OP_abs: return "OP_abs";
- case DW_OP_and: return "OP_and";
- case DW_OP_div: return "OP_div";
- case DW_OP_minus: return "OP_minus";
- case DW_OP_mod: return "OP_mod";
- case DW_OP_mul: return "OP_mul";
- case DW_OP_neg: return "OP_neg";
- case DW_OP_not: return "OP_not";
- case DW_OP_or: return "OP_or";
- case DW_OP_plus: return "OP_plus";
- case DW_OP_plus_uconst: return "OP_plus_uconst";
- case DW_OP_shl: return "OP_shl";
- case DW_OP_shr: return "OP_shr";
- case DW_OP_shra: return "OP_shra";
- case DW_OP_xor: return "OP_xor";
- case DW_OP_skip: return "OP_skip";
- case DW_OP_bra: return "OP_bra";
- case DW_OP_eq: return "OP_eq";
- case DW_OP_ge: return "OP_ge";
- case DW_OP_gt: return "OP_gt";
- case DW_OP_le: return "OP_le";
- case DW_OP_lt: return "OP_lt";
- case DW_OP_ne: return "OP_ne";
- case DW_OP_lit0: return "OP_lit0";
- case DW_OP_lit1: return "OP_lit1";
- case DW_OP_lit31: return "OP_lit31";
- case DW_OP_reg0: return "OP_reg0";
- case DW_OP_reg1: return "OP_reg1";
- case DW_OP_reg31: return "OP_reg31";
- case DW_OP_breg0: return "OP_breg0";
- case DW_OP_breg1: return "OP_breg1";
- case DW_OP_breg31: return "OP_breg31";
- case DW_OP_regx: return "OP_regx";
- case DW_OP_fbreg: return "OP_fbreg";
- case DW_OP_bregx: return "OP_bregx";
- case DW_OP_piece: return "OP_piece";
- case DW_OP_deref_size: return "OP_deref_size";
- case DW_OP_xderef_size: return "OP_xderef_size";
- case DW_OP_nop: return "OP_nop";
- case DW_OP_push_object_address: return "OP_push_object_address";
- case DW_OP_call2: return "OP_call2";
- case DW_OP_call4: return "OP_call4";
- case DW_OP_call_ref: return "OP_call_ref";
- case DW_OP_form_tls_address: return "OP_form_tls_address";
- case DW_OP_call_frame_cfa: return "OP_call_frame_cfa";
- case DW_OP_lo_user: return "OP_lo_user";
- case DW_OP_hi_user: return "OP_hi_user";
+ case DW_OP_addr: return "DW_OP_addr";
+ case DW_OP_deref: return "DW_OP_deref";
+ case DW_OP_const1u: return "DW_OP_const1u";
+ case DW_OP_const1s: return "DW_OP_const1s";
+ case DW_OP_const2u: return "DW_OP_const2u";
+ case DW_OP_const2s: return "DW_OP_const2s";
+ case DW_OP_const4u: return "DW_OP_const4u";
+ case DW_OP_const4s: return "DW_OP_const4s";
+ case DW_OP_const8u: return "DW_OP_const8u";
+ case DW_OP_const8s: return "DW_OP_const8s";
+ case DW_OP_constu: return "DW_OP_constu";
+ case DW_OP_consts: return "DW_OP_consts";
+ case DW_OP_dup: return "DW_OP_dup";
+ case DW_OP_drop: return "DW_OP_drop";
+ case DW_OP_over: return "DW_OP_over";
+ case DW_OP_pick: return "DW_OP_pick";
+ case DW_OP_swap: return "DW_OP_swap";
+ case DW_OP_rot: return "DW_OP_rot";
+ case DW_OP_xderef: return "DW_OP_xderef";
+ case DW_OP_abs: return "DW_OP_abs";
+ case DW_OP_and: return "DW_OP_and";
+ case DW_OP_div: return "DW_OP_div";
+ case DW_OP_minus: return "DW_OP_minus";
+ case DW_OP_mod: return "DW_OP_mod";
+ case DW_OP_mul: return "DW_OP_mul";
+ case DW_OP_neg: return "DW_OP_neg";
+ case DW_OP_not: return "DW_OP_not";
+ case DW_OP_or: return "DW_OP_or";
+ case DW_OP_plus: return "DW_OP_plus";
+ case DW_OP_plus_uconst: return "DW_OP_plus_uconst";
+ case DW_OP_shl: return "DW_OP_shl";
+ case DW_OP_shr: return "DW_OP_shr";
+ case DW_OP_shra: return "DW_OP_shra";
+ case DW_OP_xor: return "DW_OP_xor";
+ case DW_OP_skip: return "DW_OP_skip";
+ case DW_OP_bra: return "DW_OP_bra";
+ case DW_OP_eq: return "DW_OP_eq";
+ case DW_OP_ge: return "DW_OP_ge";
+ case DW_OP_gt: return "DW_OP_gt";
+ case DW_OP_le: return "DW_OP_le";
+ case DW_OP_lt: return "DW_OP_lt";
+ case DW_OP_ne: return "DW_OP_ne";
+ case DW_OP_lit0: return "DW_OP_lit0";
+ case DW_OP_lit1: return "DW_OP_lit1";
+ case DW_OP_lit2: return "DW_OP_lit2";
+ case DW_OP_lit3: return "DW_OP_lit3";
+ case DW_OP_lit4: return "DW_OP_lit4";
+ case DW_OP_lit5: return "DW_OP_lit5";
+ case DW_OP_lit6: return "DW_OP_lit6";
+ case DW_OP_lit7: return "DW_OP_lit7";
+ case DW_OP_lit8: return "DW_OP_lit8";
+ case DW_OP_lit9: return "DW_OP_lit9";
+ case DW_OP_lit10: return "DW_OP_lit10";
+ case DW_OP_lit11: return "DW_OP_lit11";
+ case DW_OP_lit12: return "DW_OP_lit12";
+ case DW_OP_lit13: return "DW_OP_lit13";
+ case DW_OP_lit14: return "DW_OP_lit14";
+ case DW_OP_lit15: return "DW_OP_lit15";
+ case DW_OP_lit16: return "DW_OP_lit16";
+ case DW_OP_lit17: return "DW_OP_lit17";
+ case DW_OP_lit18: return "DW_OP_lit18";
+ case DW_OP_lit19: return "DW_OP_lit19";
+ case DW_OP_lit20: return "DW_OP_lit20";
+ case DW_OP_lit21: return "DW_OP_lit21";
+ case DW_OP_lit22: return "DW_OP_lit22";
+ case DW_OP_lit23: return "DW_OP_lit23";
+ case DW_OP_lit24: return "DW_OP_lit24";
+ case DW_OP_lit25: return "DW_OP_lit25";
+ case DW_OP_lit26: return "DW_OP_lit26";
+ case DW_OP_lit27: return "DW_OP_lit27";
+ case DW_OP_lit28: return "DW_OP_lit28";
+ case DW_OP_lit29: return "DW_OP_lit29";
+ case DW_OP_lit30: return "DW_OP_lit30";
+ case DW_OP_lit31: return "DW_OP_lit31";
+ case DW_OP_reg0: return "DW_OP_reg0";
+ case DW_OP_reg1: return "DW_OP_reg1";
+ case DW_OP_reg2: return "DW_OP_reg2";
+ case DW_OP_reg3: return "DW_OP_reg3";
+ case DW_OP_reg4: return "DW_OP_reg4";
+ case DW_OP_reg5: return "DW_OP_reg5";
+ case DW_OP_reg6: return "DW_OP_reg6";
+ case DW_OP_reg7: return "DW_OP_reg7";
+ case DW_OP_reg8: return "DW_OP_reg8";
+ case DW_OP_reg9: return "DW_OP_reg9";
+ case DW_OP_reg10: return "DW_OP_reg10";
+ case DW_OP_reg11: return "DW_OP_reg11";
+ case DW_OP_reg12: return "DW_OP_reg12";
+ case DW_OP_reg13: return "DW_OP_reg13";
+ case DW_OP_reg14: return "DW_OP_reg14";
+ case DW_OP_reg15: return "DW_OP_reg15";
+ case DW_OP_reg16: return "DW_OP_reg16";
+ case DW_OP_reg17: return "DW_OP_reg17";
+ case DW_OP_reg18: return "DW_OP_reg18";
+ case DW_OP_reg19: return "DW_OP_reg19";
+ case DW_OP_reg20: return "DW_OP_reg20";
+ case DW_OP_reg21: return "DW_OP_reg21";
+ case DW_OP_reg22: return "DW_OP_reg22";
+ case DW_OP_reg23: return "DW_OP_reg23";
+ case DW_OP_reg24: return "DW_OP_reg24";
+ case DW_OP_reg25: return "DW_OP_reg25";
+ case DW_OP_reg26: return "DW_OP_reg26";
+ case DW_OP_reg27: return "DW_OP_reg27";
+ case DW_OP_reg28: return "DW_OP_reg28";
+ case DW_OP_reg29: return "DW_OP_reg29";
+ case DW_OP_reg30: return "DW_OP_reg30";
+ case DW_OP_reg31: return "DW_OP_reg31";
+ case DW_OP_breg0: return "DW_OP_breg0";
+ case DW_OP_breg1: return "DW_OP_breg1";
+ case DW_OP_breg2: return "DW_OP_breg2";
+ case DW_OP_breg3: return "DW_OP_breg3";
+ case DW_OP_breg4: return "DW_OP_breg4";
+ case DW_OP_breg5: return "DW_OP_breg5";
+ case DW_OP_breg6: return "DW_OP_breg6";
+ case DW_OP_breg7: return "DW_OP_breg7";
+ case DW_OP_breg8: return "DW_OP_breg8";
+ case DW_OP_breg9: return "DW_OP_breg9";
+ case DW_OP_breg10: return "DW_OP_breg10";
+ case DW_OP_breg11: return "DW_OP_breg11";
+ case DW_OP_breg12: return "DW_OP_breg12";
+ case DW_OP_breg13: return "DW_OP_breg13";
+ case DW_OP_breg14: return "DW_OP_breg14";
+ case DW_OP_breg15: return "DW_OP_breg15";
+ case DW_OP_breg16: return "DW_OP_breg16";
+ case DW_OP_breg17: return "DW_OP_breg17";
+ case DW_OP_breg18: return "DW_OP_breg18";
+ case DW_OP_breg19: return "DW_OP_breg19";
+ case DW_OP_breg20: return "DW_OP_breg20";
+ case DW_OP_breg21: return "DW_OP_breg21";
+ case DW_OP_breg22: return "DW_OP_breg22";
+ case DW_OP_breg23: return "DW_OP_breg23";
+ case DW_OP_breg24: return "DW_OP_breg24";
+ case DW_OP_breg25: return "DW_OP_breg25";
+ case DW_OP_breg26: return "DW_OP_breg26";
+ case DW_OP_breg27: return "DW_OP_breg27";
+ case DW_OP_breg28: return "DW_OP_breg28";
+ case DW_OP_breg29: return "DW_OP_breg29";
+ case DW_OP_breg30: return "DW_OP_breg30";
+ case DW_OP_breg31: return "DW_OP_breg31";
+ case DW_OP_regx: return "DW_OP_regx";
+ case DW_OP_fbreg: return "DW_OP_fbreg";
+ case DW_OP_bregx: return "DW_OP_bregx";
+ case DW_OP_piece: return "DW_OP_piece";
+ case DW_OP_deref_size: return "DW_OP_deref_size";
+ case DW_OP_xderef_size: return "DW_OP_xderef_size";
+ case DW_OP_nop: return "DW_OP_nop";
+ case DW_OP_push_object_address: return "DW_OP_push_object_address";
+ case DW_OP_call2: return "DW_OP_call2";
+ case DW_OP_call4: return "DW_OP_call4";
+ case DW_OP_call_ref: return "DW_OP_call_ref";
+ case DW_OP_form_tls_address: return "DW_OP_form_tls_address";
+ case DW_OP_call_frame_cfa: return "DW_OP_call_frame_cfa";
+ case DW_OP_lo_user: return "DW_OP_lo_user";
+ case DW_OP_hi_user: return "DW_OP_hi_user";
}
return 0;
}
@@ -310,23 +397,23 @@
/// encoding.
const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) {
switch (Encoding) {
- case DW_ATE_address: return "ATE_address";
- case DW_ATE_boolean: return "ATE_boolean";
- case DW_ATE_complex_float: return "ATE_complex_float";
- case DW_ATE_float: return "ATE_float";
- case DW_ATE_signed: return "ATE_signed";
- case DW_ATE_signed_char: return "ATE_signed_char";
- case DW_ATE_unsigned: return "ATE_unsigned";
- case DW_ATE_unsigned_char: return "ATE_unsigned_char";
- case DW_ATE_imaginary_float: return "ATE_imaginary_float";
- case DW_ATE_packed_decimal: return "ATE_packed_decimal";
- case DW_ATE_numeric_string: return "ATE_numeric_string";
- case DW_ATE_edited: return "ATE_edited";
- case DW_ATE_signed_fixed: return "ATE_signed_fixed";
- case DW_ATE_unsigned_fixed: return "ATE_unsigned_fixed";
- case DW_ATE_decimal_float: return "ATE_decimal_float";
- case DW_ATE_lo_user: return "ATE_lo_user";
- case DW_ATE_hi_user: return "ATE_hi_user";
+ case DW_ATE_address: return "DW_ATE_address";
+ case DW_ATE_boolean: return "DW_ATE_boolean";
+ case DW_ATE_complex_float: return "DW_ATE_complex_float";
+ case DW_ATE_float: return "DW_ATE_float";
+ case DW_ATE_signed: return "DW_ATE_signed";
+ case DW_ATE_signed_char: return "DW_ATE_signed_char";
+ case DW_ATE_unsigned: return "DW_ATE_unsigned";
+ case DW_ATE_unsigned_char: return "DW_ATE_unsigned_char";
+ case DW_ATE_imaginary_float: return "DW_ATE_imaginary_float";
+ case DW_ATE_packed_decimal: return "DW_ATE_packed_decimal";
+ case DW_ATE_numeric_string: return "DW_ATE_numeric_string";
+ case DW_ATE_edited: return "DW_ATE_edited";
+ case DW_ATE_signed_fixed: return "DW_ATE_signed_fixed";
+ case DW_ATE_unsigned_fixed: return "DW_ATE_unsigned_fixed";
+ case DW_ATE_decimal_float: return "DW_ATE_decimal_float";
+ case DW_ATE_lo_user: return "DW_ATE_lo_user";
+ case DW_ATE_hi_user: return "DW_ATE_hi_user";
}
return 0;
}
@@ -335,11 +422,11 @@
/// attribute.
const char *llvm::dwarf::DecimalSignString(unsigned Sign) {
switch (Sign) {
- case DW_DS_unsigned: return "DS_unsigned";
- case DW_DS_leading_overpunch: return "DS_leading_overpunch";
- case DW_DS_trailing_overpunch: return "DS_trailing_overpunch";
- case DW_DS_leading_separate: return "DS_leading_separate";
- case DW_DS_trailing_separate: return "DS_trailing_separate";
+ case DW_DS_unsigned: return "DW_DS_unsigned";
+ case DW_DS_leading_overpunch: return "DW_DS_leading_overpunch";
+ case DW_DS_trailing_overpunch: return "DW_DS_trailing_overpunch";
+ case DW_DS_leading_separate: return "DW_DS_leading_separate";
+ case DW_DS_trailing_separate: return "DW_DS_trailing_separate";
}
return 0;
}
@@ -348,11 +435,11 @@
///
const char *llvm::dwarf::EndianityString(unsigned Endian) {
switch (Endian) {
- case DW_END_default: return "END_default";
- case DW_END_big: return "END_big";
- case DW_END_little: return "END_little";
- case DW_END_lo_user: return "END_lo_user";
- case DW_END_hi_user: return "END_hi_user";
+ case DW_END_default: return "DW_END_default";
+ case DW_END_big: return "DW_END_big";
+ case DW_END_little: return "DW_END_little";
+ case DW_END_lo_user: return "DW_END_lo_user";
+ case DW_END_hi_user: return "DW_END_hi_user";
}
return 0;
}
@@ -362,9 +449,9 @@
const char *llvm::dwarf::AccessibilityString(unsigned Access) {
switch (Access) {
// Accessibility codes
- case DW_ACCESS_public: return "ACCESS_public";
- case DW_ACCESS_protected: return "ACCESS_protected";
- case DW_ACCESS_private: return "ACCESS_private";
+ case DW_ACCESS_public: return "DW_ACCESS_public";
+ case DW_ACCESS_protected: return "DW_ACCESS_protected";
+ case DW_ACCESS_private: return "DW_ACCESS_private";
}
return 0;
}
@@ -373,9 +460,9 @@
///
const char *llvm::dwarf::VisibilityString(unsigned Visibility) {
switch (Visibility) {
- case DW_VIS_local: return "VIS_local";
- case DW_VIS_exported: return "VIS_exported";
- case DW_VIS_qualified: return "VIS_qualified";
+ case DW_VIS_local: return "DW_VIS_local";
+ case DW_VIS_exported: return "DW_VIS_exported";
+ case DW_VIS_qualified: return "DW_VIS_qualified";
}
return 0;
}
@@ -384,9 +471,9 @@
///
const char *llvm::dwarf::VirtualityString(unsigned Virtuality) {
switch (Virtuality) {
- case DW_VIRTUALITY_none: return "VIRTUALITY_none";
- case DW_VIRTUALITY_virtual: return "VIRTUALITY_virtual";
- case DW_VIRTUALITY_pure_virtual: return "VIRTUALITY_pure_virtual";
+ case DW_VIRTUALITY_none: return "DW_VIRTUALITY_none";
+ case DW_VIRTUALITY_virtual: return "DW_VIRTUALITY_virtual";
+ case DW_VIRTUALITY_pure_virtual: return "DW_VIRTUALITY_pure_virtual";
}
return 0;
}
@@ -395,27 +482,27 @@
///
const char *llvm::dwarf::LanguageString(unsigned Language) {
switch (Language) {
- case DW_LANG_C89: return "LANG_C89";
- case DW_LANG_C: return "LANG_C";
- case DW_LANG_Ada83: return "LANG_Ada83";
- case DW_LANG_C_plus_plus: return "LANG_C_plus_plus";
- case DW_LANG_Cobol74: return "LANG_Cobol74";
- case DW_LANG_Cobol85: return "LANG_Cobol85";
- case DW_LANG_Fortran77: return "LANG_Fortran77";
- case DW_LANG_Fortran90: return "LANG_Fortran90";
- case DW_LANG_Pascal83: return "LANG_Pascal83";
- case DW_LANG_Modula2: return "LANG_Modula2";
- case DW_LANG_Java: return "LANG_Java";
- case DW_LANG_C99: return "LANG_C99";
- case DW_LANG_Ada95: return "LANG_Ada95";
- case DW_LANG_Fortran95: return "LANG_Fortran95";
- case DW_LANG_PLI: return "LANG_PLI";
- case DW_LANG_ObjC: return "LANG_ObjC";
- case DW_LANG_ObjC_plus_plus: return "LANG_ObjC_plus_plus";
- case DW_LANG_UPC: return "LANG_UPC";
- case DW_LANG_D: return "LANG_D";
- case DW_LANG_lo_user: return "LANG_lo_user";
- case DW_LANG_hi_user: return "LANG_hi_user";
+ case DW_LANG_C89: return "DW_LANG_C89";
+ case DW_LANG_C: return "DW_LANG_C";
+ case DW_LANG_Ada83: return "DW_LANG_Ada83";
+ case DW_LANG_C_plus_plus: return "DW_LANG_C_plus_plus";
+ case DW_LANG_Cobol74: return "DW_LANG_Cobol74";
+ case DW_LANG_Cobol85: return "DW_LANG_Cobol85";
+ case DW_LANG_Fortran77: return "DW_LANG_Fortran77";
+ case DW_LANG_Fortran90: return "DW_LANG_Fortran90";
+ case DW_LANG_Pascal83: return "DW_LANG_Pascal83";
+ case DW_LANG_Modula2: return "DW_LANG_Modula2";
+ case DW_LANG_Java: return "DW_LANG_Java";
+ case DW_LANG_C99: return "DW_LANG_C99";
+ case DW_LANG_Ada95: return "DW_LANG_Ada95";
+ case DW_LANG_Fortran95: return "DW_LANG_Fortran95";
+ case DW_LANG_PLI: return "DW_LANG_PLI";
+ case DW_LANG_ObjC: return "DW_LANG_ObjC";
+ case DW_LANG_ObjC_plus_plus: return "DW_LANG_ObjC_plus_plus";
+ case DW_LANG_UPC: return "DW_LANG_UPC";
+ case DW_LANG_D: return "DW_LANG_D";
+ case DW_LANG_lo_user: return "DW_LANG_lo_user";
+ case DW_LANG_hi_user: return "DW_LANG_hi_user";
}
return 0;
}
@@ -424,10 +511,10 @@
///
const char *llvm::dwarf::CaseString(unsigned Case) {
switch (Case) {
- case DW_ID_case_sensitive: return "ID_case_sensitive";
- case DW_ID_up_case: return "ID_up_case";
- case DW_ID_down_case: return "ID_down_case";
- case DW_ID_case_insensitive: return "ID_case_insensitive";
+ case DW_ID_case_sensitive: return "DW_ID_case_sensitive";
+ case DW_ID_up_case: return "DW_ID_up_case";
+ case DW_ID_down_case: return "DW_ID_down_case";
+ case DW_ID_case_insensitive: return "DW_ID_case_insensitive";
}
return 0;
}
@@ -436,11 +523,11 @@
///
const char *llvm::dwarf::ConventionString(unsigned Convention) {
switch (Convention) {
- case DW_CC_normal: return "CC_normal";
- case DW_CC_program: return "CC_program";
- case DW_CC_nocall: return "CC_nocall";
- case DW_CC_lo_user: return "CC_lo_user";
- case DW_CC_hi_user: return "CC_hi_user";
+ case DW_CC_normal: return "DW_CC_normal";
+ case DW_CC_program: return "DW_CC_program";
+ case DW_CC_nocall: return "DW_CC_nocall";
+ case DW_CC_lo_user: return "DW_CC_lo_user";
+ case DW_CC_hi_user: return "DW_CC_hi_user";
}
return 0;
}
@@ -449,10 +536,10 @@
///
const char *llvm::dwarf::InlineCodeString(unsigned Code) {
switch (Code) {
- case DW_INL_not_inlined: return "INL_not_inlined";
- case DW_INL_inlined: return "INL_inlined";
- case DW_INL_declared_not_inlined: return "INL_declared_not_inlined";
- case DW_INL_declared_inlined: return "INL_declared_inlined";
+ case DW_INL_not_inlined: return "DW_INL_not_inlined";
+ case DW_INL_inlined: return "DW_INL_inlined";
+ case DW_INL_declared_not_inlined: return "DW_INL_declared_not_inlined";
+ case DW_INL_declared_inlined: return "DW_INL_declared_inlined";
}
return 0;
}
@@ -461,8 +548,8 @@
///
const char *llvm::dwarf::ArrayOrderString(unsigned Order) {
switch (Order) {
- case DW_ORD_row_major: return "ORD_row_major";
- case DW_ORD_col_major: return "ORD_col_major";
+ case DW_ORD_row_major: return "DW_ORD_row_major";
+ case DW_ORD_col_major: return "DW_ORD_col_major";
}
return 0;
}
@@ -471,8 +558,8 @@
/// descriptor.
const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) {
switch (Discriminant) {
- case DW_DSC_label: return "DSC_label";
- case DW_DSC_range: return "DSC_range";
+ case DW_DSC_label: return "DW_DSC_label";
+ case DW_DSC_range: return "DW_DSC_range";
}
return 0;
}
@@ -481,18 +568,18 @@
///
const char *llvm::dwarf::LNStandardString(unsigned Standard) {
switch (Standard) {
- case DW_LNS_copy: return "LNS_copy";
- case DW_LNS_advance_pc: return "LNS_advance_pc";
- case DW_LNS_advance_line: return "LNS_advance_line";
- case DW_LNS_set_file: return "LNS_set_file";
- case DW_LNS_set_column: return "LNS_set_column";
- case DW_LNS_negate_stmt: return "LNS_negate_stmt";
- case DW_LNS_set_basic_block: return "LNS_set_basic_block";
- case DW_LNS_const_add_pc: return "LNS_const_add_pc";
- case DW_LNS_fixed_advance_pc: return "LNS_fixed_advance_pc";
- case DW_LNS_set_prologue_end: return "LNS_set_prologue_end";
- case DW_LNS_set_epilogue_begin: return "LNS_set_epilogue_begin";
- case DW_LNS_set_isa: return "LNS_set_isa";
+ case DW_LNS_copy: return "DW_LNS_copy";
+ case DW_LNS_advance_pc: return "DW_LNS_advance_pc";
+ case DW_LNS_advance_line: return "DW_LNS_advance_line";
+ case DW_LNS_set_file: return "DW_LNS_set_file";
+ case DW_LNS_set_column: return "DW_LNS_set_column";
+ case DW_LNS_negate_stmt: return "DW_LNS_negate_stmt";
+ case DW_LNS_set_basic_block: return "DW_LNS_set_basic_block";
+ case DW_LNS_const_add_pc: return "DW_LNS_const_add_pc";
+ case DW_LNS_fixed_advance_pc: return "DW_LNS_fixed_advance_pc";
+ case DW_LNS_set_prologue_end: return "DW_LNS_set_prologue_end";
+ case DW_LNS_set_epilogue_begin: return "DW_LNS_set_epilogue_begin";
+ case DW_LNS_set_isa: return "DW_LNS_set_isa";
}
return 0;
}
@@ -502,11 +589,11 @@
const char *llvm::dwarf::LNExtendedString(unsigned Encoding) {
switch (Encoding) {
// Line Number Extended Opcode Encodings
- case DW_LNE_end_sequence: return "LNE_end_sequence";
- case DW_LNE_set_address: return "LNE_set_address";
- case DW_LNE_define_file: return "LNE_define_file";
- case DW_LNE_lo_user: return "LNE_lo_user";
- case DW_LNE_hi_user: return "LNE_hi_user";
+ case DW_LNE_end_sequence: return "DW_LNE_end_sequence";
+ case DW_LNE_set_address: return "DW_LNE_set_address";
+ case DW_LNE_define_file: return "DW_LNE_define_file";
+ case DW_LNE_lo_user: return "DW_LNE_lo_user";
+ case DW_LNE_hi_user: return "DW_LNE_hi_user";
}
return 0;
}
@@ -516,11 +603,11 @@
const char *llvm::dwarf::MacinfoString(unsigned Encoding) {
switch (Encoding) {
// Macinfo Type Encodings
- case DW_MACINFO_define: return "MACINFO_define";
- case DW_MACINFO_undef: return "MACINFO_undef";
- case DW_MACINFO_start_file: return "MACINFO_start_file";
- case DW_MACINFO_end_file: return "MACINFO_end_file";
- case DW_MACINFO_vendor_ext: return "MACINFO_vendor_ext";
+ case DW_MACINFO_define: return "DW_MACINFO_define";
+ case DW_MACINFO_undef: return "DW_MACINFO_undef";
+ case DW_MACINFO_start_file: return "DW_MACINFO_start_file";
+ case DW_MACINFO_end_file: return "DW_MACINFO_end_file";
+ case DW_MACINFO_vendor_ext: return "DW_MACINFO_vendor_ext";
}
return 0;
}
@@ -529,33 +616,33 @@
/// encodings.
const char *llvm::dwarf::CallFrameString(unsigned Encoding) {
switch (Encoding) {
- case DW_CFA_advance_loc: return "CFA_advance_loc";
- case DW_CFA_offset: return "CFA_offset";
- case DW_CFA_restore: return "CFA_restore";
- case DW_CFA_set_loc: return "CFA_set_loc";
- case DW_CFA_advance_loc1: return "CFA_advance_loc1";
- case DW_CFA_advance_loc2: return "CFA_advance_loc2";
- case DW_CFA_advance_loc4: return "CFA_advance_loc4";
- case DW_CFA_offset_extended: return "CFA_offset_extended";
- case DW_CFA_restore_extended: return "CFA_restore_extended";
- case DW_CFA_undefined: return "CFA_undefined";
- case DW_CFA_same_value: return "CFA_same_value";
- case DW_CFA_register: return "CFA_register";
- case DW_CFA_remember_state: return "CFA_remember_state";
- case DW_CFA_restore_state: return "CFA_restore_state";
- case DW_CFA_def_cfa: return "CFA_def_cfa";
- case DW_CFA_def_cfa_register: return "CFA_def_cfa_register";
- case DW_CFA_def_cfa_offset: return "CFA_def_cfa_offset";
- case DW_CFA_def_cfa_expression: return "CFA_def_cfa_expression";
- case DW_CFA_expression: return "CFA_expression";
- case DW_CFA_offset_extended_sf: return "CFA_offset_extended_sf";
- case DW_CFA_def_cfa_sf: return "CFA_def_cfa_sf";
- case DW_CFA_def_cfa_offset_sf: return "CFA_def_cfa_offset_sf";
- case DW_CFA_val_offset: return "CFA_val_offset";
- case DW_CFA_val_offset_sf: return "CFA_val_offset_sf";
- case DW_CFA_val_expression: return "CFA_val_expression";
- case DW_CFA_lo_user: return "CFA_lo_user";
- case DW_CFA_hi_user: return "CFA_hi_user";
+ case DW_CFA_advance_loc: return "DW_CFA_advance_loc";
+ case DW_CFA_offset: return "DW_CFA_offset";
+ case DW_CFA_restore: return "DW_CFA_restore";
+ case DW_CFA_set_loc: return "DW_CFA_set_loc";
+ case DW_CFA_advance_loc1: return "DW_CFA_advance_loc1";
+ case DW_CFA_advance_loc2: return "DW_CFA_advance_loc2";
+ case DW_CFA_advance_loc4: return "DW_CFA_advance_loc4";
+ case DW_CFA_offset_extended: return "DW_CFA_offset_extended";
+ case DW_CFA_restore_extended: return "DW_CFA_restore_extended";
+ case DW_CFA_undefined: return "DW_CFA_undefined";
+ case DW_CFA_same_value: return "DW_CFA_same_value";
+ case DW_CFA_register: return "DW_CFA_register";
+ case DW_CFA_remember_state: return "DW_CFA_remember_state";
+ case DW_CFA_restore_state: return "DW_CFA_restore_state";
+ case DW_CFA_def_cfa: return "DW_CFA_def_cfa";
+ case DW_CFA_def_cfa_register: return "DW_CFA_def_cfa_register";
+ case DW_CFA_def_cfa_offset: return "DW_CFA_def_cfa_offset";
+ case DW_CFA_def_cfa_expression: return "DW_CFA_def_cfa_expression";
+ case DW_CFA_expression: return "DW_CFA_expression";
+ case DW_CFA_offset_extended_sf: return "DW_CFA_offset_extended_sf";
+ case DW_CFA_def_cfa_sf: return "DW_CFA_def_cfa_sf";
+ case DW_CFA_def_cfa_offset_sf: return "DW_CFA_def_cfa_offset_sf";
+ case DW_CFA_val_offset: return "DW_CFA_val_offset";
+ case DW_CFA_val_offset_sf: return "DW_CFA_val_offset_sf";
+ case DW_CFA_val_expression: return "DW_CFA_val_expression";
+ case DW_CFA_lo_user: return "DW_CFA_lo_user";
+ case DW_CFA_hi_user: return "DW_CFA_hi_user";
}
return 0;
}
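
A quick way to see the effect of the renamed strings above: the dwarf helpers now hand back the full constant name. A minimal sketch, assuming these helpers are declared in llvm/Support/Dwarf.h as on trunk at the time:

    #include "llvm/Support/Dwarf.h"
    #include <cstdio>

    int main() {
      // With this patch prints "DW_OP_breg11"; previously it printed "OP_breg11".
      std::printf("%s\n",
                  llvm::dwarf::OperationEncodingString(llvm::dwarf::DW_OP_breg11));
      return 0;
    }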
Modified: llvm/branches/wendling/eh/lib/Support/FileUtilities.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/FileUtilities.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/FileUtilities.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/FileUtilities.cpp Fri Jul 2 04:34:51 2010
@@ -51,7 +51,15 @@
if (!isNumberChar(*Pos)) return Pos;
// Otherwise, return to the start of the number.
+ bool HasPeriod = false;
while (Pos > FirstChar && isNumberChar(Pos[-1])) {
+ // Back up over at most one period.
+ if (Pos[-1] == '.') {
+ if (HasPeriod)
+ break;
+ HasPeriod = true;
+ }
+
--Pos;
if (Pos > FirstChar && isSignedChar(Pos[0]) && !isExponentChar(Pos[-1]))
break;
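
The new HasPeriod flag stops the backward scan once it would cross a second '.', so "1.2.3" is no longer swallowed as a single number. A standalone sketch of the same scan, with a simplified stand-in for isNumberChar (the real loop also performs the sign/exponent check shown above):

    #include <cctype>
    #include <cstdio>

    static bool isNumberChar(char C) {   // simplified stand-in
      return std::isdigit((unsigned char)C) || C == '.' ||
             C == 'e' || C == 'E' || C == '+' || C == '-';
    }

    // Back up from Pos to the start of the number, crossing at most one '.'.
    static const char *BackupNumber(const char *FirstChar, const char *Pos) {
      bool HasPeriod = false;
      while (Pos > FirstChar && isNumberChar(Pos[-1])) {
        if (Pos[-1] == '.') {
          if (HasPeriod)
            break;              // a second period ends the number
          HasPeriod = true;
        }
        --Pos;
      }
      return Pos;
    }

    int main() {
      const char *S = "1.2.3";
      std::printf("%s\n", BackupNumber(S, S + 5));  // prints "2.3", not "1.2.3"
      return 0;
    }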
@@ -204,16 +212,16 @@
const char *F1P = File1Start;
const char *F2P = File2Start;
- if (A_size == B_size) {
- // Are the buffers identical? Common case: Handle this efficiently.
- if (std::memcmp(File1Start, File2Start, A_size) == 0)
- return 0;
-
- if (AbsTol == 0 && RelTol == 0) {
- if (Error)
- *Error = "Files differ without tolerance allowance";
- return 1; // Files different!
- }
+ // Are the buffers identical? Common case: Handle this efficiently.
+ if (A_size == B_size &&
+ std::memcmp(File1Start, File2Start, A_size) == 0)
+ return 0;
+
+ // Otherwise, we are done if no tolerances are set.
+ if (AbsTol == 0 && RelTol == 0) {
+ if (Error)
+ *Error = "Files differ without tolerance allowance";
+ return 1; // Files different!
}
bool CompareFailed = false;
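
The rewritten prologue of the comparison now reads as two early exits: the memcmp fast path, then the no-tolerance bailout. A minimal consolidated sketch of that control flow (names shortened; the real function continues into the tolerance-aware numeric walk):

    #include <cstring>
    #include <string>

    // 0 = files match, 1 = files differ; mirrors the early exits above.
    static int CompareStart(const char *A, size_t ASize,
                            const char *B, size_t BSize,
                            double AbsTol, double RelTol, std::string *Error) {
      // Are the buffers identical?  Common case: handle this efficiently.
      if (ASize == BSize && std::memcmp(A, B, ASize) == 0)
        return 0;

      // Otherwise, we are done if no tolerances are set.
      if (AbsTol == 0 && RelTol == 0) {
        if (Error)
          *Error = "Files differ without tolerance allowance";
        return 1;
      }

      // ... tolerance-aware numeric comparison would continue here ...
      return 0;
    }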
Modified: llvm/branches/wendling/eh/lib/Support/Timer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Support/Timer.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Support/Timer.cpp (original)
+++ llvm/branches/wendling/eh/lib/Support/Timer.cpp Fri Jul 2 04:34:51 2010
@@ -236,11 +236,13 @@
return T;
}
-NamedRegionTimer::NamedRegionTimer(StringRef Name)
- : TimeRegion(getNamedRegionTimer(Name)) {}
-
-NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName)
- : TimeRegion(NamedGroupedTimers->get(Name, GroupName)) {}
+NamedRegionTimer::NamedRegionTimer(StringRef Name,
+ bool Enabled)
+ : TimeRegion(!Enabled ? 0 : &getNamedRegionTimer(Name)) {}
+
+NamedRegionTimer::NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled)
+ : TimeRegion(!Enabled ? 0 : &NamedGroupedTimers->get(Name, GroupName)) {}
//===----------------------------------------------------------------------===//
// TimerGroup Implementation
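
With the extra Enabled flag a pass can construct the RAII timer unconditionally and let a boolean decide whether anything is timed at all. A usage sketch, assuming only the NamedRegionTimer declaration from llvm/Support/Timer.h that matches the definition above:

    #include "llvm/Support/Timer.h"
    using namespace llvm;

    void runPhase(bool EnableTiming) {
      // No-op when EnableTiming is false; otherwise timed under
      // "My Pass" in the "My Group" timer group.
      NamedRegionTimer T("My Pass", "My Group", EnableTiming);
      // ... work being timed ...
    }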
Modified: llvm/branches/wendling/eh/lib/System/Disassembler.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/System/Disassembler.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/System/Disassembler.cpp (original)
+++ llvm/branches/wendling/eh/lib/System/Disassembler.cpp Fri Jul 2 04:34:51 2010
@@ -44,33 +44,29 @@
uint64_t pc) {
std::stringstream res;
-#if defined (__i386__) || defined (__amd64__) || defined (__x86_64__)
+#if (defined (__i386__) || defined (__amd64__) || defined (__x86_64__)) \
+ && USE_UDIS86
unsigned bits;
# if defined(__i386__)
bits = 32;
# else
bits = 64;
# endif
-
-# if USE_UDIS86
+
ud_t ud_obj;
-
+
ud_init(&ud_obj);
ud_set_input_buffer(&ud_obj, start, length);
ud_set_mode(&ud_obj, bits);
ud_set_pc(&ud_obj, pc);
ud_set_syntax(&ud_obj, UD_SYN_ATT);
-
+
res << std::setbase(16)
<< std::setw(bits/4);
-
+
while (ud_disassemble(&ud_obj)) {
res << ud_insn_off(&ud_obj) << ":\t" << ud_insn_asm(&ud_obj) << "\n";
}
-# else
- res << "No disassembler available. See configure help for options.\n";
-# endif
-
#else
res << "No disassembler available. See configure help for options.\n";
#endif
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARM.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARM.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARM.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARM.h Fri Jul 2 04:34:51 2010
@@ -90,10 +90,6 @@
}
}
-/// ModelWithRegSequence - Return true if isel should use REG_SEQUENCE to model
-/// operations involving sub-registers.
-bool ModelWithRegSequence();
-
FunctionPass *createARMISelDag(ARMBaseTargetMachine &TM,
CodeGenOpt::Level OptLevel);
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -347,11 +347,9 @@
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
-
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
int BOpc = !AFI->isThumbFunction()
? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
@@ -365,17 +363,17 @@
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch?
- BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
else
- BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
+ BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
return 1;
}
// Two-way conditional branch.
- BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
+ BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
- BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
return 2;
}
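
Call sites of the hook now supply the DebugLoc themselves instead of the callee fabricating an empty one. A hedged sketch of what a caller might look like (TII, MBB, TargetBB and SomeBranchMI are assumed to come from the surrounding pass):

    SmallVector<MachineOperand, 4> Cond;        // empty => unconditional branch
    DebugLoc DL = SomeBranchMI->getDebugLoc();  // reuse a real location if one exists
    TII->InsertBranch(MBB, TargetBB, /*FBB=*/0, Cond, DL);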
@@ -596,6 +594,7 @@
return true;
}
case ARM::MOVr:
+ case ARM::MOVr_TC:
case ARM::tMOVr:
case ARM::tMOVgpr2tgpr:
case ARM::tMOVtgpr2gpr:
@@ -701,11 +700,11 @@
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
DebugLoc DL) const {
- // tGPR is used sometimes in ARM instructions that need to avoid using
- // certain registers. Just treat it as GPR here.
- if (DestRC == ARM::tGPRRegisterClass)
+ // tGPR or tcGPR is used sometimes in ARM instructions that need to avoid
+ // using certain registers. Just treat them as GPR here.
+ if (DestRC == ARM::tGPRRegisterClass || DestRC == ARM::tcGPRRegisterClass)
DestRC = ARM::GPRRegisterClass;
- if (SrcRC == ARM::tGPRRegisterClass)
+ if (SrcRC == ARM::tGPRRegisterClass || SrcRC == ARM::tcGPRRegisterClass)
SrcRC = ARM::GPRRegisterClass;
// Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
@@ -759,7 +758,10 @@
else
return false;
- AddDefaultPred(BuildMI(MBB, I, DL, get(Opc), DestReg).addReg(SrcReg));
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
+ MIB.addReg(SrcReg);
+ if (Opc != ARM::VMOVQQ && Opc != ARM::VMOVQQQQ)
+ AddDefaultPred(MIB);
}
return true;
@@ -796,7 +798,7 @@
// tGPR is used sometimes in ARM instructions that need to avoid using
// certain registers. Just treat it as GPR here.
- if (RC == ARM::tGPRRegisterClass)
+ if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass)
RC = ARM::GPRRegisterClass;
if (RC == ARM::GPRRegisterClass) {
@@ -887,7 +889,7 @@
// tGPR is used sometimes in ARM instructions that need to avoid using
// certain registers. Just treat it as GPR here.
- if (RC == ARM::tGPRRegisterClass)
+ if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass)
RC = ARM::GPRRegisterClass;
if (RC == ARM::GPRRegisterClass) {
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -116,11 +116,23 @@
// Thumb format
ThumbFrm = 24 << FormShift,
- // NEON format
+ // NEON formats
NEONFrm = 25 << FormShift,
NEONGetLnFrm = 26 << FormShift,
NEONSetLnFrm = 27 << FormShift,
NEONDupFrm = 28 << FormShift,
+ NLdStFrm = 31 << FormShift,
+ N1RegModImmFrm= 32 << FormShift,
+ N2RegFrm = 33 << FormShift,
+ NVCVTFrm = 34 << FormShift,
+ NVDupLnFrm = 35 << FormShift,
+ N2RegVShLFrm = 36 << FormShift,
+ N2RegVShRFrm = 37 << FormShift,
+ N3RegFrm = 38 << FormShift,
+ N3RegVShFrm = 39 << FormShift,
+ NVExtFrm = 40 << FormShift,
+ NVMulSLFrm = 41 << FormShift,
+ NVTBLFrm = 42 << FormShift,
//===------------------------------------------------------------------===//
// Misc flags.
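
Each of the new NEON format values occupies the same TSFlags field as the existing ones, so clients can dispatch on it exactly as the code emitter does. A hedged sketch (FormMask is assumed to be the companion mask for FormShift defined elsewhere in this header):

    // Dispatch on the instruction format encoded in TSFlags.
    unsigned Form = MI.getDesc().TSFlags & ARMII::FormMask;  // FormMask: assumed name
    switch (Form) {
    case ARMII::N1RegModImmFrm:
      // NEON "one register, modified immediate" form (e.g. VMOV.i32 Dd, #imm).
      break;
    case ARMII::NLdStFrm:
      // NEON structure load/store form.
      break;
    default:
      break;
    }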
@@ -213,7 +225,8 @@
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMBaseRegisterInfo.cpp Fri Jul 2 04:34:51 2010
@@ -1662,13 +1662,15 @@
addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
} else if (RetOpcode == ARM::TCRETURNri) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
} else if (RetOpcode == ARM::TCRETURNriND) {
- BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = prior(MBBI);
- for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
+ for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));
// Delete the pseudo instruction TCRETURN.
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMCodeEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -139,6 +139,8 @@
void emitMiscInstruction(const MachineInstr &MI);
+ void emitNEON1RegModImm(const MachineInstr &MI);
+
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned getMachineOpValue(const MachineInstr &MI,const MachineOperand &MO);
@@ -408,6 +410,10 @@
case ARMII::VFPMiscFrm:
emitMiscInstruction(MI);
break;
+ // NEON instructions.
+ case ARMII::N1RegModImmFrm:
+ emitNEON1RegModImm(MI);
+ break;
}
MCE.processDebugLoc(MI.getDebugLoc(), false);
}
@@ -1540,4 +1546,32 @@
emitWordLE(Binary);
}
+static unsigned encodeNEONRd(const MachineInstr &MI, unsigned OpIdx) {
+ unsigned RegD = MI.getOperand(OpIdx).getReg();
+ unsigned Binary = 0;
+ RegD = ARMRegisterInfo::getRegisterNumbering(RegD);
+ Binary |= (RegD & 0xf) << ARMII::RegRdShift;
+ Binary |= ((RegD >> 4) & 1) << ARMII::D_BitShift;
+ return Binary;
+}
+
+void ARMCodeEmitter::emitNEON1RegModImm(const MachineInstr &MI) {
+ unsigned Binary = getBinaryCodeForInstr(MI);
+ // Destination register is encoded in Dd.
+ Binary |= encodeNEONRd(MI, 0);
+ // Immediate fields: Op, Cmode, I, Imm3, Imm4
+ unsigned Imm = MI.getOperand(1).getImm();
+ unsigned Op = (Imm >> 12) & 1;
+ Binary |= (Op << 5);
+ unsigned Cmode = (Imm >> 8) & 0xf;
+ Binary |= (Cmode << 8);
+ unsigned I = (Imm >> 7) & 1;
+ Binary |= (I << 24);
+ unsigned Imm3 = (Imm >> 4) & 0x7;
+ Binary |= (Imm3 << 16);
+ unsigned Imm4 = Imm & 0xf;
+ Binary |= Imm4;
+ emitWordLE(Binary);
+}
+
#include "ARMGenCodeEmitter.inc"
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantPoolValue.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantPoolValue.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantPoolValue.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMConstantPoolValue.h Fri Jul 2 04:34:51 2010
@@ -15,6 +15,7 @@
#define LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
#include "llvm/CodeGen/MachineConstantPool.h"
+#include <cstddef>
namespace llvm {
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelDAGToDAG.cpp Fri Jul 2 04:34:51 2010
@@ -36,11 +36,6 @@
using namespace llvm;
-static cl::opt<bool>
-UseRegSeq("neon-reg-sequence", cl::Hidden,
- cl::desc("Use reg_sequence to model ld / st of multiple neon regs"),
- cl::init(true));
-
//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
@@ -541,7 +536,7 @@
DebugLoc dl = Op->getDebugLoc();
if (N.getOpcode() != ISD::ADD) {
ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
- if (!NC || NC->getZExtValue() != 0)
+ if (!NC || !NC->isNullValue())
return false;
Base = Offset = N;
@@ -962,16 +957,8 @@
DebugLoc dl = V0.getNode()->getDebugLoc();
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
- if (llvm::ModelWithRegSequence()) {
- const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
- }
- SDValue Undef =
- SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);
- SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, Undef, V0, SubReg0);
- return CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, SDValue(Pair, 0), V1, SubReg1);
+ const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}
/// PairDRegs - Form a quad register from a pair of D registers.
@@ -980,16 +967,8 @@
DebugLoc dl = V0.getNode()->getDebugLoc();
SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
- if (llvm::ModelWithRegSequence()) {
- const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
- return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
- }
- SDValue Undef =
- SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0);
- SDNode *Pair = CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, Undef, V0, SubReg0);
- return CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
- VT, SDValue(Pair, 0), V1, SubReg1);
+ const SDValue Ops[] = { V0, SubReg0, V1, SubReg1 };
+ return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 4);
}
/// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
@@ -1115,7 +1094,7 @@
std::vector<EVT> ResTys(NumVecs, VT);
ResTys.push_back(MVT::Other);
SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops, 5);
- if (!llvm::ModelWithRegSequence() || NumVecs < 2)
+ if (NumVecs < 2)
return VLd;
SDValue RegSeq;
@@ -1156,24 +1135,17 @@
Chain = SDValue(VLd, 2 * NumVecs);
// Combine the even and odd subregs to produce the result.
- if (llvm::ModelWithRegSequence()) {
- if (NumVecs == 1) {
- SDNode *Q = PairDRegs(VT, SDValue(VLd, 0), SDValue(VLd, 1));
- ReplaceUses(SDValue(N, 0), SDValue(Q, 0));
- } else {
- SDValue QQ = SDValue(QuadDRegs(MVT::v4i64,
- SDValue(VLd, 0), SDValue(VLd, 1),
- SDValue(VLd, 2), SDValue(VLd, 3)), 0);
- SDValue Q0 = CurDAG->getTargetExtractSubreg(ARM::qsub_0, dl, VT, QQ);
- SDValue Q1 = CurDAG->getTargetExtractSubreg(ARM::qsub_1, dl, VT, QQ);
- ReplaceUses(SDValue(N, 0), Q0);
- ReplaceUses(SDValue(N, 1), Q1);
- }
+ if (NumVecs == 1) {
+ SDNode *Q = PairDRegs(VT, SDValue(VLd, 0), SDValue(VLd, 1));
+ ReplaceUses(SDValue(N, 0), SDValue(Q, 0));
} else {
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDNode *Q = PairDRegs(VT, SDValue(VLd, 2*Vec), SDValue(VLd, 2*Vec+1));
- ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
- }
+ SDValue QQ = SDValue(QuadDRegs(MVT::v4i64,
+ SDValue(VLd, 0), SDValue(VLd, 1),
+ SDValue(VLd, 2), SDValue(VLd, 3)), 0);
+ SDValue Q0 = CurDAG->getTargetExtractSubreg(ARM::qsub_0, dl, VT, QQ);
+ SDValue Q1 = CurDAG->getTargetExtractSubreg(ARM::qsub_1, dl, VT, QQ);
+ ReplaceUses(SDValue(N, 0), Q0);
+ ReplaceUses(SDValue(N, 1), Q1);
}
} else {
// Otherwise, quad registers are loaded with two separate instructions,
@@ -1196,37 +1168,27 @@
SDNode *VLdB = CurDAG->getMachineNode(Opc, dl, ResTys, OpsB, 6);
Chain = SDValue(VLdB, NumVecs+1);
- if (llvm::ModelWithRegSequence()) {
- SDValue V0 = SDValue(VLdA, 0);
- SDValue V1 = SDValue(VLdB, 0);
- SDValue V2 = SDValue(VLdA, 1);
- SDValue V3 = SDValue(VLdB, 1);
- SDValue V4 = SDValue(VLdA, 2);
- SDValue V5 = SDValue(VLdB, 2);
- SDValue V6 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT),
- 0)
- : SDValue(VLdA, 3);
- SDValue V7 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT),
- 0)
- : SDValue(VLdB, 3);
- SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V0, V1, V2, V3,
- V4, V5, V6, V7), 0);
+ SDValue V0 = SDValue(VLdA, 0);
+ SDValue V1 = SDValue(VLdB, 0);
+ SDValue V2 = SDValue(VLdA, 1);
+ SDValue V3 = SDValue(VLdB, 1);
+ SDValue V4 = SDValue(VLdA, 2);
+ SDValue V5 = SDValue(VLdB, 2);
+ SDValue V6 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT), 0)
+ : SDValue(VLdA, 3);
+ SDValue V7 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,RegVT), 0)
+ : SDValue(VLdB, 3);
+ SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V0, V1, V2, V3,
+ V4, V5, V6, V7), 0);
- // Extract out the 3 / 4 Q registers.
- assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
- dl, VT, RegSeq);
- ReplaceUses(SDValue(N, Vec), Q);
- }
- } else {
- // Combine the even and odd subregs to produce the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDNode *Q = PairDRegs(VT, SDValue(VLdA, Vec), SDValue(VLdB, Vec));
- ReplaceUses(SDValue(N, Vec), SDValue(Q, 0));
- }
+ // Extract out the 3 / 4 Q registers.
+ assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
+ SDValue Q = CurDAG->getTargetExtractSubreg(ARM::qsub_0+Vec,
+ dl, VT, RegSeq);
+ ReplaceUses(SDValue(N, Vec), Q);
}
}
ReplaceUses(SDValue(N, NumVecs), Chain);
@@ -1274,7 +1236,7 @@
Ops.push_back(Align);
if (is64BitVector) {
- if (llvm::ModelWithRegSequence() && NumVecs >= 2) {
+ if (NumVecs >= 2) {
SDValue RegSeq;
SDValue V0 = N->getOperand(0+3);
SDValue V1 = N->getOperand(1+3);
@@ -1319,7 +1281,7 @@
// Quad registers are directly supported for VST1 and VST2,
// storing pairs of D regs.
unsigned Opc = QOpcodes0[OpcodeIndex];
- if (llvm::ModelWithRegSequence() && NumVecs == 2) {
+ if (NumVecs == 2) {
// First extract the pair of Q registers.
SDValue Q0 = N->getOperand(3);
SDValue Q1 = N->getOperand(4);
@@ -1357,76 +1319,48 @@
// Otherwise, quad registers are stored with two separate instructions,
// where one stores the even registers and the other stores the odd registers.
- if (llvm::ModelWithRegSequence()) {
- // Form the QQQQ REG_SEQUENCE.
- SDValue V[8];
- for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
- V[i] = CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
- N->getOperand(Vec+3));
- V[i+1] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
- N->getOperand(Vec+3));
- }
- if (NumVecs == 3)
- V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
-
- SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
- V[4], V[5], V[6], V[7]), 0);
-
- // Store the even D registers.
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- Ops.push_back(Reg0); // post-access address offset
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec*2, dl,
- RegVT, RegSeq));
- Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
- Ops.push_back(Chain);
- unsigned Opc = QOpcodes0[OpcodeIndex];
- SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStA, 1);
-
- // Store the odd D registers.
- Ops[0] = SDValue(VStA, 0); // MemAddr
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1+Vec*2, dl,
- RegVT, RegSeq);
- Ops[NumVecs+5] = Chain;
- Opc = QOpcodes1[OpcodeIndex];
- SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStB, 1);
- ReplaceUses(SDValue(N, 0), Chain);
- return NULL;
- } else {
- Ops.push_back(Reg0); // post-access address offset
-
- // Store the even subregs.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
- N->getOperand(Vec+3)));
- Ops.push_back(Pred);
- Ops.push_back(Reg0); // predicate register
- Ops.push_back(Chain);
- unsigned Opc = QOpcodes0[OpcodeIndex];
- SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStA, 1);
- // Store the odd subregs.
- Ops[0] = SDValue(VStA, 0); // MemAddr
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
- N->getOperand(Vec+3));
- Ops[NumVecs+5] = Chain;
- Opc = QOpcodes1[OpcodeIndex];
- SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
- MVT::Other, Ops.data(), NumVecs+6);
- Chain = SDValue(VStB, 1);
- ReplaceUses(SDValue(N, 0), Chain);
- return NULL;
- }
+ // Form the QQQQ REG_SEQUENCE.
+ SDValue V[8];
+ for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
+ V[i] = CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, RegVT,
+ N->getOperand(Vec+3));
+ V[i+1] = CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, RegVT,
+ N->getOperand(Vec+3));
+ }
+ if (NumVecs == 3)
+ V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
+
+ SDValue RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
+ V[4], V[5], V[6], V[7]), 0);
+
+ // Store the even D registers.
+ assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
+ Ops.push_back(Reg0); // post-access address offset
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0+Vec*2, dl,
+ RegVT, RegSeq));
+ Ops.push_back(Pred);
+ Ops.push_back(Reg0); // predicate register
+ Ops.push_back(Chain);
+ unsigned Opc = QOpcodes0[OpcodeIndex];
+ SDNode *VStA = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
+ MVT::Other, Ops.data(), NumVecs+6);
+ Chain = SDValue(VStA, 1);
+
+ // Store the odd D registers.
+ Ops[0] = SDValue(VStA, 0); // MemAddr
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ Ops[Vec+3] = CurDAG->getTargetExtractSubreg(ARM::dsub_1+Vec*2, dl,
+ RegVT, RegSeq);
+ Ops[NumVecs+5] = Chain;
+ Opc = QOpcodes1[OpcodeIndex];
+ SDNode *VStB = CurDAG->getMachineNode(Opc, dl, MemAddr.getValueType(),
+ MVT::Other, Ops.data(), NumVecs+6);
+ Chain = SDValue(VStB, 1);
+ ReplaceUses(SDValue(N, 0), Chain);
+ return NULL;
}
SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
@@ -1482,35 +1416,26 @@
unsigned Opc = 0;
if (is64BitVector) {
Opc = DOpcodes[OpcodeIndex];
- if (llvm::ModelWithRegSequence()) {
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
- } else {
- SDValue V2 = N->getOperand(2+3);
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
- }
-
- // Now extract the D registers back out.
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT,
- RegSeq));
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT,
- RegSeq));
- if (NumVecs > 2)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,
- RegSeq));
- if (NumVecs > 3)
- Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,
- RegSeq));
+ SDValue RegSeq;
+ SDValue V0 = N->getOperand(0+3);
+ SDValue V1 = N->getOperand(1+3);
+ if (NumVecs == 2) {
+ RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
} else {
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(N->getOperand(Vec+3));
+ SDValue V2 = N->getOperand(2+3);
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
+ : N->getOperand(3+3);
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
}
+
+ // Now extract the D registers back out.
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_0, dl, VT, RegSeq));
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_1, dl, VT, RegSeq));
+ if (NumVecs > 2)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_2, dl, VT,RegSeq));
+ if (NumVecs > 3)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(ARM::dsub_3, dl, VT,RegSeq));
} else {
// Check if this is loading the even or odd subreg of a Q register.
if (Lane < NumElts) {
@@ -1520,31 +1445,24 @@
Opc = QOpcodes1[OpcodeIndex];
}
- if (llvm::ModelWithRegSequence()) {
- SDValue RegSeq;
- SDValue V0 = N->getOperand(0+3);
- SDValue V1 = N->getOperand(1+3);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
- } else {
- SDValue V2 = N->getOperand(2+3);
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : N->getOperand(3+3);
- RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
- }
-
- // Extract the subregs of the input vector.
- unsigned SubIdx = Even ? ARM::dsub_0 : ARM::dsub_1;
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(SubIdx+Vec*2, dl, RegVT,
- RegSeq));
+ SDValue RegSeq;
+ SDValue V0 = N->getOperand(0+3);
+ SDValue V1 = N->getOperand(1+3);
+ if (NumVecs == 2) {
+ RegSeq = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
} else {
- // Extract the subregs of the input vector.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- Ops.push_back(CurDAG->getTargetExtractSubreg(SubregIdx, dl, RegVT,
- N->getOperand(Vec+3)));
+ SDValue V2 = N->getOperand(2+3);
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
+ : N->getOperand(3+3);
+ RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
}
+
+ // Extract the subregs of the input vector.
+ unsigned SubIdx = Even ? ARM::dsub_0 : ARM::dsub_1;
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ Ops.push_back(CurDAG->getTargetExtractSubreg(SubIdx+Vec*2, dl, RegVT,
+ RegSeq));
}
Ops.push_back(getI32Imm(Lane));
Ops.push_back(Pred);
@@ -1558,73 +1476,54 @@
ResTys.push_back(MVT::Other);
SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(),NumVecs+6);
- if (llvm::ModelWithRegSequence()) {
- // Form a REG_SEQUENCE to force register allocation.
- SDValue RegSeq;
- if (is64BitVector) {
- SDValue V0 = SDValue(VLdLn, 0);
- SDValue V1 = SDValue(VLdLn, 1);
- if (NumVecs == 2) {
- RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
- } else {
- SDValue V2 = SDValue(VLdLn, 2);
- // If it's a vld3, form a quad D-register but discard the last part.
- SDValue V3 = (NumVecs == 3)
- ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
- : SDValue(VLdLn, 3);
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
- }
+ // Form a REG_SEQUENCE to force register allocation.
+ SDValue RegSeq;
+ if (is64BitVector) {
+ SDValue V0 = SDValue(VLdLn, 0);
+ SDValue V1 = SDValue(VLdLn, 1);
+ if (NumVecs == 2) {
+ RegSeq = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
} else {
- // For 128-bit vectors, take the 64-bit results of the load and insert
- // them as subregs into the result.
- SDValue V[8];
- for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
- if (Even) {
- V[i] = SDValue(VLdLn, Vec);
- V[i+1] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
- } else {
- V[i] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
- V[i+1] = SDValue(VLdLn, Vec);
- }
+ SDValue V2 = SDValue(VLdLn, 2);
+ // If it's a vld3, form a quad D-register but discard the last part.
+ SDValue V3 = (NumVecs == 3)
+ ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
+ : SDValue(VLdLn, 3);
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
+ }
+ } else {
+ // For 128-bit vectors, take the 64-bit results of the load and insert
+ // them as subregs into the result.
+ SDValue V[8];
+ for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
+ if (Even) {
+ V[i] = SDValue(VLdLn, Vec);
+ V[i+1] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
+ } else {
+ V[i] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
+ V[i+1] = SDValue(VLdLn, Vec);
}
- if (NumVecs == 3)
- V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
- dl, RegVT), 0);
-
- if (NumVecs == 2)
- RegSeq = SDValue(QuadDRegs(MVT::v4i64, V[0], V[1], V[2], V[3]), 0);
- else
- RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
- V[4], V[5], V[6], V[7]), 0);
}
+ if (NumVecs == 3)
+ V[6] = V[7] = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, RegVT), 0);
- assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
- assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
- unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
- ReplaceUses(SDValue(N, Vec),
- CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, RegSeq));
- ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, NumVecs));
- return NULL;
- }
-
- // For a 64-bit vector load to D registers, nothing more needs to be done.
- if (is64BitVector)
- return VLdLn;
-
- // For 128-bit vectors, take the 64-bit results of the load and insert them
- // as subregs into the result.
- for (unsigned Vec = 0; Vec < NumVecs; ++Vec) {
- SDValue QuadVec = CurDAG->getTargetInsertSubreg(SubregIdx, dl, VT,
- N->getOperand(Vec+3),
- SDValue(VLdLn, Vec));
- ReplaceUses(SDValue(N, Vec), QuadVec);
+ if (NumVecs == 2)
+ RegSeq = SDValue(QuadDRegs(MVT::v4i64, V[0], V[1], V[2], V[3]), 0);
+ else
+ RegSeq = SDValue(OctoDRegs(MVT::v8i64, V[0], V[1], V[2], V[3],
+ V[4], V[5], V[6], V[7]), 0);
}
- Chain = SDValue(VLdLn, NumVecs);
- ReplaceUses(SDValue(N, NumVecs), Chain);
+ assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
+ assert(ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
+ unsigned SubIdx = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
+ for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+ ReplaceUses(SDValue(N, Vec),
+ CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, RegSeq));
+ ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, NumVecs));
return NULL;
}
@@ -2410,9 +2309,3 @@
CodeGenOpt::Level OptLevel) {
return new ARMDAGToDAGISel(TM, OptLevel);
}
-
-/// ModelWithRegSequence - Return true if isel should use REG_SEQUENCE to model
-/// operations involving sub-registers.
-bool llvm::ModelWithRegSequence() {
- return UseRegSeq;
-}
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.cpp Fri Jul 2 04:34:51 2010
@@ -55,13 +55,18 @@
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
cl::desc("Generate tail calls (TEMPORARY OPTION)."),
- cl::init(false));
+ cl::init(true));
static cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
cl::desc("Generate calls via indirect call instructions."),
cl::init(false));
+static cl::opt<bool>
+ARMInterworking("arm-interworking", cl::Hidden,
+ cl::desc("Enable / disable ARM interworking (for debugging only)"),
+ cl::init(true));
+
static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, EVT &ValVT, EVT &LocVT,
CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags,
@@ -104,10 +109,7 @@
}
setOperationAction(ISD::BUILD_VECTOR, VT.getSimpleVT(), Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT.getSimpleVT(), Custom);
- if (llvm::ModelWithRegSequence())
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
- else
- setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, VT.getSimpleVT(), Legal);
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
@@ -403,7 +405,12 @@
// doesn't yet know how to not do that for SjLj.
setExceptionSelectorRegister(ARM::R0);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
- setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
+ // Handle atomics directly for ARMv[67] (except for Thumb1), otherwise
+ // use the default expansion.
+ TargetLowering::LegalizeAction AtomicAction =
+ (Subtarget->hasV7Ops() ||
+ (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())) ? Custom : Expand;
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, AtomicAction);
// If the subtarget does not have extract instructions, sign_extend_inreg
// needs to be expanded. Extract is available in ARM mode on v6 and up,
@@ -1094,7 +1101,7 @@
}
} else if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
- } else {
+ } else if (!IsSibCall) {
assert(VA.isMemLoc());
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
@@ -1109,11 +1116,14 @@
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
- for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
- RegsToPass[i].second, InFlag);
- InFlag = Chain.getValue(1);
- }
+ // Tail call byval lowering might overwrite argument registers, so in the
+ // case of tail call optimization the copies to registers are lowered later.
+ if (!isTailCall)
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
// For tail calls lower the arguments to the 'real' stack slot.
if (isTailCall) {
@@ -1185,7 +1195,7 @@
getTargetMachine().getRelocationModel() != Reloc::Static;
isARMFunc = !Subtarget->isThumb() || isStub;
// ARM call to a local ARM function is predicable.
- isLocalARMFunc = !Subtarget->isThumb() && !isExt;
+ isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking);
// tBX takes a register source operand.
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
unsigned ARMPCLabelIndex = AFI->createConstPoolEntryUId();
@@ -1347,14 +1357,8 @@
// Look for obvious safe cases to perform tail call optimization that do not
// require ABI changes. This is what gcc calls sibcall.
- // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
- // emit a special epilogue.
- // Not sure yet if this is true on ARM.
-//?? if (RegInfo->needsStackRealignment(MF))
-//?? return false;
-
- // Do not sibcall optimize vararg calls unless the call site is not passing any
- // arguments.
+ // Do not sibcall-optimize vararg calls unless the call site passes no
+ // arguments.
if (isVarArg && !Outs.empty())
return false;
@@ -1363,6 +1367,19 @@
if (isCalleeStructRet || isCallerStructRet)
return false;
+ // On Thumb, for the moment, we can only do this to functions defined in this
+ // compilation, or to indirect calls. A Thumb B to an ARM function is not
+ // easily fixed up in the linker, unlike BL.
+ if (Subtarget->isThumb()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ const GlobalValue *GV = G->getGlobal();
+ if (GV->isDeclaration() || GV->isWeakForLinker())
+ return false;
+ } else if (isa<ExternalSymbolSDNode>(Callee)) {
+ return false;
+ }
+ }
+
// If the calling conventions do not match, then we'd better make sure the
// results are returned in the same way as what the caller expects.
if (!CCMatch) {
@@ -1809,8 +1826,7 @@
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget)
- const {
+ const ARMSubtarget *Subtarget) const {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
DebugLoc dl = Op.getDebugLoc();
switch (IntNo) {
@@ -1850,25 +1866,21 @@
}
static SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *Subtarget) {
+ const ARMSubtarget *Subtarget) {
DebugLoc dl = Op.getDebugLoc();
SDValue Op5 = Op.getOperand(5);
- SDValue Res;
unsigned isDeviceBarrier = cast<ConstantSDNode>(Op5)->getZExtValue();
- if (isDeviceBarrier) {
- if (Subtarget->hasV7Ops())
- Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0));
- else
- Res = DAG.getNode(ARMISD::SYNCBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- } else {
- if (Subtarget->hasV7Ops())
- Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
- else
- Res = DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0),
- DAG.getConstant(0, MVT::i32));
- }
- return Res;
+ // v6 and v7 can both handle barriers directly, but they need to be handled
+ // a bit differently. Thumb1 and pre-v6 ARM mode use a libcall instead and
+ // should never get here.
+ unsigned Opc = isDeviceBarrier ? ARMISD::SYNCBARRIER : ARMISD::MEMBARRIER;
+ if (Subtarget->hasV7Ops())
+ return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0));
+ else if (Subtarget->hasV6Ops() && !Subtarget->isThumb1Only())
+ return DAG.getNode(Opc, dl, MVT::Other, Op.getOperand(0),
+ DAG.getConstant(0, MVT::i32));
+ assert(0 && "Unexpected ISD::MEMBARRIER encountered. Should be libcall!");
+ return SDValue();
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
@@ -2447,7 +2459,8 @@
DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
DAG.getConstant(1, MVT::i32));
- return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT,
+ DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
}
// Turn f64->i64 into VMOVRRD.
@@ -2757,76 +2770,152 @@
return Result;
}
-/// isVMOVSplat - Check if the specified splat value corresponds to an immediate
-/// VMOV instruction, and if so, return the constant being splatted.
-static SDValue isVMOVSplat(uint64_t SplatBits, uint64_t SplatUndef,
- unsigned SplatBitSize, SelectionDAG &DAG) {
+/// isNEONModifiedImm - Check if the specified splat value corresponds to a
+/// valid vector constant for a NEON instruction with a "modified immediate"
+/// operand (e.g., VMOV). If so, return either the constant being
+/// splatted or the encoded value, depending on the DoEncode parameter. The
+/// format of the encoded value is: bit12=Op, bits11-8=Cmode,
+/// bits7-0=Immediate.
+static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
+ unsigned SplatBitSize, SelectionDAG &DAG,
+ bool isVMOV, bool DoEncode) {
+ unsigned Op, Cmode, Imm;
+ EVT VT;
+
+ // SplatBitSize is set to the smallest size that splats the vector, so a
+ // zero vector will always have SplatBitSize == 8. However, NEON modified
+ // immediate instructions other than VMOV do not support the 8-bit encoding
+ // of a zero vector, and the default encoding of zero is supposed to be the
+ // 32-bit version.
+ if (SplatBits == 0)
+ SplatBitSize = 32;
+
+ Op = 0;
switch (SplatBitSize) {
case 8:
- // Any 1-byte value is OK.
+ // Any 1-byte value is OK. Op=0, Cmode=1110.
assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
- return DAG.getTargetConstant(SplatBits, MVT::i8);
+ Cmode = 0xe;
+ Imm = SplatBits;
+ VT = MVT::i8;
+ break;
case 16:
// NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
- if ((SplatBits & ~0xff) == 0 ||
- (SplatBits & ~0xff00) == 0)
- return DAG.getTargetConstant(SplatBits, MVT::i16);
- break;
+ VT = MVT::i16;
+ if ((SplatBits & ~0xff) == 0) {
+ // Value = 0x00nn: Op=x, Cmode=100x.
+ Cmode = 0x8;
+ Imm = SplatBits;
+ break;
+ }
+ if ((SplatBits & ~0xff00) == 0) {
+ // Value = 0xnn00: Op=x, Cmode=101x.
+ Cmode = 0xa;
+ Imm = SplatBits >> 8;
+ break;
+ }
+ return SDValue();
case 32:
// NEON's 32-bit VMOV supports splat values where:
// * only one byte is nonzero, or
// * the least significant byte is 0xff and the second byte is nonzero, or
// * the least significant 2 bytes are 0xff and the third is nonzero.
- if ((SplatBits & ~0xff) == 0 ||
- (SplatBits & ~0xff00) == 0 ||
- (SplatBits & ~0xff0000) == 0 ||
- (SplatBits & ~0xff000000) == 0)
- return DAG.getTargetConstant(SplatBits, MVT::i32);
+ VT = MVT::i32;
+ if ((SplatBits & ~0xff) == 0) {
+ // Value = 0x000000nn: Op=x, Cmode=000x.
+ Cmode = 0;
+ Imm = SplatBits;
+ break;
+ }
+ if ((SplatBits & ~0xff00) == 0) {
+ // Value = 0x0000nn00: Op=x, Cmode=001x.
+ Cmode = 0x2;
+ Imm = SplatBits >> 8;
+ break;
+ }
+ if ((SplatBits & ~0xff0000) == 0) {
+ // Value = 0x00nn0000: Op=x, Cmode=010x.
+ Cmode = 0x4;
+ Imm = SplatBits >> 16;
+ break;
+ }
+ if ((SplatBits & ~0xff000000) == 0) {
+ // Value = 0xnn000000: Op=x, Cmode=011x.
+ Cmode = 0x6;
+ Imm = SplatBits >> 24;
+ break;
+ }
if ((SplatBits & ~0xffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xff) == 0xff)
- return DAG.getTargetConstant(SplatBits | 0xff, MVT::i32);
+ ((SplatBits | SplatUndef) & 0xff) == 0xff) {
+ // Value = 0x0000nnff: Op=x, Cmode=1100.
+ Cmode = 0xc;
+ Imm = SplatBits >> 8;
+ SplatBits |= 0xff;
+ break;
+ }
if ((SplatBits & ~0xffffff) == 0 &&
- ((SplatBits | SplatUndef) & 0xffff) == 0xffff)
- return DAG.getTargetConstant(SplatBits | 0xffff, MVT::i32);
+ ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
+ // Value = 0x00nnffff: Op=x, Cmode=1101.
+ Cmode = 0xd;
+ Imm = SplatBits >> 16;
+ SplatBits |= 0xffff;
+ break;
+ }
// Note: there are a few 32-bit splat values (specifically: 00ffff00,
// ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
// VMOV.I32. A (very) minor optimization would be to replicate the value
// and fall through here to test for a valid 64-bit splat. But, then the
// caller would also need to check and handle the change in size.
- break;
+ return SDValue();
case 64: {
// NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
+ if (!isVMOV)
+ return SDValue();
uint64_t BitMask = 0xff;
uint64_t Val = 0;
+ unsigned ImmMask = 1;
+ Imm = 0;
for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
- if (((SplatBits | SplatUndef) & BitMask) == BitMask)
+ if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
Val |= BitMask;
- else if ((SplatBits & BitMask) != 0)
+ Imm |= ImmMask;
+ } else if ((SplatBits & BitMask) != 0) {
return SDValue();
+ }
BitMask <<= 8;
+ ImmMask <<= 1;
}
- return DAG.getTargetConstant(Val, MVT::i64);
+ // Op=1, Cmode=1110.
+ Op = 1;
+ Cmode = 0xe;
+ SplatBits = Val;
+ VT = MVT::i64;
+ break;
}
default:
- llvm_unreachable("unexpected size for isVMOVSplat");
- break;
+ llvm_unreachable("unexpected size for EncodeNEONModImm");
+ return SDValue();
}
- return SDValue();
+ if (DoEncode)
+ return DAG.getTargetConstant((Op << 12) | (Cmode << 8) | Imm, MVT::i32);
+ return DAG.getTargetConstant(SplatBits, VT);
}
-/// getVMOVImm - If this is a build_vector of constants which can be
-/// formed by using a VMOV instruction of the specified element size,
-/// return the constant being splatted. The ByteSize field indicates the
-/// number of bytes of each element [1248].
-SDValue ARM::getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
+
+/// getNEONModImm - If this is a valid vector constant for a NEON instruction
+/// with a "modified immediate" operand (e.g., VMOV) of the specified element
+/// size, return the encoded value for that immediate. The ByteSize field
+/// indicates the number of bytes of each element [1248].
+SDValue ARM::getNEONModImm(SDNode *N, unsigned ByteSize, bool isVMOV,
+ SelectionDAG &DAG) {
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
@@ -2838,8 +2927,8 @@
if (SplatBitSize > ByteSize * 8)
return SDValue();
- return isVMOVSplat(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
- SplatBitSize, DAG);
+ return isNEONModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
+ SplatBitSize, DAG, isVMOV, true);
}
static bool isVEXTMask(const SmallVectorImpl<int> &M, EVT VT,
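For reference, the packed form returned when DoEncode is set follows the layout documented above: bit 12 = Op, bits 11-8 = Cmode, bits 7-0 = Imm8. The following is a minimal standalone sketch (plain C++, not part of the patch; packNEONModImm and encodeSplat32OneByte are invented names) showing that packing for the 32-bit "single nonzero byte" cases:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  // Pack Op (1 bit), Cmode (4 bits) and Imm8 (8 bits) into the 13-bit
  // encoding: bit12=Op, bits11-8=Cmode, bits7-0=Immediate.
  static unsigned packNEONModImm(unsigned Op, unsigned Cmode, unsigned Imm8) {
    return (Op << 12) | (Cmode << 8) | Imm8;
  }

  // Encode a 32-bit splat value with at most one nonzero byte, i.e. the
  // Cmode = 000x/001x/010x/011x rows handled above. Returns false if the
  // value needs one of the other Cmode encodings.
  static bool encodeSplat32OneByte(uint32_t Splat, unsigned &Enc) {
    for (unsigned ByteNum = 0; ByteNum != 4; ++ByteNum) {
      uint32_t Mask = 0xffu << (8 * ByteNum);
      if ((Splat & ~Mask) == 0) {
        unsigned Cmode = ByteNum << 1;            // 0x0, 0x2, 0x4, 0x6
        unsigned Imm8 = (Splat >> (8 * ByteNum)) & 0xff;
        Enc = packNEONModImm(0, Cmode, Imm8);
        return true;
      }
    }
    return false;
  }

  int main() {
    unsigned Enc = 0;
    bool OK = encodeSplat32OneByte(0x00420000u, Enc); // Value = 0x00nn0000
    assert(OK && Enc == 0x442);                       // Op=0, Cmode=0x4, Imm=0x42
    std::printf("0x00420000 -> 0x%x\n", Enc);
    (void)OK;
    return 0;
  }

Running it prints 0x00420000 -> 0x442, matching the Cmode=010x row in the switch above.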
@@ -3079,8 +3168,10 @@
bool HasAnyUndefs;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
if (SplatBitSize <= 64) {
- SDValue Val = isVMOVSplat(SplatBits.getZExtValue(),
- SplatUndef.getZExtValue(), SplatBitSize, DAG);
+ // Check if an immediate VMOV works.
+ SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
+ SplatUndef.getZExtValue(),
+ SplatBitSize, DAG, true, false);
if (Val.getNode())
return BuildSplat(Val, VT, DAG, dl);
}
@@ -4861,7 +4952,7 @@
}
}
if (StringRef("{cc}").equals_lower(Constraint))
- return std::make_pair(0U, ARM::CCRRegisterClass);
+ return std::make_pair(unsigned(ARM::CPSR), ARM::CCRRegisterClass);
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMISelLowering.h Fri Jul 2 04:34:51 2010
@@ -150,11 +150,12 @@
/// Define some predicates that are used for node matching.
namespace ARM {
- /// getVMOVImm - If this is a build_vector of constants which can be
- /// formed by using a VMOV instruction of the specified element size,
- /// return the constant being splatted. The ByteSize field indicates the
- /// number of bytes of each element [1248].
- SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
+ /// getNEONModImm - If this is a valid vector constant for a NEON
+ /// instruction with a "modified immediate" operand (e.g., VMOV) of the
+ /// specified element size, return the encoded value for that immediate.
+ /// The ByteSize field indicates the number of bytes of each element [1248].
+ SDValue getNEONModImm(SDNode *N, unsigned ByteSize, bool isVMOV,
+ SelectionDAG &DAG);
/// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
/// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrInfo.td Fri Jul 2 04:34:51 2010
@@ -1044,15 +1044,15 @@
Pseudo, IIC_Br,
"@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
- def TCRETURNri : AInoP<(outs), (ins tGPR:$dst, variable_ops),
+ def TCRETURNri : AInoP<(outs), (ins tcGPR:$dst, variable_ops),
Pseudo, IIC_Br,
"@TC_RETURN","\t$dst", []>, Requires<[IsDarwin]>;
def TAILJMPd : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
- IIC_Br, "b\t$dst @ TAILCALL",
+ IIC_Br, "b.w\t$dst @ TAILCALL",
[]>, Requires<[IsDarwin]>;
- def TAILJMPr : AXI<(outs), (ins tGPR:$dst, variable_ops),
+ def TAILJMPr : AXI<(outs), (ins tcGPR:$dst, variable_ops),
BrMiscFrm, IIC_Br, "bx\t$dst @ TAILCALL",
[]>, Requires<[IsDarwin]> {
let Inst{7-4} = 0b0001;
@@ -1084,7 +1084,7 @@
"@TC_RETURN","\t$dst", []>, Requires<[IsNotDarwin]>;
def TAILJMPdND : ABXI<0b1010, (outs), (ins brtarget:$dst, variable_ops),
- IIC_Br, "b\t$dst @ TAILCALL",
+ IIC_Br, "b.w\t$dst @ TAILCALL",
[]>, Requires<[IsNotDarwin]>;
def TAILJMPrND : AXI<(outs), (ins tGPR:$dst, variable_ops),
@@ -1476,6 +1476,14 @@
let Inst{25} = 0;
}
+// A version for the smaller set of tail call registers.
+let neverHasSideEffects = 1 in
+def MOVr_TC : AsI1<0b1101, (outs tcGPR:$dst), (ins tcGPR:$src), DPFrm,
+ IIC_iMOVr, "mov", "\t$dst, $src", []>, UnaryDP {
+ let Inst{11-4} = 0b00000000;
+ let Inst{25} = 0;
+}
+
def MOVs : AsI1<0b1101, (outs GPR:$dst), (ins so_reg:$src),
DPSoRegFrm, IIC_iMOVsr,
"mov", "\t$dst, $src", [(set GPR:$dst, so_reg:$src)]>, UnaryDP {
@@ -2700,8 +2708,8 @@
// TODO: add,sub,and, 3-instr forms?
// Tail calls
-def : ARMPat<(ARMtcret tGPR:$dst),
- (TCRETURNri tGPR:$dst)>, Requires<[IsDarwin]>;
+def : ARMPat<(ARMtcret tcGPR:$dst),
+ (TCRETURNri tcGPR:$dst)>, Requires<[IsDarwin]>;
def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
(TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
@@ -2709,8 +2717,8 @@
def : ARMPat<(ARMtcret (i32 texternalsym:$dst)),
(TCRETURNdi texternalsym:$dst)>, Requires<[IsDarwin]>;
-def : ARMPat<(ARMtcret tGPR:$dst),
- (TCRETURNriND tGPR:$dst)>, Requires<[IsNotDarwin]>;
+def : ARMPat<(ARMtcret tcGPR:$dst),
+ (TCRETURNriND tcGPR:$dst)>, Requires<[IsNotDarwin]>;
def : ARMPat<(ARMtcret (i32 tglobaladdr:$dst)),
(TCRETURNdiND texternalsym:$dst)>, Requires<[IsNotDarwin]>;
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMInstrNEON.td Fri Jul 2 04:34:51 2010
@@ -98,17 +98,8 @@
// NEON operand definitions
//===----------------------------------------------------------------------===//
-def h8imm : Operand<i8> {
- let PrintMethod = "printHex8ImmOperand";
-}
-def h16imm : Operand<i16> {
- let PrintMethod = "printHex16ImmOperand";
-}
-def h32imm : Operand<i32> {
- let PrintMethod = "printHex32ImmOperand";
-}
-def h64imm : Operand<i64> {
- let PrintMethod = "printHex64ImmOperand";
+def nModImm : Operand<i32> {
+ let PrintMethod = "printNEONModImmOperand";
}
//===----------------------------------------------------------------------===//
@@ -2829,34 +2820,34 @@
// VMOV_get_imm8 xform function: convert build_vector to VMOV.i8 imm.
def VMOV_get_imm8 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 1, *CurDAG);
+ return ARM::getNEONModImm(N, 1, true, *CurDAG);
}]>;
def vmovImm8 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
+ return ARM::getNEONModImm(N, 1, true, *CurDAG).getNode() != 0;
}], VMOV_get_imm8>;
// VMOV_get_imm16 xform function: convert build_vector to VMOV.i16 imm.
def VMOV_get_imm16 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 2, *CurDAG);
+ return ARM::getNEONModImm(N, 2, true, *CurDAG);
}]>;
def vmovImm16 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
+ return ARM::getNEONModImm(N, 2, true, *CurDAG).getNode() != 0;
}], VMOV_get_imm16>;
// VMOV_get_imm32 xform function: convert build_vector to VMOV.i32 imm.
def VMOV_get_imm32 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 4, *CurDAG);
+ return ARM::getNEONModImm(N, 4, true, *CurDAG);
}]>;
def vmovImm32 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
+ return ARM::getNEONModImm(N, 4, true, *CurDAG).getNode() != 0;
}], VMOV_get_imm32>;
// VMOV_get_imm64 xform function: convert build_vector to VMOV.i64 imm.
def VMOV_get_imm64 : SDNodeXForm<build_vector, [{
- return ARM::getVMOVImm(N, 8, *CurDAG);
+ return ARM::getNEONModImm(N, 8, true, *CurDAG);
}]>;
def vmovImm64 : PatLeaf<(build_vector), [{
- return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
+ return ARM::getNEONModImm(N, 8, true, *CurDAG).getNode() != 0;
}], VMOV_get_imm64>;
// Note: Some of the cmode bits in the following VMOV instructions need to
@@ -2864,38 +2855,38 @@
let isReMaterializable = 1 in {
def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
- (ins h8imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i8", "$dst, $SIMM", "",
[(set DPR:$dst, (v8i8 vmovImm8:$SIMM))]>;
def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
- (ins h8imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i8", "$dst, $SIMM", "",
[(set QPR:$dst, (v16i8 vmovImm8:$SIMM))]>;
-def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,?}, 0, 0, {?}, 1, (outs DPR:$dst),
- (ins h16imm:$SIMM), IIC_VMOVImm,
+def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i16", "$dst, $SIMM", "",
[(set DPR:$dst, (v4i16 vmovImm16:$SIMM))]>;
-def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,?}, 0, 1, {?}, 1, (outs QPR:$dst),
- (ins h16imm:$SIMM), IIC_VMOVImm,
+def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i16", "$dst, $SIMM", "",
[(set QPR:$dst, (v8i16 vmovImm16:$SIMM))]>;
-def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, {?}, 1, (outs DPR:$dst),
- (ins h32imm:$SIMM), IIC_VMOVImm,
+def VMOVv2i32 : N1ModImm<1, 0b000, {0,?,?,0}, 0, 0, 0, 1, (outs DPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i32", "$dst, $SIMM", "",
[(set DPR:$dst, (v2i32 vmovImm32:$SIMM))]>;
-def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, {?}, 1, (outs QPR:$dst),
- (ins h32imm:$SIMM), IIC_VMOVImm,
+def VMOVv4i32 : N1ModImm<1, 0b000, {0,?,?,0}, 0, 1, 0, 1, (outs QPR:$dst),
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i32", "$dst, $SIMM", "",
[(set QPR:$dst, (v4i32 vmovImm32:$SIMM))]>;
def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
- (ins h64imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i64", "$dst, $SIMM", "",
[(set DPR:$dst, (v1i64 vmovImm64:$SIMM))]>;
def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
- (ins h64imm:$SIMM), IIC_VMOVImm,
+ (ins nModImm:$SIMM), IIC_VMOVImm,
"vmov", "i64", "$dst, $SIMM", "",
[(set QPR:$dst, (v2i64 vmovImm64:$SIMM))]>;
} // isReMaterializable
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMLoadStoreOptimizer.cpp Fri Jul 2 04:34:51 2010
@@ -1024,10 +1024,6 @@
RS->enterBasicBlock(&MBB);
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
- if (MBBI->isDebugValue()) {
- ++MBBI;
- continue;
- }
if (FixInvalidRegPairOp(MBB, MBBI))
continue;
@@ -1094,7 +1090,12 @@
}
}
- if (Advance) {
+ if (MBBI->isDebugValue()) {
+ ++MBBI;
+ if (MBBI == E)
+ // Reached the end of the block; try merging the memory instructions.
+ TryMerge = true;
+ } else if (Advance) {
++Position;
++MBBI;
if (MBBI == E)
@@ -1490,7 +1491,8 @@
} else {
// This is the new location for the loads / stores.
MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
- while (InsertPos != MBB->end() && MemOps.count(InsertPos))
+ while (InsertPos != MBB->end()
+ && (MemOps.count(InsertPos) || InsertPos->isDebugValue()))
++InsertPos;
// If we are moving a pair of loads / stores, see if it makes sense
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMRegisterInfo.td Fri Jul 2 04:34:51 2010
@@ -349,6 +349,83 @@
}];
}
+// For tail calls, we can't use callee-saved registers, as they are restored
+// to the saved value before the tail call, which would clobber a call address.
+// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of
+// this class and the preceding one(!) This is what we want.
+def tcGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R9, R12]> {
+ let MethodProtos = [{
+ iterator allocation_order_begin(const MachineFunction &MF) const;
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ // R9 is available.
+ static const unsigned ARM_GPR_R9_TC[] = {
+ ARM::R0, ARM::R1, ARM::R2, ARM::R3,
+ ARM::R9, ARM::R12 };
+ // R9 is not available.
+ static const unsigned ARM_GPR_NOR9_TC[] = {
+ ARM::R0, ARM::R1, ARM::R2, ARM::R3,
+ ARM::R12 };
+
+ // For Thumb1 mode, we don't want to allocate hi regs at all, as we
+ // don't know how to spill them. If we make our prologue/epilogue code
+ // smarter at some point, we can go back to using the above allocation
+ // orders for the Thumb1 instructions that know how to use hi regs.
+ static const unsigned THUMB_GPR_AO_TC[] = {
+ ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+
+ tcGPRClass::iterator
+ tcGPRClass::allocation_order_begin(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
+ if (Subtarget.isThumb1Only())
+ return THUMB_GPR_AO_TC;
+ if (Subtarget.isTargetDarwin()) {
+ if (Subtarget.isR9Reserved())
+ return ARM_GPR_NOR9_TC;
+ else
+ return ARM_GPR_R9_TC;
+ } else {
+ if (Subtarget.isR9Reserved())
+ return ARM_GPR_NOR9_TC;
+ else if (Subtarget.isThumb())
+ return ARM_GPR_R9_TC;
+ else
+ return ARM_GPR_R9_TC;
+ }
+ }
+
+ tcGPRClass::iterator
+ tcGPRClass::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
+ GPRClass::iterator I;
+
+ if (Subtarget.isThumb1Only()) {
+ I = THUMB_GPR_AO_TC + (sizeof(THUMB_GPR_AO_TC)/sizeof(unsigned));
+ return I;
+ }
+
+ if (Subtarget.isTargetDarwin()) {
+ if (Subtarget.isR9Reserved())
+ I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
+ else
+ I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
+ } else {
+ if (Subtarget.isR9Reserved())
+ I = ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
+ else if (Subtarget.isThumb())
+ I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
+ else
+ I = ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned));
+ }
+ return I;
+ }
+ }];
+}
+
+
// Scalar single precision floating point register class..
def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
S9, S10, S11, S12, S13, S14, S15, S16, S17, S18, S19, S20, S21, S22,
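The allocation-order bodies above reduce to a three-way choice: Thumb1 gets only R0-R3, targets with R9 reserved drop R9, and everything else gets the full list (the Thumb/non-Thumb split in the non-Darwin branch currently selects the same array either way). A compact sketch of that effective logic, using an invented SubtargetFlags struct rather than the real ARMSubtarget:

  #include <cstdio>
  #include <utility>

  // Stand-in for the subtarget queries used in the MethodBodies above.
  struct SubtargetFlags {
    bool Thumb1Only;
    bool R9Reserved;
  };

  // Illustrative register numbers for R0-R3, R9, R12.
  static const unsigned TC_Full[]   = { 0, 1, 2, 3, 9, 12 };
  static const unsigned TC_NoR9[]   = { 0, 1, 2, 3, 12 };
  static const unsigned TC_Thumb1[] = { 0, 1, 2, 3 };

  // Return the tail-call allocation order as a [begin, end) range.
  static std::pair<const unsigned *, const unsigned *>
  tcAllocationOrder(const SubtargetFlags &ST) {
    if (ST.Thumb1Only)
      return std::make_pair(TC_Thumb1, TC_Thumb1 + 4);
    if (ST.R9Reserved)
      return std::make_pair(TC_NoR9, TC_NoR9 + 5);
    return std::make_pair(TC_Full, TC_Full + 6);
  }

  int main() {
    SubtargetFlags ST = { false, true };   // e.g. a target with R9 reserved
    std::pair<const unsigned *, const unsigned *> Order = tcAllocationOrder(ST);
    for (const unsigned *I = Order.first; I != Order.second; ++I)
      std::printf("r%u ", *I);
    std::printf("\n");                     // prints: r0 r1 r2 r3 r12
    return 0;
  }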
Modified: llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/ARMTargetMachine.cpp Fri Jul 2 04:34:51 2010
@@ -27,6 +27,11 @@
cl::desc("Form IT blocks early before register allocation"),
cl::init(false));
+static cl::opt<bool>
+EarlyIfConvert("arm-early-if-convert", cl::Hidden,
+ cl::desc("Run if-conversion before post-ra scheduling"),
+ cl::init(false));
+
static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
Triple TheTriple(TT);
switch (TheTriple.getOS()) {
@@ -125,13 +130,17 @@
// proper scheduling.
PM.add(createARMExpandPseudoPass());
+ if (EarlyIfConvert && OptLevel != CodeGenOpt::None) {
+ if (!Subtarget.isThumb1Only())
+ PM.add(createIfConverterPass());
+ }
+
return true;
}
bool ARMBaseTargetMachine::addPreEmitPass(PassManagerBase &PM,
CodeGenOpt::Level OptLevel) {
- // FIXME: temporarily disabling load / store optimization pass for Thumb1.
- if (OptLevel != CodeGenOpt::None) {
+ if (!EarlyIfConvert && OptLevel != CodeGenOpt::None) {
if (!Subtarget.isThumb1Only())
PM.add(createIfConverterPass());
}
Modified: llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp Fri Jul 2 04:34:51 2010
@@ -175,23 +175,8 @@
raw_ostream &O);
void printVFPf64ImmOperand(const MachineInstr *MI, int OpNum,
raw_ostream &O);
-
- void printHex8ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xff);
- }
- void printHex16ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffff);
- }
- void printHex32ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffffffff);
- }
- void printHex64ImmOperand(const MachineInstr *MI, int OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm());
- }
+ void printNEONModImmOperand(const MachineInstr *MI, int OpNum,
+ raw_ostream &O);
virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant, const char *ExtraCode,
@@ -1039,6 +1024,40 @@
}
}
+void ARMAsmPrinter::printNEONModImmOperand(const MachineInstr *MI, int OpNum,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNum).getImm();
+ unsigned OpCmode = (Imm >> 8) & 0x1f;
+ unsigned Imm8 = Imm & 0xff;
+ uint64_t Val = 0;
+
+ if (OpCmode == 0xe) {
+ // 8-bit vector elements
+ Val = Imm8;
+ } else if ((OpCmode & 0xc) == 0x8) {
+ // 16-bit vector elements
+ unsigned ByteNum = (OpCmode & 0x6) >> 1;
+ Val = Imm8 << (8 * ByteNum);
+ } else if ((OpCmode & 0x8) == 0) {
+ // 32-bit vector elements, zero with one byte set
+ unsigned ByteNum = (OpCmode & 0x6) >> 1;
+ Val = Imm8 << (8 * ByteNum);
+ } else if ((OpCmode & 0xe) == 0xc) {
+ // 32-bit vector elements, one byte with low bits set
+ unsigned ByteNum = 1 + (OpCmode & 0x1);
+ Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
+ } else if (OpCmode == 0x1e) {
+ // 64-bit vector elements
+ for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
+ if ((Imm >> ByteNum) & 1)
+ Val |= (uint64_t)0xff << (8 * ByteNum);
+ }
+ } else {
+ assert(false && "Unsupported NEON immediate");
+ }
+ O << "#0x" << utohexstr(Val);
+}
+
bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
unsigned AsmVariant, const char *ExtraCode,
raw_ostream &O) {
Modified: llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp Fri Jul 2 04:34:51 2010
@@ -779,22 +779,36 @@
O << '#' << MI->getOperand(OpNum).getImm();
}
-void ARMInstPrinter::printHex8ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xff);
-}
-
-void ARMInstPrinter::printHex16ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffff);
-}
-
-void ARMInstPrinter::printHex32ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm() & 0xffffffff);
-}
+void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNum).getImm();
+ unsigned OpCmode = (Imm >> 8) & 0x1f;
+ unsigned Imm8 = Imm & 0xff;
+ uint64_t Val = 0;
-void ARMInstPrinter::printHex64ImmOperand(const MCInst *MI, unsigned OpNum,
- raw_ostream &O) {
- O << "#0x" << utohexstr(MI->getOperand(OpNum).getImm());
+ if (OpCmode == 0xe) {
+ // 8-bit vector elements
+ Val = Imm8;
+ } else if ((OpCmode & 0xc) == 0x8) {
+ // 16-bit vector elements
+ unsigned ByteNum = (OpCmode & 0x6) >> 1;
+ Val = Imm8 << (8 * ByteNum);
+ } else if ((OpCmode & 0x8) == 0) {
+ // 32-bit vector elements, zero with one byte set
+ unsigned ByteNum = (OpCmode & 0x6) >> 1;
+ Val = Imm8 << (8 * ByteNum);
+ } else if ((OpCmode & 0xe) == 0xc) {
+ // 32-bit vector elements, one byte with low bits set
+ unsigned ByteNum = 1 + (OpCmode & 0x1);
+ Val = (Imm8 << (8 * ByteNum)) | (0xffff >> (8 * (2 - ByteNum)));
+ } else if (OpCmode == 0x1e) {
+ // 64-bit vector elements
+ for (unsigned ByteNum = 0; ByteNum < 8; ++ByteNum) {
+ if ((Imm >> ByteNum) & 1)
+ Val |= (uint64_t)0xff << (8 * ByteNum);
+ }
+ } else {
+ assert(false && "Unsupported NEON immediate");
+ }
+ O << "#0x" << utohexstr(Val);
}
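Both printers expand the same 13-bit encoding back into the value of a single vector element. For completeness, here is a self-contained sketch of that expansion (illustrative C++ only; expandNEONModImm is an invented name, and the Cmode=110x case counts the byte position from 1, consistent with the encoder earlier in the patch):

  #include <cstdint>
  #include <cstdio>

  // Expand a 13-bit NEON modified-immediate encoding (bit12=Op,
  // bits11-8=Cmode, bits7-0=Imm8) to the element value it represents,
  // mirroring printNEONModImmOperand above.
  static uint64_t expandNEONModImm(unsigned Enc) {
    unsigned OpCmode = (Enc >> 8) & 0x1f;  // Op and Cmode as one 5-bit field
    unsigned Imm8 = Enc & 0xff;
    uint64_t Val = 0;
    if (OpCmode == 0xe) {                  // 8-bit elements
      Val = Imm8;
    } else if ((OpCmode & 0xc) == 0x8) {   // 16-bit, one nonzero byte
      Val = (uint64_t)Imm8 << (8 * ((OpCmode & 0x6) >> 1));
    } else if ((OpCmode & 0x8) == 0) {     // 32-bit, one nonzero byte
      Val = (uint64_t)Imm8 << (8 * ((OpCmode & 0x6) >> 1));
    } else if ((OpCmode & 0xe) == 0xc) {   // 32-bit, low bytes set to 0xff
      unsigned ByteNum = 1 + (OpCmode & 0x1);
      Val = ((uint64_t)Imm8 << (8 * ByteNum)) | (0xffffu >> (8 * (2 - ByteNum)));
    } else if (OpCmode == 0x1e) {          // 64-bit, each byte 0 or 0xff
      for (unsigned ByteNum = 0; ByteNum != 8; ++ByteNum)
        if ((Imm8 >> ByteNum) & 1)
          Val |= (uint64_t)0xff << (8 * ByteNum);
    }
    return Val;
  }

  int main() {
    // A few encodings and the values they expand to.
    std::printf("0x442  -> 0x%llx\n",
                (unsigned long long)expandNEONModImm(0x442));   // 0x420000
    std::printf("0xc42  -> 0x%llx\n",
                (unsigned long long)expandNEONModImm(0xc42));   // 0x42ff
    std::printf("0x1e81 -> 0x%llx\n",
                (unsigned long long)expandNEONModImm(0x1e81));  // 0xff000000000000ff
    return 0;
  }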
Modified: llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h Fri Jul 2 04:34:51 2010
@@ -104,10 +104,7 @@
void printNoHashImmediate(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVFPf32ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex8ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex16ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex32ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
- void printHex64ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+ void printNEONModImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
// FIXME: Implement.
Modified: llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ARMDisassemblerCore.cpp Fri Jul 2 04:34:51 2010
@@ -765,7 +765,7 @@
|| Opcode == ARM::SMC || Opcode == ARM::SVC) &&
"Unexpected Opcode");
- assert(NumOps >= 1 && OpInfo[0].RegClass == 0 && "Reg operand expected");
+ assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Reg operand expected");
int Imm32 = 0;
if (Opcode == ARM::SMC) {
@@ -1106,7 +1106,7 @@
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
(OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+2].RegClass == 0) &&
+ (OpInfo[OpIdx+2].RegClass < 0) &&
"Expect 3 reg operands");
// Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
@@ -1201,7 +1201,7 @@
return false;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+1].RegClass == 0) &&
+ (OpInfo[OpIdx+1].RegClass < 0) &&
"Expect 1 reg operand followed by 1 imm operand");
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
@@ -1323,7 +1323,7 @@
return false;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+1].RegClass == 0) &&
+ (OpInfo[OpIdx+1].RegClass < 0) &&
"Expect 1 reg operand followed by 1 imm operand");
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
@@ -1494,7 +1494,7 @@
// If there is still an operand info left which is an immediate operand, add
// an additional imm5 LSL/ASR operand.
- if (ThreeReg && OpInfo[OpIdx].RegClass == 0
+ if (ThreeReg && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Extract the 5-bit immediate field Inst{11-7}.
unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
@@ -1540,7 +1540,7 @@
// If there is still an operand info left which is an immediate operand, add
// an additional rotate immediate operand.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Extract the 2-bit rotate field Inst{11-10}.
unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
@@ -1725,7 +1725,7 @@
"Tied to operand expected");
MI.addOperand(MI.getOperand(0));
- assert(OpInfo[2].RegClass == 0 && !OpInfo[2].isPredicate() &&
+ assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
!OpInfo[2].isOptionalDef() && "Imm operand expected");
MI.addOperand(MCOperand::CreateImm(fbits));
@@ -1984,7 +1984,7 @@
++OpIdx;
// Extract/decode the f64/f32 immediate.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// The asm syntax specifies the before-expanded <imm>.
// Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
@@ -2077,42 +2077,12 @@
// imm3 = Inst{18-16}, imm4 = Inst{3-0}
// Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
+ unsigned char op = (insn >> 5) & 1;
unsigned char cmode = (insn >> 8) & 0xF;
unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
((insn >> 16) & 7) << 4 |
(insn & 0xF);
- uint64_t Imm64 = 0;
-
- switch (esize) {
- case ESize8:
- Imm64 = Imm8;
- break;
- case ESize16:
- Imm64 = Imm8 << 8*(cmode >> 1 & 1);
- break;
- case ESize32: {
- if (cmode == 12)
- Imm64 = (Imm8 << 8) | 0xFF;
- else if (cmode == 13)
- Imm64 = (Imm8 << 16) | 0xFFFF;
- else {
- // Imm8 to be shifted left by how many bytes...
- Imm64 = Imm8 << 8*(cmode >> 1 & 3);
- }
- break;
- }
- case ESize64: {
- for (unsigned i = 0; i < 8; ++i)
- if ((Imm8 >> i) & 1)
- Imm64 |= (uint64_t)0xFF << 8*i;
- break;
- }
- default:
- assert(0 && "Unreachable code!");
- return 0;
- }
-
- return Imm64;
+ return (op << 12) | (cmode << 8) | Imm8;
}
// A8.6.339 VMUL, VMULL (by scalar)
@@ -2303,7 +2273,7 @@
}
assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
- OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
+ OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
Rn)));
MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
@@ -2329,7 +2299,7 @@
}
// Handle possible lane index.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
++OpIdx;
@@ -2355,7 +2325,7 @@
}
assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
- OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
+ OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
Rn)));
MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
@@ -2374,7 +2344,7 @@
}
// Handle possible lane index.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
++OpIdx;
@@ -2438,7 +2408,7 @@
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[1].RegClass == 0) &&
+ (OpInfo[1].RegClass < 0) &&
"Expect 1 reg operand followed by 1 imm operand");
// Qd/Dd = Inst{22:15-12} => NEON Rd
@@ -2552,7 +2522,7 @@
}
// Add the imm operand, if required.
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
unsigned imm = 0xFFFFFFFF;
@@ -2632,7 +2602,7 @@
decodeNEONRm(insn))));
++OpIdx;
- assert(OpInfo[OpIdx].RegClass == 0 && "Imm operand expected");
+ assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
// Add the imm operand.
@@ -2762,7 +2732,7 @@
getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
++OpIdx;
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Add the imm operand.
unsigned Imm = 0;
@@ -2887,7 +2857,7 @@
assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
OpInfo[0].RegClass == ARM::GPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
- OpInfo[2].RegClass == 0 &&
+ OpInfo[2].RegClass < 0 &&
"Expect >= 3 operands with one dst operand");
ElemSize esize =
@@ -2923,7 +2893,7 @@
OpInfo[1].RegClass == ARM::DPRRegClassID &&
TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
OpInfo[2].RegClass == ARM::GPRRegClassID &&
- OpInfo[3].RegClass == 0 &&
+ OpInfo[3].RegClass < 0 &&
"Expect >= 3 operands with one dst operand");
ElemSize esize =
@@ -3233,7 +3203,8 @@
// a pair of TargetOperandInfos with isPredicate() property.
if (NumOpsRemaining >= 2 &&
OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
- OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
+ OpInfo[Idx].RegClass < 0 &&
+ OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
{
// If we are inside an IT block, get the IT condition bits maintained via
// ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
@@ -3265,7 +3236,8 @@
// a pair of TargetOperandInfos with isPredicate() property.
if (NumOpsRemaining >= 2 &&
OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
- OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
+ OpInfo[Idx].RegClass < 0 &&
+ OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
{
// If we are inside an IT block, get the IT condition bits maintained via
// ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
Modified: llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Disassembler/ThumbDisassemblerCore.h Fri Jul 2 04:34:51 2010
@@ -395,7 +395,7 @@
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::tGPRRegClassID,
getT1tRm(insn))));
} else {
- assert(OpInfo[OpIdx].RegClass == 0 &&
+ assert(OpInfo[OpIdx].RegClass < 0 &&
!OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
MI.addOperand(MCOperand::CreateImm(UseRt ? getT1Imm8(insn)
@@ -531,7 +531,7 @@
if (!OpInfo) return false;
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
- (OpInfo[1].RegClass == 0 &&
+ (OpInfo[1].RegClass < 0 &&
!OpInfo[1].isPredicate() &&
!OpInfo[1].isOptionalDef())
&& "Invalid arguments");
@@ -598,7 +598,7 @@
assert(OpIdx < NumOps && "More operands expected");
- if (OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate() &&
+ if (OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate() &&
!OpInfo[OpIdx].isOptionalDef()) {
MI.addOperand(MCOperand::CreateImm(Imm5 ? getT1Imm5(insn) : 0));
@@ -632,7 +632,7 @@
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::tGPRRegClassID &&
OpInfo[1].RegClass == ARM::GPRRegClassID &&
- (OpInfo[2].RegClass == 0 &&
+ (OpInfo[2].RegClass < 0 &&
!OpInfo[2].isPredicate() &&
!OpInfo[2].isOptionalDef())
&& "Invalid arguments");
@@ -658,7 +658,7 @@
if (!OpInfo) return false;
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
- (OpInfo[1].RegClass == 0 &&
+ (OpInfo[1].RegClass < 0 &&
!OpInfo[1].isPredicate() &&
!OpInfo[1].isOptionalDef())
&& "Invalid arguments");
@@ -685,7 +685,7 @@
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::tGPRRegClassID &&
OpInfo[1].RegClass == ARM::GPRRegClassID &&
- (OpInfo[2].RegClass == 0 &&
+ (OpInfo[2].RegClass < 0 &&
!OpInfo[2].isPredicate() &&
!OpInfo[2].isOptionalDef())
&& "Invalid arguments");
@@ -761,7 +761,7 @@
// Predicate operands are handled elsewhere.
if (NumOps == 2 &&
OpInfo[0].isPredicate() && OpInfo[1].isPredicate() &&
- OpInfo[0].RegClass == 0 && OpInfo[1].RegClass == ARM::CCRRegClassID) {
+ OpInfo[0].RegClass < 0 && OpInfo[1].RegClass == ARM::CCRRegClassID) {
return true;
}
@@ -808,7 +808,7 @@
}
assert(NumOps >= 2 && OpInfo[0].RegClass == ARM::tGPRRegClassID &&
- (OpInfo[1].RegClass==0 || OpInfo[1].RegClass==ARM::tGPRRegClassID)
+ (OpInfo[1].RegClass < 0 || OpInfo[1].RegClass==ARM::tGPRRegClassID)
&& "Expect >=2 operands");
// Add the destination operand.
@@ -913,7 +913,7 @@
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
- assert(NumOps == 3 && OpInfo[0].RegClass == 0 &&
+ assert(NumOps == 3 && OpInfo[0].RegClass < 0 &&
OpInfo[1].isPredicate() && OpInfo[2].RegClass == ARM::CCRRegClassID
&& "Exactly 3 operands expected");
@@ -939,7 +939,7 @@
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
if (!OpInfo) return false;
- assert(NumOps == 1 && OpInfo[0].RegClass == 0 && "1 imm operand expected");
+ assert(NumOps == 1 && OpInfo[0].RegClass < 0 && "1 imm operand expected");
unsigned Imm11 = getT1Imm11(insn);
@@ -1239,7 +1239,7 @@
&& OpInfo[0].RegClass == ARM::GPRRegClassID
&& OpInfo[1].RegClass == ARM::GPRRegClassID
&& OpInfo[2].RegClass == ARM::GPRRegClassID
- && OpInfo[3].RegClass == 0
+ && OpInfo[3].RegClass < 0
&& "Expect >= 4 operands and first 3 as reg operands");
// Add the <Rt> <Rt2> operands.
@@ -1322,8 +1322,8 @@
assert(NumOps == 4
&& OpInfo[0].RegClass == ARM::GPRRegClassID
&& OpInfo[1].RegClass == ARM::GPRRegClassID
- && OpInfo[2].RegClass == 0
- && OpInfo[3].RegClass == 0
+ && OpInfo[2].RegClass < 0
+ && OpInfo[3].RegClass < 0
&& "Exactlt 4 operands expect and first two as reg operands");
// Only need to populate the src reg operand.
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
@@ -1375,7 +1375,7 @@
if (NumOps == OpIdx)
return true;
- if (OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ if (OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()) {
if (Thumb2ShiftOpcode(Opcode))
@@ -1440,7 +1440,7 @@
}
// The modified immediate operand should come next.
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0 &&
+ assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0 &&
!OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
@@ -1555,7 +1555,7 @@
++OpIdx;
}
- assert(OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ assert(OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
@@ -1772,7 +1772,7 @@
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
decodeRm(insn))));
} else {
- assert(OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ assert(OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
int Offset = 0;
@@ -1792,7 +1792,7 @@
}
++OpIdx;
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0 &&
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0 &&
!OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Fills in the shift amount for t2PLDs, t2PLDWs, t2PLIs.
MI.addOperand(MCOperand::CreateImm(slice(insn, 5, 4)));
@@ -1818,7 +1818,7 @@
assert(NumOps >= 2 &&
OpInfo[0].RegClass == ARM::GPRRegClassID &&
- OpInfo[1].RegClass == 0 &&
+ OpInfo[1].RegClass < 0 &&
"Expect >= 2 operands, first as reg, and second as imm operand");
// Build the register operand, followed by the (+/-)imm12 immediate.
@@ -1930,7 +1930,7 @@
++OpIdx;
}
- assert(OpInfo[OpIdx].RegClass == 0 && !OpInfo[OpIdx].isPredicate()
+ assert(OpInfo[OpIdx].RegClass < 0 && !OpInfo[OpIdx].isPredicate()
&& !OpInfo[OpIdx].isOptionalDef()
&& "Pure imm operand expected");
@@ -1981,7 +1981,7 @@
decodeRm(insn))));
++OpIdx;
- if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
+ if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Add the rotation amount immediate.
MI.addOperand(MCOperand::CreateImm(decodeRotate(insn)));
Modified: llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/NEONPreAllocPass.cpp Fri Jul 2 04:34:51 2010
@@ -448,8 +448,7 @@
assert(DefMI->isExtractSubreg());
MO.setReg(LastSrcReg);
MO.setSubReg(SubIds[R]);
- if (R != 0)
- MO.setIsKill(false);
+ MO.setIsKill(false);
// Delete the EXTRACT_SUBREG if its result is now dead.
if (MRI->use_empty(OldReg))
DefMI->eraseFromParent();
@@ -467,8 +466,7 @@
unsigned FirstOpnd, NumRegs, Offset, Stride;
if (!isNEONMultiRegOp(MI->getOpcode(), FirstOpnd, NumRegs, Offset, Stride))
continue;
- if (llvm::ModelWithRegSequence() &&
- FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
+ if (FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
continue;
MachineBasicBlock::iterator NextI = llvm::next(MBBI);
Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb1InstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -39,8 +39,8 @@
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
DebugLoc DL) const {
- if (DestRC == ARM::GPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
+ if (DestRC == ARM::GPRRegisterClass || DestRC == ARM::tcGPRRegisterClass) {
+ if (SrcRC == ARM::GPRRegisterClass || SrcRC == ARM::tcGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVgpr2gpr), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
@@ -48,7 +48,7 @@
return true;
}
} else if (DestRC == ARM::tGPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
+ if (SrcRC == ARM::GPRRegisterClass || SrcRC == ARM::tcGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVgpr2tgpr), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2ITBlockPass.cpp Fri Jul 2 04:34:51 2010
@@ -61,15 +61,7 @@
unsigned Opc = MI->getOpcode();
if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
return ARMCC::AL;
-
- int PIdx = MI->findFirstPredOperandIdx();
- if (PIdx == -1) {
- PredReg = 0;
- return ARMCC::AL;
- }
-
- PredReg = MI->getOperand(PIdx+1).getReg();
- return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
+ return llvm::getInstrPredicate(MI, PredReg);
}
bool
@@ -238,19 +230,6 @@
MBBI = E;
}
} while (MBBI != E);
-
- // Insert a new block for consecutive predicated instructions.
- MachineFunction *MF = MBB->getParent();
- MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB->getBasicBlock());
- MachineFunction::iterator Pos = MBB;
- MF->insert(++Pos, NewMBB);
-
- // Move all the successors of this block to the specified block.
- NewMBB->transferSuccessors(MBB);
-
- // Add an edge from CurMBB to NewMBB for the fall-through.
- MBB->addSuccessor(NewMBB);
- NewMBB->splice(NewMBB->end(), MBB, ++MBBI, MBB->end());
return true;
}
Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -42,8 +42,8 @@
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
DebugLoc DL) const {
- if (DestRC == ARM::GPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
+ if (DestRC == ARM::GPRRegisterClass || DestRC == ARM::tcGPRRegisterClass) {
+ if (SrcRC == ARM::GPRRegisterClass || SrcRC == ARM::tcGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVgpr2gpr), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
@@ -51,7 +51,7 @@
return true;
}
} else if (DestRC == ARM::tGPRRegisterClass) {
- if (SrcRC == ARM::GPRRegisterClass) {
+ if (SrcRC == ARM::GPRRegisterClass || SrcRC == ARM::tcGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVgpr2tgpr), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
@@ -70,7 +70,8 @@
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass) {
+ if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
+ RC == ARM::tcGPRRegisterClass) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -95,7 +96,8 @@
unsigned DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
- if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass) {
+ if (RC == ARM::GPRRegisterClass || RC == ARM::tGPRRegisterClass ||
+ RC == ARM::tcGPRRegisterClass) {
DebugLoc DL;
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -503,3 +505,46 @@
Offset = (isSub) ? -Offset : Offset;
return Offset == 0;
}
+
+/// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+/// two-address instruction inserted by the two-address pass.
+void
+Thumb2InstrInfo::scheduleTwoAddrSource(MachineInstr *SrcMI,
+ MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const {
+ if (SrcMI->getOpcode() != ARM::tMOVgpr2gpr ||
+ SrcMI->getOperand(1).isKill())
+ return;
+
+ unsigned PredReg = 0;
+ ARMCC::CondCodes CC = llvm::getInstrPredicate(UseMI, PredReg);
+ if (CC == ARMCC::AL || PredReg != ARM::CPSR)
+ return;
+
+ // Schedule the copy so it doesn't come between previous instructions
+ // and UseMI which can form an IT block.
+ unsigned SrcReg = SrcMI->getOperand(1).getReg();
+ ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
+ MachineBasicBlock *MBB = UseMI->getParent();
+ MachineBasicBlock::iterator MBBI = SrcMI;
+ unsigned NumInsts = 0;
+ while (--MBBI != MBB->begin()) {
+ if (MBBI->isDebugValue())
+ continue;
+
+ MachineInstr *NMI = &*MBBI;
+ ARMCC::CondCodes NCC = llvm::getInstrPredicate(NMI, PredReg);
+ if (!(NCC == CC || NCC == OCC) ||
+ NMI->modifiesRegister(SrcReg, &TRI) ||
+ NMI->definesRegister(ARM::CPSR))
+ break;
+ if (++NumInsts == 4)
+ // Too many in a row!
+ return;
+ }
+
+ if (NumInsts) {
+ MBB->remove(SrcMI);
+ MBB->insert(++MBBI, SrcMI);
+ }
+}
Modified: llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/ARM/Thumb2InstrInfo.h Fri Jul 2 04:34:51 2010
@@ -50,6 +50,11 @@
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
+ /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
+ /// two-address instruction inserted by the two-address pass.
+ void scheduleTwoAddrSource(MachineInstr *SrcMI, MachineInstr *UseMI,
+ const TargetRegisterInfo &TRI) const;
+
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
Modified: llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -110,9 +110,8 @@
unsigned AlphaInstrInfo::InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
"Alpha branch conditions have two components!");
@@ -120,25 +119,25 @@
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, dl, get(Alpha::BR)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Alpha::BR)).addMBB(TBB);
else // Conditional branch
if (isAlphaIntCondCode(Cond[0].getImm()))
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_I))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_I))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
else
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_F))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_F))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
// Two-way Conditional Branch.
if (isAlphaIntCondCode(Cond[0].getImm()))
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_I))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_I))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
else
- BuildMI(&MBB, dl, get(Alpha::COND_BRANCH_F))
+ BuildMI(&MBB, DL, get(Alpha::COND_BRANCH_F))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(Alpha::BR)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(Alpha::BR)).addMBB(FBB);
return 2;
}
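
The same mechanical change, dropping the locally constructed DebugLoc and taking it as a parameter, recurs below in the Blackfin, CellSPU, MBlaze, MSP430, Mips, PIC16, PowerPC, Sparc and SystemZ backends. A toy sketch of the intent (made-up types, not the TargetInstrInfo interface): the caller, which knows the location of the branch being re-emitted, threads it through instead of every target fabricating an unknown one.

#include <iostream>
#include <string>

struct DebugLoc {
  std::string File;
  int Line;                   // 0 means "unknown location"
};

// Before: each target fabricated its own (unknown) location.
unsigned insertBranchOld(const std::string &Target) {
  DebugLoc DL = {"", 0};      // always unknown
  std::cout << "br " << Target << "   ; line " << DL.Line << "\n";
  return 1;
}

// After: the caller threads the location of the original branch through.
unsigned insertBranchNew(const std::string &Target, DebugLoc DL) {
  std::cout << "br " << Target << "   ; line " << DL.Line << "\n";
  return 1;
}

int main() {
  DebugLoc FromOriginalBranch = {"foo.c", 42};
  insertBranchOld("bb1");                     // prints line 0 (lost)
  insertBranchNew("bb1", FromOriginalBranch); // prints line 42 (preserved)
}
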
Modified: llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/Alpha/AlphaInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -42,8 +42,9 @@
int &FrameIndex) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
Modified: llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp Fri Jul 2 04:34:51 2010
@@ -132,8 +132,8 @@
SDValue Val) {
SmallVector<SDValue, 8> ops(N->op_begin(), N->op_end());
ops[Num] = Val;
- SDValue New = DAG.UpdateNodeOperands(SDValue(N, 0), ops.data(), ops.size());
- DAG.ReplaceAllUsesWith(N, New.getNode());
+ SDNode *New = DAG.UpdateNodeOperands(N, ops.data(), ops.size());
+ DAG.ReplaceAllUsesWith(N, New);
}
// After instruction selection, insert COPY_TO_REGCLASS nodes to help in
Modified: llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -104,10 +104,8 @@
InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc DL;
-
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
Modified: llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/Blackfin/BlackfinInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -44,7 +44,8 @@
InsertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Modified: llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CBackend/CBackend.cpp Fri Jul 2 04:34:51 2010
@@ -264,7 +264,7 @@
//
static const AllocaInst *isDirectAlloca(const Value *V) {
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
- if (!AI) return false;
+ if (!AI) return 0;
if (AI->isArrayAllocation())
return 0; // FIXME: we can also inline fixed size array allocas!
if (AI->getParent() != &AI->getParent()->getParent()->getEntryBlock())
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUISelLowering.cpp Fri Jul 2 04:34:51 2010
@@ -2867,7 +2867,7 @@
case SPUISD::IndirectAddr: {
if (!ST->usingLargeMem() && Op0.getOpcode() == SPUISD::AFormAddr) {
ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1));
- if (CN != 0 && CN->getZExtValue() == 0) {
+ if (CN != 0 && CN->isNullValue()) {
// (SPUindirect (SPUaform <addr>, 0), 0) ->
// (SPUaform <addr>, 0)
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -554,9 +554,8 @@
unsigned
SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -566,14 +565,14 @@
if (FBB == 0) {
if (Cond.empty()) {
// Unconditional branch
- MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(SPU::BR));
+ MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(SPU::BR));
MIB.addMBB(TBB);
DEBUG(errs() << "Inserted one-way uncond branch: ");
DEBUG((*MIB).dump());
} else {
// Conditional branch
- MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
+ MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
MIB.addReg(Cond[1].getReg()).addMBB(TBB);
DEBUG(errs() << "Inserted one-way cond branch: ");
@@ -581,8 +580,8 @@
}
return 1;
} else {
- MachineInstrBuilder MIB = BuildMI(&MBB, dl, get(Cond[0].getImm()));
- MachineInstrBuilder MIB2 = BuildMI(&MBB, dl, get(SPU::BR));
+ MachineInstrBuilder MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
+ MachineInstrBuilder MIB2 = BuildMI(&MBB, DL, get(SPU::BR));
// Two-way Conditional Branch.
MIB.addReg(Cond[1].getReg()).addMBB(TBB);
Modified: llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/CellSPU/SPUInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -94,8 +94,9 @@
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
};
}
Modified: llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -185,10 +185,11 @@
unsigned MBlazeInstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Can only insert uncond branches so far.
assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
- BuildMI(&MBB, DebugLoc(), get(MBlaze::BRI)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(MBlaze::BRI)).addMBB(TBB);
return 1;
}
Modified: llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/MBlaze/MBlazeInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -198,7 +198,8 @@
/// Branch Analysis
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
Modified: llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -330,10 +330,8 @@
unsigned
MSP430InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc DL;
-
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
Modified: llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/MSP430/MSP430InstrInfo.h Fri Jul 2 04:34:51 2010
@@ -93,7 +93,8 @@
unsigned RemoveBranch(MachineBasicBlock &MBB) const;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
};
Modified: llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -520,9 +520,8 @@
unsigned MipsInstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 3 || Cond.size() == 2 || Cond.size() == 0) &&
@@ -531,18 +530,18 @@
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch?
- BuildMI(&MBB, dl, get(Mips::J)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Mips::J)).addMBB(TBB);
} else {
// Conditional branch.
unsigned Opc = GetCondBranchFromCond((Mips::CondCode)Cond[0].getImm());
const TargetInstrDesc &TID = get(Opc);
if (TID.getNumOperands() == 3)
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg())
.addReg(Cond[2].getReg())
.addMBB(TBB);
else
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg())
.addMBB(TBB);
}
@@ -554,12 +553,12 @@
const TargetInstrDesc &TID = get(Opc);
if (TID.getNumOperands() == 3)
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg()).addReg(Cond[2].getReg())
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg()).addReg(Cond[2].getReg())
.addMBB(TBB);
else
- BuildMI(&MBB, dl, TID).addReg(Cond[1].getReg()).addMBB(TBB);
+ BuildMI(&MBB, DL, TID).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(Mips::J)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(Mips::J)).addMBB(FBB);
return 2;
}
Modified: llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/Mips/MipsInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -204,7 +204,8 @@
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SrcReg,
Modified: llvm/branches/wendling/eh/lib/Target/PIC16/PIC16ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PIC16/PIC16ISelLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PIC16/PIC16ISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/PIC16/PIC16ISelLowering.cpp Fri Jul 2 04:34:51 2010
@@ -1791,14 +1791,14 @@
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
ISD::CondCode CC, unsigned &SPCC) {
if (isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(RHS)->isNullValue() &&
CC == ISD::SETNE &&
(LHS.getOpcode() == PIC16ISD::SELECT_ICC &&
LHS.getOperand(3).getOpcode() == PIC16ISD::SUBCC) &&
isa<ConstantSDNode>(LHS.getOperand(0)) &&
isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(0))->getZExtValue() == 1 &&
- cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 0) {
+ cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
+ cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
SDValue CMPCC = LHS.getOperand(3);
SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
LHS = CMPCC.getOperand(0);
Modified: llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -196,15 +196,15 @@
unsigned PIC16InstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch?
- DebugLoc dl;
- BuildMI(&MBB, dl, get(PIC16::br_uncond)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(PIC16::br_uncond)).addMBB(TBB);
}
return 1;
}
Modified: llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/PIC16/PIC16InstrInfo.h Fri Jul 2 04:34:51 2010
@@ -70,7 +70,8 @@
virtual
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
Modified: llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/PowerPC/PPCISelLowering.cpp Fri Jul 2 04:34:51 2010
@@ -5042,19 +5042,19 @@
default: break;
case PPCISD::SHL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0) // 0 << V -> 0.
+ if (C->isNullValue()) // 0 << V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRL:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0) // 0 >>u V -> 0.
+ if (C->isNullValue()) // 0 >>u V -> 0.
return N->getOperand(0);
}
break;
case PPCISD::SRA:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
- if (C->getZExtValue() == 0 || // 0 >>s V -> 0.
+ if (C->isNullValue() || // 0 >>s V -> 0.
C->isAllOnesValue()) // -1 >>s V -> -1.
return N->getOperand(0);
}
Modified: llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -316,9 +316,8 @@
unsigned
PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -327,17 +326,17 @@
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
- BuildMI(&MBB, dl, get(PPC::B)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
else // Conditional branch
- BuildMI(&MBB, dl, get(PPC::BCC))
+ BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
// Two-way Conditional Branch.
- BuildMI(&MBB, dl, get(PPC::BCC))
+ BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
- BuildMI(&MBB, dl, get(PPC::B)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
return 2;
}
Modified: llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/PowerPC/PPCInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -109,7 +109,8 @@
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
Modified: llvm/branches/wendling/eh/lib/Target/README.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/README.txt?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/README.txt (original)
+++ llvm/branches/wendling/eh/lib/Target/README.txt Fri Jul 2 04:34:51 2010
@@ -1439,33 +1439,6 @@
//===---------------------------------------------------------------------===//
-186.crafty contains this interesting pattern:
-
-%77 = call i8* @strstr(i8* getelementptr ([6 x i8]* @"\01LC5", i32 0, i32 0),
- i8* %30)
-%phitmp648 = icmp eq i8* %77, getelementptr ([6 x i8]* @"\01LC5", i32 0, i32 0)
-br i1 %phitmp648, label %bb70, label %bb76
-
-bb70: ; preds = %OptionMatch.exit91, %bb69
- %78 = call i32 @strlen(i8* %30) nounwind readonly align 1 ; <i32> [#uses=1]
-
-This is basically:
- cststr = "abcdef";
- if (strstr(cststr, P) == cststr) {
- x = strlen(P);
- ...
-
-The strstr call would be significantly cheaper written as:
-
-cststr = "abcdef";
-if (memcmp(P, str, strlen(P)))
- x = strlen(P);
-
-This is memcmp+strlen instead of strstr. This also makes the strlen fully
-redundant.
-
-//===---------------------------------------------------------------------===//
-
186.crafty also contains this code:
%1906 = call i32 @strlen(i8* getelementptr ([32 x i8]* @pgn_event, i32 0,i32 0))
@@ -1863,3 +1836,47 @@
 case it chooses instead to keep the max operation obvious.
//===---------------------------------------------------------------------===//
+
+Take the following testcase on x86-64 (similar testcases exist for all targets
+with addc/adde):
+
+define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b,
+i64 %c) nounwind {
+entry:
+ %0 = zext i64 %a to i128 ; <i128> [#uses=1]
+ %1 = zext i64 %b to i128 ; <i128> [#uses=1]
+ %2 = add i128 %1, %0 ; <i128> [#uses=2]
+ %3 = zext i64 %c to i128 ; <i128> [#uses=1]
+ %4 = shl i128 %3, 64 ; <i128> [#uses=1]
+ %5 = add i128 %4, %2 ; <i128> [#uses=1]
+ %6 = lshr i128 %5, 64 ; <i128> [#uses=1]
+ %7 = trunc i128 %6 to i64 ; <i64> [#uses=1]
+ store i64 %7, i64* %s, align 8
+ %8 = trunc i128 %2 to i64 ; <i64> [#uses=1]
+ store i64 %8, i64* %t, align 8
+ ret void
+}
+
+Generated code:
+ addq %rcx, %rdx
+ movl $0, %eax
+ adcq $0, %rax
+ addq %r8, %rax
+ movq %rax, (%rdi)
+ movq %rdx, (%rsi)
+ ret
+
+Expected code:
+ addq %rcx, %rdx
+ adcq $0, %r8
+ movq %r8, (%rdi)
+ movq %rdx, (%rsi)
+ ret
+
+The generated SelectionDAG has an ADD of an ADDE, where both operands of the
+ADDE are zero. Replacing one of the operands of the ADDE with the other operand
+of the ADD, and replacing the ADD with the ADDE, should give the desired result.
+
+(That said, we are doing a lot better than gcc on this testcase. :) )
+
+//===---------------------------------------------------------------------===//
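
To picture the fold the entry above asks for, here is a toy illustration (plain C++ over a made-up Node struct, not the real SelectionDAG API) of the rewrite add(x, adde(0, 0, carry)) ==> adde(x, 0, carry), which is exactly "replace one zero operand of the ADDE with the ADD's other operand, then let the ADDE stand in for the ADD".

#include <iostream>
#include <string>
#include <vector>

struct Node {
  std::string Op;
  std::vector<Node*> Ops;
};

bool isZero(const Node *N) { return N->Op == "const0"; }

// If N is an ADD whose other operand is an ADDE of two zeros, rewrite the
// ADDE in place to consume the ADD's non-ADDE operand and return it.
Node *foldAddOfAdde(Node *N) {
  if (N->Op != "add" || N->Ops.size() != 2)
    return N;
  for (int i = 0; i < 2; ++i) {
    Node *Adde = N->Ops[i], *Other = N->Ops[1 - i];
    if (Adde->Op == "adde" && isZero(Adde->Ops[0]) && isZero(Adde->Ops[1])) {
      Adde->Ops[0] = Other;   // replace one zero operand with the ADD's operand
      return Adde;            // the ADDE now replaces the ADD entirely
    }
  }
  return N;
}

int main() {
  Node Zero{"const0", {}}, X{"x", {}}, Carry{"carryin", {}};
  Node Adde{"adde", {&Zero, &Zero, &Carry}};
  Node Add{"add", {&X, &Adde}};
  Node *Folded = foldAddOfAdde(&Add);
  std::cout << Folded->Op << "(" << Folded->Ops[0]->Op << ", "
            << Folded->Ops[1]->Op << ", " << Folded->Ops[2]->Op << ")\n";
  // prints: adde(x, const0, carryin)
}
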
Modified: llvm/branches/wendling/eh/lib/Target/Sparc/SparcISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Sparc/SparcISelLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Sparc/SparcISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Sparc/SparcISelLowering.cpp Fri Jul 2 04:34:51 2010
@@ -737,7 +737,7 @@
static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
ISD::CondCode CC, unsigned &SPCC) {
if (isa<ConstantSDNode>(RHS) &&
- cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(RHS)->isNullValue() &&
CC == ISD::SETNE &&
((LHS.getOpcode() == SPISD::SELECT_ICC &&
LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
@@ -745,8 +745,8 @@
LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
isa<ConstantSDNode>(LHS.getOperand(0)) &&
isa<ConstantSDNode>(LHS.getOperand(1)) &&
- cast<ConstantSDNode>(LHS.getOperand(0))->getZExtValue() == 1 &&
- cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() == 0) {
+ cast<ConstantSDNode>(LHS.getOperand(0))->isOne() &&
+ cast<ConstantSDNode>(LHS.getOperand(1))->isNullValue()) {
SDValue CMPCC = LHS.getOperand(3);
SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
LHS = CMPCC.getOperand(0);
Modified: llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -109,12 +109,11 @@
unsigned
SparcInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond)const{
- // FIXME this should probably take a DebugLoc argument
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL)const{
// Can only insert uncond branches so far.
assert(Cond.empty() && !FBB && TBB && "Can only handle uncond branches!");
- BuildMI(&MBB, dl, get(SP::BA)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
return 1;
}
Modified: llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/Sparc/SparcInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -68,7 +68,8 @@
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Modified: llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -521,9 +521,8 @@
unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME: this should probably have a DebugLoc operand
- DebugLoc DL;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
Modified: llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/SystemZ/SystemZInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -102,7 +102,8 @@
bool AllowModify) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
SystemZCC::CondCodes getOppositeCondition(SystemZCC::CondCodes CC) const;
Modified: llvm/branches/wendling/eh/lib/Target/TargetInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/TargetInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/TargetInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/TargetInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -28,6 +28,10 @@
TargetOperandInfo::getRegClass(const TargetRegisterInfo *TRI) const {
if (isLookupPtrRegClass())
return TRI->getPointerRegClass(RegClass);
+ // Instructions like INSERT_SUBREG do not have fixed register classes.
+ if (RegClass < 0)
+ return 0;
+ // Otherwise just look it up normally.
return TRI->getRegClass(RegClass);
}
Modified: llvm/branches/wendling/eh/lib/Target/TargetLoweringObjectFile.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/TargetLoweringObjectFile.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/TargetLoweringObjectFile.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/TargetLoweringObjectFile.cpp Fri Jul 2 04:34:51 2010
@@ -101,7 +101,7 @@
ConstantInt *Null =
dyn_cast<ConstantInt>(CVA->getOperand(ATy->getNumElements()-1));
- if (Null == 0 || Null->getZExtValue() != 0)
+ if (Null == 0 || !Null->isZero())
return false; // Not null terminated.
// Verify that the null doesn't occur anywhere else in the string.
Modified: llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp Fri Jul 2 04:34:51 2010
@@ -277,10 +277,21 @@
return;
// Check whether this is an absolute address.
- if (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
- Inst.getOperand(AddrBase + 2).getReg() != 0 ||
- Inst.getOperand(AddrBase + 4).getReg() != 0 ||
- Inst.getOperand(AddrBase + 1).getImm() != 1)
+ // FIXME: We know TLVP symbol refs aren't, but there should be a better way
+ // to do this here.
+ bool Absolute = true;
+ if (Inst.getOperand(AddrOp).isExpr()) {
+ const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
+ if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
+ if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
+ Absolute = false;
+ }
+
+ if (Absolute &&
+ (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 2).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 4).getReg() != 0 ||
+ Inst.getOperand(AddrBase + 1).getImm() != 1))
return;
// If so, rewrite the instruction.
Modified: llvm/branches/wendling/eh/lib/Target/X86/README-X86-64.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/README-X86-64.txt?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/README-X86-64.txt (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/README-X86-64.txt Fri Jul 2 04:34:51 2010
@@ -240,3 +240,34 @@
expensive addressing mode.
//===---------------------------------------------------------------------===//
+
+Consider the following (contrived testcase, but contains common factors):
+
+#include <stdarg.h>
+int test(int x, ...) {
+ int sum, i;
+ va_list l;
+ va_start(l, x);
+ for (i = 0; i < x; i++)
+ sum += va_arg(l, int);
+ va_end(l);
+ return sum;
+}
+
+Testcase given in C because fixing it will likely involve changing the IR
+generated for it. The primary issue with the result is that it doesn't do any
+of the optimizations which are possible if we know the address of a va_list
+in the current function is never taken:
+1. We shouldn't spill the XMM registers because we only call va_arg with "int".
+2. It would be nice if we could scalarrepl the va_list.
+3. Probably overkill, but it'd be cool if we could peel off the first five
+iterations of the loop.
+
+Other optimizations involving functions which use va_arg on floats which don't
+have the address of a va_list taken:
+1. Conversely to the above, we shouldn't spill general registers if we only
+ call va_arg on "double".
+2. If we know nothing more than 64 bits wide is read from the XMM registers,
+ we can change the spilling code to reduce the amount of stack used by half.
+
+//===---------------------------------------------------------------------===//
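
A companion sketch for the second list above (an illustrative testcase, not taken from the tree): the "double"-only variant from point 1, where only the XMM argument area is ever read through va_arg, so in principle the general-purpose argument registers need not be spilled at all.

#include <stdarg.h>
#include <stdio.h>

double test_fp(int x, ...) {
  double sum = 0.0;
  va_list l;
  va_start(l, x);
  for (int i = 0; i < x; i++)
    sum += va_arg(l, double);   // only "double" is ever requested
  va_end(l);
  return sum;
}

int main() {
  printf("%f\n", test_fp(3, 1.0, 2.0, 3.0));
  return 0;
}
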
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86FastISel.cpp Fri Jul 2 04:34:51 2010
@@ -349,6 +349,11 @@
U = C;
}
+ if (const PointerType *Ty = dyn_cast<PointerType>(V->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support pointers through %fs or %gs
+ return false;
+
switch (Opcode) {
default: break;
case Instruction::BitCast:
@@ -886,7 +891,7 @@
BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB);
}
- FastEmitBranch(FalseMBB);
+ FastEmitBranch(FalseMBB, DL);
MBB->addSuccessor(TrueMBB);
return true;
}
@@ -941,7 +946,7 @@
BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ?
X86::JO_4 : X86::JB_4))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
+ FastEmitBranch(FalseMBB, DL);
MBB->addSuccessor(TrueMBB);
return true;
}
@@ -956,7 +961,7 @@
BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg);
BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB);
- FastEmitBranch(FalseMBB);
+ FastEmitBranch(FalseMBB, DL);
MBB->addSuccessor(TrueMBB);
return true;
}
@@ -1205,7 +1210,7 @@
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(MBB, DL, TII.get(OpC), ResultReg).
- addImm(CI->getZExtValue() == 0 ? -1ULL : 0);
+ addImm(CI->isZero() ? -1ULL : 0);
UpdateValueMap(&I, ResultReg);
return true;
}
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86ISelDAGToDAG.cpp Fri Jul 2 04:34:51 2010
@@ -137,21 +137,6 @@
}
namespace {
- class X86ISelListener : public SelectionDAG::DAGUpdateListener {
- SmallSet<SDNode*, 4> Deletes;
- public:
- explicit X86ISelListener() {}
- virtual void NodeDeleted(SDNode *N, SDNode *E) {
- Deletes.insert(N);
- }
- virtual void NodeUpdated(SDNode *N) {
- // Ignore updates.
- }
- bool IsDeleted(SDNode *N) {
- return Deletes.count(N);
- }
- };
-
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
@@ -199,7 +184,6 @@
bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
- X86ISelListener &DeadNodes,
unsigned Depth);
bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
@@ -386,14 +370,14 @@
}
for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
Ops.push_back(OrigChain.getOperand(i));
- CurDAG->UpdateNodeOperands(OrigChain, &Ops[0], Ops.size());
- CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
+ CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
Ops.clear();
Ops.push_back(SDValue(Load.getNode(), 1));
for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
Ops.push_back(Call.getOperand(i));
- CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
+ CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
@@ -664,8 +648,7 @@
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
- X86ISelListener DeadNodes;
- if (MatchAddressRecursively(N, AM, DeadNodes, 0))
+ if (MatchAddressRecursively(N, AM, 0))
return true;
// Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
@@ -713,7 +696,6 @@
}
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
- X86ISelListener &DeadNodes,
unsigned Depth) {
bool is64Bit = Subtarget->is64Bit();
DebugLoc dl = N.getDebugLoc();
@@ -876,13 +858,13 @@
// other uses, since it avoids a two-address sub instruction, however
// it costs an additional mov if the index register has other uses.
+ // Add an artificial use to this node so that we can keep track of
+ // it if it gets CSE'd with a different node.
+ HandleSDNode Handle(N);
+
// Test if the LHS of the sub can be folded.
X86ISelAddressMode Backup = AM;
- if (MatchAddressRecursively(N.getNode()->getOperand(0), AM,
- DeadNodes, Depth+1) ||
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- DeadNodes.IsDeleted(N.getNode())) {
+ if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
AM = Backup;
break;
}
@@ -893,7 +875,7 @@
}
int Cost = 0;
- SDValue RHS = N.getNode()->getOperand(1);
+ SDValue RHS = Handle.getValue().getNode()->getOperand(1);
// If the RHS involves a register with multiple uses, this
// transformation incurs an extra mov, due to the neg instruction
// clobbering its operand.
@@ -944,35 +926,27 @@
}
case ISD::ADD: {
+ // Add an artificial use to this node so that we can keep track of
+ // it if it gets CSE'd with a different node.
+ HandleSDNode Handle(N);
+ SDValue LHS = Handle.getValue().getNode()->getOperand(0);
+ SDValue RHS = Handle.getValue().getNode()->getOperand(1);
+
X86ISelAddressMode Backup = AM;
- if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM,
- DeadNodes, Depth+1)) {
- if (DeadNodes.IsDeleted(N.getNode()))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return true;
- if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM,
- DeadNodes, Depth+1))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return DeadNodes.IsDeleted(N.getNode());
- }
+ if (!MatchAddressRecursively(LHS, AM, Depth+1) &&
+ !MatchAddressRecursively(RHS, AM, Depth+1))
+ return false;
+ AM = Backup;
+ LHS = Handle.getValue().getNode()->getOperand(0);
+ RHS = Handle.getValue().getNode()->getOperand(1);
// Try again after commuting the operands.
+ if (!MatchAddressRecursively(RHS, AM, Depth+1) &&
+ !MatchAddressRecursively(LHS, AM, Depth+1))
+ return false;
AM = Backup;
- if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM,
- DeadNodes, Depth+1)) {
- if (DeadNodes.IsDeleted(N.getNode()))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return true;
- if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM,
- DeadNodes, Depth+1))
- // If it is successful but the recursive update causes N to be deleted,
- // then it's not safe to continue.
- return DeadNodes.IsDeleted(N.getNode());
- }
- AM = Backup;
+ LHS = Handle.getValue().getNode()->getOperand(0);
+ RHS = Handle.getValue().getNode()->getOperand(1);
// If we couldn't fold both operands into the address at the same time,
// see if we can just put each operand into a register and fold at least
@@ -980,8 +954,8 @@
if (AM.BaseType == X86ISelAddressMode::RegBase &&
!AM.Base_Reg.getNode() &&
!AM.IndexReg.getNode()) {
- AM.Base_Reg = N.getNode()->getOperand(0);
- AM.IndexReg = N.getNode()->getOperand(1);
+ AM.Base_Reg = LHS;
+ AM.IndexReg = RHS;
AM.Scale = 1;
return false;
}
@@ -996,7 +970,7 @@
uint64_t Offset = CN->getSExtValue();
// Start with the LHS as an addr mode.
- if (!MatchAddressRecursively(N.getOperand(0), AM, DeadNodes, Depth+1) &&
+ if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
// Address could not have picked a GV address for the displacement.
AM.GV == NULL &&
// On x86-64, the resultant disp must fit in 32-bits.
@@ -1073,7 +1047,7 @@
CurDAG->RepositionNode(N.getNode(), Shl.getNode());
Shl.getNode()->setNodeId(N.getNode()->getNodeId());
}
- CurDAG->ReplaceAllUsesWith(N, Shl, &DeadNodes);
+ CurDAG->ReplaceAllUsesWith(N, Shl);
AM.IndexReg = And;
AM.Scale = (1 << ScaleLog);
return false;
@@ -1124,7 +1098,7 @@
NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
}
- CurDAG->ReplaceAllUsesWith(N, NewSHIFT, &DeadNodes);
+ CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
AM.Scale = 1 << ShiftCst;
AM.IndexReg = NewAND;
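
What the HandleSDNode change above buys, in miniature (plain C++, not SelectionDAG): the handle registers an extra use that gets redirected on replace-all-uses, so after a recursive match that may CSE or replace the original node, the operands are re-read through the handle rather than through a stale pointer, which is what the removed X86ISelListener was only approximating.

#include <cassert>
#include <string>
#include <vector>

struct Value;
struct Use { Value *V; };

struct Value {
  std::string Name;
  std::vector<Use*> Uses;        // all places that point at this value
};

struct Handle {
  Use U;
  explicit Handle(Value *V) : U{V} { V->Uses.push_back(&U); }
  Value *get() const { return U.V; }
};

// Redirect every recorded use of From (including any handles) to To.
void replaceAllUsesWith(Value *From, Value *To) {
  for (Use *U : From->Uses) {
    U->V = To;
    To->Uses.push_back(U);
  }
  From->Uses.clear();
}

int main() {
  Value A{"add#1", {}}, B{"add#1-cse", {}};
  Handle H(&A);                   // artificial use, as in HandleSDNode(N)
  Value *Raw = &A;                // a raw pointer, as the old code kept
  replaceAllUsesWith(&A, &B);     // e.g. CSE merges A into B
  assert(H.get() == &B);          // the handle followed the replacement
  assert(Raw == &A);              // the raw pointer did not
  (void)Raw;
}
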
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86ISelLowering.cpp Fri Jul 2 04:34:51 2010
@@ -3185,7 +3185,7 @@
/// constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
return ((isa<ConstantSDNode>(Elt) &&
- cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
+ cast<ConstantSDNode>(Elt)->isNullValue()) ||
(isa<ConstantFPSDNode>(Elt) &&
cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}
@@ -6229,7 +6229,7 @@
if (Op0.getOpcode() == ISD::AND &&
Op0.hasOneUse() &&
Op1.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op1)->getZExtValue() == 0 &&
+ cast<ConstantSDNode>(Op1)->isNullValue() &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
if (NewSetCC.getNode())
@@ -6609,14 +6609,14 @@
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, MVT::i8);
- SDValue User = SDValue(*Op.getNode()->use_begin(), 0);
+ SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
- if (User.getOpcode() == ISD::BR) {
- SDValue FalseBB = User.getOperand(1);
- SDValue NewBR =
- DAG.UpdateNodeOperands(User, User.getOperand(0), Dest);
+ if (User->getOpcode() == ISD::BR) {
+ SDValue FalseBB = User->getOperand(1);
+ SDNode *NewBR =
+ DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
Dest = FalseBB;
@@ -8465,22 +8465,42 @@
MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
unsigned Opc =
X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
+
BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
F->insert(It, copy0MBB);
F->insert(It, sinkMBB);
+
// Update machine-CFG edges by first adding all successors of the current
// block to the new block which will contain the Phi node for the select.
for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
E = BB->succ_end(); I != E; ++I)
sinkMBB->addSuccessor(*I);
+
// Next, remove all successors of the current block, and add the true
// and fallthrough blocks as its successors.
while (!BB->succ_empty())
BB->removeSuccessor(BB->succ_begin());
+
// Add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
+ // If the EFLAGS register isn't dead in the terminator, then claim that it's
+ // live into the sink and copy blocks.
+ const MachineFunction *MF = BB->getParent();
+ const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
+ BitVector ReservedRegs = TRI->getReservedRegs(*MF);
+ const MachineInstr *Term = BB->getFirstTerminator();
+
+ for (unsigned I = 0, E = Term->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = Term->getOperand(I);
+ if (!MO.isReg() || MO.isKill() || MO.isDead()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg != X86::EFLAGS) continue;
+ copy0MBB->addLiveIn(Reg);
+ sinkMBB->addLiveIn(Reg);
+ }
+
// copy0MBB:
// %FalseValue = ...
// # fallthrough to sinkMBB
@@ -8543,6 +8563,15 @@
.addReg(0);
MIB = BuildMI(BB, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI).addReg(0);
+ } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
+ MachineInstrBuilder MIB = BuildMI(BB, DL, TII->get(X86::MOV32rm), X86::EAX)
+ .addReg(0)
+ .addImm(0).addReg(0)
+ .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
+ MI->getOperand(3).getTargetFlags())
+ .addReg(0);
+ MIB = BuildMI(BB, DL, TII->get(X86::CALL32m));
+ addDirectMem(MIB, X86::EAX).addReg(0);
} else {
MachineInstrBuilder MIB = BuildMI(BB, DL, TII->get(X86::MOV32rm), X86::EAX)
.addReg(TII->getGlobalBaseReg(F))
@@ -9612,8 +9641,10 @@
if (ShAmt1.getOpcode() == ISD::SUB) {
SDValue Sum = ShAmt1.getOperand(0);
if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
- if (SumC->getSExtValue() == Bits &&
- ShAmt1.getOperand(1) == ShAmt0)
+ SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
+ if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
+ ShAmt1Op1 = ShAmt1Op1.getOperand(0);
+ if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
return DAG.getNode(Opc, DL, VT,
Op0, Op1,
DAG.getNode(ISD::TRUNCATE, DL,
@@ -9840,9 +9871,10 @@
switch (atomic.getOpcode()) {
case ISD::ATOMIC_CMP_SWAP:
- return DAG.UpdateNodeOperands(atomic, fence.getOperand(0),
+ return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+ fence.getOperand(0),
atomic.getOperand(1), atomic.getOperand(2),
- atomic.getOperand(3));
+ atomic.getOperand(3)), atomic.getResNo());
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
@@ -9854,8 +9886,10 @@
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
- return DAG.UpdateNodeOperands(atomic, fence.getOperand(0),
- atomic.getOperand(1), atomic.getOperand(2));
+ return SDValue(DAG.UpdateNodeOperands(atomic.getNode(),
+ fence.getOperand(0),
+ atomic.getOperand(1), atomic.getOperand(2)),
+ atomic.getResNo());
default:
return SDValue();
}
@@ -10220,9 +10254,8 @@
case 'e': {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
- C->getSExtValue())) {
+ if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
break;
@@ -10235,9 +10268,8 @@
case 'Z': {
// 32-bit unsigned value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
- const ConstantInt *CI = C->getConstantIntValue();
- if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
- C->getZExtValue())) {
+ if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+ C->getZExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
break;
}
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86Instr64bit.td Fri Jul 2 04:34:51 2010
@@ -293,7 +293,7 @@
"lea{q}\t{$src|$dst}, {$dst|$src}",
[(set GR64:$dst, lea64addr:$src)]>;
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def BSWAP64r : RI<0xC8, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
"bswap{q}\t$dst",
[(set GR64:$dst, (bswap GR64:$src))]>, TB;
@@ -521,7 +521,7 @@
def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i64i32imm:$src),
"add{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isConvertibleToThreeAddress = 1 in {
let isCommutable = 1 in
// Register-Register Addition
@@ -559,7 +559,7 @@
[(set GR64:$dst, EFLAGS,
(X86add_flag GR64:$src1, (load addr:$src2)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Memory-Register Addition
def ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -580,7 +580,7 @@
def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i64i32imm:$src),
"adc{q}\t{$src, %rax|%rax, $src}", []>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -606,7 +606,7 @@
(ins GR64:$src1, i64i32imm:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"adc{q}\t{$src2, $dst|$dst, $src2}",
@@ -621,7 +621,7 @@
addr:$dst)]>;
} // Uses = [EFLAGS]
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
// Register-Register Subtraction
def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -653,7 +653,7 @@
"sub{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86sub_flag GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i64i32imm:$src),
"sub{q}\t{$src, %rax|%rax, $src}", []>;
@@ -677,7 +677,7 @@
(implicit EFLAGS)]>;
let Uses = [EFLAGS] in {
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
@@ -702,7 +702,7 @@
(ins GR64:$src1, i64i32imm:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i64i32imm:$src),
"sbb{q}\t{$src, %rax|%rax, $src}", []>;
@@ -736,7 +736,7 @@
}
let Defs = [EFLAGS] in {
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in
// Register-Register Signed Integer Multiplication
def IMUL64rr : RI<0xAF, MRMSrcReg, (outs GR64:$dst),
@@ -751,7 +751,7 @@
"imul{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, EFLAGS,
(X86smul_flag GR64:$src1, (load addr:$src2)))]>, TB;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
// Surprisingly enough, these are not two-address instructions!
@@ -803,7 +803,7 @@
// Unary instructions
let Defs = [EFLAGS], CodeSize = 2 in {
-let isTwoAddress = 1 in
+let Constraints = "$src = $dst" in
def NEG64r : RI<0xF7, MRM3r, (outs GR64:$dst), (ins GR64:$src), "neg{q}\t$dst",
[(set GR64:$dst, (ineg GR64:$src)),
(implicit EFLAGS)]>;
@@ -811,14 +811,14 @@
[(store (ineg (loadi64 addr:$dst)), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def INC64r : RI<0xFF, MRM0r, (outs GR64:$dst), (ins GR64:$src), "inc{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86inc_flag GR64:$src))]>;
def INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), "inc{q}\t$dst",
[(store (add (loadi64 addr:$dst), 1), addr:$dst),
(implicit EFLAGS)]>;
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in
def DEC64r : RI<0xFF, MRM1r, (outs GR64:$dst), (ins GR64:$src), "dec{q}\t$dst",
[(set GR64:$dst, EFLAGS, (X86dec_flag GR64:$src))]>;
def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
@@ -826,7 +826,7 @@
(implicit EFLAGS)]>;
// In 64-bit mode, single byte INC and DEC cannot be encoded.
-let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in {
+let Constraints = "$src = $dst", isConvertibleToThreeAddress = 1 in {
// Can transform into LEA.
def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src),
"inc{w}\t$dst",
@@ -844,38 +844,36 @@
"dec{l}\t$dst",
[(set GR32:$dst, EFLAGS, (X86dec_flag GR32:$src))]>,
Requires<[In64BitMode]>;
-} // isConvertibleToThreeAddress
+} // Constraints = "$src = $dst", isConvertibleToThreeAddress
// These are duplicates of their 32-bit counterparts. Only needed so X86 knows
// how to unfold them.
-let isTwoAddress = 0, CodeSize = 2 in {
- def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
- [(store (add (loadi16 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
- def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
- [(store (add (loadi32 addr:$dst), 1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
- def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
- [(store (add (loadi16 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- OpSize, Requires<[In64BitMode]>;
- def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
- [(store (add (loadi32 addr:$dst), -1), addr:$dst),
- (implicit EFLAGS)]>,
- Requires<[In64BitMode]>;
-}
+def INC64_16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), "inc{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def INC64_32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), "inc{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), 1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
+def DEC64_16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), "dec{w}\t$dst",
+ [(store (add (loadi16 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ OpSize, Requires<[In64BitMode]>;
+def DEC64_32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), "dec{l}\t$dst",
+ [(store (add (loadi32 addr:$dst), -1), addr:$dst),
+ (implicit EFLAGS)]>,
+ Requires<[In64BitMode]>;
} // Defs = [EFLAGS], CodeSize
let Defs = [EFLAGS] in {
// Shift instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src),
+def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (shl GR64:$src, CL))]>;
+ [(set GR64:$dst, (shl GR64:$src1, CL))]>;
let isConvertibleToThreeAddress = 1 in // Can transform into LEA.
def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
@@ -885,7 +883,7 @@
// 'add reg,reg' is cheaper.
def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1),
"shl{q}\t$dst", []>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHL64mCL : RI<0xD3, MRM4m, (outs), (ins i64mem:$dst),
@@ -898,18 +896,18 @@
"shl{q}\t$dst",
[(store (shl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src),
+def SHR64rCL : RI<0xD3, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (srl GR64:$src, CL))]>;
+ [(set GR64:$dst, (srl GR64:$src1, CL))]>;
def SHR64ri : RIi8<0xC1, MRM5r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2),
"shr{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (srl GR64:$src1, (i8 imm:$src2)))]>;
def SHR64r1 : RI<0xD1, MRM5r, (outs GR64:$dst), (ins GR64:$src1),
"shr{q}\t$dst",
[(set GR64:$dst, (srl GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SHR64mCL : RI<0xD3, MRM5m, (outs), (ins i64mem:$dst),
@@ -922,11 +920,11 @@
"shr{q}\t$dst",
[(store (srl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src),
+def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (sra GR64:$src, CL))]>;
+ [(set GR64:$dst, (sra GR64:$src1, CL))]>;
def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"sar{q}\t{$src2, $dst|$dst, $src2}",
@@ -934,7 +932,7 @@
def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1),
"sar{q}\t$dst",
[(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def SAR64mCL : RI<0xD3, MRM7m, (outs), (ins i64mem:$dst),
@@ -949,7 +947,7 @@
// Rotate instructions
-let isTwoAddress = 1 in {
+let Constraints = "$src = $dst" in {
def RCL64r1 : RI<0xD1, MRM2r, (outs GR64:$dst), (ins GR64:$src),
"rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt),
@@ -966,9 +964,8 @@
def RCR64rCL : RI<0xD3, MRM3r, (outs GR64:$dst), (ins GR64:$src),
"rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
-}
+} // Constraints = "$src = $dst"
-let isTwoAddress = 0 in {
def RCL64m1 : RI<0xD1, MRM2m, (outs), (ins i64mem:$dst),
"rcl{q}\t{1, $dst|$dst, 1}", []>;
def RCL64mi : RIi8<0xC1, MRM2m, (outs), (ins i64mem:$dst, i8imm:$cnt),
@@ -984,13 +981,12 @@
def RCR64mCL : RI<0xD3, MRM3m, (outs), (ins i64mem:$dst),
"rcr{q}\t{%cl, $dst|$dst, CL}", []>;
}
-}
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src),
+def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotl GR64:$src, CL))]>;
+ [(set GR64:$dst, (rotl GR64:$src1, CL))]>;
def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"rol{q}\t{$src2, $dst|$dst, $src2}",
@@ -998,7 +994,7 @@
def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1),
"rol{q}\t$dst",
[(set GR64:$dst, (rotl GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst),
@@ -1011,11 +1007,11 @@
"rol{q}\t$dst",
[(store (rotl (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in
-def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src),
+def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t{%cl, $dst|$dst, %CL}",
- [(set GR64:$dst, (rotr GR64:$src, CL))]>;
+ [(set GR64:$dst, (rotr GR64:$src1, CL))]>;
def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst),
(ins GR64:$src1, i8imm:$src2),
"ror{q}\t{$src2, $dst|$dst, $src2}",
@@ -1023,7 +1019,7 @@
def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1),
"ror{q}\t$dst",
[(set GR64:$dst, (rotr GR64:$src1, (i8 1)))]>;
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in
def ROR64mCL : RI<0xD3, MRM1m, (outs), (ins i64mem:$dst),
@@ -1037,7 +1033,7 @@
[(store (rotr (loadi64 addr:$dst), (i8 1)), addr:$dst)]>;
// Double shift instructions (generalizations of rotate)
-let isTwoAddress = 1 in {
+let Constraints = "$src1 = $dst" in {
let Uses = [CL] in {
def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst),
(ins GR64:$src1, GR64:$src2),
@@ -1067,7 +1063,7 @@
(i8 imm:$src3)))]>,
TB;
} // isCommutable
-} // isTwoAddress
+} // Constraints = "$src1 = $dst"
let Uses = [CL] in {
def SHLD64mrCL : RI<0xA5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrFormats.td?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrFormats.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrFormats.td Fri Jul 2 04:34:51 2010
@@ -214,12 +214,24 @@
let CodeSize = 3;
}
+// SI - SSE 1 & 2 scalar instructions
+class SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
+ : I<o, F, outs, ins, asm, pattern> {
+ let Predicates = !if(hasVEX_4VPrefix /* VEX_4V */,
+ !if(!eq(Prefix, 11 /* XD */), [HasAVX, HasSSE2], [HasAVX, HasSSE1]),
+ !if(!eq(Prefix, 12 /* XS */), [HasSSE2], [HasSSE1]));
+
+ // AVX instructions have a 'v' prefix in the mnemonic
+ let AsmString = !if(hasVEX_4VPrefix, !strconcat("v", asm), asm);
+}
+
// SSE1 Instruction Templates:
//
// SSI - SSE1 instructions with XS prefix.
// PSI - SSE1 instructions with TB prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
// VSSI - SSE1 instructions with XS prefix in AVX form.
+// VPSI - SSE1 instructions with TB prefix in AVX form.
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
: I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>;
@@ -237,6 +249,10 @@
list<dag> pattern>
: I<o, F, outs, ins, !strconcat("v", asm), pattern>, XS, VEX_4V,
Requires<[HasAVX, HasSSE1]>;
+class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedSingle>,
+ VEX_4V, Requires<[HasAVX, HasSSE1]>;
// SSE2 Instruction Templates:
//
@@ -246,6 +262,7 @@
// PDI - SSE2 instructions with TB and OpSize prefixes.
// PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes.
// VSDI - SSE2 instructions with XD prefix in AVX form.
+// VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form.
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern>
: I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE2]>;
@@ -266,6 +283,10 @@
list<dag> pattern>
: I<o, F, outs, ins, !strconcat("v", asm), pattern>, XD, VEX_4V,
Requires<[HasAVX, HasSSE2]>;
+class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
+ list<dag> pattern>
+ : I<o, F, outs, ins, !strconcat("v", asm), pattern, SSEPackedDouble>,
+ VEX_4V, OpSize, Requires<[HasAVX, HasSSE2]>;
// SSE3 Instruction Templates:
//
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -1839,9 +1839,8 @@
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const {
- // FIXME this should probably have a DebugLoc operand
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
@@ -1850,7 +1849,7 @@
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
- BuildMI(&MBB, dl, get(X86::JMP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
return 1;
}
@@ -1860,27 +1859,27 @@
switch (CC) {
case X86::COND_NP_OR_E:
// Synthesize NP_OR_E with two branches.
- BuildMI(&MBB, dl, get(X86::JNP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
++Count;
- BuildMI(&MBB, dl, get(X86::JE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
++Count;
break;
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
- BuildMI(&MBB, dl, get(X86::JNE_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
++Count;
- BuildMI(&MBB, dl, get(X86::JP_4)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
++Count;
break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
- BuildMI(&MBB, dl, get(Opc)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
++Count;
}
}
if (FBB) {
// Two-way Conditional branch. Insert the second branch.
- BuildMI(&MBB, dl, get(X86::JMP_4)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
++Count;
}
return Count;
@@ -2057,71 +2056,87 @@
return false;
}
-static unsigned getStoreRegOpcode(unsigned SrcReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- TargetMachine &TM) {
- unsigned Opc = 0;
+static unsigned getLoadStoreRegOpcode(unsigned Reg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM,
+ bool load) {
if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64mr;
+ return load ? X86::MOV64rm : X86::MOV64mr;
} else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32mr;
+ return load ? X86::MOV32rm : X86::MOV32mr;
} else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16mr;
+ return load ? X86::MOV16rm : X86::MOV16mr;
} else if (RC == &X86::GR8RegClass) {
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
- if (isHReg(SrcReg) &&
+ if (isHReg(Reg) &&
TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64mr;
+ return load ? X86::MOV64rm : X86::MOV64mr;
} else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32mr;
+ return load ? X86::MOV32rm : X86::MOV32mr;
} else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16mr;
+ return load ? X86::MOV16rm : X86::MOV16mr;
} else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR8_ABCD_HRegClass) {
if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8mr_NOREX;
+ return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
else
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR64_NOREXRegClass ||
RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64mr;
+ return load ? X86::MOV64rm : X86::MOV64mr;
} else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32mr;
+ return load ? X86::MOV32rm : X86::MOV32mr;
} else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16mr;
+ return load ? X86::MOV16rm : X86::MOV16mr;
} else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8mr;
+ return load ? X86::MOV8rm : X86::MOV8mr;
} else if (RC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64mr_TC;
+ return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
} else if (RC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32mr_TC;
+ return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
} else if (RC == &X86::RFP80RegClass) {
- Opc = X86::ST_FpP80m; // pops
+ return load ? X86::LD_Fp80m : X86::ST_FpP80m;
} else if (RC == &X86::RFP64RegClass) {
- Opc = X86::ST_Fp64m;
+ return load ? X86::LD_Fp64m : X86::ST_Fp64m;
} else if (RC == &X86::RFP32RegClass) {
- Opc = X86::ST_Fp32m;
+ return load ? X86::LD_Fp32m : X86::ST_Fp32m;
} else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSmr;
+ return load ? X86::MOVSSrm : X86::MOVSSmr;
} else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDmr;
+ return load ? X86::MOVSDrm : X86::MOVSDmr;
} else if (RC == &X86::VR128RegClass) {
// If stack is realigned we can use aligned stores.
- Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
+ if (isStackAligned)
+ return load ? X86::MOVAPSrm : X86::MOVAPSmr;
+ else
+ return load ? X86::MOVUPSrm : X86::MOVUPSmr;
} else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64mr;
+ return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
} else {
llvm_unreachable("Unknown regclass");
}
+}
+
+static unsigned getStoreRegOpcode(unsigned SrcReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ TargetMachine &TM) {
+ return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
+}
+
- return Opc;
+static unsigned getLoadRegOpcode(unsigned DestReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM) {
+ return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -2155,72 +2170,6 @@
NewMIs.push_back(MIB);
}
-static unsigned getLoadRegOpcode(unsigned DestReg,
- const TargetRegisterClass *RC,
- bool isStackAligned,
- const TargetMachine &TM) {
- unsigned Opc = 0;
- if (RC == &X86::GR64RegClass || RC == &X86::GR64_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32RegClass || RC == &X86::GR32_NOSPRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16RegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8RegClass) {
- // Copying to or from a physical H register on x86-64 requires a NOREX
- // move. Otherwise use a normal move.
- if (isHReg(DestReg) &&
- TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_ABCDRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_ABCDRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_ABCDRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_ABCD_LRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR8_ABCD_HRegClass) {
- if (TM.getSubtarget<X86Subtarget>().is64Bit())
- Opc = X86::MOV8rm_NOREX;
- else
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_NOREXRegClass ||
- RC == &X86::GR64_NOREX_NOSPRegClass) {
- Opc = X86::MOV64rm;
- } else if (RC == &X86::GR32_NOREXRegClass) {
- Opc = X86::MOV32rm;
- } else if (RC == &X86::GR16_NOREXRegClass) {
- Opc = X86::MOV16rm;
- } else if (RC == &X86::GR8_NOREXRegClass) {
- Opc = X86::MOV8rm;
- } else if (RC == &X86::GR64_TCRegClass) {
- Opc = X86::MOV64rm_TC;
- } else if (RC == &X86::GR32_TCRegClass) {
- Opc = X86::MOV32rm_TC;
- } else if (RC == &X86::RFP80RegClass) {
- Opc = X86::LD_Fp80m;
- } else if (RC == &X86::RFP64RegClass) {
- Opc = X86::LD_Fp64m;
- } else if (RC == &X86::RFP32RegClass) {
- Opc = X86::LD_Fp32m;
- } else if (RC == &X86::FR32RegClass) {
- Opc = X86::MOVSSrm;
- } else if (RC == &X86::FR64RegClass) {
- Opc = X86::MOVSDrm;
- } else if (RC == &X86::VR128RegClass) {
- // If stack is realigned we can use aligned loads.
- Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
- } else if (RC == &X86::VR64RegClass) {
- Opc = X86::MMX_MOVQ64rm;
- } else {
- llvm_unreachable("Unknown regclass");
- }
-
- return Opc;
-}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
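
For reference, the two former opcode tables are now one helper parameterized by a
'load' flag, with getStoreRegOpcode/getLoadRegOpcode reduced to thin wrappers. A
minimal sketch of a reload site using the unified helper follows; DestReg, RC,
isStackAligned, FrameIdx and the X86InstrInfo member context are assumed from the
surrounding loadRegFromStackSlot code, so this is an illustration rather than part
of the patch:

  // Assumes addFrameReference from "X86InstrBuilder.h" and X86InstrInfo member context.
  unsigned Opc = getLoadRegOpcode(DestReg, RC, isStackAligned, TM);
  DebugLoc DL = MBB.findDebugLoc(MI);
  // Emit "mov DestReg, [frame slot]" (or the matching FP/SSE reload).
  addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
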
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrInfo.h Fri Jul 2 04:34:51 2010
@@ -612,7 +612,8 @@
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual bool copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
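
Since InsertBranch now takes the DebugLoc from its caller, call sites can forward a
real location instead of the callee synthesizing an empty one. A hedged sketch of a
caller under the new signature (TII, MBB, TBB and FBB are assumed to come from the
surrounding branch-analysis code; this is not code from the patch):

  SmallVector<MachineOperand, 4> Cond;
  // ... AnalyzeBranch has filled in TBB/FBB/Cond ...
  DebugLoc DL = MBB.findDebugLoc(MBB.getFirstTerminator());
  TII->InsertBranch(MBB, TBB, FBB, Cond, DL);
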
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86InstrSSE.td Fri Jul 2 04:34:51 2010
@@ -646,6 +646,17 @@
defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, 1, 1, 0>;
}
+/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
+multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
+ RegisterClass RC, X86MemOperand memop> {
+ let isCommutable = 1 in {
+ def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
+ }
+ def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memop:$src2),
+ OpcodeStr, [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
+}
+
/// basic_sse12_fp_binop_rm - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
@@ -660,50 +671,32 @@
let Constraints = "$src1 = $dst" in {
multiclass basic_sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
SDNode OpNode, bit Commutable = 0> {
- // Scalar operation, reg+reg.
- def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
- let isCommutable = Commutable;
- }
-
- def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
- let isCommutable = Commutable;
- }
-
- def V#NAME#SSrr : VSSI<opc, MRMSrcReg, (outs FR32:$dst),
- (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr,
- "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []> {
- let isCommutable = Commutable;
- let Constraints = "";
- let isAsmParserOnly = 1;
- }
- def V#NAME#SDrr : VSDI<opc, MRMSrcReg, (outs FR64:$dst),
- (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr,
- "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
- []> {
- let isCommutable = Commutable;
- let Constraints = "";
- let isAsmParserOnly = 1;
+ let Constraints = "", isAsmParserOnly = 1, hasVEX_4VPrefix = 1 in {
+ // Scalar operation, reg+reg.
+ let Prefix = 12 /* XS */ in
+ defm V#NAME#SS : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ OpNode, FR32, f32mem>;
+
+ let Prefix = 11 /* XD */ in
+ defm V#NAME#SD : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ OpNode, FR64, f64mem>;
+ }
+
+ let Constraints = "$src1 = $dst" in {
+ // Scalar operation, reg+reg.
+ let Prefix = 12 /* XS */ in
+ defm SS : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
+ OpNode, FR32, f32mem>;
+ let Prefix = 11 /* XD */ in
+ defm SD : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
+ OpNode, FR64, f64mem>;
}
- // Scalar operation, reg+mem.
- def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f32mem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
-
- def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f64mem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
-
// Vector operation, reg+reg.
def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
@@ -719,6 +712,26 @@
let isCommutable = Commutable;
}
+ def V#NAME#PSrr : VPSI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []> {
+ let isCommutable = Commutable;
+ let Constraints = "";
+ let isAsmParserOnly = 1;
+ }
+
+ def V#NAME#PDrr : VPDI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ []> {
+ let isCommutable = Commutable;
+ let Constraints = "";
+ let isAsmParserOnly = 1;
+ }
+
// Vector operation, reg+mem.
def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, f128mem:$src2),
@@ -730,7 +743,43 @@
!strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
[(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
+ def V#NAME#PSrm : VPSI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2),
+ !strconcat(OpcodeStr,
+ "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []> {
+ let Constraints = "";
+ let isAsmParserOnly = 1;
+ }
+
+ def V#NAME#PDrm : VPDI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, f128mem:$src2),
+ !strconcat(OpcodeStr,
+ "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []> {
+ let Constraints = "";
+ let isAsmParserOnly = 1;
+ }
+
// Intrinsic operation, reg+reg.
+ def V#NAME#SSrr_Int : VSSI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
+ [(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
+ !strconcat(OpcodeStr, "_ss")) VR128:$src1,
+ VR128:$src2))]> {
+ // int_x86_sse_xxx_ss
+ let Constraints = "";
+ }
+
+ def V#NAME#SDrr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst),
+ (ins VR128:$src1, VR128:$src2),
+ !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
+ [(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
+ !strconcat(OpcodeStr, "_sd")) VR128:$src1,
+ VR128:$src2))]> {
+ // int_x86_sse2_xxx_sd
+ let Constraints = "";
+ }
+
def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
@@ -748,6 +797,26 @@
// int_x86_sse2_xxx_sd
// Intrinsic operation, reg+mem.
+ def V#NAME#SSrm_Int : VSSI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, ssmem:$src2),
+ !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
+ [(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse_",
+ !strconcat(OpcodeStr, "_ss")) VR128:$src1,
+ sse_load_f32:$src2))]> {
+ // int_x86_sse_xxx_ss
+ let Constraints = "";
+ }
+
+ def V#NAME#SDrm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst),
+ (ins VR128:$src1, sdmem:$src2),
+ !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
+ [(set VR128:$dst, (!nameconcat<Intrinsic>("int_x86_sse2_",
+ !strconcat(OpcodeStr, "_sd")) VR128:$src1,
+ sse_load_f64:$src2))]> {
+ // int_x86_sse2_xxx_sd
+ let Constraints = "";
+ }
+
def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
@@ -769,8 +838,11 @@
// Arithmetic instructions
defm ADD : basic_sse12_fp_binop_rm<0x58, "add", fadd, 1>;
defm MUL : basic_sse12_fp_binop_rm<0x59, "mul", fmul, 1>;
-defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
-defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
+
+let isCommutable = 0 in {
+ defm SUB : basic_sse12_fp_binop_rm<0x5C, "sub", fsub>;
+ defm DIV : basic_sse12_fp_binop_rm<0x5E, "div", fdiv>;
+}
/// sse12_fp_binop_rm - Other SSE 1 & 2 binops
///
@@ -785,30 +857,31 @@
multiclass sse12_fp_binop_rm<bits<8> opc, string OpcodeStr,
SDNode OpNode, bit Commutable = 0> {
- // Scalar operation, reg+reg.
- def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
- let isCommutable = Commutable;
+ let Constraints = "", isAsmParserOnly = 1, hasVEX_4VPrefix = 1 in {
+ // Scalar operation, reg+reg.
+ let Prefix = 12 /* XS */ in
+ defm V#NAME#SS : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ OpNode, FR32, f32mem>;
+
+ let Prefix = 11 /* XD */ in
+ defm V#NAME#SD : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
+ OpNode, FR64, f64mem>;
+ }
+
+ let Constraints = "$src1 = $dst" in {
+ // Scalar operation, reg+reg.
+ let Prefix = 12 /* XS */ in
+ defm SS : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
+ OpNode, FR32, f32mem>;
+ let Prefix = 11 /* XD */ in
+ defm SD : sse12_fp_scalar<opc,
+ !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
+ OpNode, FR64, f64mem>;
}
- def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
- let isCommutable = Commutable;
- }
-
- // Scalar operation, reg+mem.
- def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
- (ins FR32:$src1, f32mem:$src2),
- !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
- [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;
-
- def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
- (ins FR64:$src1, f64mem:$src2),
- !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
- [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
-
// Vector operation, reg+reg.
def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
@@ -913,8 +986,10 @@
}
}
-defm MAX : sse12_fp_binop_rm<0x5F, "max", X86fmax>;
-defm MIN : sse12_fp_binop_rm<0x5D, "min", X86fmin>;
+let isCommutable = 0 in {
+ defm MAX : sse12_fp_binop_rm<0x5F, "max", X86fmax>;
+ defm MIN : sse12_fp_binop_rm<0x5D, "min", X86fmin>;
+}
//===----------------------------------------------------------------------===//
// SSE packed FP Instructions
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86MCCodeEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -349,6 +349,13 @@
//
unsigned char VEX_R = 0x1;
+ // VEX_X: equivalent to REX.X, only used when a
+ // register is used as the index in the SIB byte.
+ //
+ // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
+ // 0: Same as REX.X=1 (64-bit mode only)
+ unsigned char VEX_X = 0x1;
+
// VEX_B:
//
// 1: Same as REX_B=0 (ignored in 32-bit mode)
@@ -385,12 +392,16 @@
// functionality of a SIMD prefix
//
// 0b00: None
- // 0b01: 66 (not handled yet)
+ // 0b01: 66
// 0b10: F3
// 0b11: F2
//
unsigned char VEX_PP = 0;
+ // Encode the operand size opcode prefix as needed.
+ if (TSFlags & X86II::OpSize)
+ VEX_PP = 0x01;
+
switch (TSFlags & X86II::Op0Mask) {
default: assert(0 && "Invalid prefix!");
case 0: break; // No prefix!
@@ -415,9 +426,12 @@
unsigned NumOps = MI.getNumOperands();
unsigned i = 0;
unsigned SrcReg = 0, SrcRegNum = 0;
+ bool IsSrcMem = false;
switch (TSFlags & X86II::FormMask) {
case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!");
+ case X86II::MRMSrcMem:
+ IsSrcMem = true;
case X86II::MRMSrcReg:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
@@ -447,6 +461,9 @@
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
VEX_B = 0x0;
+ if (!VEX_B && MO.isReg() && IsSrcMem &&
+ X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
+ VEX_X = 0x0;
}
break;
default:
@@ -464,11 +481,9 @@
// | C5h | | R | vvvv | L | pp |
// +-----+ +-------------------+
//
- // Note: VEX.X isn't used so far
- //
unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
- if (VEX_B /* & VEX_X */) { // 2 byte VEX prefix
+ if (VEX_B && VEX_X) { // 2 byte VEX prefix
EmitByte(0xC5, CurByte, OS);
EmitByte(LastByte | (VEX_R << 7), CurByte, OS);
return;
@@ -476,7 +491,7 @@
// 3 byte VEX prefix
EmitByte(0xC4, CurByte, OS);
- EmitByte(VEX_R << 7 | 1 << 6 /* VEX_X = 1 */ | VEX_5M, CurByte, OS);
+ EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_5M, CurByte, OS);
EmitByte(LastByte | (VEX_W << 7), CurByte, OS);
}
@@ -491,7 +506,7 @@
unsigned REX = 0;
if (TSFlags & X86II::REX_W)
- REX |= 1 << 3;
+ REX |= 1 << 3; // set REX.W
if (MI.getNumOperands() == 0) return REX;
@@ -509,7 +524,7 @@
if (!X86InstrInfo::isX86_64NonExtLowByteReg(Reg)) continue;
// FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
// that returns non-zero.
- REX |= 0x40;
+ REX |= 0x40; // REX fixed encoding prefix
break;
}
@@ -518,25 +533,25 @@
case X86II::MRMSrcReg:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 0;
+ REX |= 1 << 0; // set REX.B
}
break;
case X86II::MRMSrcMem: {
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
unsigned Bit = 0;
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
if (X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit;
+ REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
Bit++;
}
}
@@ -551,13 +566,13 @@
i = isTwoAddr ? 1 : 0;
if (NumOps > e && MI.getOperand(e).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
unsigned Bit = 0;
for (; i != e; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg()) {
if (X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit;
+ REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
Bit++;
}
}
@@ -566,12 +581,12 @@
default:
if (MI.getOperand(0).isReg() &&
X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 0;
+ REX |= 1 << 0; // set REX.B
i = isTwoAddr ? 2 : 1;
for (unsigned e = NumOps; i != e; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (MO.isReg() && X86InstrInfo::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 2;
+ REX |= 1 << 2; // set REX.R
}
break;
}
@@ -751,8 +766,13 @@
AddrOperands = X86AddrNumOperands - 1; // No segment register
else
AddrOperands = X86AddrNumOperands;
-
- EmitMemModRMByte(MI, CurOp+1, GetX86RegNum(MI.getOperand(CurOp)),
+
+ if (IsAVXForm)
+ AddrOperands++;
+
+ // Skip the register source (which is encoded in VEX_VVVV)
+ EmitMemModRMByte(MI, IsAVXForm ? CurOp+2 : CurOp+1,
+ GetX86RegNum(MI.getOperand(CurOp)),
TSFlags, CurByte, OS, Fixups);
CurOp += AddrOperands + 1;
break;
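
With VEX_X wired up, the compact two-byte C5 form is only usable when both VEX_X and
VEX_B are still at their default value of 1 (no REX.X/REX.B equivalent needed);
otherwise the three-byte C4 form carries the extra fields. A small self-contained
sketch of that byte packing, mirroring the field layout used above (the concrete
field values in main() are examples only, not derived from the patch):

  #include <cstdint>
  #include <cstdio>

  // pp in bits 0-1, L in bit 2, vvvv (already inverted) in bits 3-6.
  // B only gates the choice of form here, exactly as in the emitter code above.
  static void emitVEX(uint8_t R, uint8_t X, uint8_t B, uint8_t W,
                      uint8_t M5, uint8_t V4, uint8_t L, uint8_t PP) {
    uint8_t LastByte = PP | (L << 2) | (V4 << 3);
    if (B && X) {                                      // 2-byte VEX prefix
      printf("C5 %02X\n", (unsigned)(LastByte | (R << 7)));
      return;
    }
    printf("C4 %02X %02X\n",                           // 3-byte VEX prefix
           (unsigned)((R << 7) | (X << 6) | M5),
           (unsigned)(LastByte | (W << 7)));
  }

  int main() {
    emitVEX(/*R=*/1, /*X=*/1, /*B=*/1, /*W=*/0,
            /*M5=*/1, /*V4=*/0xF, /*L=*/0, /*PP=*/1);  // prints "C5 F9"
    return 0;
  }
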
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86RegisterInfo.cpp Fri Jul 2 04:34:51 2010
@@ -1279,9 +1279,11 @@
for (unsigned i = 0; i != 5; ++i)
MIB.addOperand(MBBI->getOperand(i));
} else if (RetOpcode == X86::TCRETURNri64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
} else {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
+ addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = prior(MBBI);
Modified: llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/X86/X86Subtarget.cpp Fri Jul 2 04:34:51 2010
@@ -53,9 +53,12 @@
if (GV->hasDLLImportLinkage())
return X86II::MO_DLLIMPORT;
- // Materializable GVs (in JIT lazy compilation mode) do not require an
- // extra load from stub.
- bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
+ // Determine whether this is a reference to a definition or a declaration.
+ // Materializable GVs (in JIT lazy compilation mode) do not require an extra
+ // load from stub.
+ bool isDecl = GV->hasAvailableExternallyLinkage();
+ if (GV->isDeclaration() && !GV->isMaterializable())
+ isDecl = true;
// X86-64 in PIC mode.
if (isPICStyleRIPRel()) {
Modified: llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.cpp (original)
+++ llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.cpp Fri Jul 2 04:34:51 2010
@@ -299,9 +299,8 @@
unsigned
XCoreInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond)const{
- // FIXME there should probably be a DebugLoc argument here
- DebugLoc dl;
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 2 || Cond.size() == 0) &&
@@ -310,11 +309,11 @@
if (FBB == 0) { // One way branch.
if (Cond.empty()) {
// Unconditional branch
- BuildMI(&MBB, dl, get(XCore::BRFU_lu6)).addMBB(TBB);
+ BuildMI(&MBB, DL, get(XCore::BRFU_lu6)).addMBB(TBB);
} else {
// Conditional branch.
unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm());
- BuildMI(&MBB, dl, get(Opc)).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg())
.addMBB(TBB);
}
return 1;
@@ -323,9 +322,9 @@
// Two-way Conditional branch.
assert(Cond.size() == 2 && "Unexpected number of components!");
unsigned Opc = GetCondBranchFromCond((XCore::CondCode)Cond[0].getImm());
- BuildMI(&MBB, dl, get(Opc)).addReg(Cond[1].getReg())
+ BuildMI(&MBB, DL, get(Opc)).addReg(Cond[1].getReg())
.addMBB(TBB);
- BuildMI(&MBB, dl, get(XCore::BRFU_lu6)).addMBB(FBB);
+ BuildMI(&MBB, DL, get(XCore::BRFU_lu6)).addMBB(FBB);
return 2;
}
Modified: llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.h (original)
+++ llvm/branches/wendling/eh/lib/Target/XCore/XCoreInstrInfo.h Fri Jul 2 04:34:51 2010
@@ -58,8 +58,9 @@
bool AllowModify) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond) const;
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/IndVarSimplify.cpp Fri Jul 2 04:34:51 2010
@@ -467,6 +467,17 @@
}
bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
+ // If LoopSimplify form is not available, stay out of trouble. Some notes:
+ // - LSR currently only supports LoopSimplify-form loops. Indvars'
+ // canonicalization can be a pessimization without LSR to "clean up"
+ // afterwards.
+ // - We depend on having a preheader; in particular,
+ // Loop::getCanonicalInductionVariable only supports loops with preheaders,
+ // and we're in trouble if we can't find the induction variable even when
+ // we've manually inserted one.
+ if (!L->isLoopSimplifyForm())
+ return false;
+
IU = &getAnalysis<IVUsers>();
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/JumpThreading.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/JumpThreading.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/JumpThreading.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/JumpThreading.cpp Fri Jul 2 04:34:51 2010
@@ -870,9 +870,14 @@
// Add all the unavailable predecessors to the PredsToSplit list.
for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB);
- PI != PE; ++PI)
+ PI != PE; ++PI) {
+ // If the predecessor is an indirect goto, we can't split the edge.
+ if (isa<IndirectBrInst>((*PI)->getTerminator()))
+ return false;
+
if (!AvailablePredSet.count(*PI))
PredsToSplit.push_back(*PI);
+ }
// Split them out to their own block.
UnavailablePred =
Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyCFGPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyCFGPass.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyCFGPass.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyCFGPass.cpp Fri Jul 2 04:34:51 2010
@@ -137,6 +137,9 @@
// they should be changed to unreachable by passes that can't modify the
// CFG.
if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+ // Don't touch volatile stores.
+ if (SI->isVolatile()) continue;
+
Value *Ptr = SI->getOperand(1);
if (isa<UndefValue>(Ptr) ||
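
The new check matters because a volatile store through a null or undef pointer can
be intentional (a deliberate fault, or a poke at a fixed machine address), so the
pass must no longer rewrite such blocks into 'unreachable'. A source-level
illustration of the kind of code this protects (example code, not taken from the
patch):

  // The volatile store must survive; before this change SimplifyCFG could have
  // replaced it, and everything after it in the block, with 'unreachable'.
  void poke_null(void) {
    volatile int *p = (volatile int *)0;
    *p = 1;
  }
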
Modified: llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Scalar/SimplifyLibCalls.cpp Fri Jul 2 04:34:51 2010
@@ -66,6 +66,11 @@
this->TD = TD;
if (CI->getCalledFunction())
Context = &CI->getCalledFunction()->getContext();
+
+ // We never change the calling convention.
+ if (CI->getCallingConv() != llvm::CallingConv::C)
+ return NULL;
+
return CallOptimizer(CI->getCalledFunction(), CI, B);
}
};
@@ -92,6 +97,20 @@
return true;
}
+/// IsOnlyUsedInEqualityComparison - Return true if it is only used in equality
+/// comparisons with With.
+static bool IsOnlyUsedInEqualityComparison(Value *V, Value *With) {
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
+ UI != E; ++UI) {
+ if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+ if (IC->isEquality() && IC->getOperand(1) == With)
+ continue;
+ // Unknown instruction.
+ return false;
+ }
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// String and Memory LibCall Optimizations
//===----------------------------------------------------------------------===//
@@ -328,6 +347,9 @@
if (Length == 0) // strncmp(x,y,0) -> 0
return ConstantInt::get(CI->getType(), 0);
+ if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
+ return EmitMemCmp(Str1P, Str2P, CI->getOperand(3), B, TD);
+
std::string Str1, Str2;
bool HasStr1 = GetConstantStringInfo(Str1P, Str1);
bool HasStr2 = GetConstantStringInfo(Str2P, Str2);
@@ -503,6 +525,23 @@
if (CI->getOperand(1) == CI->getOperand(2))
return B.CreateBitCast(CI->getOperand(1), CI->getType());
+ // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
+ if (TD && IsOnlyUsedInEqualityComparison(CI, CI->getOperand(1))) {
+ Value *StrLen = EmitStrLen(CI->getOperand(2), B, TD);
+ Value *StrNCmp = EmitStrNCmp(CI->getOperand(1), CI->getOperand(2),
+ StrLen, B, TD);
+ for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
+ UI != UE; ) {
+ ICmpInst *Old = cast<ICmpInst>(UI++);
+ Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
+ ConstantInt::getNullValue(StrNCmp->getType()),
+ "cmp");
+ Old->replaceAllUsesWith(Cmp);
+ Old->eraseFromParent();
+ }
+ return CI;
+ }
+
// See if either input string is a constant string.
std::string SearchStr, ToFindStr;
bool HasStr1 = GetConstantStringInfo(CI->getOperand(1), SearchStr);
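
Together with the strncmp(x,y,1) -> memcmp(x,y,1) fold added above, this rewrites an
equality test of strstr's result against its first operand into a length-bounded
string comparison. At the source level the transformation corresponds to the
following (illustration only, not code from the patch):

  #include <cstring>
  #include <cstdio>

  bool startsWith(const char *Haystack, const char *Needle) {
    // Before the fold the IR computes:  strstr(Haystack, Needle) == Haystack
    // After it, the equivalent of:
    return strncmp(Haystack, Needle, strlen(Needle)) == 0;
  }

  int main() {
    printf("%d\n", startsWith("strstr-to-strncmp", "strstr"));  // prints 1
    return 0;
  }
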
Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/BuildLibCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/BuildLibCalls.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/BuildLibCalls.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/BuildLibCalls.cpp Fri Jul 2 04:34:51 2010
@@ -69,6 +69,31 @@
return CI;
}
+/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
+ IRBuilder<> &B, const TargetData *TD) {
+ Module *M = B.GetInsertBlock()->getParent()->getParent();
+ AttributeWithIndex AWI[3];
+ AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture);
+ AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture);
+ AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly |
+ Attribute::NoUnwind);
+
+ LLVMContext &Context = B.GetInsertBlock()->getContext();
+ Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI, 3),
+ B.getInt32Ty(),
+ B.getInt8PtrTy(),
+ B.getInt8PtrTy(),
+ TD->getIntPtrType(Context), NULL);
+ CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
+ CastToCStr(Ptr2, B), Len, "strncmp");
+
+ if (const Function *F = dyn_cast<Function>(StrNCmp->stripPointerCasts()))
+ CI->setCallingConv(F->getCallingConv());
+
+ return CI;
+}
+
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
Modified: llvm/branches/wendling/eh/lib/Transforms/Utils/DemoteRegToStack.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/Transforms/Utils/DemoteRegToStack.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/Transforms/Utils/DemoteRegToStack.cpp (original)
+++ llvm/branches/wendling/eh/lib/Transforms/Utils/DemoteRegToStack.cpp Fri Jul 2 04:34:51 2010
@@ -35,7 +35,7 @@
I.eraseFromParent();
return 0;
}
-
+
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
@@ -46,7 +46,7 @@
Slot = new AllocaInst(I.getType(), 0, I.getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Change all of the users of the instruction to read from the stack slot
// instead.
while (!I.use_empty()) {
@@ -67,7 +67,7 @@
Value *&V = Loads[PN->getIncomingBlock(i)];
if (V == 0) {
// Insert the load into the predecessor block
- V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
+ V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
}
PN->setIncomingValue(i, V);
@@ -110,8 +110,8 @@
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst* llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
- P->eraseFromParent();
- return 0;
+ P->eraseFromParent();
+ return 0;
}
// Create a stack slot to hold the value.
@@ -124,23 +124,23 @@
Slot = new AllocaInst(P->getType(), 0, P->getName()+".reg2mem",
F->getEntryBlock().begin());
}
-
+
// Iterate over each operand, insert store in each predecessor.
for (unsigned i = 0, e = P->getNumIncomingValues(); i < e; ++i) {
if (InvokeInst *II = dyn_cast<InvokeInst>(P->getIncomingValue(i))) {
- assert(II->getParent() != P->getIncomingBlock(i) &&
+ assert(II->getParent() != P->getIncomingBlock(i) &&
"Invoke edge not supported yet"); II=II;
}
- new StoreInst(P->getIncomingValue(i), Slot,
+ new StoreInst(P->getIncomingValue(i), Slot,
P->getIncomingBlock(i)->getTerminator());
}
-
+
// Insert load in place of the phi and replace all uses.
Value *V = new LoadInst(Slot, P->getName()+".reload", P);
P->replaceAllUsesWith(V);
-
+
// Delete phi.
P->eraseFromParent();
-
+
return Slot;
}
Modified: llvm/branches/wendling/eh/lib/VMCore/Module.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Module.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Module.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Module.cpp Fri Jul 2 04:34:51 2010
@@ -17,6 +17,7 @@
#include "llvm/DerivedTypes.h"
#include "llvm/GVMaterializer.h"
#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/LeakDetector.h"
@@ -316,6 +317,12 @@
return NamedMDSymTab->lookup(Name);
}
+NamedMDNode *Module::getNamedMetadataUsingTwine(Twine Name) const {
+ SmallString<256> NameData;
+ StringRef NameRef = Name.toStringRef(NameData);
+ return NamedMDSymTab->lookup(NameRef);
+}
+
/// getOrInsertNamedMetadata - Return the first named MDNode in the module
/// with the specified name. This method returns a new NamedMDNode if a
/// NamedMDNode with the specified name is not found.
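
The Twine overload lets a client look up a named metadata node whose name is built
from pieces without materializing a std::string first. A hedged usage sketch (the
"llvm.dbg.lv." prefix and FnName are illustrative values, not taken from the patch):

  // FnName is a StringRef, e.g. F->getName().
  if (NamedMDNode *NMD =
        M->getNamedMetadataUsingTwine(Twine("llvm.dbg.lv.") + FnName)) {
    // ... inspect NMD->getOperand(i) ...
  }
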
Modified: llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp (original)
+++ llvm/branches/wendling/eh/lib/VMCore/Verifier.cpp Fri Jul 2 04:34:51 2010
@@ -85,7 +85,8 @@
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
if (I->empty() || !I->back().isTerminator()) {
- dbgs() << "Basic Block does not have terminator!\n";
+ dbgs() << "Basic Block in function '" << F.getName()
+ << "' does not have terminator!\n";
WriteAsOperand(dbgs(), I, true);
dbgs() << "\n";
Broken = true;
Modified: llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/2008-07-29-SMinExpr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/2008-07-29-SMinExpr.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/2008-07-29-SMinExpr.ll (original)
+++ llvm/branches/wendling/eh/test/Analysis/ScalarEvolution/2008-07-29-SMinExpr.ll Fri Jul 2 04:34:51 2010
@@ -22,5 +22,5 @@
ret i32 %j.0.lcssa
}
-; CHECK: backedge-taken count is (-2147483632 + ((-1 + (-1 * %y)) smax (-1 + (-1 * %x))))
+; CHECK: backedge-taken count is (-2147483632 + ((-1 + (-1 * %{{[xy]}})) smax (-1 + (-1 * %{{[xy]}}))))
Modified: llvm/branches/wendling/eh/test/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CMakeLists.txt?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/test/CMakeLists.txt Fri Jul 2 04:34:51 2010
@@ -35,6 +35,7 @@
-e "s#\@LLVM_BINARY_DIR\@#${LLVM_BINARY_DIR}#"
-e "s#\@LLVM_TOOLS_DIR\@#${LLVM_TOOLS_BINARY_DIR}/${CMAKE_CFG_INTDIR}#"
-e "s#\@LLVMGCCDIR\@##"
+ -e "s#\@PYTHON_EXECUTABLE\@#${PYTHON_EXECUTABLE}#"
${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in >
${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
COMMAND sed -e "s#\@LLVM_SOURCE_DIR\@#${LLVM_MAIN_SRC_DIR}#"
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=arm-linux-gnueabi -regalloc=local
; RUN: llc < %s -mtriple=arm-linux-gnueabi -regalloc=fast
; PR1925
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2008-02-29-RegAllocLocal.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=arm-apple-darwin -regalloc=local
; RUN: llc < %s -mtriple=arm-apple-darwin -regalloc=fast
; PR1925
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-05-07-RegAllocLocal.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=armv5-unknown-linux-gnueabi -O0 -regalloc=local
; RUN: llc < %s -mtriple=armv5-unknown-linux-gnueabi -O0 -regalloc=fast
; PR4100
@.str = external constant [30 x i8] ; <[30 x i8]*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
%struct.rtunion = type { i64 }
%struct.rtx_def = type { i16, i8, i8, [1 x %struct.rtunion] }
-define arm_apcscc void @simplify_unary_real(i8* nocapture %p) nounwind {
+define void @simplify_unary_real(i8* nocapture %p) nounwind {
entry:
%tmp121 = load i64* null, align 4 ; <i64> [#uses=1]
%0 = getelementptr %struct.rtx_def* null, i32 0, i32 3, i32 3, i32 0 ; <i64*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -8,11 +8,11 @@
@"\01LC16" = external constant [33 x i8], align 1 ; <[33 x i8]*> [#uses=1]
@"\01LC17" = external constant [47 x i8], align 1 ; <[47 x i8]*> [#uses=1]
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(i8* nocapture, ...) nounwind
-declare arm_apcscc void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
+declare void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
+define void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
entry:
br i1 undef, label %bb5, label %bb
@@ -44,17 +44,17 @@
store i32 0, i32* @no_mat, align 4
store i32 0, i32* @no_mis, align 4
%3 = getelementptr i8* %B, i32 %0 ; <i8*> [#uses=1]
- tail call arm_apcscc void @diff(i8* undef, i8* %3, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
+ tail call void @diff(i8* undef, i8* %3, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
%4 = sitofp i32 undef to double ; <double> [#uses=1]
%5 = fdiv double %4, 1.000000e+01 ; <double> [#uses=1]
- %6 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([29 x i8]* @"\01LC12", i32 0, i32 0), double %5) nounwind ; <i32> [#uses=0]
+ %6 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([29 x i8]* @"\01LC12", i32 0, i32 0), double %5) nounwind ; <i32> [#uses=0]
%7 = load i32* @al_len, align 4 ; <i32> [#uses=1]
%8 = load i32* @no_mat, align 4 ; <i32> [#uses=1]
%9 = load i32* @no_mis, align 4 ; <i32> [#uses=1]
%10 = sub i32 %7, %8 ; <i32> [#uses=1]
%11 = sub i32 %10, %9 ; <i32> [#uses=1]
- %12 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC16", i32 0, i32 0), i32 %11) nounwind ; <i32> [#uses=0]
- %13 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 undef) nounwind ; <i32> [#uses=0]
+ %12 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC16", i32 0, i32 0), i32 %11) nounwind ; <i32> [#uses=0]
+ %13 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 undef) nounwind ; <i32> [#uses=0]
br i1 undef, label %bb15, label %bb12
bb12: ; preds = %bb11
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll Fri Jul 2 04:34:51 2010
@@ -6,11 +6,11 @@
@"\01LC15" = external constant [33 x i8], align 1 ; <[33 x i8]*> [#uses=1]
@"\01LC17" = external constant [47 x i8], align 1 ; <[47 x i8]*> [#uses=1]
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(i8* nocapture, ...) nounwind
-declare arm_apcscc void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
+declare void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
+define void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
entry:
br i1 undef, label %bb5, label %bb
@@ -41,11 +41,11 @@
store i32 0, i32* @no_mat, align 4
store i32 0, i32* @no_mis, align 4
%4 = getelementptr i8* %B, i32 %0 ; <i8*> [#uses=1]
- tail call arm_apcscc void @diff(i8* undef, i8* %4, i32 undef, i32 %3, i32 undef, i32 undef) nounwind
- %5 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC11", i32 0, i32 0), i32 %tmp13) nounwind ; <i32> [#uses=0]
+ tail call void @diff(i8* undef, i8* %4, i32 undef, i32 %3, i32 undef, i32 undef) nounwind
+ %5 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC11", i32 0, i32 0), i32 %tmp13) nounwind ; <i32> [#uses=0]
%6 = load i32* @no_mis, align 4 ; <i32> [#uses=1]
- %7 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC15", i32 0, i32 0), i32 %6) nounwind ; <i32> [#uses=0]
- %8 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 %2) nounwind ; <i32> [#uses=0]
+ %7 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC15", i32 0, i32 0), i32 %6) nounwind ; <i32> [#uses=0]
+ %8 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([47 x i8]* @"\01LC17", i32 0, i32 0), i32 undef, i32 %1, i32 undef, i32 %2) nounwind ; <i32> [#uses=0]
br i1 undef, label %bb15, label %bb12
bb12: ; preds = %bb11
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
@JJ = external global i32* ; <i32**> [#uses=1]
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
+define void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
entry:
br i1 undef, label %bb5, label %bb
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll Fri Jul 2 04:34:51 2010
@@ -6,9 +6,9 @@
@no_mis = external global i32 ; <i32*> [#uses=1]
@name1 = external global i8* ; <i8**> [#uses=1]
-declare arm_apcscc void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
+declare void @diff(i8*, i8*, i32, i32, i32, i32) nounwind
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
+define void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
entry:
br i1 undef, label %bb5, label %bb
@@ -35,7 +35,7 @@
store i32 0, i32* @no_mis, align 4
%1 = getelementptr i8* %A, i32 0 ; <i8*> [#uses=1]
%2 = getelementptr i8* %B, i32 0 ; <i8*> [#uses=1]
- tail call arm_apcscc void @diff(i8* %1, i8* %2, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
+ tail call void @diff(i8* %1, i8* %2, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
br i1 undef, label %bb15, label %bb12
bb12: ; preds = %bb11
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
@XX = external global i32* ; <i32**> [#uses=1]
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
+define void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
entry:
br i1 undef, label %bb5, label %bb
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-01-CommuteBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-01-CommuteBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-01-CommuteBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-01-CommuteBug.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
@II = external global i32* ; <i32**> [#uses=1]
@JJ = external global i32* ; <i32**> [#uses=1]
-define arm_apcscc void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
+define void @SIM(i8* %A, i8* %B, i32 %M, i32 %N, i32 %K, [256 x i32]* %V, i32 %Q, i32 %R, i32 %nseq) nounwind {
entry:
br i1 undef, label %bb5, label %bb
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-18-RewriterBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-18-RewriterBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-18-RewriterBug.ll Fri Jul 2 04:34:51 2010
@@ -8,7 +8,7 @@
@_2E_str7 = internal constant [21 x i8] c"ERROR: Only 1 point!\00", section "__TEXT,__cstring,cstring_literals", align 1 ; <[21 x i8]*> [#uses=1]
@llvm.used = appending global [1 x i8*] [i8* bitcast (void (%struct.EDGE_PAIR*, %struct.VERTEX*, %struct.VERTEX*)* @build_delaunay to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-define arm_apcscc void @build_delaunay(%struct.EDGE_PAIR* noalias nocapture sret %agg.result, %struct.VERTEX* %tree, %struct.VERTEX* %extra) nounwind {
+define void @build_delaunay(%struct.EDGE_PAIR* noalias nocapture sret %agg.result, %struct.VERTEX* %tree, %struct.VERTEX* %extra) nounwind {
entry:
%delright = alloca %struct.EDGE_PAIR, align 8 ; <%struct.EDGE_PAIR*> [#uses=3]
%delleft = alloca %struct.EDGE_PAIR, align 8 ; <%struct.EDGE_PAIR*> [#uses=3]
@@ -29,10 +29,10 @@
br i1 %6, label %get_low.exit, label %bb1.i
get_low.exit: ; preds = %bb1.i
- call arm_apcscc void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delright, %struct.VERTEX* %2, %struct.VERTEX* %extra) nounwind
+ call void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delright, %struct.VERTEX* %2, %struct.VERTEX* %extra) nounwind
%7 = getelementptr %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
%8 = load %struct.VERTEX** %7, align 4 ; <%struct.VERTEX*> [#uses=1]
- call arm_apcscc void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delleft, %struct.VERTEX* %8, %struct.VERTEX* %tree) nounwind
+ call void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delleft, %struct.VERTEX* %8, %struct.VERTEX* %tree) nounwind
%9 = getelementptr %struct.EDGE_PAIR* %delleft, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
%10 = load %struct.edge_rec** %9, align 8 ; <%struct.edge_rec*> [#uses=2]
%11 = getelementptr %struct.EDGE_PAIR* %delleft, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
@@ -141,7 +141,7 @@
%85 = inttoptr i32 %84 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%86 = getelementptr %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%87 = load %struct.VERTEX** %86, align 4 ; <%struct.VERTEX*> [#uses=1]
- %88 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=6]
+ %88 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=6]
%89 = getelementptr %struct.edge_rec* %88, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %88, %struct.edge_rec** %89, align 4
%90 = getelementptr %struct.edge_rec* %88, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=2]
@@ -780,7 +780,7 @@
%592 = and i32 %589, -64 ; <i32> [#uses=1]
%593 = or i32 %591, %592 ; <i32> [#uses=1]
%594 = inttoptr i32 %593 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %595 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
+ %595 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
%596 = getelementptr %struct.edge_rec* %595, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %595, %struct.edge_rec** %596, align 4
%597 = getelementptr %struct.edge_rec* %595, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
@@ -882,7 +882,7 @@
%677 = and i32 %674, -64 ; <i32> [#uses=1]
%678 = or i32 %676, %677 ; <i32> [#uses=1]
%679 = inttoptr i32 %678 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %680 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
+ %680 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
%681 = getelementptr %struct.edge_rec* %680, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=5]
store %struct.edge_rec* %680, %struct.edge_rec** %681, align 4
%682 = getelementptr %struct.edge_rec* %680, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
@@ -1005,15 +1005,15 @@
%762 = getelementptr %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
%763 = load %struct.VERTEX** %762, align 4 ; <%struct.VERTEX*> [#uses=4]
%764 = icmp eq %struct.VERTEX* %763, null ; <i1> [#uses=1]
- %765 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
+ %765 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
%766 = getelementptr %struct.edge_rec* %765, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %765, %struct.edge_rec** %766, align 4
%767 = getelementptr %struct.edge_rec* %765, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
br i1 %764, label %bb10, label %bb11
bb8: ; preds = %entry
- %768 = call arm_apcscc i32 @puts(i8* getelementptr ([21 x i8]* @_2E_str7, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
- call arm_apcscc void @exit(i32 -1) noreturn nounwind
+ %768 = call i32 @puts(i8* getelementptr ([21 x i8]* @_2E_str7, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
+ call void @exit(i32 -1) noreturn nounwind
unreachable
bb10: ; preds = %bb7
@@ -1053,7 +1053,7 @@
store %struct.VERTEX* %tree, %struct.VERTEX** %790, align 4
%791 = getelementptr %struct.edge_rec* %785, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %783, %struct.edge_rec** %791, align 4
- %792 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
+ %792 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
%793 = getelementptr %struct.edge_rec* %792, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %792, %struct.edge_rec** %793, align 4
%794 = getelementptr %struct.edge_rec* %792, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
@@ -1117,7 +1117,7 @@
%843 = or i32 %841, %842 ; <i32> [#uses=1]
%844 = inttoptr i32 %843 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%845 = load %struct.VERTEX** %767, align 4 ; <%struct.VERTEX*> [#uses=1]
- %846 = call arm_apcscc %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
+ %846 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
%847 = getelementptr %struct.edge_rec* %846, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=7]
store %struct.edge_rec* %846, %struct.edge_rec** %847, align 4
%848 = getelementptr %struct.edge_rec* %846, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
@@ -1316,8 +1316,8 @@
ret void
}
-declare arm_apcscc i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(i8* nocapture) nounwind
-declare arm_apcscc void @exit(i32) noreturn nounwind
+declare void @exit(i32) noreturn nounwind
-declare arm_apcscc %struct.edge_rec* @alloc_edge() nounwind
+declare %struct.edge_rec* @alloc_edge() nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -6,9 +6,9 @@
%struct.cli_bm_patt = type { i8*, i8*, i16, i16, i8*, i8*, i8, %struct.cli_bm_patt*, i16 }
%struct.cli_matcher = type { i16, i8, i8*, %struct.cli_bm_patt**, i32*, i32, i8, i8, %struct.cli_ac_node*, %struct.cli_ac_node**, %struct.cli_ac_patt**, i32, i32, i32 }
-declare arm_apcscc i32 @strlen(i8* nocapture) nounwind readonly
+declare i32 @strlen(i8* nocapture) nounwind readonly
-define arm_apcscc i32 @cli_ac_addsig(%struct.cli_matcher* nocapture %root, i8* %virname, i8* %hexsig, i32 %sigid, i16 zeroext %parts, i16 zeroext %partno, i16 zeroext %type, i32 %mindist, i32 %maxdist, i8* %offset, i8 zeroext %target) nounwind {
+define i32 @cli_ac_addsig(%struct.cli_matcher* nocapture %root, i8* %virname, i8* %hexsig, i32 %sigid, i16 zeroext %parts, i16 zeroext %partno, i16 zeroext %type, i32 %mindist, i32 %maxdist, i8* %offset, i8 zeroext %target) nounwind {
entry:
br i1 undef, label %bb126, label %bb1
@@ -86,7 +86,7 @@
%0 = load i16* undef, align 4 ; <i16> [#uses=1]
%1 = icmp eq i16 %0, 0 ; <i1> [#uses=1]
%iftmp.20.0 = select i1 %1, i8* %hexsig, i8* null ; <i8*> [#uses=1]
- %2 = tail call arm_apcscc i32 @strlen(i8* %iftmp.20.0) nounwind readonly ; <i32> [#uses=0]
+ %2 = tail call i32 @strlen(i8* %iftmp.20.0) nounwind readonly ; <i32> [#uses=0]
unreachable
bb126: ; preds = %entry
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll Fri Jul 2 04:34:51 2010
@@ -6,7 +6,7 @@
%struct.cli_bm_patt = type { i8*, i8*, i16, i16, i8*, i8*, i8, %struct.cli_bm_patt*, i16 }
%struct.cli_matcher = type { i16, i8, i8*, %struct.cli_bm_patt**, i32*, i32, i8, i8, %struct.cli_ac_node*, %struct.cli_ac_node**, %struct.cli_ac_patt**, i32, i32, i32 }
-define arm_apcscc i32 @cli_ac_addsig(%struct.cli_matcher* nocapture %root, i8* %virname, i8* %hexsig, i32 %sigid, i16 zeroext %parts, i16 zeroext %partno, i16 zeroext %type, i32 %mindist, i32 %maxdist, i8* %offset, i8 zeroext %target) nounwind {
+define i32 @cli_ac_addsig(%struct.cli_matcher* nocapture %root, i8* %virname, i8* %hexsig, i32 %sigid, i16 zeroext %parts, i16 zeroext %partno, i16 zeroext %type, i32 %mindist, i32 %maxdist, i8* %offset, i8 zeroext %target) nounwind {
entry:
br i1 undef, label %bb126, label %bb1
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
declare double @llvm.exp.f64(double) nounwind readonly
-define arm_apcscc void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind {
+define void @findratio(double* nocapture %res1, double* nocapture %res2) nounwind {
entry:
br label %bb
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
target triple = "armv7-apple-darwin9"
-define arm_apcscc <4 x i32> @scale(<4 x i32> %v, i32 %f) nounwind {
+define <4 x i32> @scale(<4 x i32> %v, i32 %f) nounwind {
entry:
%v_addr = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
%f_addr = alloca i32 ; <i32*> [#uses=2]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert-2.ll Fri Jul 2 04:34:51 2010
@@ -1,10 +1,10 @@
-; RUN: llc < %s -mtriple=armv6-elf
+; RUN: llc < %s -mtriple=arm-linux-gnueabi
; PR4528
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
target triple = "armv6-elf"
-define arm_aapcscc i32 @file_read_actor(i32* nocapture %desc, i32* %page, i32 %offset, i32 %size) nounwind optsize {
+define i32 @file_read_actor(i32* nocapture %desc, i32* %page, i32 %offset, i32 %size) nounwind optsize {
entry:
br i1 undef, label %fault_in_pages_writeable.exit, label %bb5.i
@@ -26,8 +26,8 @@
unreachable
bb3: ; preds = %fault_in_pages_writeable.exit
- %1 = tail call arm_aapcscc i32 @__copy_to_user(i8* undef, i8* undef, i32 undef) nounwind ; <i32> [#uses=0]
+ %1 = tail call i32 @__copy_to_user(i8* undef, i8* undef, i32 undef) nounwind ; <i32> [#uses=0]
unreachable
}
-declare arm_aapcscc i32 @__copy_to_user(i8*, i8*, i32)
+declare i32 @__copy_to_user(i8*, i8*, i32)
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-04-RegScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
-; RUN: llc < %s -mtriple=armv6-elf
+; RUN: llc < %s -mtriple=arm-linux-gnueabi
; PR4528
-define arm_aapcscc i32 @file_read_actor(i32 %desc, i32 %page, i32 %offset, i32 %size) nounwind optsize {
+define i32 @file_read_actor(i32 %desc, i32 %page, i32 %offset, i32 %size) nounwind optsize {
entry:
br i1 undef, label %fault_in_pages_writeable.exit, label %bb5.i
@@ -18,8 +18,8 @@
unreachable
bb3: ; preds = %fault_in_pages_writeable.exit
- %2 = tail call arm_aapcscc i32 @__copy_to_user(i8* undef, i8* undef, i32 undef) nounwind ; <i32> [#uses=0]
+ %2 = tail call i32 @__copy_to_user(i8* undef, i8* undef, i32 undef) nounwind ; <i32> [#uses=0]
unreachable
}
-declare arm_aapcscc i32 @__copy_to_user(i8*, i8*, i32)
+declare i32 @__copy_to_user(i8*, i8*, i32)
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=arm
+; RUN: llc < %s -mtriple=arm-linux-gnueabi
; PR4528
; Inline asm is allowed to contain operands "=&r", "0".
@@ -6,7 +6,7 @@
%struct.device_dma_parameters = type { i32, i32 }
%struct.iovec = type { i8*, i32 }
-define arm_aapcscc i32 @generic_segment_checks(%struct.iovec* nocapture %iov, i32* nocapture %nr_segs, i32* nocapture %count, i32 %access_flags) nounwind optsize {
+define i32 @generic_segment_checks(%struct.iovec* nocapture %iov, i32* nocapture %nr_segs, i32* nocapture %count, i32 %access_flags) nounwind optsize {
entry:
br label %bb8
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-15-RegScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -1,10 +1,10 @@
-; RUN: llc < %s -march=arm
+; RUN: llc < %s -mtriple=arm-linux-gnueabi
; PR4716
-define arm_aapcscc void @_start() nounwind naked {
+define void @_start() nounwind naked {
entry:
- tail call arm_aapcscc void @exit(i32 undef) noreturn nounwind
+ tail call void @exit(i32 undef) noreturn nounwind
unreachable
}
-declare arm_aapcscc void @exit(i32) noreturn nounwind
+declare void @exit(i32) noreturn nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill.ll Fri Jul 2 04:34:51 2010
@@ -7,7 +7,7 @@
%struct.tree = type { i32, double, double, %struct.tree*, %struct.tree*, %struct.tree*, %struct.tree* }
@g = common global %struct.tree* null
-define arm_apcscc %struct.tree* @tsp(%struct.tree* %t, i32 %nproc) nounwind {
+define %struct.tree* @tsp(%struct.tree* %t, i32 %nproc) nounwind {
entry:
%t.idx51.val.i = load double* null ; <double> [#uses=1]
br i1 undef, label %bb4.i, label %bb.i
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill2.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
%struct.icstruct = type { [3 x i32], i16 }
%struct.node = type { i16, double, [3 x double], i32, i32 }
-declare arm_apcscc double @floor(double) nounwind readnone
+declare double @floor(double) nounwind readnone
define void @intcoord(%struct.icstruct* noalias nocapture sret %agg.result, i1 %a, double %b) {
entry:
@@ -28,7 +28,7 @@
br i1 %a, label %bb11, label %bb9
bb9: ; preds = %bb7
- %0 = tail call arm_apcscc double @floor(double %b) nounwind readnone ; <double> [#uses=0]
+ %0 = tail call double @floor(double %b) nounwind readnone ; <double> [#uses=0]
br label %bb11
bb11: ; preds = %bb9, %bb7
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill3.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
%struct.Patient = type { i32, i32, i32, %struct.Village* }
%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-define arm_apcscc %struct.Village* @alloc_tree(i32 %level, i32 %label, %struct.Village* %back, i1 %p) nounwind {
+define %struct.Village* @alloc_tree(i32 %level, i32 %label, %struct.Village* %back, i1 %p) nounwind {
entry:
br i1 %p, label %bb8, label %bb1
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-21-PostRAKill4.ll Fri Jul 2 04:34:51 2010
@@ -8,19 +8,19 @@
@.str1 = external constant [31 x i8], align 1 ; <[31 x i8]*> [#uses=1]
@.str2 = external constant [4 x i8], align 1 ; <[4 x i8]*> [#uses=1]
-declare arm_apcscc i32 @getUnknown(i32, ...) nounwind
+declare i32 @getUnknown(i32, ...) nounwind
declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_end(i8*) nounwind
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(i8* nocapture, ...) nounwind
-define arm_apcscc i32 @main() nounwind {
+define i32 @main() nounwind {
entry:
- %0 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
- %1 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
- %2 = tail call arm_apcscc i32 (i32, ...)* @getUnknown(i32 undef, i32 116, i32 116, i32 -3852, i32 -31232, i32 30556, i32 -1708916736) nounwind ; <i32> [#uses=1]
- %3 = tail call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @.str2, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
+ %0 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 1, i32 1, i32 1, i32 1, i32 1, i32 1) nounwind ; <i32> [#uses=0]
+ %1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([31 x i8]* @.str1, i32 0, i32 0), i32 -128, i32 116, i32 116, i32 -3852, i32 -31232, i32 -1708916736) nounwind ; <i32> [#uses=0]
+ %2 = tail call i32 (i32, ...)* @getUnknown(i32 undef, i32 116, i32 116, i32 -3852, i32 -31232, i32 30556, i32 -1708916736) nounwind ; <i32> [#uses=1]
+ %3 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @.str2, i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0]
ret i32 0
}
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-26-ScalarToVector.ll Fri Jul 2 04:34:51 2010
@@ -10,7 +10,7 @@
declare <2 x i32> @llvm.arm.neon.vpadd.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
-define arm_apcscc void @_ZN6squish10ClusterFit9Compress3EPv(%quuz* %this, i8* %block) {
+define void @_ZN6squish10ClusterFit9Compress3EPv(%quuz* %this, i8* %block) {
entry:
%0 = lshr <4 x i32> zeroinitializer, <i32 31, i32 31, i32 31, i32 31> ; <<4 x i32>> [#uses=1]
%1 = shufflevector <4 x i32> %0, <4 x i32> undef, <2 x i32> <i32 2, i32 3> ; <<2 x i32>> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-27-ScalarToVector.ll Fri Jul 2 04:34:51 2010
@@ -8,7 +8,7 @@
%quux = type { i32 (...)**, %baz*, i32 }
%quuz = type { %quux, i32, %bar, [128 x i8], [16 x %foo], %foo, %foo, %foo }
-define arm_apcscc void @aaaa(%quuz* %this, i8* %block) {
+define void @aaaa(%quuz* %this, i8* %block) {
entry:
br i1 undef, label %bb.nph269, label %bb201
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-ExtractEltf32.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
target triple = "thumbv7-elf"
-define arm_apcscc void @foo() nounwind {
+define void @foo() nounwind {
entry:
%0 = tail call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> undef, <2 x float> undef) nounwind ; <<2 x float>> [#uses=1]
%tmp28 = extractelement <2 x float> %0, i32 0 ; <float> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-29-TooLongSplat.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
target triple = "thumbv7-elf"
-define arm_apcscc void @aaa() nounwind {
+define void @aaa() nounwind {
entry:
%0 = fmul <4 x float> undef, <float 1.000000e+00, float 1.000000e+01, float 1.000000e+02, float 0x3EB0C6F7A0000000> ; <<4 x float>> [#uses=1]
%tmp31 = extractelement <4 x float> %0, i32 0 ; <float> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
%struct.A = type { i32* }
-define arm_apcscc void @"\01-[MyFunction Name:]"() {
+define void @"\01-[MyFunction Name:]"() {
entry:
%save_filt.1 = alloca i32 ; <i32*> [#uses=2]
%save_eptr.0 = alloca i8* ; <i8**> [#uses=2]
@@ -10,12 +10,12 @@
%eh_exception = alloca i8* ; <i8**> [#uses=5]
%eh_selector = alloca i32 ; <i32*> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call arm_apcscc void @_ZN1AC1Ev(%struct.A* %a)
- invoke arm_apcscc void @_Z3barv()
+ call void @_ZN1AC1Ev(%struct.A* %a)
+ invoke void @_Z3barv()
to label %invcont unwind label %lpad
invcont: ; preds = %entry
- call arm_apcscc void @_ZN1AD1Ev(%struct.A* %a) nounwind
+ call void @_ZN1AD1Ev(%struct.A* %a) nounwind
br label %return
bb: ; preds = %ppad
@@ -23,7 +23,7 @@
store i32 %eh_select, i32* %save_filt.1, align 4
%eh_value = load i8** %eh_exception ; <i8*> [#uses=1]
store i8* %eh_value, i8** %save_eptr.0, align 4
- call arm_apcscc void @_ZN1AD1Ev(%struct.A* %a) nounwind
+ call void @_ZN1AD1Ev(%struct.A* %a) nounwind
%0 = load i8** %save_eptr.0, align 4 ; <i8*> [#uses=1]
store i8* %0, i8** %eh_exception, align 4
%1 = load i32* %save_filt.1, align 4 ; <i32> [#uses=1]
@@ -46,16 +46,16 @@
Unwind: ; preds = %bb
%eh_ptr3 = load i8** %eh_exception ; <i8*> [#uses=1]
- call arm_apcscc void @_Unwind_SjLj_Resume(i8* %eh_ptr3)
+ call void @_Unwind_SjLj_Resume(i8* %eh_ptr3)
unreachable
}
-define linkonce_odr arm_apcscc void @_ZN1AC1Ev(%struct.A* %this) {
+define linkonce_odr void @_ZN1AC1Ev(%struct.A* %this) {
entry:
%this_addr = alloca %struct.A* ; <%struct.A**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %struct.A* %this, %struct.A** %this_addr
- %0 = call arm_apcscc i8* @_Znwm(i32 4) ; <i8*> [#uses=1]
+ %0 = call i8* @_Znwm(i32 4) ; <i8*> [#uses=1]
%1 = bitcast i8* %0 to i32* ; <i32*> [#uses=1]
%2 = load %struct.A** %this_addr, align 4 ; <%struct.A*> [#uses=1]
%3 = getelementptr inbounds %struct.A* %2, i32 0, i32 0 ; <i32**> [#uses=1]
@@ -66,9 +66,9 @@
ret void
}
-declare arm_apcscc i8* @_Znwm(i32)
+declare i8* @_Znwm(i32)
-define linkonce_odr arm_apcscc void @_ZN1AD1Ev(%struct.A* %this) nounwind {
+define linkonce_odr void @_ZN1AD1Ev(%struct.A* %this) nounwind {
entry:
%this_addr = alloca %struct.A* ; <%struct.A**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
@@ -77,7 +77,7 @@
%1 = getelementptr inbounds %struct.A* %0, i32 0, i32 0 ; <i32**> [#uses=1]
%2 = load i32** %1, align 4 ; <i32*> [#uses=1]
%3 = bitcast i32* %2 to i8* ; <i8*> [#uses=1]
- call arm_apcscc void @_ZdlPv(i8* %3) nounwind
+ call void @_ZdlPv(i8* %3) nounwind
br label %bb
bb: ; preds = %entry
@@ -88,9 +88,9 @@
}
;CHECK: L_LSDA_0:
-declare arm_apcscc void @_ZdlPv(i8*) nounwind
+declare void @_ZdlPv(i8*) nounwind
-declare arm_apcscc void @_Z3barv()
+declare void @_Z3barv()
declare i8* @llvm.eh.exception() nounwind
@@ -98,6 +98,6 @@
declare i32 @llvm.eh.typeid.for.i32(i8*) nounwind
-declare arm_apcscc i32 @__gxx_personality_sj0(...)
+declare i32 @__gxx_personality_sj0(...)
-declare arm_apcscc void @_Unwind_SjLj_Resume(i8*)
+declare void @_Unwind_SjLj_Resume(i8*)
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-01-PostRAProlog.ll Fri Jul 2 04:34:51 2010
@@ -30,11 +30,11 @@
@.str218 = private constant [6 x i8] c"%7d%c\00", align 1 ; <[6 x i8]*> [#uses=1]
@.str319 = private constant [30 x i8] c"Failed to allocate %u bytes.\0A\00", align 1 ; <[30 x i8]*> [#uses=1]
-declare arm_apcscc i32 @puts(i8* nocapture) nounwind
+declare i32 @puts(i8* nocapture) nounwind
-declare arm_apcscc i32 @getchar() nounwind
+declare i32 @getchar() nounwind
-define internal arm_apcscc i32 @transpose() nounwind readonly {
+define internal i32 @transpose() nounwind readonly {
; CHECK: push
entry:
%0 = load i32* getelementptr inbounds ([128 x i32]* @columns, i32 0, i32 1), align 4 ; <i32> [#uses=1]
@@ -101,6 +101,6 @@
ret i32 -128
}
-declare arm_apcscc noalias i8* @calloc(i32, i32) nounwind
+declare noalias i8* @calloc(i32, i32) nounwind
declare void @llvm.memset.i64(i8* nocapture, i8, i64, i32) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-09-AllOnes.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-09-AllOnes.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-09-AllOnes.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-09-AllOnes.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32"
target triple = "thumbv7-elf"
-define arm_apcscc void @foo() {
+define void @foo() {
entry:
%0 = insertelement <4 x i32> undef, i32 -1, i32 3
store <4 x i32> %0, <4 x i32>* undef, align 16
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-24-spill-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-24-spill-align.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-24-spill-align.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-24-spill-align.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; pr4926
-define arm_apcscc void @test_vget_lanep16() nounwind {
+define void @test_vget_lanep16() nounwind {
entry:
%arg0_poly16x4_t = alloca <4 x i16> ; <<4 x i16>*> [#uses=1]
%out_poly16_t = alloca i16 ; <i16*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-09-28-LdStOptiBug.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
%0 = type { double, double }
-define arm_aapcscc void @foo(%0* noalias nocapture sret %agg.result, double %x.0, double %y.0) nounwind {
+define void @foo(%0* noalias nocapture sret %agg.result, double %x.0, double %y.0) nounwind {
; CHECK: foo:
; CHECK: bl __adddf3
; CHECK-NOT: strd
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-10-27-double-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-10-27-double-align.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-10-27-double-align.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-10-27-double-align.ll Fri Jul 2 04:34:51 2010
@@ -2,13 +2,13 @@
@.str = private constant [1 x i8] zeroinitializer, align 1
-define arm_aapcscc void @g() {
+define void @g() {
entry:
;CHECK: [sp, #8]
;CHECK: [sp, #12]
;CHECK: [sp]
- tail call arm_aapcscc void (i8*, ...)* @f(i8* getelementptr ([1 x i8]* @.str, i32 0, i32 0), i32 1, double 2.000000e+00, i32 3, double 4.000000e+00)
+ tail call void (i8*, ...)* @f(i8* getelementptr ([1 x i8]* @.str, i32 0, i32 0), i32 1, double 2.000000e+00, i32 3, double 4.000000e+00)
ret void
}
-declare arm_aapcscc void @f(i8*, ...)
+declare void @f(i8*, ...)
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-11-01-NeonMoves.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-11-01-NeonMoves.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-11-01-NeonMoves.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-11-01-NeonMoves.ll Fri Jul 2 04:34:51 2010
@@ -11,11 +11,11 @@
%0 = getelementptr inbounds %foo* %quat_addr, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
store <4 x float> %quat.0, <4 x float>* %0
%1 = call arm_aapcs_vfpcc <4 x float> @quux(%foo* %quat_addr) nounwind ; <<4 x float>> [#uses=3]
-;CHECK: vmov.f32
-;CHECK: vmov.f32
%2 = fmul <4 x float> %1, %1 ; <<4 x float>> [#uses=2]
%3 = shufflevector <4 x float> %2, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; <<2 x float>> [#uses=1]
%4 = shufflevector <4 x float> %2, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; <<2 x float>> [#uses=1]
+;CHECK-NOT: vmov
+;CHECK: vpadd
%5 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %3, <2 x float> %4) nounwind ; <<2 x float>> [#uses=2]
%6 = call <2 x float> @llvm.arm.neon.vpadd.v2f32(<2 x float> %5, <2 x float> %5) nounwind ; <<2 x float>> [#uses=2]
%7 = shufflevector <2 x float> %6, <2 x float> %6, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=2]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2009-12-02-vtrn-undef.ll Fri Jul 2 04:34:51 2010
@@ -6,7 +6,7 @@
%struct.int16x8_t = type { <8 x i16> }
%struct.int16x8x2_t = type { [2 x %struct.int16x8_t] }
-define arm_apcscc void @t(%struct.int16x8x2_t* noalias nocapture sret %agg.result, <8 x i16> %tmp.0, %struct.int16x8x2_t* nocapture %dst) nounwind {
+define void @t(%struct.int16x8x2_t* noalias nocapture sret %agg.result, <8 x i16> %tmp.0, %struct.int16x8x2_t* nocapture %dst) nounwind {
entry:
;CHECK: vtrn.16
%0 = shufflevector <8 x i16> %tmp.0, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-09-NeonSelect.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-09-NeonSelect.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-09-NeonSelect.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-09-NeonSelect.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc -march=arm -mattr=+neon < %s
; Radar 7770501: Don't crash on SELECT and SELECT_CC with NEON vector values.
-define arm_apcscc void @vDSP_FFT16_copv(float* nocapture %O, float* nocapture %I, i32 %Direction) nounwind {
+define void @vDSP_FFT16_copv(float* nocapture %O, float* nocapture %I, i32 %Direction) nounwind {
entry:
%.22 = select i1 undef, <4 x float> undef, <4 x float> zeroinitializer ; <<4 x float>> [#uses=1]
%0 = fadd <4 x float> undef, %.22 ; <<4 x float>> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-13-v2f64SplitArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-13-v2f64SplitArg.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-13-v2f64SplitArg.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-13-v2f64SplitArg.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8
; Radar 7855014
-define arm_apcscc void @test1(i32 %f0, i32 %f1, i32 %f2, <4 x i32> %f3) nounwind {
+define void @test1(i32 %f0, i32 %f1, i32 %f2, <4 x i32> %f3) nounwind {
entry:
unreachable
}
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-14-SplitVector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-14-SplitVector.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-14-SplitVector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-14-SplitVector.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mcpu=arm1136jf-s
; Radar 7854640
-define arm_apcscc void @test() nounwind {
+define void @test() nounwind {
bb:
br i1 undef, label %bb9, label %bb10
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-04-15-ScavengerDebugValue.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64-n32"
target triple = "armv4t-apple-darwin10"
-define hidden arm_apcscc i32 @__addvsi3(i32 %a, i32 %b) nounwind {
+define hidden i32 @__addvsi3(i32 %a, i32 %b) nounwind {
entry:
tail call void @llvm.dbg.value(metadata !{i32 %b}, i64 0, metadata !0)
%0 = add nsw i32 %b, %a, !dbg !9 ; <i32> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -O0 -verify-machineinstrs -regalloc=local
; RUN: llc < %s -O0 -verify-machineinstrs -regalloc=fast
; rdar://problem/7948106
;; This test would spill %R4 before the call to zz, but it forgot to move the
@@ -11,7 +10,7 @@
@.str = external constant [1 x i8] ; <[1 x i8]*> [#uses=1]
-define arm_apcscc void @yy(%struct.q* %qq) nounwind {
+define void @yy(%struct.q* %qq) nounwind {
entry:
%vla6 = alloca i8, i32 undef, align 1 ; <i8*> [#uses=1]
%vla10 = alloca i8, i32 undef, align 1 ; <i8*> [#uses=1]
@@ -20,18 +19,18 @@
%tmp21 = load i32* undef ; <i32> [#uses=1]
%0 = mul i32 1, %tmp21 ; <i32> [#uses=1]
%vla22 = alloca i8, i32 %0, align 1 ; <i8*> [#uses=1]
- call arm_apcscc void (...)* @zz(i8* getelementptr inbounds ([1 x i8]* @.str, i32 0, i32 0), i32 2, i32 1)
+ call void (...)* @zz(i8* getelementptr inbounds ([1 x i8]* @.str, i32 0, i32 0), i32 2, i32 1)
br i1 undef, label %if.then, label %if.end36
if.then: ; preds = %entry
- %call = call arm_apcscc i32 (...)* @x(%struct.q* undef, i8* undef, i8* %vla6, i8* %vla10, i32 undef) ; <i32> [#uses=0]
- %call35 = call arm_apcscc i32 (...)* @x(%struct.q* undef, i8* %vla14, i8* %vla18, i8* %vla22, i32 undef) ; <i32> [#uses=0]
+ %call = call i32 (...)* @x(%struct.q* undef, i8* undef, i8* %vla6, i8* %vla10, i32 undef) ; <i32> [#uses=0]
+ %call35 = call i32 (...)* @x(%struct.q* undef, i8* %vla14, i8* %vla18, i8* %vla22, i32 undef) ; <i32> [#uses=0]
unreachable
if.end36: ; preds = %entry
ret void
}
-declare arm_apcscc void @zz(...)
+declare void @zz(...)
-declare arm_apcscc i32 @x(...)
+declare i32 @x(...)
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
%struct.foo = type { i64, i64 }
-define arm_apcscc zeroext i8 @t(%struct.foo* %this) noreturn optsize {
+define zeroext i8 @t(%struct.foo* %this) noreturn optsize {
entry:
; ARM: t:
; ARM: str r0, [r1], r0
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/2010-05-21-BuildVector.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
; Radar 7872877
-define arm_apcscc void @test(float* %fltp, i32 %packedValue, float* %table) nounwind {
+define void @test(float* %fltp, i32 %packedValue, float* %table) nounwind {
entry:
%0 = load float* %fltp
%1 = insertelement <4 x float> undef, float %0, i32 0
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/arm-frameaddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/arm-frameaddr.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/arm-frameaddr.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/arm-frameaddr.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
; PR4344
; PR4416
-define arm_aapcscc i8* @t() nounwind {
+define i8* @t() nounwind {
entry:
; DARWIN: t:
; DARWIN: mov r0, r7
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/arm-returnaddr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/arm-returnaddr.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/arm-returnaddr.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/arm-returnaddr.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
; rdar://8015977
; rdar://8020118
-define arm_apcscc i8* @rt0(i32 %x) nounwind readnone {
+define i8* @rt0(i32 %x) nounwind readnone {
entry:
; CHECK: rt0:
; CHECK: mov r0, lr
@@ -11,7 +11,7 @@
ret i8* %0
}
-define arm_apcscc i8* @rt2() nounwind readnone {
+define i8* @rt2() nounwind readnone {
entry:
; CHECK: rt2:
; CHECK: ldr r0, [r7]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/armv4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/armv4.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/armv4.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/armv4.ll Fri Jul 2 04:34:51 2010
@@ -5,7 +5,7 @@
; RUN: llc < %s -mtriple=armv4-unknown-eabi | FileCheck %s -check-prefix=ARM
; RUN: llc < %s -mtriple=armv4t-unknown-eabi | FileCheck %s -check-prefix=THUMB
-define arm_aapcscc i32 @test(i32 %a) nounwind readnone {
+define i32 @test(i32 %a) nounwind readnone {
entry:
; ARM: mov pc
; THUMB: bx
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/fpconsts.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=arm -mattr=+vfp3 | FileCheck %s
-define arm_apcscc float @t1(float %x) nounwind readnone optsize {
+define float @t1(float %x) nounwind readnone optsize {
entry:
; CHECK: t1:
; CHECK: vmov.f32 s1, #4.000000e+00
@@ -8,7 +8,7 @@
ret float %0
}
-define arm_apcscc double @t2(double %x) nounwind readnone optsize {
+define double @t2(double %x) nounwind readnone optsize {
entry:
; CHECK: t2:
; CHECK: vmov.f64 d1, #3.000000e+00
@@ -16,7 +16,7 @@
ret double %0
}
-define arm_apcscc double @t3(double %x) nounwind readnone optsize {
+define double @t3(double %x) nounwind readnone optsize {
entry:
; CHECK: t3:
; CHECK: vmov.f64 d1, #-1.300000e+01
@@ -24,7 +24,7 @@
ret double %0
}
-define arm_apcscc float @t4(float %x) nounwind readnone optsize {
+define float @t4(float %x) nounwind readnone optsize {
entry:
; CHECK: t4:
; CHECK: vmov.f32 s1, #-2.400000e+01
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/indirectbr.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/indirectbr.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/indirectbr.ll Fri Jul 2 04:34:51 2010
@@ -5,7 +5,7 @@
@nextaddr = global i8* null ; <i8**> [#uses=2]
@C.0.2070 = private constant [5 x i8*] [i8* blockaddress(@foo, %L1), i8* blockaddress(@foo, %L2), i8* blockaddress(@foo, %L3), i8* blockaddress(@foo, %L4), i8* blockaddress(@foo, %L5)] ; <[5 x i8*]*> [#uses=1]
-define internal arm_apcscc i32 @foo(i32 %i) nounwind {
+define internal i32 @foo(i32 %i) nounwind {
; ARM: foo:
; THUMB: foo:
; THUMB2: foo:
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/inlineasm3.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
; Radar 7449043
%struct.int32x4_t = type { <4 x i32> }
-define arm_apcscc void @t() nounwind {
+define void @t() nounwind {
entry:
; CHECK: vmov.I64 q15, #0
; CHECK: vmov.32 d30[0], r0
@@ -16,7 +16,7 @@
; Radar 7457110
%struct.int32x2_t = type { <4 x i32> }
-define arm_apcscc void @t2() nounwind {
+define void @t2() nounwind {
entry:
; CHECK: vmov d30, d0
; CHECK: vmov.32 r0, d30[0]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/lsr-on-unrolled-loops.ll Fri Jul 2 04:34:51 2010
@@ -40,7 +40,7 @@
%22 = type { void (%0*)*, void (%0*, i8***, i32, i8**, i32)* }
%23 = type { void (%0*, i32)*, void (%0*, i8**, i8**, i32)*, void (%0*)*, void (%0*)* }
-define arm_apcscc void @test(%0* nocapture %a0, %11* nocapture %a1, i16* nocapture %a2, i8** nocapture %a3, i32 %a4) nounwind {
+define void @test(%0* nocapture %a0, %11* nocapture %a1, i16* nocapture %a2, i8** nocapture %a3, i32 %a4) nounwind {
bb:
%t = alloca [64 x float], align 4
%t5 = getelementptr inbounds %0* %a0, i32 0, i32 65
@@ -393,7 +393,7 @@
%struct.z_stream = type { i8*, i32, i32, i8*, i32, i32, i8*, %struct.internal_state*, i8* (i8*, i32, i32)*, void (i8*, i8*)*, i8*, i32, i32, i32 }
%union.anon = type { i16 }
-define arm_apcscc i32 @longest_match(%struct.internal_state* %s, i32 %cur_match) nounwind optsize {
+define i32 @longest_match(%struct.internal_state* %s, i32 %cur_match) nounwind optsize {
entry:
%0 = getelementptr inbounds %struct.internal_state* %s, i32 0, i32 31 ; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=2]
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/reg_sequence.ll Fri Jul 2 04:34:51 2010
@@ -8,7 +8,7 @@
%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }
-define arm_apcscc void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
+define void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
entry:
; CHECK: t1:
; CHECK: vld1.16
@@ -41,7 +41,7 @@
ret void
}
-define arm_apcscc void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
+define void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
entry:
; CHECK: t2:
; CHECK: vld1.16
@@ -88,7 +88,7 @@
ret <8 x i8> %tmp4
}
-define arm_apcscc void @t4(i32* %in, i32* %out) nounwind {
+define void @t4(i32* %in, i32* %out) nounwind {
entry:
; CHECK: t4:
; CHECK: vld2.32
@@ -163,7 +163,7 @@
ret <8 x i8> %tmp5
}
-define arm_apcscc void @t7(i32* %iptr, i32* %optr) nounwind {
+define void @t7(i32* %iptr, i32* %optr) nounwind {
entry:
; CHECK: t7:
; CHECK: vld2.32
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/remat.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=arm -mattr=+v6,+vfp2 -stats -info-output-file - | grep "Number of re-materialization"
-define arm_apcscc i32 @main(i32 %argc, i8** nocapture %argv, double %d1, double %d2) nounwind {
+define i32 @main(i32 %argc, i8** nocapture %argv, double %d1, double %d2) nounwind {
entry:
br i1 undef, label %smvp.exit, label %bb.i3
@@ -25,7 +25,7 @@
br i1 %14, label %phi1.exit, label %bb.i35
bb.i35: ; preds = %bb142
- %5 = call arm_apcscc double @sin(double %15) nounwind readonly ; <double> [#uses=1]
+ %5 = call double @sin(double %15) nounwind readonly ; <double> [#uses=1]
%6 = fmul double %5, 0x4031740AFA84AD8A ; <double> [#uses=1]
%7 = fsub double 1.000000e+00, undef ; <double> [#uses=1]
%8 = fdiv double %7, 6.000000e-01 ; <double> [#uses=1]
@@ -62,4 +62,4 @@
unreachable
}
-declare arm_apcscc double @sin(double) nounwind readonly
+declare double @sin(double) nounwind readonly
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/select-imm.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -march=arm -mattr=+thumb2 | FileCheck %s --check-prefix=T2
-define arm_apcscc i32 @t1(i32 %c) nounwind readnone {
+define i32 @t1(i32 %c) nounwind readnone {
entry:
; ARM: t1:
; ARM: mov r1, #101
@@ -17,7 +17,7 @@
ret i32 %1
}
-define arm_apcscc i32 @t2(i32 %c) nounwind readnone {
+define i32 @t2(i32 %c) nounwind readnone {
entry:
; ARM: t2:
; ARM: mov r1, #101
@@ -33,7 +33,7 @@
ret i32 %1
}
-define arm_apcscc i32 @t3(i32 %a) nounwind readnone {
+define i32 @t3(i32 %a) nounwind readnone {
entry:
; ARM: t3:
; ARM: mov r0, #0
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/spill-q.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-define arm_apcscc void @aaa(%quuz* %this, i8* %block) {
+define void @aaa(%quuz* %this, i8* %block) {
; CHECK: aaa:
; CHECK: bic sp, sp, #15
; CHECK: vst1.64 {{.*}}sp, :128
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/trap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/trap.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/trap.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/trap.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=arm | FileCheck %s
; rdar://7961298
-define arm_apcscc void @t() nounwind {
+define void @t() nounwind {
entry:
; CHECK: t:
; CHECK: trap
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/unaligned_load_store.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
; rdar://7113725
-define arm_apcscc void @t(i8* nocapture %a, i8* nocapture %b) nounwind {
+define void @t(i8* nocapture %a, i8* nocapture %b) nounwind {
entry:
; GENERIC: t:
; GENERIC: ldrb r2
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vdup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vdup.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vdup.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vdup.ll Fri Jul 2 04:34:51 2010
@@ -244,25 +244,25 @@
ret <4 x float> %tmp2
}
-define arm_apcscc <2 x i64> @foo(<2 x i64> %arg0_int64x1_t) nounwind readnone {
+define <2 x i64> @foo(<2 x i64> %arg0_int64x1_t) nounwind readnone {
entry:
%0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 1, i32 1>
ret <2 x i64> %0
}
-define arm_apcscc <2 x i64> @bar(<2 x i64> %arg0_int64x1_t) nounwind readnone {
+define <2 x i64> @bar(<2 x i64> %arg0_int64x1_t) nounwind readnone {
entry:
%0 = shufflevector <2 x i64> %arg0_int64x1_t, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
ret <2 x i64> %0
}
-define arm_apcscc <2 x double> @baz(<2 x double> %arg0_int64x1_t) nounwind readnone {
+define <2 x double> @baz(<2 x double> %arg0_int64x1_t) nounwind readnone {
entry:
%0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 1, i32 1>
ret <2 x double> %0
}
-define arm_apcscc <2 x double> @qux(<2 x double> %arg0_int64x1_t) nounwind readnone {
+define <2 x double> @qux(<2 x double> %arg0_int64x1_t) nounwind readnone {
entry:
%0 = shufflevector <2 x double> %arg0_int64x1_t, <2 x double> undef, <2 x i32> <i32 0, i32 0>
ret <2 x double> %0
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vext.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-define arm_apcscc <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: test_vextd:
;CHECK: vext
%tmp1 = load <8 x i8>* %A
@@ -9,7 +9,7 @@
ret <8 x i8> %tmp3
}
-define arm_apcscc <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: test_vextRd:
;CHECK: vext
%tmp1 = load <8 x i8>* %A
@@ -18,7 +18,7 @@
ret <8 x i8> %tmp3
}
-define arm_apcscc <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: test_vextq:
;CHECK: vext
%tmp1 = load <16 x i8>* %A
@@ -27,7 +27,7 @@
ret <16 x i8> %tmp3
}
-define arm_apcscc <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: test_vextRq:
;CHECK: vext
%tmp1 = load <16 x i8>* %A
@@ -36,7 +36,7 @@
ret <16 x i8> %tmp3
}
-define arm_apcscc <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: test_vextd16:
;CHECK: vext
%tmp1 = load <4 x i16>* %A
@@ -45,7 +45,7 @@
ret <4 x i16> %tmp3
}
-define arm_apcscc <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: test_vextq32:
;CHECK: vext
%tmp1 = load <4 x i32>* %A
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vmov.ll Fri Jul 2 04:34:51 2010
@@ -136,7 +136,7 @@
; Check for correct assembler printing for immediate values.
%struct.int8x8_t = type { <8 x i8> }
-define arm_apcscc void @vdupn128(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
+define void @vdupn128(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
entry:
;CHECK: vdupn128:
;CHECK: vmov.i8 d0, #0x80
@@ -145,7 +145,7 @@
ret void
}
-define arm_apcscc void @vdupnneg75(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
+define void @vdupnneg75(%struct.int8x8_t* noalias nocapture sret %agg.result) nounwind {
entry:
;CHECK: vdupnneg75:
;CHECK: vmov.i8 d0, #0xB5
Modified: llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/ARM/vrev.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-define arm_apcscc <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev64D8:
;CHECK: vrev64.8
%tmp1 = load <8 x i8>* %A
@@ -8,7 +8,7 @@
ret <8 x i8> %tmp2
}
-define arm_apcscc <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
+define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev64D16:
;CHECK: vrev64.16
%tmp1 = load <4 x i16>* %A
@@ -16,7 +16,7 @@
ret <4 x i16> %tmp2
}
-define arm_apcscc <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
+define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
;CHECK: test_vrev64D32:
;CHECK: vrev64.32
%tmp1 = load <2 x i32>* %A
@@ -24,7 +24,7 @@
ret <2 x i32> %tmp2
}
-define arm_apcscc <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
+define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
;CHECK: test_vrev64Df:
;CHECK: vrev64.32
%tmp1 = load <2 x float>* %A
@@ -32,7 +32,7 @@
ret <2 x float> %tmp2
}
-define arm_apcscc <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
+define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev64Q8:
;CHECK: vrev64.8
%tmp1 = load <16 x i8>* %A
@@ -40,7 +40,7 @@
ret <16 x i8> %tmp2
}
-define arm_apcscc <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev64Q16:
;CHECK: vrev64.16
%tmp1 = load <8 x i16>* %A
@@ -48,7 +48,7 @@
ret <8 x i16> %tmp2
}
-define arm_apcscc <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
+define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
;CHECK: test_vrev64Q32:
;CHECK: vrev64.32
%tmp1 = load <4 x i32>* %A
@@ -56,7 +56,7 @@
ret <4 x i32> %tmp2
}
-define arm_apcscc <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
+define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
;CHECK: test_vrev64Qf:
;CHECK: vrev64.32
%tmp1 = load <4 x float>* %A
@@ -64,7 +64,7 @@
ret <4 x float> %tmp2
}
-define arm_apcscc <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev32D8:
;CHECK: vrev32.8
%tmp1 = load <8 x i8>* %A
@@ -72,7 +72,7 @@
ret <8 x i8> %tmp2
}
-define arm_apcscc <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
+define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev32D16:
;CHECK: vrev32.16
%tmp1 = load <4 x i16>* %A
@@ -80,7 +80,7 @@
ret <4 x i16> %tmp2
}
-define arm_apcscc <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
+define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev32Q8:
;CHECK: vrev32.8
%tmp1 = load <16 x i8>* %A
@@ -88,7 +88,7 @@
ret <16 x i8> %tmp2
}
-define arm_apcscc <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
+define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev32Q16:
;CHECK: vrev32.16
%tmp1 = load <8 x i16>* %A
@@ -96,7 +96,7 @@
ret <8 x i16> %tmp2
}
-define arm_apcscc <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
+define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev16D8:
;CHECK: vrev16.8
%tmp1 = load <8 x i8>* %A
@@ -104,7 +104,7 @@
ret <8 x i8> %tmp2
}
-define arm_apcscc <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
+define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev16Q8:
;CHECK: vrev16.8
%tmp1 = load <16 x i8>* %A
Modified: llvm/branches/wendling/eh/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -regalloc=local
; RUN: llc < %s -regalloc=fast
%struct.CHESS_POSITION = type { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i32, i32, i8, i8, [64 x i8], i8, i8, i8, i8, i8 }
Modified: llvm/branches/wendling/eh/test/CodeGen/Generic/2010-ZeroSizedArg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Generic/2010-ZeroSizedArg.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Generic/2010-ZeroSizedArg.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Generic/2010-ZeroSizedArg.ll Fri Jul 2 04:34:51 2010
@@ -6,7 +6,7 @@
@.str = private constant [1 x i8] c" "
-define arm_apcscc void @t(%0) nounwind {
+define void @t(%0) nounwind {
entry:
%arg0 = alloca %union.T0
%1 = bitcast %union.T0* %arg0 to %0*
@@ -14,4 +14,4 @@
ret void
}
-declare arm_apcscc i32 @printf(i8*, ...)
+declare i32 @printf(i8*, ...)
Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll Fri Jul 2 04:34:51 2010
@@ -1,5 +1,4 @@
; RUN: llc < %s | FileCheck %s
-; RUN: llc < %s -regalloc=local | FileCheck %s
; RUN: llc < %s -regalloc=fast | FileCheck %s
; The first argument of subfc must not be the same as any other register.
Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin9 -regalloc=local -relocation-model=pic
; RUN: llc < %s -mtriple=powerpc64-apple-darwin9 -regalloc=fast -relocation-model=pic
%struct.NSError = type opaque
Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=powerpc64-apple-darwin9 -regalloc=local -relocation-model=pic
; RUN: llc < %s -mtriple=powerpc64-apple-darwin9 -regalloc=fast -relocation-model=pic
%struct.NSError = type opaque
Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=powerpc-apple-darwin -regalloc=local
; RUN: llc < %s -mtriple=powerpc-apple-darwin -regalloc=fast
define i32 @bork(i64 %foo, i64 %bar) {
Modified: llvm/branches/wendling/eh/test/CodeGen/PowerPC/cr_spilling.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/PowerPC/cr_spilling.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/PowerPC/cr_spilling.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/PowerPC/cr_spilling.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -march=ppc32 -regalloc=local -O0 -relocation-model=pic -o -
; RUN: llc < %s -march=ppc32 -regalloc=fast -O0 -relocation-model=pic -o -
; PR1638
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-19-SPDecBug.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumbv6-elf | not grep "subs sp"
; PR4567
-define arm_apcscc i8* @__gets_chk(i8* %s, i32 %slen) nounwind {
+define i8* @__gets_chk(i8* %s, i32 %slen) nounwind {
entry:
br i1 undef, label %bb, label %bb1
@@ -23,11 +23,11 @@
br i1 undef, label %bb5, label %bb6
bb5: ; preds = %bb4
- %2 = call arm_apcscc i8* @gets(i8* %s) nounwind ; <i8*> [#uses=1]
+ %2 = call i8* @gets(i8* %s) nounwind ; <i8*> [#uses=1]
ret i8* %2
bb6: ; preds = %bb4
unreachable
}
-declare arm_apcscc i8* @gets(i8*) nounwind
+declare i8* @gets(i8*) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
@Time.2535 = external global i64 ; <i64*> [#uses=2]
-define arm_apcscc i64 @millisecs() nounwind {
+define i64 @millisecs() nounwind {
entry:
%0 = load i64* @Time.2535, align 4 ; <i64> [#uses=2]
%1 = add i64 %0, 1 ; <i64> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-07-27-PEIAssert.ll Fri Jul 2 04:34:51 2010
@@ -4,10 +4,10 @@
%struct.List = type { i32, i32* }
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 ()* @main to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-define arm_apcscc i32 @main() nounwind {
+define i32 @main() nounwind {
entry:
%ll = alloca %struct.LinkList*, align 4 ; <%struct.LinkList**> [#uses=1]
- %0 = call arm_apcscc i32 @ReadList(%struct.LinkList** %ll, %struct.List** null) nounwind ; <i32> [#uses=1]
+ %0 = call i32 @ReadList(%struct.LinkList** %ll, %struct.List** null) nounwind ; <i32> [#uses=1]
switch i32 %0, label %bb5 [
i32 7, label %bb4
i32 42, label %bb3
@@ -23,4 +23,4 @@
ret i32 1
}
-declare arm_apcscc i32 @ReadList(%struct.LinkList** nocapture, %struct.List** nocapture) nounwind
+declare i32 @ReadList(%struct.LinkList** nocapture, %struct.List** nocapture) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
%struct.BF_KEY = type { [18 x i32], [1024 x i32] }
-define arm_apcscc void @BF_encrypt(i32* nocapture %data, %struct.BF_KEY* nocapture %key, i32 %encrypt) nounwind {
+define void @BF_encrypt(i32* nocapture %data, %struct.BF_KEY* nocapture %key, i32 %encrypt) nounwind {
entry:
%0 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 0; <i32*> [#uses=2]
%1 = load i32* %data, align 4 ; <i32> [#uses=2]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll Fri Jul 2 04:34:51 2010
@@ -3,15 +3,15 @@
%struct.vorbis_comment = type { i8**, i32*, i32, i8* }
@.str16 = external constant [2 x i8], align 1 ; <[2 x i8]*> [#uses=1]
-declare arm_apcscc i8* @__strcpy_chk(i8*, i8*, i32) nounwind
+declare i8* @__strcpy_chk(i8*, i8*, i32) nounwind
-declare arm_apcscc i8* @__strcat_chk(i8*, i8*, i32) nounwind
+declare i8* @__strcat_chk(i8*, i8*, i32) nounwind
-define arm_apcscc i8* @vorbis_comment_query(%struct.vorbis_comment* nocapture %vc, i8* %tag, i32 %count) nounwind {
+define i8* @vorbis_comment_query(%struct.vorbis_comment* nocapture %vc, i8* %tag, i32 %count) nounwind {
entry:
%0 = alloca i8, i32 undef, align 4 ; <i8*> [#uses=2]
- %1 = call arm_apcscc i8* @__strcpy_chk(i8* %0, i8* %tag, i32 -1) nounwind; <i8*> [#uses=0]
- %2 = call arm_apcscc i8* @__strcat_chk(i8* %0, i8* getelementptr ([2 x i8]* @.str16, i32 0, i32 0), i32 -1) nounwind; <i8*> [#uses=0]
+ %1 = call i8* @__strcpy_chk(i8* %0, i8* %tag, i32 -1) nounwind; <i8*> [#uses=0]
+ %2 = call i8* @__strcat_chk(i8* %0, i8* getelementptr ([2 x i8]* @.str16, i32 0, i32 0), i32 -1) nounwind; <i8*> [#uses=0]
%3 = getelementptr %struct.vorbis_comment* %vc, i32 0, i32 0; <i8***> [#uses=1]
br label %bb11
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-20-ISelBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-20-ISelBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-08-20-ISelBug.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (%struct.asl_file_t*, i64, i64*)* @t to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-define arm_apcscc i32 @t(%struct.asl_file_t* %s, i64 %off, i64* %out) nounwind optsize {
+define i32 @t(%struct.asl_file_t* %s, i64 %off, i64* %out) nounwind optsize {
; CHECK: t:
; CHECK: adds r0, #8
entry:
@@ -32,7 +32,7 @@
br i1 %8, label %bb13, label %bb5
bb5: ; preds = %bb3
- %9 = call arm_apcscc i32 @fseeko(%struct.FILE* %2, i64 %off, i32 0) nounwind ; <i32> [#uses=1]
+ %9 = call i32 @fseeko(%struct.FILE* %2, i64 %off, i32 0) nounwind ; <i32> [#uses=1]
%10 = icmp eq i32 %9, 0 ; <i1> [#uses=1]
br i1 %10, label %bb7, label %bb13
@@ -40,7 +40,7 @@
store i64 0, i64* %val, align 4
%11 = load %struct.FILE** %1, align 4 ; <%struct.FILE*> [#uses=1]
%val8 = bitcast i64* %val to i8* ; <i8*> [#uses=1]
- %12 = call arm_apcscc i32 @fread(i8* noalias %val8, i32 8, i32 1, %struct.FILE* noalias %11) nounwind ; <i32> [#uses=1]
+ %12 = call i32 @fread(i8* noalias %val8, i32 8, i32 1, %struct.FILE* noalias %11) nounwind ; <i32> [#uses=1]
%13 = icmp eq i32 %12, 1 ; <i1> [#uses=1]
br i1 %13, label %bb10, label %bb13
@@ -50,7 +50,7 @@
bb11: ; preds = %bb10
%15 = load i64* %val, align 4 ; <i64> [#uses=1]
- %16 = call arm_apcscc i64 @asl_core_ntohq(i64 %15) nounwind ; <i64> [#uses=1]
+ %16 = call i64 @asl_core_ntohq(i64 %15) nounwind ; <i64> [#uses=1]
store i64 %16, i64* %out, align 4
ret i32 0
@@ -59,8 +59,8 @@
ret i32 %.0
}
-declare arm_apcscc i32 @fseeko(%struct.FILE* nocapture, i64, i32) nounwind
+declare i32 @fseeko(%struct.FILE* nocapture, i64, i32) nounwind
-declare arm_apcscc i32 @fread(i8* noalias nocapture, i32, i32, %struct.FILE* noalias nocapture) nounwind
+declare i32 @fread(i8* noalias nocapture, i32, i32, %struct.FILE* noalias nocapture) nounwind
-declare arm_apcscc i64 @asl_core_ntohq(i64)
+declare i64 @asl_core_ntohq(i64)
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll Fri Jul 2 04:34:51 2010
@@ -10,7 +10,7 @@
@codetable.2928 = internal constant [5 x i8*] [i8* blockaddress(@interpret_threaded, %RETURN), i8* blockaddress(@interpret_threaded, %INCREMENT), i8* blockaddress(@interpret_threaded, %DECREMENT), i8* blockaddress(@interpret_threaded, %DOUBLE), i8* blockaddress(@interpret_threaded, %SWAPWORD)] ; <[5 x i8*]*> [#uses=5]
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (i8*)* @interpret_threaded to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-define arm_apcscc i32 @interpret_threaded(i8* nocapture %opcodes) nounwind readonly optsize {
+define i32 @interpret_threaded(i8* nocapture %opcodes) nounwind readonly optsize {
entry:
%0 = load i8* %opcodes, align 1 ; <i8> [#uses=1]
%1 = zext i8 %0 to i32 ; <i32> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/2010-01-15-local-alloc-spill-physical.ll Fri Jul 2 04:34:51 2010
@@ -1,14 +1,13 @@
-; RUN: llc < %s -regalloc=local -relocation-model=pic | FileCheck %s
; RUN: llc < %s -regalloc=fast -relocation-model=pic | FileCheck %s
target triple = "thumbv6-apple-darwin10"
@fred = internal global i32 0 ; <i32*> [#uses=1]
-define arm_apcscc void @foo() nounwind {
+define void @foo() nounwind {
entry:
; CHECK: str r0, [sp
- %0 = call arm_apcscc i32 (...)* @bar() nounwind ; <i32> [#uses=1]
+ %0 = call i32 (...)* @bar() nounwind ; <i32> [#uses=1]
; CHECK: blx _bar
; CHECK: ldr r1, [sp
store i32 %0, i32* @fred, align 4
@@ -18,4 +17,4 @@
ret void
}
-declare arm_apcscc i32 @bar(...)
+declare i32 @bar(...)
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/asmprinter-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/asmprinter-bug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/asmprinter-bug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/asmprinter-bug.ll Fri Jul 2 04:34:51 2010
@@ -13,7 +13,7 @@
@__stderrp = external global %struct.FILE* ; <%struct.FILE**> [#uses=1]
@.str1 = private constant [28 x i8] c"Final valprev=%d, index=%d\0A\00", section "__TEXT,__cstring,cstring_literals", align 1 ; <[28 x i8]*> [#uses=1]
-define arm_apcscc void @adpcm_coder(i16* nocapture %indata, i8* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
+define void @adpcm_coder(i16* nocapture %indata, i8* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
entry:
%0 = getelementptr %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
%1 = load i16* %0, align 2 ; <i16> [#uses=1]
@@ -138,7 +138,7 @@
ret void
}
-define arm_apcscc void @adpcm_decoder(i8* nocapture %indata, i16* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
+define void @adpcm_decoder(i8* nocapture %indata, i16* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
entry:
%0 = getelementptr %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
%1 = load i16* %0, align 2 ; <i16> [#uses=1]
@@ -245,17 +245,17 @@
ret void
}
-define arm_apcscc i32 @main() nounwind {
+define i32 @main() nounwind {
entry:
br label %bb
bb: ; preds = %bb3, %entry
- %0 = tail call arm_apcscc i32 (...)* @read(i32 0, i8* getelementptr ([500 x i8]* @abuf, i32 0, i32 0), i32 500) nounwind ; <i32> [#uses=4]
+ %0 = tail call i32 (...)* @read(i32 0, i8* getelementptr ([500 x i8]* @abuf, i32 0, i32 0), i32 500) nounwind ; <i32> [#uses=4]
%1 = icmp slt i32 %0, 0 ; <i1> [#uses=1]
br i1 %1, label %bb1, label %bb2
bb1: ; preds = %bb
- tail call arm_apcscc void @perror(i8* getelementptr ([11 x i8]* @.str, i32 0, i32 0)) nounwind
+ tail call void @perror(i8* getelementptr ([11 x i8]* @.str, i32 0, i32 0)) nounwind
ret i32 1
bb2: ; preds = %bb
@@ -264,9 +264,9 @@
bb3: ; preds = %bb2
%3 = shl i32 %0, 1 ; <i32> [#uses=1]
- tail call arm_apcscc void @adpcm_decoder(i8* getelementptr ([500 x i8]* @abuf, i32 0, i32 0), i16* getelementptr ([1000 x i16]* @sbuf, i32 0, i32 0), i32 %3, %struct.adpcm_state* @state) nounwind
+ tail call void @adpcm_decoder(i8* getelementptr ([500 x i8]* @abuf, i32 0, i32 0), i16* getelementptr ([1000 x i16]* @sbuf, i32 0, i32 0), i32 %3, %struct.adpcm_state* @state) nounwind
%4 = shl i32 %0, 2 ; <i32> [#uses=1]
- %5 = tail call arm_apcscc i32 (...)* @write(i32 1, i16* getelementptr ([1000 x i16]* @sbuf, i32 0, i32 0), i32 %4) nounwind ; <i32> [#uses=0]
+ %5 = tail call i32 (...)* @write(i32 1, i16* getelementptr ([1000 x i16]* @sbuf, i32 0, i32 0), i32 %4) nounwind ; <i32> [#uses=0]
br label %bb
bb4: ; preds = %bb2
@@ -275,14 +275,14 @@
%8 = sext i16 %7 to i32 ; <i32> [#uses=1]
%9 = load i8* getelementptr (%struct.adpcm_state* @state, i32 0, i32 1), align 2 ; <i8> [#uses=1]
%10 = sext i8 %9 to i32 ; <i32> [#uses=1]
- %11 = tail call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %6, i8* getelementptr ([28 x i8]* @.str1, i32 0, i32 0), i32 %8, i32 %10) nounwind ; <i32> [#uses=0]
+ %11 = tail call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %6, i8* getelementptr ([28 x i8]* @.str1, i32 0, i32 0), i32 %8, i32 %10) nounwind ; <i32> [#uses=0]
ret i32 0
}
-declare arm_apcscc i32 @read(...)
+declare i32 @read(...)
-declare arm_apcscc void @perror(i8* nocapture) nounwind
+declare void @perror(i8* nocapture) nounwind
-declare arm_apcscc i32 @write(...)
+declare i32 @write(...)
-declare arm_apcscc i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
+declare i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/machine-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/machine-licm.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/machine-licm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/machine-licm.ll Fri Jul 2 04:34:51 2010
@@ -7,7 +7,7 @@
@GV = external global i32 ; <i32*> [#uses=2]
-define arm_apcscc void @t(i32* nocapture %vals, i32 %c) nounwind {
+define void @t(i32* nocapture %vals, i32 %c) nounwind {
entry:
; CHECK: t:
%0 = icmp eq i32 %c, 0 ; <i1> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/pop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/pop.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/pop.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/pop.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumb-apple-darwin | FileCheck %s
; rdar://7268481
-define arm_apcscc void @t(i8* %a, ...) nounwind {
+define void @t(i8* %a, ...) nounwind {
; CHECK: t:
; CHECK: pop {r3}
; CHECK-NEXT: add sp, #12
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/push.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/push.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/push.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/push.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumb-apple-darwin -disable-fp-elim | FileCheck %s
; rdar://7268481
-define arm_apcscc void @t() nounwind {
+define void @t() nounwind {
; CHECK: t:
; CHECK-NEXT: push {r7}
entry:
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb/trap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb/trap.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb/trap.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb/trap.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=thumb | FileCheck %s
; rdar://7961298
-define arm_apcscc void @t() nounwind {
+define void @t() nounwind {
entry:
; CHECK: t:
; CHECK: trap
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll Fri Jul 2 04:34:51 2010
@@ -4,9 +4,9 @@
target triple = "thumbv6t2-elf"
%struct.dwarf_cie = type <{ i32, i32, i8, [0 x i8], [3 x i8] }>
-declare arm_apcscc i8* @read_sleb128(i8*, i32* nocapture) nounwind
+declare i8* @read_sleb128(i8*, i32* nocapture) nounwind
-define arm_apcscc i32 @get_cie_encoding(%struct.dwarf_cie* %cie) nounwind {
+define i32 @get_cie_encoding(%struct.dwarf_cie* %cie) nounwind {
entry:
br i1 undef, label %bb1, label %bb13
@@ -27,7 +27,7 @@
%.sum40 = add i32 %indvar.i, undef ; <i32> [#uses=1]
%.sum31 = add i32 %.sum40, 2 ; <i32> [#uses=1]
%scevgep.i = getelementptr %struct.dwarf_cie* %cie, i32 0, i32 3, i32 %.sum31 ; <i8*> [#uses=1]
- %3 = call arm_apcscc i8* @read_sleb128(i8* %scevgep.i, i32* undef) ; <i8*> [#uses=0]
+ %3 = call i8* @read_sleb128(i8* %scevgep.i, i32* undef) ; <i8*> [#uses=0]
unreachable
bb13: ; preds = %entry
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
@"\01LC" = external constant [36 x i8], align 1 ; <[36 x i8]*> [#uses=1]
-define arm_apcscc i32 @t(i32, ...) nounwind {
+define i32 @t(i32, ...) nounwind {
entry:
; CHECK: t:
; CHECK: add r7, sp, #12
@@ -24,7 +24,7 @@
%15 = sext i8 %6 to i32 ; <i32> [#uses=2]
%16 = sext i16 %10 to i32 ; <i32> [#uses=2]
%17 = sext i16 %13 to i32 ; <i32> [#uses=2]
- %18 = call arm_apcscc i32 (i8*, ...)* @printf(i8* getelementptr ([36 x i8]* @"\01LC", i32 0, i32 0), i32 -128, i32 0, i32 %15, i32 %16, i32 %17, i32 0, i32 %14) nounwind ; <i32> [#uses=0]
+ %18 = call i32 (i8*, ...)* @printf(i8* getelementptr ([36 x i8]* @"\01LC", i32 0, i32 0), i32 -128, i32 0, i32 %15, i32 %16, i32 %17, i32 0, i32 %14) nounwind ; <i32> [#uses=0]
%19 = add i32 0, %15 ; <i32> [#uses=1]
%20 = add i32 %19, %16 ; <i32> [#uses=1]
%21 = add i32 %20, %14 ; <i32> [#uses=1]
@@ -33,4 +33,4 @@
ret i32 %23
}
-declare arm_apcscc i32 @printf(i8* nocapture, ...) nounwind
+declare i32 @printf(i8* nocapture, ...) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-23-CPIslandBug.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mattr=+vfp2,+thumb2
; rdar://7083961
-define arm_apcscc i32 @value(i64 %b1, i64 %b2) nounwind readonly {
+define i32 @value(i64 %b1, i64 %b2) nounwind readonly {
entry:
%0 = icmp eq i32 undef, 0 ; <i1> [#uses=1]
%mod.0.ph.ph = select i1 %0, float -1.000000e+00, float 1.000000e+00 ; <float> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll Fri Jul 2 04:34:51 2010
@@ -28,7 +28,7 @@
%struct.jvirt_barray_control = type { [64 x i16]**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_barray_control*, %struct.backing_store_info }
%struct.jvirt_sarray_control = type { i8**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_sarray_control*, %struct.backing_store_info }
-define arm_apcscc void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
+define void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
entry:
%workspace = alloca [64 x float], align 4 ; <[64 x float]*> [#uses=11]
%0 = load i8** undef, align 4 ; <i8*> [#uses=5]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll Fri Jul 2 04:34:51 2010
@@ -6,7 +6,7 @@
@lefline = external global [100 x [20 x i32]] ; <[100 x [20 x i32]]*> [#uses=1]
@sep = external global [20 x i32] ; <[20 x i32]*> [#uses=1]
-define arm_apcscc void @main(i32 %argc, i8** %argv) noreturn nounwind {
+define void @main(i32 %argc, i8** %argv) noreturn nounwind {
; CHECK: main:
; CHECK: ldrb
entry:
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll Fri Jul 2 04:34:51 2010
@@ -22,9 +22,9 @@
%"struct.xalanc_1_8::XalanDOMString" = type { %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", i32 }
%"struct.xalanc_1_8::XalanOutputStream" = type { i32 (...)**, i32, %"struct.std::basic_ostream<char,std::char_traits<char> >.base"*, i32, %"struct.std::vector<short unsigned int,std::allocator<short unsigned int> >", %"struct.xalanc_1_8::XalanDOMString", i8, i8, %"struct.std::CharVectorType" }
-declare arm_apcscc void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"*)
+declare void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"*)
-define arm_apcscc void @_ZN10xalanc_1_814FormatterToXML5cdataEPKtj(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length) {
+define void @_ZN10xalanc_1_814FormatterToXML5cdataEPKtj(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length) {
entry:
%0 = getelementptr %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 13 ; <i8*> [#uses=1]
br i1 undef, label %bb4, label %bb
@@ -36,11 +36,11 @@
%3 = getelementptr i32 (...)** %2, i32 11 ; <i32 (...)**> [#uses=1]
%4 = load i32 (...)** %3, align 4 ; <i32 (...)*> [#uses=1]
%5 = bitcast i32 (...)* %4 to void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)* ; <void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)*> [#uses=1]
- tail call arm_apcscc void %5(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length)
+ tail call void %5(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length)
ret void
bb4: ; preds = %entry
- tail call arm_apcscc void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"* %this)
- tail call arm_apcscc void undef(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 0, i32 %length, i8 zeroext undef)
+ tail call void @_ZN10xalanc_1_814FormatterToXML17writeParentTagEndEv(%"struct.xalanc_1_8::FormatterToXML"* %this)
+ tail call void undef(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 0, i32 %length, i8 zeroext undef)
ret void
}
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerAssert.ll Fri Jul 2 04:34:51 2010
@@ -5,7 +5,7 @@
%struct._IO_marker = type { %struct._IO_marker*, %struct.FILE*, i32 }
@.str2 = external constant [30 x i8], align 1 ; <[30 x i8]*> [#uses=1]
-define arm_aapcscc i32 @__mf_heuristic_check(i32 %ptr, i32 %ptr_high) nounwind {
+define i32 @__mf_heuristic_check(i32 %ptr, i32 %ptr_high) nounwind {
entry:
br i1 undef, label %bb1, label %bb
@@ -17,7 +17,7 @@
bb2: ; preds = %bb1
%0 = call i8* @llvm.frameaddress(i32 0) ; <i8*> [#uses=1]
- %1 = call arm_aapcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* noalias undef, i8* noalias getelementptr ([30 x i8]* @.str2, i32 0, i32 0), i8* %0, i8* null) nounwind ; <i32> [#uses=0]
+ %1 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* noalias undef, i8* noalias getelementptr ([30 x i8]* @.str2, i32 0, i32 0), i8* %0, i8* null) nounwind ; <i32> [#uses=0]
unreachable
bb9: ; preds = %bb1
@@ -26,4 +26,4 @@
declare i8* @llvm.frameaddress(i32) nounwind readnone
-declare arm_aapcscc i32 @fprintf(%struct.FILE* noalias nocapture, i8* noalias nocapture, ...) nounwind
+declare i32 @fprintf(%struct.FILE* noalias nocapture, i8* noalias nocapture, ...) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll Fri Jul 2 04:34:51 2010
@@ -28,17 +28,17 @@
@.str1822946 = external constant [8 x i8], align 1 ; <[8 x i8]*> [#uses=1]
@.str1842948 = external constant [11 x i8], align 1 ; <[11 x i8]*> [#uses=1]
-declare arm_apcscc i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
+declare i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
-declare arm_apcscc i32 @"\01_fwrite"(i8*, i32, i32, i8*)
+declare i32 @"\01_fwrite"(i8*, i32, i32, i8*)
-declare arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
+declare %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
-declare arm_apcscc void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
+declare void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
-declare arm_apcscc i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
+declare i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
-define arm_apcscc void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
+define void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
entry:
br label %bb5
@@ -49,7 +49,7 @@
br i1 undef, label %bb5, label %bb6
bb6: ; preds = %bb5
- %0 = call arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext 0, %struct.rec** undef, %struct.FILE_POS* null, i32* undef) nounwind ; <%struct.FILE*> [#uses=1]
+ %0 = call %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext 0, %struct.rec** undef, %struct.FILE_POS* null, i32* undef) nounwind ; <%struct.FILE*> [#uses=1]
br i1 false, label %bb.i, label %FontHalfXHeight.exit
bb.i: ; preds = %bb6
@@ -67,22 +67,22 @@
br i1 %2, label %bb.i5, label %FontName.exit
bb.i5: ; preds = %FontSize.exit
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8]* @.str81872, i32 0, i32 0)) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8]* @.str81872, i32 0, i32 0)) nounwind
br label %FontName.exit
FontName.exit: ; preds = %bb.i5, %FontSize.exit
- %3 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %1, i8* undef) nounwind ; <i32> [#uses=0]
- %4 = call arm_apcscc i32 @"\01_fwrite"(i8* getelementptr ([11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind ; <i32> [#uses=0]
+ %3 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %1, i8* undef) nounwind ; <i32> [#uses=0]
+ %4 = call i32 @"\01_fwrite"(i8* getelementptr ([11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind ; <i32> [#uses=0]
%5 = sub i32 %colmark, undef ; <i32> [#uses=1]
%6 = sub i32 %rowmark, undef ; <i32> [#uses=1]
%7 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %8 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %7, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %5, i32 %6) nounwind ; <i32> [#uses=0]
+ %8 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %7, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %5, i32 %6) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
%9 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
%10 = load i32* %9, align 4 ; <i32> [#uses=1]
%11 = sub i32 0, %10 ; <i32> [#uses=1]
%12 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %13 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %12, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %11) nounwind ; <i32> [#uses=0]
+ %13 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %12, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %11) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
br label %bb100.outer.outer
@@ -132,7 +132,7 @@
br label %bb2.i41
bb2.i.i15.critedge: ; preds = %bb.i47
- %16 = call arm_apcscc i8* @fgets(i8* undef, i32 512, %struct.FILE* %0) nounwind ; <i8*> [#uses=0]
+ %16 = call i8* @fgets(i8* undef, i32 512, %struct.FILE* %0) nounwind ; <i8*> [#uses=0]
%iftmp.560.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
br label %bb100.outer
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -55,25 +55,25 @@
@.str1872951 = external constant [17 x i8], align 1 ; <[17 x i8]*> [#uses=1]
@.str1932957 = external constant [26 x i8], align 1 ; <[26 x i8]*> [#uses=1]
-declare arm_apcscc i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
+declare i32 @fprintf(%struct.FILE* nocapture, i8* nocapture, ...) nounwind
-declare arm_apcscc i32 @"\01_fwrite"(i8*, i32, i32, i8*)
+declare i32 @"\01_fwrite"(i8*, i32, i32, i8*)
-declare arm_apcscc i32 @remove(i8* nocapture) nounwind
+declare i32 @remove(i8* nocapture) nounwind
-declare arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
+declare %struct.FILE* @OpenIncGraphicFile(i8*, i8 zeroext, %struct.rec** nocapture, %struct.FILE_POS*, i32* nocapture) nounwind
-declare arm_apcscc %struct.rec* @MakeWord(i32, i8* nocapture, %struct.FILE_POS*) nounwind
+declare %struct.rec* @MakeWord(i32, i8* nocapture, %struct.FILE_POS*) nounwind
-declare arm_apcscc void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
+declare void @Error(i32, i32, i8*, i32, %struct.FILE_POS*, ...) nounwind
-declare arm_apcscc i32 @"\01_fputs"(i8*, %struct.FILE*)
+declare i32 @"\01_fputs"(i8*, %struct.FILE*)
-declare arm_apcscc noalias i8* @calloc(i32, i32) nounwind
+declare noalias i8* @calloc(i32, i32) nounwind
-declare arm_apcscc i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
+declare i8* @fgets(i8*, i32, %struct.FILE* nocapture) nounwind
-define arm_apcscc void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
+define void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
entry:
%buff = alloca [512 x i8], align 4 ; <[512 x i8]*> [#uses=5]
%0 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=2]
@@ -94,7 +94,7 @@
br i1 %8, label %bb2, label %bb3
bb2: ; preds = %bb1
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([40 x i8]* @.str1802944, i32 0, i32 0)) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([40 x i8]* @.str1802944, i32 0, i32 0)) nounwind
br label %bb3
bb3: ; preds = %bb2, %bb1
@@ -108,7 +108,7 @@
bb6: ; preds = %bb5
%10 = load i8* %0, align 4 ; <i8> [#uses=1]
%11 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=1]
- %12 = call arm_apcscc %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext %10, %struct.rec** null, %struct.FILE_POS* %11, i32* undef) nounwind ; <%struct.FILE*> [#uses=4]
+ %12 = call %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext %10, %struct.rec** null, %struct.FILE_POS* %11, i32* undef) nounwind ; <%struct.FILE*> [#uses=4]
br i1 false, label %bb7, label %bb8
bb7: ; preds = %bb6
@@ -124,7 +124,7 @@
br i1 %15, label %bb.i, label %FontHalfXHeight.exit
bb.i: ; preds = %bb9
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([17 x i8]* @.str111875, i32 0, i32 0)) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([17 x i8]* @.str111875, i32 0, i32 0)) nounwind
%.pre186 = load i32* @currentfont, align 4 ; <i32> [#uses=1]
br label %FontHalfXHeight.exit
@@ -139,7 +139,7 @@
br i1 undef, label %bb2.i, label %FontSize.exit
bb2.i: ; preds = %bb1.i
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 37, i32 61, i8* getelementptr ([30 x i8]* @.str101874, i32 0, i32 0), i32 1, %struct.FILE_POS* null) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 37, i32 61, i8* getelementptr ([30 x i8]* @.str101874, i32 0, i32 0), i32 1, %struct.FILE_POS* null) nounwind
unreachable
FontSize.exit: ; preds = %bb1.i
@@ -151,35 +151,35 @@
br i1 %21, label %bb.i5, label %FontName.exit
bb.i5: ; preds = %FontSize.exit
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8]* @.str81872, i32 0, i32 0)) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 1, i32 2, i8* getelementptr ([20 x i8]* @.str24239, i32 0, i32 0), i32 0, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*), i8* getelementptr ([10 x i8]* @.str81872, i32 0, i32 0)) nounwind
br label %FontName.exit
FontName.exit: ; preds = %bb.i5, %FontSize.exit
%22 = phi %struct.FONT_INFO* [ undef, %bb.i5 ], [ undef, %FontSize.exit ] ; <%struct.FONT_INFO*> [#uses=1]
%23 = getelementptr %struct.FONT_INFO* %22, i32 %19, i32 5 ; <%struct.rec**> [#uses=0]
- %24 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %18, i8* null) nounwind ; <i32> [#uses=0]
+ %24 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %18, i8* null) nounwind ; <i32> [#uses=0]
br label %bb10
bb10: ; preds = %FontName.exit, %bb8
- %25 = call arm_apcscc i32 @"\01_fwrite"(i8* getelementptr ([11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind ; <i32> [#uses=0]
+ %25 = call i32 @"\01_fwrite"(i8* getelementptr ([11 x i8]* @.str1842948, i32 0, i32 0), i32 1, i32 10, i8* undef) nounwind ; <i32> [#uses=0]
%26 = sub i32 %rowmark, undef ; <i32> [#uses=1]
%27 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %28 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %27, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %26) nounwind ; <i32> [#uses=0]
+ %28 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %27, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %26) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
- %29 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([17 x i8]* @.str192782, i32 0, i32 0), double 2.000000e+01, double 2.000000e+01) nounwind ; <i32> [#uses=0]
+ %29 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([17 x i8]* @.str192782, i32 0, i32 0), double 2.000000e+01, double 2.000000e+01) nounwind ; <i32> [#uses=0]
%30 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
%31 = load i32* %30, align 4 ; <i32> [#uses=1]
%32 = sub i32 0, %31 ; <i32> [#uses=1]
%33 = load i32* undef, align 4 ; <i32> [#uses=1]
%34 = sub i32 0, %33 ; <i32> [#uses=1]
%35 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %36 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %35, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %32, i32 %34) nounwind ; <i32> [#uses=0]
+ %36 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %35, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %32, i32 %34) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
%37 = load %struct.rec** null, align 4 ; <%struct.rec*> [#uses=1]
%38 = getelementptr %struct.rec* %37, i32 0, i32 0, i32 4 ; <%struct.FOURTH_UNION*> [#uses=1]
- %39 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([23 x i8]* @.str1852949, i32 0, i32 0), %struct.FOURTH_UNION* %38) nounwind ; <i32> [#uses=0]
+ %39 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([23 x i8]* @.str1852949, i32 0, i32 0), %struct.FOURTH_UNION* %38) nounwind ; <i32> [#uses=0]
%buff14 = getelementptr [512 x i8]* %buff, i32 0, i32 0 ; <i8*> [#uses=5]
- %40 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
+ %40 = call i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
%iftmp.506.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
%41 = getelementptr [512 x i8]* %buff, i32 0, i32 26 ; <i8*> [#uses=1]
br label %bb100.outer.outer
@@ -230,7 +230,7 @@
br i1 %50, label %bb24, label %bb2.i.i68
bb24: ; preds = %bb3.i77
- %51 = call arm_apcscc %struct.rec* @MakeWord(i32 11, i8* %41, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=0]
+ %51 = call %struct.rec* @MakeWord(i32 11, i8* %41, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=0]
%52 = load i8* getelementptr ([150 x i8]* @zz_lengths, i32 0, i32 0), align 4 ; <i8> [#uses=1]
%53 = zext i8 %52 to i32 ; <i32> [#uses=2]
%54 = getelementptr [524 x %struct.rec*]* @zz_free, i32 0, i32 %53 ; <%struct.rec**> [#uses=2]
@@ -245,7 +245,7 @@
br i1 undef, label %bb1.i58, label %bb2.i60
bb1.i58: ; preds = %bb.i56
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 31, i32 1, i8* getelementptr ([32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 31, i32 1, i8* getelementptr ([32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
br label %bb2.i60
bb2.i60: ; preds = %bb1.i58, %bb.i56
@@ -287,7 +287,7 @@
br label %bb41
bb41: ; preds = %bb37, %bb35
- %61 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=1]
+ %61 = call i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=1]
%62 = icmp eq i8* %61, null ; <i1> [#uses=1]
%iftmp.554.0 = select i1 %62, i32 2, i32 1 ; <i32> [#uses=1]
br label %bb100.outer
@@ -342,11 +342,11 @@
br i1 undef, label %bb2.i6.i26, label %bb55
bb55: ; preds = %bb2.i6.i26
- %69 = call arm_apcscc i32 @"\01_fputs"(i8* %buff14, %struct.FILE* undef) nounwind ; <i32> [#uses=0]
+ %69 = call i32 @"\01_fputs"(i8* %buff14, %struct.FILE* undef) nounwind ; <i32> [#uses=0]
unreachable
bb58: ; preds = %StringBeginsWith.exit.i20
- %70 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
+ %70 = call i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
%iftmp.560.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
br label %bb100.outer
@@ -367,7 +367,7 @@
br i1 %phitmp93, label %bb66, label %bb2.i.i
bb66: ; preds = %StringBeginsWith.exit
- %71 = call arm_apcscc %struct.rec* @MakeWord(i32 11, i8* undef, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=4]
+ %71 = call %struct.rec* @MakeWord(i32 11, i8* undef, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=4]
%72 = load i8* getelementptr ([150 x i8]* @zz_lengths, i32 0, i32 0), align 4 ; <i8> [#uses=1]
%73 = zext i8 %72 to i32 ; <i32> [#uses=2]
%74 = getelementptr [524 x %struct.rec*]* @zz_free, i32 0, i32 %73 ; <%struct.rec**> [#uses=2]
@@ -379,13 +379,13 @@
br i1 undef, label %bb.i2, label %GetMemory.exit
bb.i2: ; preds = %bb69
- %77 = call arm_apcscc noalias i8* @calloc(i32 1020, i32 4) nounwind ; <i8*> [#uses=1]
+ %77 = call noalias i8* @calloc(i32 1020, i32 4) nounwind ; <i8*> [#uses=1]
%78 = bitcast i8* %77 to i8** ; <i8**> [#uses=3]
store i8** %78, i8*** @next_free.4772, align 4
br i1 undef, label %bb1.i3, label %bb2.i4
bb1.i3: ; preds = %bb.i2
- call arm_apcscc void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 31, i32 1, i8* getelementptr ([32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
+ call void (i32, i32, i8*, i32, %struct.FILE_POS*, ...)* @Error(i32 31, i32 1, i8* getelementptr ([32 x i8]* @.str1575, i32 0, i32 0), i32 1, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind
br label %bb2.i4
bb2.i4: ; preds = %bb1.i3, %bb.i2
@@ -482,7 +482,7 @@
unreachable
bb94: ; preds = %strip_out.exit, %StringBeginsWith.exit.i
- %96 = call arm_apcscc i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
+ %96 = call i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
unreachable
bb100.outer: ; preds = %bb58, %bb41, %bb100.outer.outer
@@ -497,12 +497,12 @@
br i1 %97, label %bb103, label %bb102
bb102: ; preds = %bb101.split
- %98 = call arm_apcscc i32 @remove(i8* getelementptr ([9 x i8]* @.str19294, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
+ %98 = call i32 @remove(i8* getelementptr ([9 x i8]* @.str19294, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
unreachable
bb103: ; preds = %bb101.split
%99 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
- %100 = call arm_apcscc i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %99, i8* getelementptr ([26 x i8]* @.str1932957, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
+ %100 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %99, i8* getelementptr ([26 x i8]* @.str1932957, i32 0, i32 0)) nounwind ; <i32> [#uses=0]
store i32 0, i32* @wordcount, align 4
ret void
}
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug.ll Fri Jul 2 04:34:51 2010
@@ -8,7 +8,7 @@
%struct.Results = type { float, float, float }
%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-define arm_apcscc void @get_results(%struct.Results* noalias nocapture sret %agg.result, %struct.Village* %village) nounwind {
+define void @get_results(%struct.Results* noalias nocapture sret %agg.result, %struct.Village* %village) nounwind {
entry:
br i1 undef, label %bb, label %bb6.preheader
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug2.ll Fri Jul 2 04:34:51 2010
@@ -6,7 +6,7 @@
%struct.Patient = type { i32, i32, i32, %struct.Village* }
%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-define arm_apcscc %struct.List* @sim(%struct.Village* %village) nounwind {
+define %struct.List* @sim(%struct.Village* %village) nounwind {
entry:
br i1 undef, label %bb14, label %bb3.preheader
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll Fri Jul 2 04:34:51 2010
@@ -6,7 +6,7 @@
%struct.Patient = type { i32, i32, i32, %struct.Village* }
%struct.Village = type { [4 x %struct.Village*], %struct.Village*, %struct.List, %struct.Hosp, i32, i32 }
-define arm_apcscc %struct.List* @sim(%struct.Village* %village) nounwind {
+define %struct.List* @sim(%struct.Village* %village) nounwind {
entry:
br i1 undef, label %bb14, label %bb3.preheader
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-06-SpDecBug.ll Fri Jul 2 04:34:51 2010
@@ -2,12 +2,12 @@
; PR4659
; PR4682
-define hidden arm_aapcscc i32 @__gcov_execlp(i8* %path, i8* %arg, ...) nounwind {
+define hidden i32 @__gcov_execlp(i8* %path, i8* %arg, ...) nounwind {
entry:
; CHECK: __gcov_execlp:
; CHECK: mov sp, r7
; CHECK: sub sp, #4
- call arm_aapcscc void @__gcov_flush() nounwind
+ call void @__gcov_flush() nounwind
br i1 undef, label %bb5, label %bb
bb: ; preds = %bb, %entry
@@ -15,10 +15,10 @@
bb5: ; preds = %bb, %entry
%0 = alloca i8*, i32 undef, align 4 ; <i8**> [#uses=1]
- %1 = call arm_aapcscc i32 @execvp(i8* %path, i8** %0) nounwind ; <i32> [#uses=1]
+ %1 = call i32 @execvp(i8* %path, i8** %0) nounwind ; <i32> [#uses=1]
ret i32 %1
}
-declare hidden arm_aapcscc void @__gcov_flush()
+declare hidden void @__gcov_flush()
-declare arm_aapcscc i32 @execvp(i8*, i8**) nounwind
+declare i32 @execvp(i8*, i8**) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll Fri Jul 2 04:34:51 2010
@@ -28,7 +28,7 @@
%struct.jvirt_barray_control = type { [64 x i16]**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_barray_control*, %struct.backing_store_info }
%struct.jvirt_sarray_control = type { i8**, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, %struct.jvirt_sarray_control*, %struct.backing_store_info }
-define arm_apcscc void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
+define void @jpeg_idct_float(%struct.jpeg_decompress_struct* nocapture %cinfo, %struct.jpeg_component_info* nocapture %compptr, i16* nocapture %coef_block, i8** nocapture %output_buf, i32 %output_col) nounwind {
entry:
br label %bb
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-08-ScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
@g_d = external global double ; <double*> [#uses=1]
-define arm_aapcscc void @foo(float %yIncr) {
+define void @foo(float %yIncr) {
entry:
br i1 undef, label %bb, label %bb4
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mattr=+vfp2
-define arm_apcscc float @t1(i32 %v0) nounwind {
+define float @t1(i32 %v0) nounwind {
entry:
store i32 undef, i32* undef, align 4
%0 = load [4 x i8]** undef, align 4 ; <[4 x i8]*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
@getNeighbour = external global void (i32, i32, i32, i32, %struct.pix_pos*)*, align 4 ; <void (i32, i32, i32, i32, %struct.pix_pos*)**> [#uses=2]
-define arm_apcscc void @t() nounwind {
+define void @t() nounwind {
; CHECK: t:
; CHECK: it eq
; CHECK-NEXT: cmpeq
@@ -47,12 +47,12 @@
%tmp14.i302 = load i32* undef ; <i32> [#uses=4]
%add.i307452 = or i32 %shl1959, 1 ; <i32> [#uses=1]
%sub.i308 = add i32 %shl, -1 ; <i32> [#uses=4]
- call arm_apcscc void undef(i32 %tmp14.i302, i32 %sub.i308, i32 %shl1959, i32 0, %struct.pix_pos* undef) nounwind
+ call void undef(i32 %tmp14.i302, i32 %sub.i308, i32 %shl1959, i32 0, %struct.pix_pos* undef) nounwind
%tmp49.i309 = load void (i32, i32, i32, i32, %struct.pix_pos*)** @getNeighbour ; <void (i32, i32, i32, i32, %struct.pix_pos*)*> [#uses=1]
- call arm_apcscc void %tmp49.i309(i32 %tmp14.i302, i32 %sub.i308, i32 %add.i307452, i32 0, %struct.pix_pos* null) nounwind
+ call void %tmp49.i309(i32 %tmp14.i302, i32 %sub.i308, i32 %add.i307452, i32 0, %struct.pix_pos* null) nounwind
%tmp49.1.i = load void (i32, i32, i32, i32, %struct.pix_pos*)** @getNeighbour ; <void (i32, i32, i32, i32, %struct.pix_pos*)*> [#uses=1]
- call arm_apcscc void %tmp49.1.i(i32 %tmp14.i302, i32 %sub.i308, i32 undef, i32 0, %struct.pix_pos* %arrayidx56.2.i) nounwind
- call arm_apcscc void undef(i32 %tmp14.i302, i32 %sub.i308, i32 undef, i32 0, %struct.pix_pos* %arrayidx56.3.i) nounwind
+ call void %tmp49.1.i(i32 %tmp14.i302, i32 %sub.i308, i32 undef, i32 0, %struct.pix_pos* %arrayidx56.2.i) nounwind
+ call void undef(i32 %tmp14.i302, i32 %sub.i308, i32 undef, i32 0, %struct.pix_pos* %arrayidx56.3.i) nounwind
unreachable
if.else2003: ; preds = %for.body1940
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-10-15-ITBlockBranch.ll Fri Jul 2 04:34:51 2010
@@ -12,6 +12,8 @@
; CHECK: _ZNKSs7compareERKSs:
; CHECK: it eq
; CHECK-NEXT: subeq.w r0, r6, r8
+; CHECK-NEXT: %bb
+; CHECK-NEXT: %bb1
; CHECK-NEXT: ldmia.w sp, {r4, r5, r6, r8, r9, pc}
entry:
%0 = tail call arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string<char,std::char_traits<char>,std::allocator<char> >"* %this) ; <i32> [#uses=3]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-01-CopyReg2RegBug.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8
-define arm_apcscc void @get_initial_mb16x16_cost() nounwind {
+define void @get_initial_mb16x16_cost() nounwind {
entry:
br i1 undef, label %bb4, label %bb1
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll Fri Jul 2 04:34:51 2010
@@ -3,9 +3,9 @@
%struct.OP = type { %struct.OP*, %struct.OP*, %struct.OP* ()*, i32, i16, i16, i8, i8 }
%struct.SV = type { i8*, i32, i32 }
-declare arm_apcscc void @Perl_mg_set(%struct.SV*) nounwind
+declare void @Perl_mg_set(%struct.SV*) nounwind
-define arm_apcscc %struct.OP* @Perl_pp_complement() nounwind {
+define %struct.OP* @Perl_pp_complement() nounwind {
entry:
%0 = load %struct.SV** null, align 4 ; <%struct.SV*> [#uses=2]
br i1 undef, label %bb21, label %bb5
@@ -23,7 +23,7 @@
%4 = bitcast i8* %3 to i32* ; <i32*> [#uses=1]
%5 = load i32* %4, align 4 ; <i32> [#uses=1]
%storemerge5 = xor i32 %5, -1 ; <i32> [#uses=1]
- call arm_apcscc void @Perl_sv_setiv(%struct.SV* undef, i32 %storemerge5) nounwind
+ call void @Perl_sv_setiv(%struct.SV* undef, i32 %storemerge5) nounwind
%6 = getelementptr inbounds %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=1]
%8 = and i32 %7, 16384 ; <i32> [#uses=1]
@@ -34,7 +34,7 @@
unreachable
bb11: ; preds = %bb7
- call arm_apcscc void @Perl_mg_set(%struct.SV* undef) nounwind
+ call void @Perl_mg_set(%struct.SV* undef) nounwind
br label %bb12
bb12: ; preds = %bb11, %bb7
@@ -42,11 +42,11 @@
br label %bb44
bb13: ; preds = %bb5
- %10 = call arm_apcscc i32 @Perl_sv_2uv(%struct.SV* %0) nounwind ; <i32> [#uses=0]
+ %10 = call i32 @Perl_sv_2uv(%struct.SV* %0) nounwind ; <i32> [#uses=0]
br i1 undef, label %bb.i, label %bb1.i
bb.i: ; preds = %bb13
- call arm_apcscc void @Perl_sv_setiv(%struct.SV* undef, i32 undef) nounwind
+ call void @Perl_sv_setiv(%struct.SV* undef, i32 undef) nounwind
br label %Perl_sv_setuv.exit
bb1.i: ; preds = %bb13
@@ -60,7 +60,7 @@
br i1 %14, label %bb20, label %bb19
bb19: ; preds = %Perl_sv_setuv.exit
- call arm_apcscc void @Perl_mg_set(%struct.SV* undef) nounwind
+ call void @Perl_mg_set(%struct.SV* undef) nounwind
br label %bb20
bb20: ; preds = %bb19, %Perl_sv_setuv.exit
@@ -80,6 +80,6 @@
ret %struct.OP* undef
}
-declare arm_apcscc void @Perl_sv_setiv(%struct.SV*, i32) nounwind
+declare void @Perl_sv_setiv(%struct.SV*, i32) nounwind
-declare arm_apcscc i32 @Perl_sv_2uv(%struct.SV*) nounwind
+declare i32 @Perl_sv_2uv(%struct.SV*) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-11-13-STRDBug.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin10
; rdar://7394794
-define arm_apcscc void @lshift_double(i64 %l1, i64 %h1, i64 %count, i32 %prec, i64* nocapture %lv, i64* nocapture %hv, i32 %arith) nounwind {
+define void @lshift_double(i64 %l1, i64 %h1, i64 %count, i32 %prec, i64* nocapture %lv, i64* nocapture %hv, i32 %arith) nounwind {
entry:
%..i = select i1 false, i64 0, i64 0 ; <i64> [#uses=1]
br i1 undef, label %bb11.i, label %bb6.i
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: opt < %s -std-compile-opts | \
; RUN: llc -mtriple=thumbv7-apple-darwin10 -mattr=+neon | FileCheck %s
-define arm_apcscc void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
+define void @fred(i32 %three_by_three, i8* %in, double %dt1, i32 %x_size, i32 %y_size, i8* %bp) nounwind {
entry:
; -- The loop following the load should only use a single add-literation
; instruction.
@@ -45,7 +45,7 @@
store i8* %bp, i8** %bp_addr
%0 = load i8** %in_addr, align 4 ; <i8*> [#uses=1]
store i8* %0, i8** %out, align 4
- %1 = call arm_apcscc i32 (...)* @foo() nounwind ; <i32> [#uses=1]
+ %1 = call i32 (...)* @foo() nounwind ; <i32> [#uses=1]
store i32 %1, i32* %i, align 4
%2 = load i32* %three_by_three_addr, align 4 ; <i32> [#uses=1]
%3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
@@ -76,7 +76,7 @@
%15 = load i32* %n_max, align 4 ; <i32> [#uses=1]
%16 = load i32* %n_max, align 4 ; <i32> [#uses=1]
%17 = mul i32 %15, %16 ; <i32> [#uses=1]
- %18 = call arm_apcscc noalias i8* @malloc(i32 %17) nounwind ; <i8*> [#uses=1]
+ %18 = call noalias i8* @malloc(i32 %17) nounwind ; <i8*> [#uses=1]
store i8* %18, i8** %dp, align 4
%19 = load i8** %dp, align 4 ; <i8*> [#uses=1]
store i8* %19, i8** %dpt, align 4
@@ -123,6 +123,6 @@
ret void
}
-declare arm_apcscc i32 @foo(...)
+declare i32 @foo(...)
-declare arm_apcscc noalias i8* @malloc(i32) nounwind
+declare noalias i8* @malloc(i32) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll Fri Jul 2 04:34:51 2010
@@ -17,14 +17,14 @@
@_ZN3WTFL12thread_heapsE = internal global %"struct.WTF::TCMalloc_ThreadCache"* null ; <%"struct.WTF::TCMalloc_ThreadCache"**> [#uses=1]
@llvm.used = appending global [1 x i8*] [i8* bitcast (%"struct.WTF::TCMalloc_ThreadCache"* ()* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-define arm_apcscc %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv() nounwind {
+define %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache22CreateCacheIfNecessaryEv() nounwind {
entry:
- %0 = tail call arm_apcscc i32 @pthread_mutex_lock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
+ %0 = tail call i32 @pthread_mutex_lock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
%.b24 = load i1* @_ZN3WTFL10tsd_initedE.b, align 4 ; <i1> [#uses=1]
br i1 %.b24, label %bb5, label %bb6
bb5: ; preds = %entry
- %1 = tail call arm_apcscc %struct._opaque_pthread_t* @pthread_self() nounwind
+ %1 = tail call %struct._opaque_pthread_t* @pthread_self() nounwind
br label %bb6
bb6: ; preds = %bb5, %entry
@@ -34,7 +34,7 @@
bb7: ; preds = %bb11
%2 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 1
%3 = load %struct._opaque_pthread_t** %2, align 4
- %4 = tail call arm_apcscc i32 @pthread_equal(%struct._opaque_pthread_t* %3, %struct._opaque_pthread_t* %me.0) nounwind
+ %4 = tail call i32 @pthread_equal(%struct._opaque_pthread_t* %3, %struct._opaque_pthread_t* %me.0) nounwind
%5 = icmp eq i32 %4, 0
br i1 %5, label %bb10, label %bb14
@@ -49,12 +49,12 @@
br i1 %7, label %bb13, label %bb7
bb13: ; preds = %bb11
- %8 = tail call arm_apcscc %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t* %me.0) nounwind
+ %8 = tail call %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t* %me.0) nounwind
br label %bb14
bb14: ; preds = %bb13, %bb7
%heap.1 = phi %"struct.WTF::TCMalloc_ThreadCache"* [ %8, %bb13 ], [ %h.0, %bb7 ] ; <%"struct.WTF::TCMalloc_ThreadCache"*> [#uses=4]
- %9 = tail call arm_apcscc i32 @pthread_mutex_unlock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
+ %9 = tail call i32 @pthread_mutex_unlock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
%10 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %heap.1, i32 0, i32 2
%11 = load i8* %10, align 4
%toBool15not = icmp eq i8 %11, 0 ; <i1> [#uses=1]
@@ -68,22 +68,22 @@
store i8 1, i8* %10, align 4
%12 = load i32* @_ZN3WTFL8heap_keyE, align 4
%13 = bitcast %"struct.WTF::TCMalloc_ThreadCache"* %heap.1 to i8*
- %14 = tail call arm_apcscc i32 @pthread_setspecific(i32 %12, i8* %13) nounwind
+ %14 = tail call i32 @pthread_setspecific(i32 %12, i8* %13) nounwind
ret %"struct.WTF::TCMalloc_ThreadCache"* %heap.1
bb22: ; preds = %bb19, %bb14
ret %"struct.WTF::TCMalloc_ThreadCache"* %heap.1
}
-declare arm_apcscc i32 @pthread_mutex_lock(%struct.PlatformMutex*)
+declare i32 @pthread_mutex_lock(%struct.PlatformMutex*)
-declare arm_apcscc i32 @pthread_mutex_unlock(%struct.PlatformMutex*)
+declare i32 @pthread_mutex_unlock(%struct.PlatformMutex*)
-declare hidden arm_apcscc %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t*) nounwind
+declare hidden %"struct.WTF::TCMalloc_ThreadCache"* @_ZN3WTF20TCMalloc_ThreadCache7NewHeapEP17_opaque_pthread_t(%struct._opaque_pthread_t*) nounwind
-declare arm_apcscc i32 @pthread_setspecific(i32, i8*)
+declare i32 @pthread_setspecific(i32, i8*)
-declare arm_apcscc %struct._opaque_pthread_t* @pthread_self()
+declare %struct._opaque_pthread_t* @pthread_self()
-declare arm_apcscc i32 @pthread_equal(%struct._opaque_pthread_t*, %struct._opaque_pthread_t*)
+declare i32 @pthread_equal(%struct._opaque_pthread_t*, %struct._opaque_pthread_t*)
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-01-19-RemovePredicates.ll Fri Jul 2 04:34:51 2010
@@ -6,16 +6,16 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"
-declare arm_apcscc void @etoe53(i16* nocapture, i16* nocapture) nounwind
+declare void @etoe53(i16* nocapture, i16* nocapture) nounwind
-define arm_apcscc void @earith(double* nocapture %value, i32 %icode, double* nocapture %r1, double* nocapture %r2) nounwind {
+define void @earith(double* nocapture %value, i32 %icode, double* nocapture %r1, double* nocapture %r2) nounwind {
entry:
%v = alloca [6 x i16], align 4 ; <[6 x i16]*> [#uses=1]
br i1 undef, label %bb2.i, label %bb5
bb2.i: ; preds = %entry
%0 = bitcast double* %value to i16* ; <i16*> [#uses=1]
- call arm_apcscc void @etoe53(i16* null, i16* %0) nounwind
+ call void @etoe53(i16* null, i16* %0) nounwind
ret void
bb5: ; preds = %entry
@@ -48,6 +48,6 @@
bb46: ; preds = %bb26, %bb10
%1 = bitcast double* %value to i16* ; <i16*> [#uses=1]
%v47 = getelementptr inbounds [6 x i16]* %v, i32 0, i32 0 ; <i16*> [#uses=1]
- call arm_apcscc void @etoe53(i16* %v47, i16* %1) nounwind
+ call void @etoe53(i16* %v47, i16* %1) nounwind
ret void
}
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-11-phi-cycle.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
-define arm_apcscc i32 @test(i32 %n) nounwind {
+define i32 @test(i32 %n) nounwind {
; CHECK: test:
; CHECK-NOT: mov
; CHECK: return
@@ -16,11 +16,11 @@
bb: ; preds = %bb.nph, %bb
%indvar = phi i32 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i32> [#uses=1]
%u.05 = phi i64 [ undef, %bb.nph ], [ %ins, %bb ] ; <i64> [#uses=1]
- %1 = tail call arm_apcscc i32 @f() nounwind ; <i32> [#uses=1]
+ %1 = tail call i32 @f() nounwind ; <i32> [#uses=1]
%tmp4 = zext i32 %1 to i64 ; <i64> [#uses=1]
%mask = and i64 %u.05, -4294967296 ; <i64> [#uses=1]
%ins = or i64 %tmp4, %mask ; <i64> [#uses=2]
- tail call arm_apcscc void @g(i64 %ins) nounwind
+ tail call void @g(i64 %ins) nounwind
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %tmp ; <i1> [#uses=1]
br i1 %exitcond, label %return, label %bb
@@ -29,7 +29,7 @@
ret i32 undef
}
-define arm_apcscc i32 @test_dead_cycle(i32 %n) nounwind {
+define i32 @test_dead_cycle(i32 %n) nounwind {
; CHECK: test_dead_cycle:
; CHECK: blx
; CHECK-NOT: mov
@@ -50,11 +50,11 @@
br i1 %1, label %bb1, label %bb2
bb1: ; preds = %bb
- %2 = tail call arm_apcscc i32 @f() nounwind ; <i32> [#uses=1]
+ %2 = tail call i32 @f() nounwind ; <i32> [#uses=1]
%tmp6 = zext i32 %2 to i64 ; <i64> [#uses=1]
%mask = and i64 %u.17, -4294967296 ; <i64> [#uses=1]
%ins = or i64 %tmp6, %mask ; <i64> [#uses=1]
- tail call arm_apcscc void @g(i64 %ins) nounwind
+ tail call void @g(i64 %ins) nounwind
br label %bb2
bb2: ; preds = %bb1, %bb
@@ -71,6 +71,6 @@
ret i32 undef
}
-declare arm_apcscc i32 @f()
+declare i32 @f()
-declare arm_apcscc void @g(i64)
+declare void @g(i64)
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-24-BigStack.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-24-BigStack.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-24-BigStack.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-02-24-BigStack.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin3.0.0-iphoneos"
-define arm_apcscc void @FindMin(double* %panelTDEL, i8* %dclOfRow, i32 %numRows, i32 %numCols, double* %retMin_RES_TDEL) {
+define void @FindMin(double* %panelTDEL, i8* %dclOfRow, i32 %numRows, i32 %numCols, double* %retMin_RES_TDEL) {
entry:
%panelTDEL.addr = alloca double*, align 4 ; <double**> [#uses=1]
%panelResTDEL = alloca [2560 x double], align 4 ; <[2560 x double]*> [#uses=0]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll Fri Jul 2 04:34:51 2010
@@ -2,13 +2,13 @@
@.str41196 = external constant [2 x i8], align 4 ; <[2 x i8]*> [#uses=1]
-declare arm_apcscc void @syStopraw(i32) nounwind
+declare void @syStopraw(i32) nounwind
-declare arm_apcscc i32 @SyFopen(i8*, i8*) nounwind
+declare i32 @SyFopen(i8*, i8*) nounwind
-declare arm_apcscc i8* @SyFgets(i8*, i32) nounwind
+declare i8* @SyFgets(i8*, i32) nounwind
-define arm_apcscc void @SyHelp(i8* nocapture %topic, i32 %fin) nounwind {
+define void @SyHelp(i8* nocapture %topic, i32 %fin) nounwind {
entry:
%line = alloca [256 x i8], align 4 ; <[256 x i8]*> [#uses=1]
%secname = alloca [1024 x i8], align 4 ; <[1024 x i8]*> [#uses=0]
@@ -70,7 +70,7 @@
unreachable
bb224: ; preds = %bb162
- %0 = call arm_apcscc i32 @SyFopen(i8* undef, i8* getelementptr inbounds ([2 x i8]* @.str41196, i32 0, i32 0)) nounwind ; <i32> [#uses=2]
+ %0 = call i32 @SyFopen(i8* undef, i8* getelementptr inbounds ([2 x i8]* @.str41196, i32 0, i32 0)) nounwind ; <i32> [#uses=2]
br i1 false, label %bb297, label %bb300
bb297: ; preds = %bb224
@@ -177,7 +177,7 @@
br i1 undef, label %bb373, label %bb388
bb373: ; preds = %bb383, %bb369
- %7 = call arm_apcscc i8* @SyFgets(i8* undef, i32 %0) nounwind ; <i8*> [#uses=1]
+ %7 = call i8* @SyFgets(i8* undef, i32 %0) nounwind ; <i8*> [#uses=1]
%8 = icmp eq i8* %7, null ; <i1> [#uses=1]
br i1 %8, label %bb375, label %bb383
@@ -241,7 +241,7 @@
br i1 undef, label %return, label %bb406
bb406: ; preds = %bb405
- call arm_apcscc void @syStopraw(i32 %fin) nounwind
+ call void @syStopraw(i32 %fin) nounwind
ret void
bb407: ; preds = %bb404
@@ -255,7 +255,7 @@
br label %bb440
bb440: ; preds = %bb428, %bb300
- %13 = call arm_apcscc i8* @SyFgets(i8* undef, i32 %0) nounwind ; <i8*> [#uses=0]
+ %13 = call i8* @SyFgets(i8* undef, i32 %0) nounwind ; <i8*> [#uses=0]
br i1 false, label %bb442, label %bb308
bb442: ; preds = %bb440
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll Fri Jul 2 04:34:51 2010
@@ -13,7 +13,7 @@
; CHECK: InlineAsm End
; CHECK: cmp
; CHECK: beq
-define arm_apcscc void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
+define void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
entry:
%tmp1 = getelementptr inbounds %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
volatile store i32 1, i32* %tmp1, align 4
@@ -32,9 +32,9 @@
%tmp19 = getelementptr inbounds %s1* %this, i32 0, i32 10
store i64 0, i64* %tmp19, align 4
%tmp20 = getelementptr inbounds %s1* %this, i32 0, i32 0
- tail call arm_apcscc void @f1(%s3* %tmp20, i32* %s) nounwind
+ tail call void @f1(%s3* %tmp20, i32* %s) nounwind
%tmp21 = shl i32 %format, 6
- %tmp22 = tail call arm_apcscc zeroext i8 @f2(i32 %format) nounwind
+ %tmp22 = tail call zeroext i8 @f2(i32 %format) nounwind
%toBoolnot = icmp eq i8 %tmp22, 0
%tmp23 = zext i1 %toBoolnot to i32
%flags.0 = or i32 %tmp23, %tmp21
@@ -59,5 +59,5 @@
ret void
}
-declare arm_apcscc void @f1(%s3*, i32*)
-declare arm_apcscc zeroext i8 @f2(i32)
+declare void @f1(%s3*, i32*)
+declare zeroext i8 @f2(i32)
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-15-DynAllocBug.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
; Make sure the result of the first dynamic_alloc isn't copied back to sp more
; than once. We'll deal with poor codegen later.
-define arm_apcscc void @t() nounwind ssp {
+define void @t() nounwind ssp {
entry:
; CHECK: t:
; CHECK: mov r0, sp
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-04-26-CopyRegCrash.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
target triple = "thumbv7-apple-darwin10"
-define arm_apcscc void @test(i32 %mode) nounwind optsize noinline {
+define void @test(i32 %mode) nounwind optsize noinline {
entry:
br i1 undef, label %return, label %bb3
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-05-24-rsbs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-05-24-rsbs.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-05-24-rsbs.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/2010-05-24-rsbs.ll Fri Jul 2 04:34:51 2010
@@ -2,7 +2,7 @@
; Radar 8017376: Missing 's' suffix for t2RSBS instructions.
; CHECK: rsbs
-define arm_apcscc i64 @test(i64 %x) nounwind readnone {
+define i64 @test(i64 %x) nounwind readnone {
entry:
%0 = sub nsw i64 1, %x ; <i64> [#uses=1]
ret i64 %0
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-1.ll Fri Jul 2 04:34:51 2010
@@ -4,9 +4,9 @@
%struct.__sFILEX = type opaque
%struct.__sbuf = type { i8*, i32 }
-declare arm_apcscc i32 @fgetc(%struct.FILE* nocapture) nounwind
+declare i32 @fgetc(%struct.FILE* nocapture) nounwind
-define arm_apcscc i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
entry:
br i1 undef, label %bb, label %bb1
@@ -20,7 +20,7 @@
unreachable
bb1.i2: ; preds = %bb1
- %0 = call arm_apcscc i32 @fgetc(%struct.FILE* undef) nounwind ; <i32> [#uses=0]
+ %0 = call i32 @fgetc(%struct.FILE* undef) nounwind ; <i32> [#uses=0]
br i1 undef, label %bb2.i3, label %bb3.i4
bb2.i3: ; preds = %bb1.i2
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin9 -mcpu=cortex-a8 | grep vmov.f32 | count 1
-define arm_apcscc void @fht(float* nocapture %fz, i16 signext %n) nounwind {
+define void @fht(float* nocapture %fz, i16 signext %n) nounwind {
entry:
br label %bb5
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -disable-fp-elim | not grep mov
; RUN: llc < %s -mtriple=thumbv7-linux -disable-fp-elim | not grep mov
-define arm_apcscc void @t() nounwind readnone {
+define void @t() nounwind readnone {
ret void
}
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/frameless2.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
%struct.noise3 = type { [3 x [17 x i32]] }
%struct.noiseguard = type { i32, i32, i32 }
-define arm_apcscc void @vorbis_encode_noisebias_setup(i8* nocapture %vi.0.7.val, double %s, i32 %block, i32* nocapture %suppress, %struct.noise3* nocapture %in, %struct.noiseguard* nocapture %guard, double %userbias) nounwind {
+define void @vorbis_encode_noisebias_setup(i8* nocapture %vi.0.7.val, double %s, i32 %block, i32* nocapture %suppress, %struct.noise3* nocapture %in, %struct.noiseguard* nocapture %guard, double %userbias) nounwind {
entry:
%0 = getelementptr %struct.noiseguard* %guard, i32 %block, i32 2; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/ifcvt-neon.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/ifcvt-neon.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/ifcvt-neon.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/ifcvt-neon.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
@a = common global float 0.000000e+00 ; <float*> [#uses=2]
@b = common global float 0.000000e+00 ; <float*> [#uses=1]
-define arm_apcscc float @t(i32 %c) nounwind {
+define float @t(i32 %c) nounwind {
entry:
%0 = icmp sgt i32 %c, 1 ; <i1> [#uses=1]
%1 = load float* @a, align 4 ; <float> [#uses=2]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/ldr-str-imm12.ll Fri Jul 2 04:34:51 2010
@@ -20,7 +20,7 @@
@zz_hold = external global %union.rec* ; <%union.rec**> [#uses=2]
@zz_res = external global %union.rec* ; <%union.rec**> [#uses=1]
-define arm_apcscc %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind {
+define %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind {
entry:
; CHECK: ldr.w r9, [r7, #28]
%xgaps.i = alloca [32 x %union.rec*], align 4 ; <[32 x %union.rec*]*> [#uses=0]
@@ -56,7 +56,7 @@
store %union.rec* null, %union.rec** @zz_hold, align 4
store %union.rec* null, %union.rec** @zz_res, align 4
store %union.rec* %x, %union.rec** @zz_hold, align 4
- %0 = call arm_apcscc %union.rec* @Manifest(%union.rec* undef, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind ; <%union.rec*> [#uses=0]
+ %0 = call %union.rec* @Manifest(%union.rec* undef, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind ; <%union.rec*> [#uses=0]
unreachable
bb438: ; preds = %bb20, %bb20
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/lsr-deficiency.ll Fri Jul 2 04:34:51 2010
@@ -11,7 +11,7 @@
@G = external global i32 ; <i32*> [#uses=2]
@array = external global i32* ; <i32**> [#uses=1]
-define arm_apcscc void @t() nounwind optsize {
+define void @t() nounwind optsize {
; CHECK: t:
; CHECK: mov.w r2, #1000
entry:
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/machine-licm.ll Fri Jul 2 04:34:51 2010
@@ -8,7 +8,7 @@
@GV = external global i32 ; <i32*> [#uses=2]
-define arm_apcscc void @t1(i32* nocapture %vals, i32 %c) nounwind {
+define void @t1(i32* nocapture %vals, i32 %c) nounwind {
entry:
; CHECK: t1:
; CHECK: cbz
@@ -52,7 +52,7 @@
}
; rdar://8001136
-define arm_apcscc void @t2(i8* %ptr1, i8* %ptr2) nounwind {
+define void @t2(i8* %ptr1, i8* %ptr2) nounwind {
entry:
; CHECK: t2:
; CHECK: adr r{{.}}, #LCPI1_0
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/pic-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/pic-load.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/pic-load.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/pic-load.ll Fri Jul 2 04:34:51 2010
@@ -5,7 +5,7 @@
@__dso_handle = external global { } ; <{ }*> [#uses=1]
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (void ()*)* @atexit to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
-define hidden arm_apcscc i32 @atexit(void ()* %func) nounwind {
+define hidden i32 @atexit(void ()* %func) nounwind {
entry:
; CHECK: atexit:
; CHECK: add r0, pc
@@ -14,8 +14,8 @@
store void ()* %func, void ()** %0, align 4
%1 = getelementptr %struct.one_atexit_routine* %r, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 0, i32* %1, align 4
- %2 = call arm_apcscc i32 @atexit_common(%struct.one_atexit_routine* %r, i8* bitcast ({ }* @__dso_handle to i8*)) nounwind ; <i32> [#uses=1]
+ %2 = call i32 @atexit_common(%struct.one_atexit_routine* %r, i8* bitcast ({ }* @__dso_handle to i8*)) nounwind ; <i32> [#uses=1]
ret i32 %2
}
-declare arm_apcscc i32 @atexit_common(%struct.one_atexit_routine*, i8*) nounwind
+declare i32 @atexit_common(%struct.one_atexit_routine*, i8*) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/sign_extend_inreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/sign_extend_inreg.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/sign_extend_inreg.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/sign_extend_inreg.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
target triple = "thumbv7-apple-darwin10"
-define arm_apcscc i32 @f1(i16* %ptr) nounwind {
+define i32 @f1(i16* %ptr) nounwind {
; CHECK-A8: f1
; CHECK-A8: sxth
; CHECK-M3: f1
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cbnz.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cbnz.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cbnz.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-cbnz.ll Fri Jul 2 04:34:51 2010
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
; rdar://7354379
-declare arm_apcscc double @floor(double) nounwind readnone
+declare double @floor(double) nounwind readnone
define void @t(i1 %a, double %b) {
entry:
@@ -23,7 +23,7 @@
; CHECK: cmp r0, #0
; CHECK-NEXT: cmp r0, #0
; CHECK-NEXT: cbnz
- %0 = tail call arm_apcscc double @floor(double %b) nounwind readnone ; <double> [#uses=0]
+ %0 = tail call double @floor(double %b) nounwind readnone ; <double> [#uses=0]
br label %bb11
bb11: ; preds = %bb9, %bb7
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-ifcvt2.ll Fri Jul 2 04:34:51 2010
@@ -31,7 +31,7 @@
; CHECK: CountTree:
; CHECK: it eq
; CHECK: cmpeq
-; CHECK: bne
+; CHECK: beq
; CHECK: itt eq
; CHECK: moveq
; CHECK: popeq
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-spill-q.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*) nounwind readonly
-define arm_apcscc void @aaa(%quuz* %this, i8* %block) {
+define void @aaa(%quuz* %this, i8* %block) {
; CHECK: aaa:
; CHECK: bic r4, r4, #15
; CHECK: vst1.64 {{.*}}[{{.*}}, :128]
Modified: llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-tbh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-tbh.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-tbh.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/Thumb2/thumb2-tbh.ll Fri Jul 2 04:34:51 2010
@@ -8,13 +8,13 @@
@.str31 = external constant [28 x i8], align 1 ; <[28 x i8]*> [#uses=1]
@_T_gtol = external global %struct._T_tstr* ; <%struct._T_tstr**> [#uses=2]
-declare arm_apcscc i32 @strlen(i8* nocapture) nounwind readonly
+declare i32 @strlen(i8* nocapture) nounwind readonly
-declare arm_apcscc void @Z_fatal(i8*) noreturn nounwind
+declare void @Z_fatal(i8*) noreturn nounwind
-declare arm_apcscc noalias i8* @calloc(i32, i32) nounwind
+declare noalias i8* @calloc(i32, i32) nounwind
-define arm_apcscc i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
+define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
; CHECK: main:
; CHECK: tbb
entry:
@@ -28,39 +28,39 @@
br label %bb40.i
bb7.i: ; preds = %bb42.i
- call arm_apcscc void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 0, i8* null) nounwind
+ call void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 0, i8* null) nounwind
unreachable
bb15.i: ; preds = %bb42.i
- call arm_apcscc void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 2, i8* null) nounwind
+ call void @_T_addtol(%struct._T_tstr** @_T_gtol, i32 2, i8* null) nounwind
unreachable
bb23.i: ; preds = %bb42.i
- %1 = call arm_apcscc i32 @strlen(i8* null) nounwind readonly ; <i32> [#uses=0]
+ %1 = call i32 @strlen(i8* null) nounwind readonly ; <i32> [#uses=0]
unreachable
bb33.i: ; preds = %bb42.i
store i32 0, i32* @_C_nextcmd, align 4
- %2 = call arm_apcscc noalias i8* @calloc(i32 21, i32 1) nounwind ; <i8*> [#uses=0]
+ %2 = call noalias i8* @calloc(i32 21, i32 1) nounwind ; <i8*> [#uses=0]
unreachable
bb34.i: ; preds = %bb42.i
%3 = load i32* @_C_nextcmd, align 4 ; <i32> [#uses=1]
%4 = add i32 %3, 1 ; <i32> [#uses=1]
store i32 %4, i32* @_C_nextcmd, align 4
- %5 = call arm_apcscc noalias i8* @calloc(i32 22, i32 1) nounwind ; <i8*> [#uses=0]
+ %5 = call noalias i8* @calloc(i32 22, i32 1) nounwind ; <i8*> [#uses=0]
unreachable
bb35.i: ; preds = %bb42.i
- %6 = call arm_apcscc noalias i8* @calloc(i32 20, i32 1) nounwind ; <i8*> [#uses=0]
+ %6 = call noalias i8* @calloc(i32 20, i32 1) nounwind ; <i8*> [#uses=0]
unreachable
bb37.i: ; preds = %bb42.i
- %7 = call arm_apcscc noalias i8* @calloc(i32 14, i32 1) nounwind ; <i8*> [#uses=0]
+ %7 = call noalias i8* @calloc(i32 14, i32 1) nounwind ; <i8*> [#uses=0]
unreachable
bb39.i: ; preds = %bb42.i
- call arm_apcscc void @Z_fatal(i8* getelementptr ([28 x i8]* @.str31, i32 0, i32 0)) nounwind
+ call void @Z_fatal(i8* getelementptr ([28 x i8]* @.str31, i32 0, i32 0)) nounwind
unreachable
bb40.i: ; preds = %bb42.i, %bb5.i, %bb1.i2
@@ -81,4 +81,4 @@
]
}
-declare arm_apcscc void @_T_addtol(%struct._T_tstr** nocapture, i32, i8*) nounwind
+declare void @_T_addtol(%struct._T_tstr** nocapture, i32, i8*) nounwind
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 -regalloc=local
; RUN: llc < %s -march=x86 -mattr=+sse2 -regalloc=fast
define void @SolveCubic(double %a, double %b, double %c, double %d, i32* %solutions, double* %x) {
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -regalloc=local -march=x86 -mattr=+mmx | grep esi
; RUN: llc < %s -regalloc=fast -march=x86 -mattr=+mmx | grep esi
; PR2082
; Local register allocator was refusing to use ESI, EDI, and EBP so it ran out of
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll Fri Jul 2 04:34:51 2010
@@ -1,5 +1,4 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim -O0 -regalloc=local
; RUN: llc < %s -mtriple=x86_64-apple-darwin -relocation-model=pic -disable-fp-elim -O0 -regalloc=fast
; PR5534
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-05-28-LocalRegAllocBug.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -regalloc=local
; RUN: llc < %s -mtriple=i386-apple-darwin -regalloc=fast
@_ZTVN10Evaluation10GridOutputILi3EEE = external constant [5 x i32 (...)*] ; <[5 x i32 (...)*]*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-17-inline-asm-1.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-17-inline-asm-1.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-17-inline-asm-1.ll Fri Jul 2 04:34:51 2010
@@ -1,5 +1,4 @@
; RUN: llc < %s -march=x86 | FileCheck %s
-; RUN: llc < %s -march=x86 -regalloc=local | FileCheck %s
; RUN: llc < %s -march=x86 -regalloc=fast | FileCheck %s
; %0 must not be put in EAX or EDX.
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-18-inline-asm-2.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-18-inline-asm-2.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2008-09-18-inline-asm-2.ll Fri Jul 2 04:34:51 2010
@@ -1,5 +1,4 @@
; RUN: llc < %s -march=x86 | grep "#%ebp %esi %edi 8(%edx) %eax (%ebx)"
-; RUN: llc < %s -march=x86 -regalloc=local | grep "#%edi %ebp %edx 8(%ebx) %eax (%esi)"
; RUN: llc < %s -march=x86 -regalloc=fast | grep "#%edi %ebp %edx 8(%ebx) %eax (%esi)"
; The 1st, 2nd, 3rd and 5th registers above must all be different. The registers
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-01-29-LocalRegAllocBug.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9.6 -regalloc=local -disable-fp-elim
; RUN: llc < %s -mtriple=i386-apple-darwin9.6 -regalloc=fast -disable-fp-elim
; rdar://6538384
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-14-IllegalRegs.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-14-IllegalRegs.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-14-IllegalRegs.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -O0 -regalloc=local | not grep sil
; RUN: llc < %s -mtriple=i386-apple-darwin -O0 -regalloc=fast | not grep sil
; rdar://6787136
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-24.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-24.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-24.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2009-04-24.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,4 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-linux-gnu -regalloc=local -relocation-model=pic > %t
-; RUN: grep {leal.*TLSGD.*___tls_get_addr} %t
-; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu -regalloc=local -relocation-model=pic > %t2
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-linux-gnu -regalloc=fast -relocation-model=pic > %t2
; RUN: grep {leaq.*TLSGD.*__tls_get_addr} %t2
; PR4004
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2010-03-17-ISelBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2010-03-17-ISelBug.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2010-03-17-ISelBug.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2010-03-17-ISelBug.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=i386-apple-darwin5
+
; rdar://7761790
%"struct..0$_485" = type { i16, i16, i32 }
@@ -37,3 +38,30 @@
%4 = add nsw i32 %index.6379, 1 ; <i32> [#uses=1]
br label %bb169
}
+
+; PR7368
+
+%struct.bufBit_s = type { i8*, i8 }
+
+define fastcc void @printSwipe([2 x [256 x %struct.bufBit_s]]* nocapture %colourLines) nounwind {
+entry:
+ br label %for.body190
+
+for.body261.i: ; preds = %for.body261.i, %for.body190
+ %line.3300.i = phi i32 [ undef, %for.body190 ], [ %add292.i, %for.body261.i ] ; <i32> [#uses=3]
+ %conv268.i = and i32 %line.3300.i, 255 ; <i32> [#uses=1]
+ %tmp278.i = getelementptr [2 x [256 x %struct.bufBit_s]]* %colourLines, i32 undef, i32 %pen.1100, i32 %conv268.i, i32 0 ; <i8**> [#uses=1]
+ store i8* undef, i8** %tmp278.i
+ %tmp338 = shl i32 %line.3300.i, 3 ; <i32> [#uses=1]
+ %tmp339 = and i32 %tmp338, 2040 ; <i32> [#uses=1]
+ %tmp285.i = getelementptr i8* %scevgep328, i32 %tmp339 ; <i8*> [#uses=1]
+ store i8 undef, i8* %tmp285.i
+ %add292.i = add nsw i32 0, %line.3300.i ; <i32> [#uses=1]
+ br i1 undef, label %for.body190, label %for.body261.i
+
+for.body190: ; preds = %for.body261.i, %for.body190, %bb.nph104
+ %pen.1100 = phi i32 [ 0, %entry ], [ %inc230, %for.body261.i ], [ %inc230, %for.body190 ] ; <i32> [#uses=3]
+ %scevgep328 = getelementptr [2 x [256 x %struct.bufBit_s]]* %colourLines, i32 undef, i32 %pen.1100, i32 0, i32 1 ; <i8*> [#uses=1]
+ %inc230 = add i32 %pen.1100, 1 ; <i32> [#uses=2]
+ br i1 undef, label %for.body190, label %for.body261.i
+}
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -O0 -regalloc=local -relocation-model=pic -disable-fp-elim | FileCheck %s
; RUN: llc < %s -O0 -regalloc=fast -relocation-model=pic -disable-fp-elim | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
target triple = "i386-apple-darwin10.0.0"
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN-XFAIL: llc < %s -O0 -regalloc=local | FileCheck %s
; RUN: llc < %s -O0 -regalloc=fast | FileCheck %s
; PR6520
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-06-LocalInlineAsmClobber.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-06-LocalInlineAsmClobber.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-06-LocalInlineAsmClobber.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/2010-05-06-LocalInlineAsmClobber.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc -regalloc=local %s -o %t
; RUN: llc -regalloc=fast %s -o %t
; PR7066
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/MachineSink-CritEdge.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/MachineSink-CritEdge.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/MachineSink-CritEdge.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/MachineSink-CritEdge.ll Fri Jul 2 04:34:51 2010
@@ -1,10 +1,4 @@
; RUN: llc < %s | FileCheck %s
-; XFAIL: *
-;
-; See <rdar://problem/8030636>. This test isn't valid after we made machine
-; sinking more conservative about sinking instructions that define a preg into a
-; block when we don't know if the preg is killed within the current block.
-
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/fp-stack-O0-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/fp-stack-O0-crash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/fp-stack-O0-crash.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/fp-stack-O0-crash.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc %s -O0 -fast-isel -regalloc=local -o -
; RUN: llc %s -O0 -fast-isel -regalloc=fast -o -
; PR4767
Removed: llvm/branches/wendling/eh/test/CodeGen/X86/hidden-vis-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/hidden-vis-5.ll?rev=107463&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/hidden-vis-5.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/hidden-vis-5.ll (removed)
@@ -1,30 +0,0 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin9 -relocation-model=pic -disable-fp-elim -unwind-tables | FileCheck %s
-; <rdar://problem/7383328>
-
- at .str = private constant [12 x i8] c"hello world\00", align 1 ; <[12 x i8]*> [#uses=1]
-
-define hidden void @func() nounwind ssp {
-entry:
- %0 = call i32 @puts(i8* getelementptr inbounds ([12 x i8]* @.str, i64 0, i64 0)) nounwind ; <i32> [#uses=0]
- br label %return
-
-return: ; preds = %entry
- ret void
-}
-
-declare i32 @puts(i8*)
-
-define hidden i32 @main() nounwind ssp {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=1]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- call void @func() nounwind
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
-
-; CHECK: .private_extern _func.eh
-; CHECK: .private_extern _main.eh
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/liveness-local-regalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/liveness-local-regalloc.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/liveness-local-regalloc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/liveness-local-regalloc.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,3 @@
-; RUN: llc < %s -O3 -regalloc=local -mtriple=x86_64-apple-darwin10
; RUN: llc < %s -O3 -regalloc=fast -mtriple=x86_64-apple-darwin10
; <rdar://problem/7755473>
Removed: llvm/branches/wendling/eh/test/CodeGen/X86/local-liveness.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/local-liveness.ll?rev=107463&view=auto
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/local-liveness.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/local-liveness.ll (removed)
@@ -1,31 +0,0 @@
-; RUN: llc < %s -march=x86 -regalloc=local | grep {subl %eax, %edx}
-
-; Local regalloc shouldn't assume that both the uses of the
-; sub instruction are kills, because one of them is tied
-; to an output. Previously, it was allocating both inputs
-; in the same register.
-
-define i32 @func_3() nounwind {
-entry:
- %retval = alloca i32 ; <i32*> [#uses=2]
- %g_323 = alloca i8 ; <i8*> [#uses=2]
- %p_5 = alloca i64, align 8 ; <i64*> [#uses=2]
- %0 = alloca i32 ; <i32*> [#uses=2]
- %"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- store i64 0, i64* %p_5, align 8
- store i8 1, i8* %g_323, align 1
- %1 = load i8* %g_323, align 1 ; <i8> [#uses=1]
- %2 = sext i8 %1 to i64 ; <i64> [#uses=1]
- %3 = load i64* %p_5, align 8 ; <i64> [#uses=1]
- %4 = sub i64 %3, %2 ; <i64> [#uses=1]
- %5 = icmp sge i64 %4, 0 ; <i1> [#uses=1]
- %6 = zext i1 %5 to i32 ; <i32> [#uses=1]
- store i32 %6, i32* %0, align 4
- %7 = load i32* %0, align 4 ; <i32> [#uses=1]
- store i32 %7, i32* %retval, align 4
- br label %return
-
-return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
- ret i32 %retval1
-}
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/phys-reg-local-regalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/phys-reg-local-regalloc.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/phys-reg-local-regalloc.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/phys-reg-local-regalloc.ll Fri Jul 2 04:34:51 2010
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=x86 -mtriple=i386-apple-darwin9 -regalloc=local | FileCheck %s
-; RUN: llc -O0 < %s -march=x86 -mtriple=i386-apple-darwin9 -regalloc=local | FileCheck %s
+; RUN: llc < %s -march=x86 -mtriple=i386-apple-darwin9 -regalloc=fast | FileCheck %s
+; RUN: llc -O0 < %s -march=x86 -mtriple=i386-apple-darwin9 -regalloc=fast | FileCheck %s
; CHECKed instructions should be the same with or without -O0.
@.str = private constant [12 x i8] c"x + y = %i\0A\00", align 1 ; <[12 x i8]*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/shift-folding.ll Fri Jul 2 04:34:51 2010
@@ -21,3 +21,8 @@
ret i32* %P2
}
+define fastcc i32 @test4(i32* %d) nounwind {
+ %tmp4 = load i32* %d
+ %tmp512 = lshr i32 %tmp4, 24
+ ret i32 %tmp512
+}
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/store-narrow.ll Fri Jul 2 04:34:51 2010
@@ -67,7 +67,7 @@
; X64: movw %si, 2(%rdi)
; X32: test4:
-; X32: movzwl 8(%esp), %eax
+; X32: movl 8(%esp), %eax
; X32: movw %ax, 2(%{{.*}})
}
Modified: llvm/branches/wendling/eh/test/CodeGen/X86/vec-trunc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/CodeGen/X86/vec-trunc-store.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/CodeGen/X86/vec-trunc-store.ll (original)
+++ llvm/branches/wendling/eh/test/CodeGen/X86/vec-trunc-store.ll Fri Jul 2 04:34:51 2010
@@ -1,13 +1,15 @@
-; RUN: llc < %s -march=x86-64 -disable-mmx | grep punpcklwd | count 2
+; RUN: llc < %s -march=x86-64 -disable-mmx >/dev/null
-define void @foo() nounwind {
- %cti69 = trunc <8 x i32> undef to <8 x i16> ; <<8 x i16>> [#uses=1]
+define void @foo(<8 x i32>* %p) nounwind {
+ %t = load <8 x i32>* %p
+ %cti69 = trunc <8 x i32> %t to <8 x i16> ; <<8 x i16>> [#uses=1]
store <8 x i16> %cti69, <8 x i16>* undef
ret void
}
-define void @bar() nounwind {
- %cti44 = trunc <4 x i32> undef to <4 x i16> ; <<4 x i16>> [#uses=1]
+define void @bar(<4 x i32>* %p) nounwind {
+ %t = load <4 x i32>* %p
+ %cti44 = trunc <4 x i32> %t to <4 x i16> ; <<4 x i16>> [#uses=1]
store <4 x i16> %cti44, <4 x i16>* undef
ret void
}
Modified: llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll (original)
+++ llvm/branches/wendling/eh/test/DebugInfo/2010-06-01-DeadArg-DbgInfo.ll Fri Jul 2 04:34:51 2010
@@ -1,11 +1,13 @@
-; RUN: llc -O2 < %s | grep this | grep -v undef
+; RUN: llc -O2 < %s | FileCheck %s
; Test to check that unused argument 'this' is not undefined in debug info.
+target triple = "x86_64-apple-darwin10.2"
%struct.foo = type { i32 }
@llvm.used = appending global [1 x i8*] [i8* bitcast (i32 (%struct.foo*, i32)* @_ZN3foo3bazEi to i8*)], section "llvm.metadata" ; <[1 x i8*]*> [#uses=0]
define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) nounwind readnone optsize noinline ssp align 2 {
+;CHECK: DEBUG_VALUE: baz:this <- RDI+0
entry:
tail call void @llvm.dbg.value(metadata !{%struct.foo* %this}, i64 0, metadata !15)
tail call void @llvm.dbg.value(metadata !{i32 %x}, i64 0, metadata !16)
Modified: llvm/branches/wendling/eh/test/FrontendC++/2010-02-17-DbgArtificialArg.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC%2B%2B/2010-02-17-DbgArtificialArg.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC++/2010-02-17-DbgArtificialArg.cpp (original)
+++ llvm/branches/wendling/eh/test/FrontendC++/2010-02-17-DbgArtificialArg.cpp Fri Jul 2 04:34:51 2010
@@ -1,4 +1,4 @@
-// RUN: %llvmgcc -g -S %s -o - | grep DW_TAG_pointer_type | grep "i32 524303, metadata .., metadata ..., metadata .., i32 ., i64 .., i64 .., i64 0, i32 64, metadata ..."
+// RUN: %llvmgcc -g -S %s -o - | FileCheck %s
// Here, the second-to-last argument "i32 64" indicates that the artificial attribute is set.
// Test that the artificial attribute is attached to the "this" pointer type.
// Radar 7655792 and 7655002
@@ -10,5 +10,7 @@
int foo() {
A a;
+ // Matching "i32 64, metadata !<number>} ; [ DW_TAG_pointer_type ]"
+ // CHECK: i32 64, metadata {{![0-9]+\} ; \[ DW_TAG_pointer_type \]}}
return a.fn1(1);
}
Modified: llvm/branches/wendling/eh/test/FrontendC/pr5406.c
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/FrontendC/pr5406.c?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/FrontendC/pr5406.c (original)
+++ llvm/branches/wendling/eh/test/FrontendC/pr5406.c Fri Jul 2 04:34:51 2010
@@ -8,7 +8,7 @@
void foo (int i, ...);
-// CHECK: call arm_aapcscc void (i32, ...)* @foo(i32 1, i32 {{.*}}) nounwind
+// CHECK: call void (i32, ...)* @foo(i32 1, i32 {{.*}}) nounwind
int main (void)
{
A0 a3;
Modified: llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s (original)
+++ llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_32-encoding.s Fri Jul 2 04:34:51 2010
@@ -10084,3 +10084,131 @@
// CHECK: vdivsd %xmm4, %xmm6, %xmm2
// CHECK: encoding: [0xc5,0xcb,0x5e,0xd4]
vdivsd %xmm4, %xmm6, %xmm2
+
+// CHECK: vaddss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xea,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vaddss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vsubss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xea,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vsubss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vmulss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xea,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vmulss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vdivss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xea,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vdivss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vaddsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xeb,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vaddsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vsubsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xeb,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vsubsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vmulsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xeb,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vmulsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vdivsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xeb,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vdivsd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vaddps %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc8,0x58,0xd4]
+ vaddps %xmm4, %xmm6, %xmm2
+
+// CHECK: vsubps %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc8,0x5c,0xd4]
+ vsubps %xmm4, %xmm6, %xmm2
+
+// CHECK: vmulps %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc8,0x59,0xd4]
+ vmulps %xmm4, %xmm6, %xmm2
+
+// CHECK: vdivps %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc8,0x5e,0xd4]
+ vdivps %xmm4, %xmm6, %xmm2
+
+// CHECK: vaddpd %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc9,0x58,0xd4]
+ vaddpd %xmm4, %xmm6, %xmm2
+
+// CHECK: vsubpd %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc9,0x5c,0xd4]
+ vsubpd %xmm4, %xmm6, %xmm2
+
+// CHECK: vmulpd %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc9,0x59,0xd4]
+ vmulpd %xmm4, %xmm6, %xmm2
+
+// CHECK: vdivpd %xmm4, %xmm6, %xmm2
+// CHECK: encoding: [0xc5,0xc9,0x5e,0xd4]
+ vdivpd %xmm4, %xmm6, %xmm2
+
+// CHECK: vaddps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe8,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vaddps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vsubps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe8,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vsubps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vmulps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe8,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vmulps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vdivps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe8,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vdivps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vaddpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe9,0x58,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vaddpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vsubpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe9,0x5c,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vsubpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vmulpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe9,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vmulpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vdivpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xe9,0x5e,0xac,0xcb,0xef,0xbe,0xad,0xde]
+ vdivpd 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: vmaxss %xmm2, %xmm4, %xmm6
+// CHECK: encoding: [0xc5,0xda,0x5f,0xf2]
+ vmaxss %xmm2, %xmm4, %xmm6
+
+// CHECK: vmaxsd %xmm2, %xmm4, %xmm6
+// CHECK: encoding: [0xc5,0xdb,0x5f,0xf2]
+ vmaxsd %xmm2, %xmm4, %xmm6
+
+// CHECK: vminss %xmm2, %xmm4, %xmm6
+// CHECK: encoding: [0xc5,0xda,0x5d,0xf2]
+ vminss %xmm2, %xmm4, %xmm6
+
+// CHECK: vminsd %xmm2, %xmm4, %xmm6
+// CHECK: encoding: [0xc5,0xdb,0x5d,0xf2]
+ vminsd %xmm2, %xmm4, %xmm6
+
+// CHECK: vmaxss -4(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xea,0x5f,0x6c,0xcb,0xfc]
+ vmaxss -4(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vmaxsd -4(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xeb,0x5f,0x6c,0xcb,0xfc]
+ vmaxsd -4(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vminss -4(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xea,0x5d,0x6c,0xcb,0xfc]
+ vminss -4(%ebx,%ecx,8), %xmm2, %xmm5
+
+// CHECK: vminsd -4(%ebx,%ecx,8), %xmm2, %xmm5
+// CHECK: encoding: [0xc5,0xeb,0x5d,0x6c,0xcb,0xfc]
+ vminsd -4(%ebx,%ecx,8), %xmm2, %xmm5
+
Modified: llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s (original)
+++ llvm/branches/wendling/eh/test/MC/AsmParser/X86/x86_64-encoding.s Fri Jul 2 04:34:51 2010
@@ -135,3 +135,132 @@
// CHECK: vdivsd %xmm8, %xmm9, %xmm10
// CHECK: encoding: [0xc4,0x41,0x33,0x5e,0xd0]
vdivsd %xmm8, %xmm9, %xmm10
+
+// CHECK: vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2a,0x58,0x5c,0xd9,0xfc]
+vaddss -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2a,0x5c,0x5c,0xd9,0xfc]
+vsubss -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2a,0x59,0x5c,0xd9,0xfc]
+vmulss -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2a,0x5e,0x5c,0xd9,0xfc]
+vdivss -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2b,0x58,0x5c,0xd9,0xfc]
+vaddsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vsubsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2b,0x5c,0x5c,0xd9,0xfc]
+vsubsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vmulsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2b,0x59,0x5c,0xd9,0xfc]
+vmulsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vdivsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x2b,0x5e,0x5c,0xd9,0xfc]
+vdivsd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vaddps %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x20,0x58,0xfa]
+vaddps %xmm10, %xmm11, %xmm15
+
+// CHECK: vsubps %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x20,0x5c,0xfa]
+vsubps %xmm10, %xmm11, %xmm15
+
+// CHECK: vmulps %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x20,0x59,0xfa]
+vmulps %xmm10, %xmm11, %xmm15
+
+// CHECK: vdivps %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x20,0x5e,0xfa]
+vdivps %xmm10, %xmm11, %xmm15
+
+// CHECK: vaddpd %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x21,0x58,0xfa]
+vaddpd %xmm10, %xmm11, %xmm15
+
+// CHECK: vsubpd %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x21,0x5c,0xfa]
+vsubpd %xmm10, %xmm11, %xmm15
+
+// CHECK: vmulpd %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x21,0x59,0xfa]
+vmulpd %xmm10, %xmm11, %xmm15
+
+// CHECK: vdivpd %xmm10, %xmm11, %xmm15
+// CHECK: encoding: [0xc4,0x41,0x21,0x5e,0xfa]
+vdivpd %xmm10, %xmm11, %xmm15
+
+// CHECK: vaddps -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x28,0x58,0x5c,0xd9,0xfc]
+vaddps -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vsubps -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x28,0x5c,0x5c,0xd9,0xfc]
+vsubps -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vmulps -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x28,0x59,0x5c,0xd9,0xfc]
+vmulps -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vdivps -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x28,0x5e,0x5c,0xd9,0xfc]
+vdivps -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vaddpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x29,0x58,0x5c,0xd9,0xfc]
+vaddpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vsubpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x29,0x5c,0x5c,0xd9,0xfc]
+vsubpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vmulpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x29,0x59,0x5c,0xd9,0xfc]
+vmulpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+// CHECK: encoding: [0xc5,0x29,0x5e,0x5c,0xd9,0xfc]
+vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
+
+// CHECK: vmaxss %xmm10, %xmm14, %xmm12
+// CHECK: encoding: [0xc4,0x41,0x0a,0x5f,0xe2]
+ vmaxss %xmm10, %xmm14, %xmm12
+
+// CHECK: vmaxsd %xmm10, %xmm14, %xmm12
+// CHECK: encoding: [0xc4,0x41,0x0b,0x5f,0xe2]
+ vmaxsd %xmm10, %xmm14, %xmm12
+
+// CHECK: vminss %xmm10, %xmm14, %xmm12
+// CHECK: encoding: [0xc4,0x41,0x0a,0x5d,0xe2]
+ vminss %xmm10, %xmm14, %xmm12
+
+// CHECK: vminsd %xmm10, %xmm14, %xmm12
+// CHECK: encoding: [0xc4,0x41,0x0b,0x5d,0xe2]
+ vminsd %xmm10, %xmm14, %xmm12
+
+// CHECK: vmaxss -4(%rbx,%rcx,8), %xmm12, %xmm10
+// CHECK: encoding: [0xc5,0x1a,0x5f,0x54,0xcb,0xfc]
+ vmaxss -4(%rbx,%rcx,8), %xmm12, %xmm10
+
+// CHECK: vmaxsd -4(%rbx,%rcx,8), %xmm12, %xmm10
+// CHECK: encoding: [0xc5,0x1b,0x5f,0x54,0xcb,0xfc]
+ vmaxsd -4(%rbx,%rcx,8), %xmm12, %xmm10
+
+// CHECK: vminss -4(%rbx,%rcx,8), %xmm12, %xmm10
+// CHECK: encoding: [0xc5,0x1a,0x5d,0x54,0xcb,0xfc]
+ vminss -4(%rbx,%rcx,8), %xmm12, %xmm10
+
+// CHECK: vminsd -4(%rbx,%rcx,8), %xmm12, %xmm10
+// CHECK: encoding: [0xc5,0x1b,0x5d,0x54,0xcb,0xfc]
+ vminsd -4(%rbx,%rcx,8), %xmm12, %xmm10
+
Modified: llvm/branches/wendling/eh/test/Other/2010-05-06-Printer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Other/2010-05-06-Printer.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Other/2010-05-06-Printer.ll (original)
+++ llvm/branches/wendling/eh/test/Other/2010-05-06-Printer.ll Fri Jul 2 04:34:51 2010
@@ -1,4 +1,4 @@
-; RUN: llc -O2 -print-after-all < %s 2>&1
+; RUN: llc -O2 -print-after-all < %s 2>/dev/null
define void @tester(){
ret void
Modified: llvm/branches/wendling/eh/test/Transforms/GVN/load-pre-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/GVN/load-pre-align.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/GVN/load-pre-align.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/GVN/load-pre-align.ll Fri Jul 2 04:34:51 2010
@@ -4,7 +4,7 @@
@p = external global i32
-define arm_apcscc i32 @test(i32 %n) nounwind {
+define i32 @test(i32 %n) nounwind {
; CHECK: @test
entry:
br label %for.cond
Modified: llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/indirectbr.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/indirectbr.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/indirectbr.ll Fri Jul 2 04:34:51 2010
@@ -1,6 +1,6 @@
; RUN: opt < %s -indvars -S -disable-output
-; PR5758
+; PR5758
define zeroext i1 @foo() nounwind {
entry:
indirectbr i8* undef, [label %"202", label %"133"]
@@ -20,3 +20,20 @@
"202": ; preds = %entry
ret i1 false
}
+
+; PR7333
+define void @__atomvec_module__put_vrml_bonds() nounwind {
+bb7.preheader: ; preds = %entry
+ indirectbr i8* undef, [label %bb14, label %bb16]
+
+bb14: ; preds = %bb14, %bb7.preheader
+ br label %bb16
+
+bb16: ; preds = %bb16, %bb14, %bb7.preheader
+ %S.31.0 = phi i64 [ %3, %bb16 ], [ 1, %bb7.preheader ], [ 1, %bb14 ] ; <i64> [#uses=2]
+ %0 = add nsw i64 %S.31.0, -1 ; <i64> [#uses=1]
+ %1 = getelementptr inbounds [3 x double]* undef, i64 0, i64 %0 ; <double*> [#uses=1]
+ %2 = load double* %1, align 8 ; <double> [#uses=0]
+ %3 = add nsw i64 %S.31.0, 1 ; <i64> [#uses=1]
+ br label %bb16
+}
Modified: llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/single-element-range.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/single-element-range.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/single-element-range.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/IndVarSimplify/single-element-range.ll Fri Jul 2 04:34:51 2010
@@ -3,7 +3,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:64"
target triple = "armv6-apple-darwin10"
-define arm_apcscc void @sqlite3_free_table(i8** %azResult) nounwind {
+define void @sqlite3_free_table(i8** %azResult) nounwind {
entry:
br i1 undef, label %return, label %bb
Modified: llvm/branches/wendling/eh/test/Transforms/InstCombine/call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/InstCombine/call.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/InstCombine/call.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/InstCombine/call.ll Fri Jul 2 04:34:51 2010
@@ -100,7 +100,7 @@
declare void @test8a()
define i8* @test8() {
- invoke arm_apcscc void @test8a()
+ invoke void @test8a()
to label %invoke.cont unwind label %try.handler
invoke.cont: ; preds = %entry
@@ -114,5 +114,5 @@
; calling conv, but the implementation of test8a may actually end up using the
; right calling conv.
; CHECK: @test8() {
-; CHECK-NEXT: invoke arm_apcscc void @test8a()
+; CHECK-NEXT: invoke void @test8a()
Modified: llvm/branches/wendling/eh/test/Transforms/InstCombine/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/InstCombine/crash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/InstCombine/crash.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/InstCombine/crash.ll Fri Jul 2 04:34:51 2010
@@ -127,11 +127,11 @@
}
; PR5471
-define arm_apcscc i32 @test5a() {
+define i32 @test5a() {
ret i32 0
}
-define arm_apcscc void @test5() {
+define void @test5() {
store i1 true, i1* undef
%1 = invoke i32 @test5a() to label %exit unwind label %exit
exit:
@@ -212,7 +212,7 @@
entry:
store i1 true, i1* undef
store i1 true, i1* undef
- invoke arm_apcscc void @test10a()
+ invoke void @test10a()
to label %invoke.cont unwind label %try.handler ; <i8*> [#uses=0]
invoke.cont: ; preds = %entry
Modified: llvm/branches/wendling/eh/test/Transforms/JumpThreading/crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/JumpThreading/crash.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/JumpThreading/crash.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/JumpThreading/crash.ll Fri Jul 2 04:34:51 2010
@@ -341,3 +341,25 @@
ret void
}
+
+
+; PR7356
+define i32 @test13(i32* %P, i8* %Ptr) {
+entry:
+ indirectbr i8* %Ptr, [label %BrBlock, label %B2]
+
+B2:
+ store i32 4, i32 *%P
+ br label %BrBlock
+
+BrBlock:
+ %L = load i32* %P
+ %C = icmp eq i32 %L, 42
+ br i1 %C, label %T, label %F
+
+T:
+ ret i32 123
+F:
+ ret i32 1422
+}
+
Modified: llvm/branches/wendling/eh/test/Transforms/LoopUnswitch/preserve-analyses.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/LoopUnswitch/preserve-analyses.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/LoopUnswitch/preserve-analyses.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/LoopUnswitch/preserve-analyses.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
@delim1 = external global i32 ; <i32*> [#uses=1]
@delim2 = external global i32 ; <i32*> [#uses=1]
-define arm_apcscc i32 @ineqn(i8* %s, i8* %p) nounwind readonly {
+define i32 @ineqn(i8* %s, i8* %p) nounwind readonly {
entry:
%0 = load i32* @delim1, align 4 ; <i32> [#uses=1]
%1 = load i32* @delim2, align 4 ; <i32> [#uses=1]
Modified: llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
%struct.int16x8x2_t = type { [2 x %struct.int16x8_t] }
%union..0anon = type { %struct.int16x8x2_t }
-define arm_apcscc void @test(<8 x i16> %tmp.0, %struct.int16x8x2_t* %dst) nounwind {
+define void @test(<8 x i16> %tmp.0, %struct.int16x8x2_t* %dst) nounwind {
; CHECK: @test
; CHECK-NOT: alloca
; CHECK: "alloca point"
@@ -68,7 +68,7 @@
; Radar 7466574
%struct._NSRange = type { i64 }
-define arm_apcscc void @test_memcpy_self() nounwind {
+define void @test_memcpy_self() nounwind {
; CHECK: @test_memcpy_self
; CHECK-NOT: alloca
; CHECK: br i1
Modified: llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2010-01-18-SelfCopy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2010-01-18-SelfCopy.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2010-01-18-SelfCopy.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/ScalarRepl/2010-01-18-SelfCopy.ll Fri Jul 2 04:34:51 2010
@@ -5,7 +5,7 @@
%struct.test = type { [3 x double ] }
-define arm_apcscc void @test_memcpy_self() nounwind {
+define void @test_memcpy_self() nounwind {
; CHECK: @test_memcpy_self
; CHECK-NOT: alloca
; CHECK: ret void
Modified: llvm/branches/wendling/eh/test/Transforms/ScalarRepl/memcpy-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/ScalarRepl/memcpy-align.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/ScalarRepl/memcpy-align.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/ScalarRepl/memcpy-align.ll Fri Jul 2 04:34:51 2010
@@ -9,7 +9,7 @@
@c = external global %0 ; <%0*> [#uses=1]
-define arm_aapcscc void @good() nounwind {
+define void @good() nounwind {
entry:
%x0 = alloca %struct.anon, align 4 ; <%struct.anon*> [#uses=2]
%tmp = bitcast %struct.anon* %x0 to i8* ; <i8*> [#uses=1]
Modified: llvm/branches/wendling/eh/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll Fri Jul 2 04:34:51 2010
@@ -31,3 +31,14 @@
; CHECK: call void @llvm.trap
; CHECK: unreachable
}
+
+; PR7369
+define void @test3() nounwind {
+entry:
+ volatile store i32 4, i32* null
+ ret void
+
+; CHECK: @test3
+; CHECK: volatile store i32 4, i32* null
+; CHECK: ret
+}
Modified: llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrNCmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrNCmp.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrNCmp.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrNCmp.ll Fri Jul 2 04:34:51 2010
@@ -2,6 +2,9 @@
; RUN: opt < %s -simplify-libcalls -S | \
; RUN: not grep {call.*strncmp}
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
+target triple = "i386-apple-darwin9.0"
+
@hello = constant [6 x i8] c"hello\00" ; <[6 x i8]*> [#uses=1]
@hell = constant [5 x i8] c"hell\00" ; <[5 x i8]*> [#uses=1]
@null = constant [1 x i8] zeroinitializer ; <[1 x i8]*> [#uses=1]
@@ -26,3 +29,7 @@
ret i32 %rslt4
}
+define i32 @test1(i8* %P, i8* %Q) {
+ %cmp = call i32 @strncmp(i8* %P, i8* %Q, i32 1)
+ ret i32 %cmp
+}
Modified: llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrStr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrStr.ll?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrStr.ll (original)
+++ llvm/branches/wendling/eh/test/Transforms/SimplifyLibCalls/StrStr.ll Fri Jul 2 04:34:51 2010
@@ -46,3 +46,15 @@
; CHECK: @test4
; CHECK: ret i8* %P
}
+
+define i1 @test5(i8* %P, i8* %Q) nounwind readonly {
+entry:
+ %call = tail call i8* @strstr(i8* %P, i8* %Q) nounwind ; <i8*> [#uses=1]
+ %cmp = icmp eq i8* %call, %P
+ ret i1 %cmp
+; CHECK: @test5
+; CHECK: [[LEN:%[a-z]+]] = call {{i[0-9]+}} @strlen(i8* %Q)
+; CHECK: [[NCMP:%[a-z]+]] = call {{i[0-9]+}} @strncmp(i8* %P, i8* %Q, {{i[0-9]+}} [[LEN]])
+; CHECK: icmp eq {{i[0-9]+}} [[NCMP]], 0
+; CHECK: ret i1
+}
Modified: llvm/branches/wendling/eh/test/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/lit.cfg?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/lit.cfg (original)
+++ llvm/branches/wendling/eh/test/lit.cfg Fri Jul 2 04:34:51 2010
@@ -48,6 +48,13 @@
# Propogate 'HOME' through the environment.
config.environment['HOME'] = os.environ['HOME']
+# Propagate LLVM_SRC_ROOT into the environment.
+config.environment['LLVM_SRC_ROOT'] = config.llvm_src_root
+
+# Propagate PYTHON_EXECUTABLE into the environment.
+config.environment['PYTHON_EXECUTABLE'] = getattr(config, 'python_executable',
+ '')
+
###
import os
Modified: llvm/branches/wendling/eh/test/lit.site.cfg.in
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/test/lit.site.cfg.in?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/test/lit.site.cfg.in (original)
+++ llvm/branches/wendling/eh/test/lit.site.cfg.in Fri Jul 2 04:34:51 2010
@@ -4,6 +4,7 @@
config.llvm_obj_root = "@LLVM_BINARY_DIR@"
config.llvm_tools_dir = "@LLVM_TOOLS_DIR@"
config.llvmgcc_dir = "@LLVMGCCDIR@"
+config.python_executable = "@PYTHON_EXECUTABLE@"
# Let the main config do the real work.
lit.load_config(config, "@LLVM_SOURCE_DIR@/test/lit.cfg")
Modified: llvm/branches/wendling/eh/tools/gold/gold-plugin.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/tools/gold/gold-plugin.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/tools/gold/gold-plugin.cpp (original)
+++ llvm/branches/wendling/eh/tools/gold/gold-plugin.cpp Fri Jul 2 04:34:51 2010
@@ -41,6 +41,7 @@
ld_plugin_add_symbols add_symbols = NULL;
ld_plugin_get_symbols get_symbols = NULL;
ld_plugin_add_input_file add_input_file = NULL;
+ ld_plugin_add_input_library add_input_library = NULL;
ld_plugin_message message = discard_message;
int api_version = 0;
@@ -64,6 +65,7 @@
static generate_bc generate_bc_file = BC_NO;
static std::string bc_path;
static std::string as_path;
+ static std::vector<std::string> pass_through;
// Additional options to pass into the code generator.
// Note: This array will contain all plugin options which are not claimed
// as plugin exclusive to pass to the code generator.
@@ -86,6 +88,9 @@
} else {
as_path = opt.substr(strlen("as="));
}
+ } else if (opt.startswith("pass-through=")) {
+ llvm::StringRef item = opt.substr(strlen("pass-through="));
+ pass_through.push_back(item.str());
} else if (opt == "emit-llvm") {
generate_bc_file = BC_ONLY;
} else if (opt == "also-emit-llvm") {
@@ -190,6 +195,9 @@
case LDPT_ADD_INPUT_FILE:
add_input_file = tv->tv_u.tv_add_input_file;
break;
+ case LDPT_ADD_INPUT_LIBRARY:
+ add_input_library = tv->tv_u.tv_add_input_file;
+ break;
case LDPT_MESSAGE:
message = tv->tv_u.tv_message;
break;
@@ -368,15 +376,15 @@
api_file << I->syms[i].name << "\n";
}
}
+ }
- if (options::generate_api_file)
- api_file.close();
+ if (options::generate_api_file)
+ api_file.close();
- if (!anySymbolsPreserved) {
- // This entire file is unnecessary!
- lto_codegen_dispose(cg);
- return LDPS_OK;
- }
+ if (!anySymbolsPreserved) {
+ // All of the IL is unnecessary!
+ lto_codegen_dispose(cg);
+ return LDPS_OK;
}
lto_codegen_set_pic_model(cg, output_type);
@@ -437,6 +445,24 @@
return LDPS_ERR;
}
+ for (std::vector<std::string>::iterator i = options::pass_through.begin(),
+ e = options::pass_through.end();
+ i != e; ++i) {
+ std::string &item = *i;
+ char *item_p = const_cast<char*>(item.c_str());
+ if (llvm::StringRef(item).startswith("-l")) {
+ if (add_input_library(item_p + 2) != LDPS_OK) {
+ (*message)(LDPL_ERROR, "Unable to add library to the link.");
+ return LDPS_ERR;
+ }
+ } else {
+ if (add_input_file(item_p) != LDPS_OK) {
+ (*message)(LDPL_ERROR, "Unable to add .o file to the link.");
+ return LDPS_ERR;
+ }
+ }
+ }
+
Cleanup.push_back(uniqueObjPath);
return LDPS_OK;
Modified: llvm/branches/wendling/eh/tools/llvm-mc/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/tools/llvm-mc/Makefile?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/tools/llvm-mc/Makefile (original)
+++ llvm/branches/wendling/eh/tools/llvm-mc/Makefile Fri Jul 2 04:34:51 2010
@@ -12,7 +12,6 @@
# This tool has no plugins, optimize startup time.
TOOL_NO_EXPORTS = 1
-NO_INSTALL = 1
# Include this here so we can get the configuration of the targets
# that have been configured for construction. We have to do this
Modified: llvm/branches/wendling/eh/utils/TableGen/ARMDecoderEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/ARMDecoderEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/ARMDecoderEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/ARMDecoderEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -1579,7 +1579,8 @@
if (Name == "TCRETURNdi" || Name == "TCRETURNdiND" ||
Name == "TCRETURNri" || Name == "TCRETURNriND" ||
Name == "TAILJMPd" || Name == "TAILJMPdND" ||
- Name == "TAILJMPr" || Name == "TAILJMPrND")
+ Name == "TAILJMPr" || Name == "TAILJMPrND" ||
+ Name == "MOVr_TC")
return false;
// VLDMQ/VSTMQ can be handled with the more generic VLDMD/VSTMD.
Modified: llvm/branches/wendling/eh/utils/TableGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/CMakeLists.txt?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/CMakeLists.txt (original)
+++ llvm/branches/wendling/eh/utils/TableGen/CMakeLists.txt Fri Jul 2 04:34:51 2010
@@ -5,6 +5,7 @@
AsmWriterInst.cpp
CallingConvEmitter.cpp
ClangASTNodesEmitter.cpp
+ ClangAttrEmitter.cpp
ClangDiagnosticsEmitter.cpp
CodeEmitterGen.cpp
CodeGenDAGPatterns.cpp
Modified: llvm/branches/wendling/eh/utils/TableGen/DAGISelMatcherEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/DAGISelMatcherEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/DAGISelMatcherEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/DAGISelMatcherEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -635,6 +635,7 @@
if (!ComplexPatterns.empty()) {
OS << "bool CheckComplexPattern(SDNode *Root, SDValue N,\n";
OS << " unsigned PatternNo, SmallVectorImpl<SDValue> &Result) {\n";
+ OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
OS << " default: assert(0 && \"Invalid pattern # in table?\");\n";
for (unsigned i = 0, e = ComplexPatterns.size(); i != e; ++i) {
@@ -645,12 +646,12 @@
++NumOps; // Get the chained node too.
OS << " case " << i << ":\n";
- OS << " Result.resize(Result.size()+" << NumOps << ");\n";
+ OS << " Result.resize(NextRes+" << NumOps << ");\n";
OS << " return " << P.getSelectFunc();
OS << "(Root, N";
for (unsigned i = 0; i != NumOps; ++i)
- OS << ", Result[Result.size()-" << (NumOps-i) << ']';
+ OS << ", Result[NextRes+" << i << ']';
OS << ");\n";
}
OS << " }\n";
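
For illustration, a standalone sketch (not the TableGen-generated matcher itself) of the indexing convention the hunk above switches to: the base index into Result is captured once as NextRes before the resize, so each new operand slot is addressed as NextRes + i instead of being re-derived from Result.size() at every use.

#include <cassert>
#include <vector>

// Illustrative stand-in for the generated CheckComplexPattern body: Result
// may already hold values recorded by earlier checks, so this pattern's slots
// are addressed relative to the size captured before the resize.
static void fillPatternResults(std::vector<int> &Result, unsigned NumOps) {
  unsigned NextRes = Result.size();      // base index for this pattern
  Result.resize(NextRes + NumOps);
  for (unsigned i = 0; i != NumOps; ++i)
    Result[NextRes + i] = int(i + 1);    // was Result[Result.size()-(NumOps-i)]
}

int main() {
  std::vector<int> Results = {42};       // one value from an earlier check
  fillPatternResults(Results, 2);
  assert(Results.size() == 3 && Results[1] == 1 && Results[2] == 2);
  return 0;
}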
Modified: llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/EDEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -570,6 +570,7 @@
static int ARMFlagFromOpName(LiteralConstantEmitter *type,
const std::string &name) {
REG("GPR");
+ REG("tcGPR");
REG("cc_out");
REG("s_cc_out");
REG("tGPR");
@@ -592,10 +593,7 @@
IMM("msr_mask");
IMM("neg_zero");
IMM("imm0_31");
- IMM("h8imm");
- IMM("h16imm");
- IMM("h32imm");
- IMM("h64imm");
+ IMM("nModImm");
IMM("imm0_4095");
IMM("jt2block_operand");
IMM("t_imm_s4");
Modified: llvm/branches/wendling/eh/utils/TableGen/InstrInfoEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/InstrInfoEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/InstrInfoEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/InstrInfoEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -92,7 +92,8 @@
else if (OpR->isSubClassOf("PointerLikeRegClass"))
Res += utostr(OpR->getValueAsInt("RegClassKind")) + ", ";
else
- Res += "0, ";
+ // -1 means the operand does not have a fixed register class.
+ Res += "-1, ";
// Fill in applicable flags.
Res += "0";
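
For context, a hypothetical consumer of the emitted operand table (the names below are illustrative, not LLVM's API): with -1 as the sentinel, "no fixed register class" is no longer conflated with register class number 0.

#include <cstdio>

// Hypothetical consumer of the emitted per-operand records; field names are
// illustrative only. A RegClass of -1 means "no fixed register class", which
// is now distinct from a real register class numbered 0.
struct OperandInfoSketch { int RegClass; unsigned Flags; };

static const char *describe(const OperandInfoSketch &OI) {
  return OI.RegClass == -1 ? "unconstrained" : "fixed register class";
}

int main() {
  OperandInfoSketch NoClass = { -1, 0 }, Class0 = { 0, 0 };
  std::printf("%s vs. %s\n", describe(NoClass), describe(Class0));
  return 0;
}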
Modified: llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -11,6 +11,15 @@
// a declaration and definition of each function specified by the ARM NEON
// compiler interface. See ARM document DUI0348B.
//
+// Each NEON instruction is implemented in terms of 1 or more functions which
+// are suffixed with the element type of the input vectors. Functions may be
+// implemented in terms of generic vector operations such as +, *, -, etc. or
+// by calling a __builtin_-prefixed function which will be handled by clang's
+// CodeGen library.
+//
+// Additional validation code can be generated by this file when runHeader() is
+// called, rather than the normal run() entry point.
+//
//===----------------------------------------------------------------------===//
#include "NeonEmitter.h"
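
As a rough illustration of the shape described in the new comment block (using the generic vector extension rather than the real arm_neon.h typedefs; the emitted header differs in detail): an intrinsic suffixed with its element type, implemented directly with a generic vector operator.

// Sketch only: a vector_size type stands in for the NEON typedefs the
// emitter actually produces.
typedef unsigned char uint8x8_t_sketch __attribute__((vector_size(8)));

// A per-element-type intrinsic ("_u8" suffix) lowered to a plain vector '+';
// other intrinsics instead call a __builtin_-prefixed function handled by
// clang's CodeGen, as the comment block above notes.
static inline uint8x8_t_sketch vadd_u8_sketch(uint8x8_t_sketch a,
                                              uint8x8_t_sketch b) {
  return a + b;
}

int main() {
  uint8x8_t_sketch x = {1, 2, 3, 4, 5, 6, 7, 8};
  uint8x8_t_sketch y = {8, 7, 6, 5, 4, 3, 2, 1};
  uint8x8_t_sketch z = vadd_u8_sketch(x, y);   // every lane becomes 9
  return z[0] == 9 ? 0 : 1;
}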
@@ -21,6 +30,10 @@
using namespace llvm;
+/// ParseTypes - break down a string such as "fQf" into a vector of StringRefs,
+/// with each StringRef representing a single type declared in the string.
+/// For "fQf" we would end up with 2 StringRefs, "f" and "Qf", representing
+/// 2xfloat and 4xfloat respectively.
static void ParseTypes(Record *r, std::string &s,
SmallVectorImpl<StringRef> &TV) {
const char *data = s.data();
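
A standalone sketch of the splitting convention that comment describes (not the emitter's actual ParseTypes): a token is an optional run of modifier characters, assumed here to be limited to 'Q', 'U', and 'P', followed by a single base-type character.

#include <cassert>
#include <string>
#include <vector>

// Sketch of the "fQf" -> {"f", "Qf"} splitting described above. The real
// parser may accept additional modifier characters.
static std::vector<std::string> splitTypeString(const std::string &s) {
  std::vector<std::string> tokens;
  std::string cur;
  for (char c : s) {
    cur += c;
    if (c != 'Q' && c != 'U' && c != 'P') {  // a base-type char ends the token
      tokens.push_back(cur);
      cur.clear();
    }
  }
  return tokens;
}

int main() {
  std::vector<std::string> v = splitTypeString("fQf");
  assert(v.size() == 2 && v[0] == "f" && v[1] == "Qf");
  return 0;
}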
@@ -49,6 +62,8 @@
}
}
+/// Widen - Convert a type code into the next wider type. char -> short,
+/// short -> int, etc.
static char Widen(const char t) {
switch (t) {
case 'c':
@@ -62,6 +77,8 @@
return '\0';
}
+/// Narrow - Convert a type code into the next smaller type. short -> char,
+/// float -> half float, etc.
static char Narrow(const char t) {
switch (t) {
case 's':
@@ -77,6 +94,8 @@
return '\0';
}
+/// For a particular StringRef, return the base type code, and whether it has
+/// the quad-vector, polynomial, or unsigned modifiers set.
static char ClassifyType(StringRef ty, bool &quad, bool &poly, bool &usgn) {
unsigned off = 0;
@@ -102,6 +121,8 @@
return ty[off];
}
+/// ModType - Transform a type code and its modifiers based on a mod code. The
+/// mod code definitions may be found at the top of arm_neon.td.
static char ModType(const char mod, char type, bool &quad, bool &poly,
bool &usgn, bool &scal, bool &cnst, bool &pntr) {
switch (mod) {
@@ -137,6 +158,7 @@
usgn = true;
break;
case 's':
+ case 'a':
scal = true;
break;
case 'k':
@@ -165,8 +187,11 @@
return type;
}
+/// TypeString - for a modifier and type, generate the name of the typedef for
+/// that type. If generic is true, emit the generic vector type rather than
+/// the public NEON type. QUc -> uint8x8_t / __neon_uint8x8_t.
static std::string TypeString(const char mod, StringRef typestr,
- bool ret = false) {
+ bool generic = false) {
bool quad = false;
bool poly = false;
bool usgn = false;
@@ -187,7 +212,7 @@
SmallString<128> s;
- if (ret)
+ if (generic)
s += "__neon_";
if (usgn)
@@ -254,6 +279,9 @@
return s.str();
}
+/// BuiltinTypeString - for a modifier and type, generate the clang BuiltinsARM.def
+/// prototype code for the function. See the top of clang's Builtins.def for
+/// a description of the type strings.
static std::string BuiltinTypeString(const char mod, StringRef typestr,
ClassKind ck, bool ret) {
bool quad = false;
@@ -310,13 +338,13 @@
return quad ? "V48c" : "V24c";
if (mod == '4')
return quad ? "V64c" : "V32c";
- if (mod == 'f' || (ck == ClassI && type == 'f'))
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
return quad ? "V4f" : "V2f";
- if (ck == ClassI && type == 's')
+ if (ck != ClassB && type == 's')
return quad ? "V8s" : "V4s";
- if (ck == ClassI && type == 'i')
+ if (ck != ClassB && type == 'i')
return quad ? "V4i" : "V2i";
- if (ck == ClassI && type == 'l')
+ if (ck != ClassB && type == 'l')
return quad ? "V2LLi" : "V1LLi";
return quad ? "V16c" : "V8c";
@@ -330,19 +358,21 @@
if (mod == '4')
return quad ? "V16cV16cV16cV16c" : "V8cV8cV8cV8c";
- if (mod == 'f' || (ck == ClassI && type == 'f'))
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
return quad ? "V4f" : "V2f";
- if (ck == ClassI && type == 's')
+ if (ck != ClassB && type == 's')
return quad ? "V8s" : "V4s";
- if (ck == ClassI && type == 'i')
+ if (ck != ClassB && type == 'i')
return quad ? "V4i" : "V2i";
- if (ck == ClassI && type == 'l')
+ if (ck != ClassB && type == 'l')
return quad ? "V2LLi" : "V1LLi";
return quad ? "V16c" : "V8c";
}
-// Turn "vst2_lane" into "vst2q_lane_f32", etc.
+/// MangleName - Append a type or width suffix to a base neon function name,
+/// and insert a 'q' in the appropriate location if the operation works on
+/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
static std::string MangleName(const std::string &name, StringRef typestr,
ClassKind ck) {
if (name == "vcvt_f32_f16")
@@ -442,14 +472,26 @@
return s;
}
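
To make the MangleName comment concrete: a type suffix is appended and the quad
variant gets a 'q' inserted into the base operation name. A rough sketch, not
part of the patch -- placing the 'q' immediately before the first '_' is an
assumption here, as the real placement logic is elided above:

  #include <cassert>
  #include <string>

  // Sketch: append a type suffix and, for 128-bit (quad) operations, insert a
  // 'q' into the base name, e.g. "vst2_lane" + "f32" -> "vst2q_lane_f32".
  static std::string MangleSketch(std::string Name, const std::string &Suffix,
                                  bool Quad) {
    if (Quad) {
      std::string::size_type Pos = Name.find('_');
      Name.insert(Pos == std::string::npos ? Name.size() : Pos, "q");
    }
    return Name + "_" + Suffix;
  }

  int main() {
    assert(MangleSketch("vst2_lane", "f32", true) == "vst2q_lane_f32");
    assert(MangleSketch("vadd", "s8", false) == "vadd_s8");
    return 0;
  }
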
+static std::string Duplicate(unsigned nElts, StringRef typestr,
+ const std::string &a) {
+ std::string s;
+
+ s = "(__neon_" + TypeString('d', typestr) + "){ ";
+ for (unsigned i = 0; i != nElts; ++i) {
+ s += a;
+ if ((i + 1) < nElts)
+ s += ", ";
+ }
+ s += " }";
+
+ return s;
+}
+
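
Duplicate is what the new OP_MUL_N/OP_MLA_N/OP_MLS_N operations (and the splat
builtins below) use to widen a scalar operand into a vector: it emits a
compound literal that repeats the expression once per lane. A transcription of
the string it builds; the concrete __neon_ type name below is only an example:

  #include <cassert>
  #include <string>

  // Transcription of Duplicate(): build "(__neon_<T>){ a, a, ... }" with the
  // scalar expression repeated nElts times.
  static std::string DuplicateSketch(unsigned NElts, const std::string &Ty,
                                     const std::string &A) {
    std::string S = "(__neon_" + Ty + "){ ";
    for (unsigned i = 0; i != NElts; ++i) {
      S += A;
      if (i + 1 < NElts)
        S += ", ";
    }
    return S + " }";
  }

  int main() {
    assert(DuplicateSketch(2, "float32x2_t", "b") ==
           "(__neon_float32x2_t){ b, b }");
    return 0;
  }
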
// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
// If structTypes is true, the NEON types are structs of vector types rather
// than vector types, and the call becomes "a.val + b.val"
static std::string GenOpString(OpKind op, const std::string &proto,
StringRef typestr, bool structTypes = true) {
- std::string ts = TypeString(proto[0], typestr);
- std::string s = ts + " r; r";
-
bool dummy, quad = false;
char type = ClassifyType(typestr, quad, dummy, dummy);
unsigned nElts = 0;
@@ -461,7 +503,9 @@
case 'h': nElts = 4; break;
case 'f': nElts = 2; break;
}
- nElts <<= quad;
+
+ std::string ts = TypeString(proto[0], typestr);
+ std::string s = ts + " r; r";
if (structTypes)
s += ".val";
@@ -481,12 +525,18 @@
case OpSub:
s += a + " - " + b;
break;
+ case OpMulN:
+ b = Duplicate(nElts << (int)quad, typestr, "b");
case OpMul:
s += a + " * " + b;
break;
+ case OpMlaN:
+ c = Duplicate(nElts << (int)quad, typestr, "c");
case OpMla:
s += a + " + ( " + b + " * " + c + " )";
break;
+ case OpMlsN:
+ c = Duplicate(nElts << (int)quad, typestr, "c");
case OpMls:
s += a + " - ( " + b + " * " + c + " )";
break;
@@ -540,13 +590,35 @@
s += "(__neon_int64x1_t)(((__neon_int64x2_t)" + a + ")[0])";
break;
case OpDup:
- s += "(__neon_" + ts + "){ ";
- for (unsigned i = 0; i != nElts; ++i) {
- s += a;
- if ((i + 1) < nElts)
- s += ", ";
- }
- s += " }";
+ s += Duplicate(nElts << (int)quad, typestr, a);
+ break;
+ case OpSelect:
+ // ((0 & 1) | (~0 & 2))
+ ts = TypeString(proto[1], typestr);
+ s += "( " + a + " & (__neon_" + ts + ")" + b + ") | ";
+ s += "(~" + a + " & (__neon_" + ts + ")" + c + ")";
+ break;
+ case OpRev16:
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = 2; i <= nElts << (int)quad; i += 2)
+ for (unsigned j = 0; j != 2; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ case OpRev32:
+ nElts >>= 1;
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = nElts; i <= nElts << (1 + (int)quad); i += nElts)
+ for (unsigned j = 0; j != nElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
+ break;
+ case OpRev64:
+ s += "__builtin_shufflevector(" + a + ", " + a;
+ for (unsigned i = nElts; i <= nElts << (int)quad; i += nElts)
+ for (unsigned j = 0; j != nElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ")";
break;
default:
throw "unknown OpKind!";
@@ -613,6 +685,19 @@
static std::string GenBuiltin(const std::string &name, const std::string &proto,
StringRef typestr, ClassKind ck,
bool structTypes = true) {
+ bool dummy, quad = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ }
+ if (quad) nElts <<= 1;
+
char arg = 'a';
std::string s;
@@ -647,19 +732,29 @@
s += " = ";
}
- }
+ }
+
+ bool splat = proto.find('a') != std::string::npos;
s += "__builtin_neon_";
- s += MangleName(name, typestr, ck);
+ if (splat) {
+ std::string vname(name, 0, name.size()-2);
+ s += MangleName(vname, typestr, ck);
+ } else {
+ s += MangleName(name, typestr, ck);
+ }
s += "(";
for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ std::string args = std::string(&arg, 1);
+ if (define)
+ args = "(" + args + ")";
+
// Handle multiple-vector values specially, emitting each subvector as an
// argument to the __builtin.
if (structTypes && (proto[i] == '2' || proto[i] == '3' || proto[i] == '4')){
for (unsigned vi = 0, ve = proto[i] - '0'; vi != ve; ++vi) {
- s.push_back(arg);
- s += ".val[" + utostr(vi) + "]";
+ s += args + ".val[" + utostr(vi) + "]";
if ((vi + 1) < ve)
s += ", ";
}
@@ -669,15 +764,13 @@
continue;
}
- // Parenthesize the args from the macro.
- if (define)
- s.push_back('(');
- s.push_back(arg);
- if (define)
- s.push_back(')');
+ if (splat && (i + 1) == e)
+ s += Duplicate(nElts, typestr, args);
+ else
+ s += args;
if (structTypes && proto[i] != 's' && proto[i] != 'i' && proto[i] != 'l' &&
- proto[i] != 'p' && proto[i] != 'c') {
+ proto[i] != 'p' && proto[i] != 'c' && proto[i] != 'a') {
s += ".val";
}
if ((i + 1) < e)
@@ -732,6 +825,8 @@
return s;
}
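
One detail of the splat ('a') handling in GenBuiltin above that is easy to
miss: name.size()-2 strips exactly the trailing "_n", so the splat intrinsic
reuses the plain builtin and only its last operand is run through Duplicate. A
tiny check of that expression; the "vmul_n" name is just an example entry:

  #include <cassert>
  #include <string>

  int main() {
    std::string name = "vmul_n";                  // example entry name
    std::string vname(name, 0, name.size() - 2);  // same expression as in
    assert(vname == "vmul");                      // GenBuiltin above
    return 0;
  }
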
+/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
+/// consists of type definitions and function declarations.
void NeonEmitter::run(raw_ostream &OS) {
EmitSourceFileHeader("ARM NEON Header", OS);
@@ -747,8 +842,6 @@
OS << "#include <stdint.h>\n\n";
// Emit NEON-specific scalar typedefs.
- // FIXME: probably need to do something better for polynomial types.
- // FIXME: is this the correct thing to do for float16?
OS << "typedef float float32_t;\n";
OS << "typedef uint8_t poly8_t;\n";
OS << "typedef uint16_t poly16_t;\n";
@@ -853,31 +946,66 @@
OS << "#endif /* __ARM_NEON_H */\n";
}
+static unsigned RangeFromType(StringRef typestr) {
+ // Determine the base type and whether it is a quad (128-bit) vector.
+ bool quad = false, dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+
+ switch (type) {
+ case 'c':
+ return (8 << (int)quad) - 1;
+ case 'h':
+ case 's':
+ return (4 << (int)quad) - 1;
+ case 'f':
+ case 'i':
+ return (2 << (int)quad) - 1;
+ case 'l':
+ return (1 << (int)quad) - 1;
+ default:
+ throw "unhandled type!";
+ break;
+ }
+}
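
RangeFromType returns the largest valid lane index for the given type string:
the lane count minus one, with the count doubled for quad ('Q') types, so "c"
(8 x i8) gives 7 and "Qc" (16 x i8) gives 15. A compact restatement, not part
of the patch:

  #include <cassert>

  // Restatement of RangeFromType(): max lane index = lanes - 1, where the
  // lane count doubles for quad (128-bit) vectors.
  static unsigned MaxLaneIndex(char Type, bool Quad) {
    unsigned Lanes;
    switch (Type) {
    case 'c':           Lanes = 8; break;   // 8-bit elements
    case 'h': case 's': Lanes = 4; break;   // 16-bit elements
    case 'f': case 'i': Lanes = 2; break;   // 32-bit elements
    default:            Lanes = 1; break;   // 64-bit elements ('l')
    }
    return (Lanes << (Quad ? 1 : 0)) - 1;
  }

  int main() {
    assert(MaxLaneIndex('c', false) == 7 && MaxLaneIndex('c', true) == 15);
    return 0;
  }
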
+
+/// runHeader - Emit a file with sections defining:
+/// 1. the NEON section of BuiltinsARM.def.
+/// 2. the SemaChecking code for the type overload checking.
+/// 3. the SemaChecking code for validation of intrinsic immediate arguments.
void NeonEmitter::runHeader(raw_ostream &OS) {
std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
StringMap<OpKind> EmittedMap;
+ // Generate BuiltinsARM.def for NEON
+ OS << "#ifdef GET_NEON_BUILTINS\n";
for (unsigned i = 0, e = RV.size(); i != e; ++i) {
Record *R = RV[i];
-
OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
if (k != OpNone)
continue;
-
- std::string name = LowercaseString(R->getName());
+
std::string Proto = R->getValueAsString("Prototype");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
std::string Types = R->getValueAsString("Types");
-
SmallVector<StringRef, 16> TypeVec;
ParseTypes(R, Types, TypeVec);
-
+
if (R->getSuperClasses().size() < 2)
throw TGError(R->getLoc(), "Builtin has no class kind");
+ std::string name = LowercaseString(R->getName());
ClassKind ck = ClassMap[R->getSuperClasses()[1]];
for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the BuiltinsARM.def declaration for this builtin, ensuring
+ // that each unique BUILTIN() macro appears only once in the output
+ // stream.
std::string bd = GenBuiltinDef(name, Proto, TypeVec[ti], ck);
if (EmittedMap.count(bd))
continue;
@@ -886,4 +1014,130 @@
OS << bd << "\n";
}
}
+ OS << "#endif\n\n";
+
+ // Generate the overloaded type checking code for SemaChecking.cpp
+ OS << "#ifdef GET_NEON_OVERLOAD_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ std::string name = LowercaseString(R->getName());
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which have a scalar argument cannot be overloaded, so there is
+ // no need to check them when emitting the type checking code.
+ if (Proto.find('s') != std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ int si = -1, qi = -1;
+ unsigned mask = 0, qmask = 0;
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the switch case(s) for this builtin for the type validation.
+ bool quad = false, poly = false, usgn = false;
+ (void) ClassifyType(TypeVec[ti], quad, poly, usgn);
+
+ if (quad) {
+ qi = ti;
+ qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ } else {
+ si = ti;
+ mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ }
+ }
+ if (mask)
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[si], ClassB)
+ << ": mask = " << "0x" << utohexstr(mask) << "; break;\n";
+ if (qmask)
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[qi], ClassB)
+ << ": mask = " << "0x" << utohexstr(qmask) << "; break;\n";
+ }
+ OS << "#endif\n\n";
+
+ // Generate the intrinsic range checking code for shift/lane immediates.
+ OS << "#ifdef GET_NEON_IMMEDIATE_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string name = LowercaseString(R->getName());
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which do not have an immediate do not need to have range
+ // checking code emitted.
+ if (Proto.find('i') == std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ std::string namestr, shiftstr, rangestr;
+
+ // Builtins which are overloaded by type will need to have their upper
+ // bound computed at Sema time based on the type constant.
+ if (Proto.find('s') == std::string::npos) {
+ ck = ClassB;
+ if (R->getValueAsBit("isShift")) {
+ shiftstr = ", true";
+
+ // Right shifts have an 'r' in the name, left shifts do not.
+ if (name.find('r') != std::string::npos)
+ rangestr = "l = 1; ";
+ }
+ rangestr += "u = RFT(TV" + shiftstr + ")";
+ } else {
+ rangestr = "u = " + utostr(RangeFromType(TypeVec[ti]));
+ }
+ // Make sure cases appear only once.
+ namestr = MangleName(name, TypeVec[ti], ck);
+ if (EmittedMap.count(namestr))
+ continue;
+ EmittedMap[namestr] = OpNone;
+
+ unsigned immidx = 0;
+ for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
+ switch (Proto[ii]) {
+ default: immidx += 1; break;
+ case '2': immidx += 2; break;
+ case '3': immidx += 3; break;
+ case '4': immidx += 4; break;
+ case 'i': ie = ii + 1; break;
+ }
+ }
+ OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
+ << ": i = " << immidx << "; " << rangestr << "; break;\n";
+ }
+ }
+ OS << "#endif\n\n";
}
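
A worked example for the immediate-index loop at the end of runHeader: it
counts the builtin argument slots that precede the 'i' (immediate) argument,
with the multi-vector codes '2'/'3'/'4' occupying that many slots. For a
made-up prototype string "vdd2i" the index comes out as 4. A direct
transcription, not part of the patch:

  #include <cassert>
  #include <string>

  // Direct transcription of the immediate-index computation above: count the
  // builtin argument slots that precede the immediate ('i'), where a
  // multi-vector argument '2'/'3'/'4' occupies that many slots.
  static unsigned ImmIndex(const std::string &Proto) {
    unsigned immidx = 0;
    for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
      switch (Proto[ii]) {
      default:  immidx += 1; break;
      case '2': immidx += 2; break;
      case '3': immidx += 3; break;
      case '4': immidx += 4; break;
      case 'i': ie = ii + 1; break;   // stop after the immediate
      }
    }
    return immidx;
  }

  int main() {
    assert(ImmIndex("vdd2i") == 4);   // "vdd2i" is a made-up prototype string
    return 0;
  }
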
Modified: llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.h (original)
+++ llvm/branches/wendling/eh/utils/TableGen/NeonEmitter.h Fri Jul 2 04:34:51 2010
@@ -28,6 +28,9 @@
OpMul,
OpMla,
OpMls,
+ OpMulN,
+ OpMlaN,
+ OpMlsN,
OpEq,
OpGe,
OpLe,
@@ -44,7 +47,11 @@
OpConcat,
OpDup,
OpHi,
- OpLo
+ OpLo,
+ OpSelect,
+ OpRev16,
+ OpRev32,
+ OpRev64
};
enum ClassKind {
@@ -64,38 +71,43 @@
public:
NeonEmitter(RecordKeeper &R) : Records(R) {
- OpMap["OP_NONE"] = OpNone;
- OpMap["OP_ADD"] = OpAdd;
- OpMap["OP_SUB"] = OpSub;
- OpMap["OP_MUL"] = OpMul;
- OpMap["OP_MLA"] = OpMla;
- OpMap["OP_MLS"] = OpMls;
- OpMap["OP_EQ"] = OpEq;
- OpMap["OP_GE"] = OpGe;
- OpMap["OP_LE"] = OpLe;
- OpMap["OP_GT"] = OpGt;
- OpMap["OP_LT"] = OpLt;
- OpMap["OP_NEG"] = OpNeg;
- OpMap["OP_NOT"] = OpNot;
- OpMap["OP_AND"] = OpAnd;
- OpMap["OP_OR"] = OpOr;
- OpMap["OP_XOR"] = OpXor;
- OpMap["OP_ANDN"] = OpAndNot;
- OpMap["OP_ORN"] = OpOrNot;
- OpMap["OP_CAST"] = OpCast;
- OpMap["OP_CONC"] = OpConcat;
- OpMap["OP_HI"] = OpHi;
- OpMap["OP_LO"] = OpLo;
- OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_NONE"] = OpNone;
+ OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MLA_N"] = OpMlaN;
+ OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_EQ"] = OpEq;
+ OpMap["OP_GE"] = OpGe;
+ OpMap["OP_LE"] = OpLe;
+ OpMap["OP_GT"] = OpGt;
+ OpMap["OP_LT"] = OpLt;
+ OpMap["OP_NEG"] = OpNeg;
+ OpMap["OP_NOT"] = OpNot;
+ OpMap["OP_AND"] = OpAnd;
+ OpMap["OP_OR"] = OpOr;
+ OpMap["OP_XOR"] = OpXor;
+ OpMap["OP_ANDN"] = OpAndNot;
+ OpMap["OP_ORN"] = OpOrNot;
+ OpMap["OP_CAST"] = OpCast;
+ OpMap["OP_CONC"] = OpConcat;
+ OpMap["OP_HI"] = OpHi;
+ OpMap["OP_LO"] = OpLo;
+ OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_SEL"] = OpSelect;
+ OpMap["OP_REV16"] = OpRev16;
+ OpMap["OP_REV32"] = OpRev32;
+ OpMap["OP_REV64"] = OpRev64;
Record *SI = R.getClass("SInst");
Record *II = R.getClass("IInst");
Record *WI = R.getClass("WInst");
- Record *BI = R.getClass("BInst");
ClassMap[SI] = ClassS;
ClassMap[II] = ClassI;
ClassMap[WI] = ClassW;
- ClassMap[BI] = ClassB;
}
// run - Emit arm_neon.h.inc
Modified: llvm/branches/wendling/eh/utils/TableGen/Record.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/Record.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/Record.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/Record.cpp Fri Jul 2 04:34:51 2010
@@ -270,7 +270,15 @@
}
bool RecordRecTy::baseClassOf(const RecordRecTy *RHS) const {
- return Rec == RHS->getRecord() || RHS->getRecord()->isSubClassOf(Rec);
+ if (Rec == RHS->getRecord() || RHS->getRecord()->isSubClassOf(Rec))
+ return true;
+
+ const std::vector<Record*> &SC = Rec->getSuperClasses();
+ for (unsigned i = 0, e = SC.size(); i != e; ++i)
+ if (RHS->getRecord()->isSubClassOf(SC[i]))
+ return true;
+
+ return false;
}
@@ -721,9 +729,20 @@
break;
}
case EQ: {
- // Make sure we've resolved
+ // Try to fold the eq comparison for 'bit' and 'int' operands; otherwise
+ // fall back to comparing string objects.
+ IntInit* L =
+ dynamic_cast<IntInit*>(LHS->convertInitializerTo(new IntRecTy()));
+ IntInit* R =
+ dynamic_cast<IntInit*>(RHS->convertInitializerTo(new IntRecTy()));
+
+ if (L && R)
+ return new IntInit(L->getValue() == R->getValue());
+
StringInit *LHSs = dynamic_cast<StringInit*>(LHS);
StringInit *RHSs = dynamic_cast<StringInit*>(RHS);
+
+ // Make sure both sides resolved to strings before comparing them.
if (LHSs && RHSs)
return new IntInit(LHSs->getValue() == RHSs->getValue());
@@ -971,6 +990,8 @@
case IF: {
IntInit *LHSi = dynamic_cast<IntInit*>(LHS);
+ if (Init *I = LHS->convertInitializerTo(new IntRecTy()))
+ LHSi = dynamic_cast<IntInit*>(I);
if (LHSi) {
if (LHSi->getValue()) {
return MHS;
@@ -990,6 +1011,8 @@
if (Opc == IF && lhs != LHS) {
IntInit *Value = dynamic_cast<IntInit*>(lhs);
+ if (Init *I = lhs->convertInitializerTo(new IntRecTy()))
+ Value = dynamic_cast<IntInit*>(I);
if (Value != 0) {
// Short-circuit
if (Value->getValue()) {
Modified: llvm/branches/wendling/eh/utils/TableGen/RegisterInfoEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/RegisterInfoEmitter.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/RegisterInfoEmitter.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/RegisterInfoEmitter.cpp Fri Jul 2 04:34:51 2010
@@ -96,7 +96,7 @@
for (unsigned i = 0, e = RegisterClasses.size(); i != e; ++i) {
if (i) OS << ",\n";
OS << " " << RegisterClasses[i].getName() << "RegClassID";
- OS << " = " << (i+1);
+ OS << " = " << i;
}
OS << "\n };\n\n";
Modified: llvm/branches/wendling/eh/utils/TableGen/TGParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/TGParser.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/TGParser.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/TGParser.cpp Fri Jul 2 04:34:51 2010
@@ -1640,7 +1640,7 @@
///
/// DefInst ::= DEF ObjectName ObjectBody
///
-llvm::Record *TGParser::ParseDef(MultiClass *CurMultiClass) {
+bool TGParser::ParseDef(MultiClass *CurMultiClass) {
SMLoc DefLoc = Lex.getLoc();
assert(Lex.getCode() == tgtok::Def && "Unknown tok");
Lex.Lex(); // Eat the 'def' token.
@@ -1654,7 +1654,7 @@
// Ensure redefinition doesn't happen.
if (Records.getDef(CurRec->getName())) {
Error(DefLoc, "def '" + CurRec->getName() + "' already defined");
- return 0;
+ return true;
}
Records.addDef(CurRec);
} else {
@@ -1663,20 +1663,33 @@
if (CurMultiClass->DefPrototypes[i]->getName() == CurRec->getName()) {
Error(DefLoc, "def '" + CurRec->getName() +
"' already defined in this multiclass!");
- return 0;
+ return true;
}
CurMultiClass->DefPrototypes.push_back(CurRec);
}
if (ParseObjectBody(CurRec))
- return 0;
+ return true;
if (CurMultiClass == 0) // Def's in multiclasses aren't really defs.
CurRec->resolveReferences();
// If ObjectBody has template arguments, it's an error.
assert(CurRec->getTemplateArgs().empty() && "How'd this get template args?");
- return CurRec;
+
+ if (CurMultiClass) {
+ // Copy the template arguments for the multiclass into the def.
+ const std::vector<std::string> &TArgs =
+ CurMultiClass->Rec.getTemplateArgs();
+
+ for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
+ const RecordVal *RV = CurMultiClass->Rec.getValue(TArgs[i]);
+ assert(RV && "Template arg doesn't exist?");
+ CurRec->addValue(*RV);
+ }
+ }
+
+ return false;
}
@@ -1757,12 +1770,12 @@
}
/// ParseTopLevelLet - Parse a 'let' at top level. This can be a couple of
-/// different related productions.
+/// different related productions. This works inside multiclasses too.
///
/// Object ::= LET LetList IN '{' ObjectList '}'
/// Object ::= LET LetList IN Object
///
-bool TGParser::ParseTopLevelLet() {
+bool TGParser::ParseTopLevelLet(MultiClass *CurMultiClass) {
assert(Lex.getCode() == tgtok::Let && "Unexpected token");
Lex.Lex();
@@ -1778,7 +1791,7 @@
// If this is a scalar let, just handle it now
if (Lex.getCode() != tgtok::l_brace) {
// LET LetList IN Object
- if (ParseObject())
+ if (ParseObject(CurMultiClass))
return true;
} else { // Object ::= LETCommand '{' ObjectList '}'
SMLoc BraceLoc = Lex.getLoc();
@@ -1786,7 +1799,7 @@
Lex.Lex(); // eat the '{'.
// Parse the object list.
- if (ParseObjectList())
+ if (ParseObjectList(CurMultiClass))
return true;
if (Lex.getCode() != tgtok::r_brace) {
@@ -1801,27 +1814,6 @@
return false;
}
-/// ParseMultiClassDef - Parse a def in a multiclass context.
-///
-/// MultiClassDef ::= DefInst
-///
-bool TGParser::ParseMultiClassDef(MultiClass *CurMC) {
-
- Record *D = ParseDef(CurMC);
- if (D == 0) return true;
-
- // Copy the template arguments for the multiclass into the def.
- const std::vector<std::string> &TArgs = CurMC->Rec.getTemplateArgs();
-
- for (unsigned i = 0, e = TArgs.size(); i != e; ++i) {
- const RecordVal *RV = CurMC->Rec.getValue(TArgs[i]);
- assert(RV && "Template arg doesn't exist?");
- D->addValue(*RV);
- }
-
- return false;
-}
-
/// ParseMultiClass - Parse a multiclass definition.
///
/// MultiClassInst ::= MULTICLASS ID TemplateArgList?
@@ -1883,18 +1875,17 @@
return TokError("multiclass must contain at least one def");
while (Lex.getCode() != tgtok::r_brace) {
- if (Lex.getCode() != tgtok::Defm && Lex.getCode() != tgtok::Def)
- return TokError("expected 'def' or 'defm' in multiclass body");
-
- if (Lex.getCode() == tgtok::Def)
- if (ParseMultiClassDef(CurMultiClass))
- return true;
-
- if (Lex.getCode() == tgtok::Defm)
- if (ParseDefm(CurMultiClass))
- return true;
+ switch (Lex.getCode()) {
+ default:
+ return TokError("expected 'let', 'def' or 'defm' in multiclass body");
+ case tgtok::Let:
+ case tgtok::Def:
+ case tgtok::Defm:
+ if (ParseObject(CurMultiClass))
+ return true;
+ break;
+ }
}
-
Lex.Lex(); // eat the '}'.
}
@@ -1916,6 +1907,12 @@
if (Lex.Lex() != tgtok::colon)
return TokError("expected ':' after defm identifier");
+ // Keep track of the new generated record definitions.
+ std::vector<Record*> NewRecDefs;
+
+ // This record also inherits from a regular class (non-multiclass)?
+ bool InheritFromClass = false;
+
// eat the colon.
Lex.Lex();
@@ -2025,15 +2022,59 @@
Records.addDef(CurRec);
CurRec->resolveReferences();
}
+
+ NewRecDefs.push_back(CurRec);
}
if (Lex.getCode() != tgtok::comma) break;
Lex.Lex(); // eat ','.
SubClassLoc = Lex.getLoc();
+
+ // A defm can inherit from regular classes (non-multiclass) as
+ // long as they come at the end of the inheritance list.
+ InheritFromClass = (Records.getClass(Lex.getCurStrVal()) != 0);
+
+ if (InheritFromClass)
+ break;
+
Ref = ParseSubClassReference(0, true);
}
+ if (InheritFromClass) {
+ // Process all the classes to inherit as if they were part of a
+ // regular 'def' and inherit all record values.
+ SubClassReference SubClass = ParseSubClassReference(0, false);
+ while (1) {
+ // Check for error.
+ if (SubClass.Rec == 0) return true;
+
+ // Get the expanded definition prototypes and add to them the record
+ // values of the class currently being inherited from.
+ for (unsigned i = 0, e = NewRecDefs.size(); i != e; ++i) {
+ Record *CurRec = NewRecDefs[i];
+
+ // Add it.
+ if (AddSubClass(CurRec, SubClass))
+ return true;
+
+ // Process any variables on the let stack.
+ for (unsigned i = 0, e = LetStack.size(); i != e; ++i)
+ for (unsigned j = 0, e = LetStack[i].size(); j != e; ++j)
+ if (SetValue(CurRec, LetStack[i][j].Loc, LetStack[i][j].Name,
+ LetStack[i][j].Bits, LetStack[i][j].Value))
+ return true;
+
+ if (!CurMultiClass)
+ CurRec->resolveReferences();
+ }
+
+ if (Lex.getCode() != tgtok::comma) break;
+ Lex.Lex(); // eat ','.
+ SubClass = ParseSubClassReference(0, false);
+ }
+ }
+
if (Lex.getCode() != tgtok::semi)
return TokError("expected ';' at end of defm");
Lex.Lex();
@@ -2048,12 +2089,12 @@
/// Object ::= DefMInst
/// Object ::= LETCommand '{' ObjectList '}'
/// Object ::= LETCommand Object
-bool TGParser::ParseObject() {
+bool TGParser::ParseObject(MultiClass *MC) {
switch (Lex.getCode()) {
default: assert(0 && "This is not an object");
- case tgtok::Let: return ParseTopLevelLet();
- case tgtok::Def: return ParseDef(0) == 0;
- case tgtok::Defm: return ParseDefm();
+ case tgtok::Let: return ParseTopLevelLet(MC);
+ case tgtok::Def: return ParseDef(MC);
+ case tgtok::Defm: return ParseDefm(MC);
case tgtok::Class: return ParseClass();
case tgtok::MultiClass: return ParseMultiClass();
}
@@ -2061,9 +2102,9 @@
/// ParseObjectList
/// ObjectList :== Object*
-bool TGParser::ParseObjectList() {
+bool TGParser::ParseObjectList(MultiClass *MC) {
while (isObjectStart(Lex.getCode())) {
- if (ParseObject())
+ if (ParseObject(MC))
return true;
}
return false;
Modified: llvm/branches/wendling/eh/utils/TableGen/TGParser.h
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/TGParser.h?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/TGParser.h (original)
+++ llvm/branches/wendling/eh/utils/TableGen/TGParser.h Fri Jul 2 04:34:51 2010
@@ -69,16 +69,15 @@
SubMultiClassReference &SubMultiClass);
private: // Parser methods.
- bool ParseObjectList();
- bool ParseObject();
+ bool ParseObjectList(MultiClass *MC = 0);
+ bool ParseObject(MultiClass *MC);
bool ParseClass();
bool ParseMultiClass();
- bool ParseMultiClassDef(MultiClass *CurMC);
- bool ParseDefm(MultiClass *CurMultiClass = 0);
- bool ParseTopLevelLet();
+ bool ParseDefm(MultiClass *CurMultiClass);
+ bool ParseDef(MultiClass *CurMultiClass);
+ bool ParseTopLevelLet(MultiClass *CurMultiClass);
std::vector<LetRecord> ParseLetList();
- Record *ParseDef(MultiClass *CurMultiClass);
bool ParseObjectBody(Record *CurRec);
bool ParseBody(Record *CurRec);
bool ParseBodyItem(Record *CurRec);
Modified: llvm/branches/wendling/eh/utils/TableGen/TableGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/TableGen.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/TableGen.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/TableGen.cpp Fri Jul 2 04:34:51 2010
@@ -19,6 +19,7 @@
#include "AsmWriterEmitter.h"
#include "CallingConvEmitter.h"
#include "ClangASTNodesEmitter.h"
+#include "ClangAttrEmitter.h"
#include "ClangDiagnosticsEmitter.h"
#include "CodeEmitterGen.h"
#include "DAGISelEmitter.h"
@@ -53,6 +54,8 @@
GenARMDecoder,
GenDisassembler,
GenCallingConv,
+ GenClangAttrClasses,
+ GenClangAttrList,
GenClangDiagsDefs,
GenClangDiagGroups,
GenClangDeclNodes,
@@ -65,8 +68,8 @@
GenTgtIntrinsic,
GenLLVMCConf,
GenEDHeader, GenEDInfo,
- GenNeonHeader,
- GenNeonBuiltinsDef,
+ GenArmNeon,
+ GenArmNeonSema,
PrintEnums
};
@@ -111,6 +114,10 @@
"Generate intrinsic information"),
clEnumValN(GenTgtIntrinsic, "gen-tgt-intrinsic",
"Generate target intrinsic information"),
+ clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes",
+ "Generate clang attribute clases"),
+ clEnumValN(GenClangAttrList, "gen-clang-attr-list",
+ "Generate a clang attribute list"),
clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
"Generate Clang diagnostics definitions"),
clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
@@ -125,10 +132,10 @@
"Generate enhanced disassembly info header"),
clEnumValN(GenEDInfo, "gen-enhanced-disassembly-info",
"Generate enhanced disassembly info"),
- clEnumValN(GenNeonHeader, "gen-arm-neon-header",
+ clEnumValN(GenArmNeon, "gen-arm-neon",
"Generate arm_neon.h for clang"),
- clEnumValN(GenNeonBuiltinsDef, "gen-arm-neon-builtins-def",
- "Generate NEON BuiltinsARM.def for clang"),
+ clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
+ "Generate ARM NEON sema support for clang"),
clEnumValN(PrintEnums, "print-enums",
"Print enum values for a class"),
clEnumValEnd));
@@ -248,6 +255,12 @@
case GenAsmMatcher:
AsmMatcherEmitter(Records).run(Out);
break;
+ case GenClangAttrClasses:
+ ClangAttrClassEmitter(Records).run(Out);
+ break;
+ case GenClangAttrList:
+ ClangAttrListEmitter(Records).run(Out);
+ break;
case GenClangDiagsDefs:
ClangDiagsDefsEmitter(Records, ClangComponent).run(Out);
break;
@@ -294,10 +307,10 @@
case GenEDInfo:
EDEmitter(Records).run(Out);
break;
- case GenNeonHeader:
+ case GenArmNeon:
NeonEmitter(Records).run(Out);
break;
- case GenNeonBuiltinsDef:
+ case GenArmNeonSema:
NeonEmitter(Records).runHeader(Out);
break;
case PrintEnums:
Modified: llvm/branches/wendling/eh/utils/TableGen/X86RecognizableInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/TableGen/X86RecognizableInstr.cpp?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/TableGen/X86RecognizableInstr.cpp (original)
+++ llvm/branches/wendling/eh/utils/TableGen/X86RecognizableInstr.cpp Fri Jul 2 04:34:51 2010
@@ -535,7 +535,8 @@
HANDLE_OPERAND(rmRegister)
if (HasVEX_4VPrefix)
- // FIXME: encoding of registers in AVX is in 1's complement form.
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above becomes the one in the VEX.VVVV field.
HANDLE_OPTIONAL(rmRegister)
else
HANDLE_OPTIONAL(immediate)
@@ -547,6 +548,12 @@
assert(numPhysicalOperands >= 2 && numPhysicalOperands <= 3 &&
"Unexpected number of operands for MRMSrcMemFrm");
HANDLE_OPERAND(roRegister)
+
+ if (HasVEX_4VPrefix)
+ // FIXME: In AVX, the register below becomes the one encoded
+ // in ModRMVEX and the one above becomes the one in the VEX.VVVV field.
+ HANDLE_OPTIONAL(rmRegister)
+
HANDLE_OPERAND(memory)
HANDLE_OPTIONAL(immediate)
break;
Modified: llvm/branches/wendling/eh/utils/buildit/build_llvm
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/buildit/build_llvm?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/buildit/build_llvm (original)
+++ llvm/branches/wendling/eh/utils/buildit/build_llvm Fri Jul 2 04:34:51 2010
@@ -66,7 +66,6 @@
DEVELOPER_DIR="${DEVELOPER_DIR-Developer}"
if [ "$ARM_HOSTED_BUILD" = yes ]; then
DT_HOME="$DEST_DIR/usr"
- HOST_SDKROOT=$SDKROOT
else
DT_HOME="$DEST_DIR/$DEVELOPER_DIR/usr"
fi
@@ -195,7 +194,7 @@
fi
make $JOBS_FLAG $OPTIMIZE_OPTS UNIVERSAL=1 UNIVERSAL_ARCH="$HOSTS" \
- UNIVERSAL_SDK_PATH=$HOST_SDKROOT \
+ UNIVERSAL_SDK_PATH=$SDKROOT \
NO_RUNTIME_LIBS=1 \
DISABLE_EDIS=1 \
LLVM_SUBMIT_VERSION=$LLVM_SUBMIT_VERSION \
Modified: llvm/branches/wendling/eh/utils/lit/lit/TestRunner.py
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/wendling/eh/utils/lit/lit/TestRunner.py?rev=107464&r1=107463&r2=107464&view=diff
==============================================================================
--- llvm/branches/wendling/eh/utils/lit/lit/TestRunner.py (original)
+++ llvm/branches/wendling/eh/utils/lit/lit/TestRunner.py Fri Jul 2 04:34:51 2010
@@ -13,11 +13,13 @@
self.command = command
self.message = message
+kIsWindows = platform.system() == 'Windows'
+
# Don't use close_fds on Windows.
-kUseCloseFDs = platform.system() != 'Windows'
+kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
-kAvoidDevNull = platform.system() == 'Windows'
+kAvoidDevNull = kIsWindows
def executeCommand(command, cwd=None, env=None):
p = subprocess.Popen(command, cwd=cwd,
@@ -64,6 +66,7 @@
input = subprocess.PIPE
stderrTempFiles = []
opened_files = []
+ named_temp_files = []
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
@@ -146,6 +149,15 @@
if not args[0]:
raise InternalShellError(j, '%r: command not found' % j.args[0])
+ # Replace uses of /dev/null with temporary files.
+ if kAvoidDevNull:
+ for i,arg in enumerate(args):
+ if arg == "/dev/null":
+ f = tempfile.NamedTemporaryFile(delete=False)
+ f.close()
+ named_temp_files.append(f.name)
+ args[i] = f.name
+
procs.append(subprocess.Popen(args, cwd=cwd,
stdin = stdin,
stdout = stdout,
@@ -207,6 +219,13 @@
for f in opened_files:
f.close()
+ # Remove any named temporary files we created.
+ for f in named_temp_files:
+ try:
+ os.remove(f)
+ except OSError:
+ pass
+
if cmd.negate:
exitCode = not exitCode
@@ -364,7 +383,7 @@
return True
-def parseIntegratedTestScript(test):
+def parseIntegratedTestScript(test, normalize_slashes=False):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET'
information. The RUN lines also will have variable substitution performed.
@@ -375,18 +394,25 @@
#
# FIXME: This should not be here?
sourcepath = test.getSourcePath()
+ sourcedir = os.path.dirname(sourcepath)
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpBase = os.path.join(execdir, 'Output', execbase)
if test.index is not None:
tmpBase += '_%d' % test.index
+ # Normalize slashes, if requested.
+ if normalize_slashes:
+ sourcepath = sourcepath.replace('\\', '/')
+ sourcedir = sourcedir.replace('\\', '/')
+ tmpBase = tmpBase.replace('\\', '/')
+
# We use #_MARKER_# to hide %% while we do the other substitutions.
substitutions = [('%%', '#_MARKER_#')]
substitutions.extend(test.config.substitutions)
substitutions.extend([('%s', sourcepath),
- ('%S', os.path.dirname(sourcepath)),
- ('%p', os.path.dirname(sourcepath)),
+ ('%S', sourcedir),
+ ('%p', sourcedir),
('%t', tmpBase + '.tmp'),
# FIXME: Remove this once we kill DejaGNU.
('%abs_tmp', tmpBase + '.tmp'),
@@ -462,7 +488,9 @@
if test.config.unsupported:
return (Test.UNSUPPORTED, 'Test is unsupported')
- res = parseIntegratedTestScript(test)
+ # Parse the test script, normalizing slashes in substitutions on Windows
+ # (since otherwise Tcl style lexing will treat them as escapes).
+ res = parseIntegratedTestScript(test, normalize_slashes=kIsWindows)
if len(res) == 2:
return res
More information about the llvm-branch-commits
mailing list