[llvm] r239657 - R600 -> AMDGPU rename

Tom Stellard thomas.stellard at amd.com
Fri Jun 12 20:28:16 PDT 2015


Author: tstellar
Date: Fri Jun 12 22:28:10 2015
New Revision: 239657

URL: http://llvm.org/viewvc/llvm-project?rev=239657&view=rev
Log:
R600 -> AMDGPU rename

Added:
    llvm/trunk/docs/AMDGPUUsage.rst
      - copied, changed from r239647, llvm/trunk/docs/R600Usage.rst
    llvm/trunk/lib/Target/AMDGPU/
    llvm/trunk/lib/Target/AMDGPU/AMDGPU.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPU.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPU.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPU.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUCallingConv.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstructions.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsics.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h
    llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDKernelCodeT.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AMDKernelCodeT.h
    llvm/trunk/lib/Target/AMDGPU/AsmParser/
    llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
    llvm/trunk/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
    llvm/trunk/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt
    llvm/trunk/lib/Target/AMDGPU/AsmParser/Makefile
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/AsmParser/Makefile
    llvm/trunk/lib/Target/AMDGPU/CIInstructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/CIInstructions.td
    llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/CMakeLists.txt
    llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/CaymanInstructions.td
    llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/EvergreenInstructions.td
    llvm/trunk/lib/Target/AMDGPU/InstPrinter/
    llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
    llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
    llvm/trunk/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
    llvm/trunk/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt
    llvm/trunk/lib/Target/AMDGPU/InstPrinter/Makefile
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/InstPrinter/Makefile
    llvm/trunk/lib/Target/AMDGPU/LLVMBuild.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/LLVMBuild.txt
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/Makefile
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
    llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
    llvm/trunk/lib/Target/AMDGPU/Makefile
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/Makefile
    llvm/trunk/lib/Target/AMDGPU/Processors.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/Processors.td
    llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp
    llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp
    llvm/trunk/lib/Target/AMDGPU/R600Defines.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600Defines.h
    llvm/trunk/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp
    llvm/trunk/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp
    llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600ISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600ISelLowering.h
    llvm/trunk/lib/Target/AMDGPU/R600InstrFormats.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600InstrFormats.td
    llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600InstrInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600InstrInfo.h
    llvm/trunk/lib/Target/AMDGPU/R600Instructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600Instructions.td
    llvm/trunk/lib/Target/AMDGPU/R600Intrinsics.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600Intrinsics.td
    llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h
    llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp
    llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600MachineScheduler.h
    llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp
    llvm/trunk/lib/Target/AMDGPU/R600Packetizer.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600Packetizer.cpp
    llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600RegisterInfo.h
    llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600RegisterInfo.td
    llvm/trunk/lib/Target/AMDGPU/R600Schedule.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600Schedule.td
    llvm/trunk/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
    llvm/trunk/lib/Target/AMDGPU/R700Instructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/R700Instructions.td
    llvm/trunk/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp
    llvm/trunk/lib/Target/AMDGPU/SIDefines.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIDefines.h
    llvm/trunk/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp
    llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp
    llvm/trunk/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp
    llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIFoldOperands.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIISelLowering.h
    llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIInsertWaits.cpp
    llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIInstrFormats.td
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIInstrInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIInstrInfo.h
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIInstrInfo.td
    llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SIIntrinsics.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIIntrinsics.td
    llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp
    llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp
    llvm/trunk/lib/Target/AMDGPU/SILowerI1Copies.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp
    llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h
    llvm/trunk/lib/Target/AMDGPU/SIPrepareScratchRegs.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIRegisterInfo.h
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIRegisterInfo.td
    llvm/trunk/lib/Target/AMDGPU/SISchedule.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SISchedule.td
    llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp
    llvm/trunk/lib/Target/AMDGPU/SITypeRewriter.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/SITypeRewriter.cpp
    llvm/trunk/lib/Target/AMDGPU/TargetInfo/
    llvm/trunk/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
    llvm/trunk/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt
    llvm/trunk/lib/Target/AMDGPU/TargetInfo/Makefile
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/TargetInfo/Makefile
    llvm/trunk/lib/Target/AMDGPU/VIInstrFormats.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/VIInstrFormats.td
    llvm/trunk/lib/Target/AMDGPU/VIInstructions.td
      - copied, changed from r239647, llvm/trunk/lib/Target/R600/VIInstructions.td
    llvm/trunk/test/CodeGen/AMDGPU/
    llvm/trunk/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll
    llvm/trunk/test/CodeGen/AMDGPU/README
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/README
    llvm/trunk/test/CodeGen/AMDGPU/add-debug.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/add-debug.ll
    llvm/trunk/test/CodeGen/AMDGPU/add.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/add.ll
    llvm/trunk/test/CodeGen/AMDGPU/add_i64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/add_i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/address-space.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/address-space.ll
    llvm/trunk/test/CodeGen/AMDGPU/and.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/and.ll
    llvm/trunk/test/CodeGen/AMDGPU/anyext.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/anyext.ll
    llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/array-ptr-calc-i64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/atomic_cmp_swap_local.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll
    llvm/trunk/test/CodeGen/AMDGPU/atomic_load_add.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/atomic_load_add.ll
    llvm/trunk/test/CodeGen/AMDGPU/atomic_load_sub.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll
    llvm/trunk/test/CodeGen/AMDGPU/basic-branch.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/basic-branch.ll
    llvm/trunk/test/CodeGen/AMDGPU/basic-loop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/basic-loop.ll
    llvm/trunk/test/CodeGen/AMDGPU/bfe_uint.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/bfe_uint.ll
    llvm/trunk/test/CodeGen/AMDGPU/bfi_int.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/bfi_int.ll
    llvm/trunk/test/CodeGen/AMDGPU/big_alu.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/big_alu.ll
    llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/bitcast.ll
    llvm/trunk/test/CodeGen/AMDGPU/bswap.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/bswap.ll
    llvm/trunk/test/CodeGen/AMDGPU/build_vector.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/build_vector.ll
    llvm/trunk/test/CodeGen/AMDGPU/call.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/call.ll
    llvm/trunk/test/CodeGen/AMDGPU/call_fs.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/call_fs.ll
    llvm/trunk/test/CodeGen/AMDGPU/cayman-loop-bug.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cayman-loop-bug.ll
    llvm/trunk/test/CodeGen/AMDGPU/cf-stack-bug.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cf-stack-bug.ll
    llvm/trunk/test/CodeGen/AMDGPU/cf_end.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cf_end.ll
    llvm/trunk/test/CodeGen/AMDGPU/cgp-addressing-modes.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cgp-addressing-modes.ll
    llvm/trunk/test/CodeGen/AMDGPU/coalescer_remat.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/coalescer_remat.ll
    llvm/trunk/test/CodeGen/AMDGPU/codegen-prepare-addrmode-sext.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
    llvm/trunk/test/CodeGen/AMDGPU/combine_vloads.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/combine_vloads.ll
    llvm/trunk/test/CodeGen/AMDGPU/commute-compares.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/commute-compares.ll
    llvm/trunk/test/CodeGen/AMDGPU/commute_modifiers.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/commute_modifiers.ll
    llvm/trunk/test/CodeGen/AMDGPU/complex-folding.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/complex-folding.ll
    llvm/trunk/test/CodeGen/AMDGPU/concat_vectors.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/concat_vectors.ll
    llvm/trunk/test/CodeGen/AMDGPU/copy-illegal-type.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll
    llvm/trunk/test/CodeGen/AMDGPU/copy-to-reg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/copy-to-reg.ll
    llvm/trunk/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll
    llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ctpop.ll
    llvm/trunk/test/CodeGen/AMDGPU/ctpop64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ctpop64.ll
    llvm/trunk/test/CodeGen/AMDGPU/cttz_zero_undef.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll
    llvm/trunk/test/CodeGen/AMDGPU/cvt_f32_ubyte.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll
    llvm/trunk/test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cvt_flr_i32_f32.ll
    llvm/trunk/test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/cvt_rpi_i32_f32.ll
    llvm/trunk/test/CodeGen/AMDGPU/dagcombiner-bug-illegal-vec4-int-to-fp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
    llvm/trunk/test/CodeGen/AMDGPU/debug.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/debug.ll
    llvm/trunk/test/CodeGen/AMDGPU/default-fp-mode.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/default-fp-mode.ll
    llvm/trunk/test/CodeGen/AMDGPU/disconnected-predset-break-bug.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/disconnected-predset-break-bug.ll
    llvm/trunk/test/CodeGen/AMDGPU/dot4-folding.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/dot4-folding.ll
    llvm/trunk/test/CodeGen/AMDGPU/ds-negative-offset-addressing-mode-loop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
    llvm/trunk/test/CodeGen/AMDGPU/ds_read2.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ds_read2.ll
    llvm/trunk/test/CodeGen/AMDGPU/ds_read2_offset_order.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll
    llvm/trunk/test/CodeGen/AMDGPU/ds_read2st64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ds_read2st64.ll
    llvm/trunk/test/CodeGen/AMDGPU/ds_write2.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ds_write2.ll
    llvm/trunk/test/CodeGen/AMDGPU/ds_write2st64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ds_write2st64.ll
    llvm/trunk/test/CodeGen/AMDGPU/elf.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/elf.ll
    llvm/trunk/test/CodeGen/AMDGPU/elf.r600.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/elf.r600.ll
    llvm/trunk/test/CodeGen/AMDGPU/empty-function.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/empty-function.ll
    llvm/trunk/test/CodeGen/AMDGPU/endcf-loop-header.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/endcf-loop-header.ll
    llvm/trunk/test/CodeGen/AMDGPU/extload-private.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/extload-private.ll
    llvm/trunk/test/CodeGen/AMDGPU/extload.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/extload.ll
    llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/extract_vector_elt_i16.ll
    llvm/trunk/test/CodeGen/AMDGPU/fabs.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fabs.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fabs.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fabs.ll
    llvm/trunk/test/CodeGen/AMDGPU/fadd.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fadd.ll
    llvm/trunk/test/CodeGen/AMDGPU/fadd64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fadd64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fceil.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fceil.ll
    llvm/trunk/test/CodeGen/AMDGPU/fceil64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fceil64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnd.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll
    llvm/trunk/test/CodeGen/AMDGPU/fcmp-cnde-int-args.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll
    llvm/trunk/test/CodeGen/AMDGPU/fcmp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fcmp.ll
    llvm/trunk/test/CodeGen/AMDGPU/fcmp64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fcmp64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fconst64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fconst64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fcopysign.f32.ll
    llvm/trunk/test/CodeGen/AMDGPU/fcopysign.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fcopysign.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fdiv.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fdiv.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fdiv.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fdiv.ll
    llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r600.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll
    llvm/trunk/test/CodeGen/AMDGPU/fetch-limits.r700+.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll
    llvm/trunk/test/CodeGen/AMDGPU/ffloor.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ffloor.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/ffloor.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ffloor.ll
    llvm/trunk/test/CodeGen/AMDGPU/flat-address-space.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/flat-address-space.ll
    llvm/trunk/test/CodeGen/AMDGPU/floor.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/floor.ll
    llvm/trunk/test/CodeGen/AMDGPU/fma-combine.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fma-combine.ll
    llvm/trunk/test/CodeGen/AMDGPU/fma.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fma.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fma.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fma.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmad.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmad.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmax.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmax.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmax3.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmax3.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmax3.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmax3.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmax_legacy.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmax_legacy.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmaxnum.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmaxnum.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmaxnum.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmin.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmin.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmin3.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmin3.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmin_legacy.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmin_legacy.ll
    llvm/trunk/test/CodeGen/AMDGPU/fminnum.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fminnum.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fminnum.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fminnum.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmul.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmul.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmul64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmul64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fmuladd.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fmuladd.ll
    llvm/trunk/test/CodeGen/AMDGPU/fnearbyint.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fnearbyint.ll
    llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fneg-fabs.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fneg-fabs.ll
    llvm/trunk/test/CodeGen/AMDGPU/fneg.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fneg.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fneg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fneg.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp-classify.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp-classify.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp16_to_fp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp32_to_fp16.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp_to_sint.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp_to_sint.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/fp_to_uint.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fp_to_uint.ll
    llvm/trunk/test/CodeGen/AMDGPU/fpext.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fpext.ll
    llvm/trunk/test/CodeGen/AMDGPU/fptrunc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fptrunc.ll
    llvm/trunk/test/CodeGen/AMDGPU/frem.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/frem.ll
    llvm/trunk/test/CodeGen/AMDGPU/fsqrt.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fsqrt.ll
    llvm/trunk/test/CodeGen/AMDGPU/fsub.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fsub.ll
    llvm/trunk/test/CodeGen/AMDGPU/fsub64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/fsub64.ll
    llvm/trunk/test/CodeGen/AMDGPU/ftrunc.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/ftrunc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ftrunc.ll
    llvm/trunk/test/CodeGen/AMDGPU/gep-address-space.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/gep-address-space.ll
    llvm/trunk/test/CodeGen/AMDGPU/global-directive.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global-directive.ll
    llvm/trunk/test/CodeGen/AMDGPU/global-extload-i1.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/global-extload-i16.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i16.ll
    llvm/trunk/test/CodeGen/AMDGPU/global-extload-i32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/global-extload-i8.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global-extload-i8.ll
    llvm/trunk/test/CodeGen/AMDGPU/global-zero-initializer.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll
    llvm/trunk/test/CodeGen/AMDGPU/global_atomics.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/global_atomics.ll
    llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace-fail.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll
    llvm/trunk/test/CodeGen/AMDGPU/gv-const-addrspace.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll
    llvm/trunk/test/CodeGen/AMDGPU/half.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/half.ll
    llvm/trunk/test/CodeGen/AMDGPU/hsa.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/hsa.ll
    llvm/trunk/test/CodeGen/AMDGPU/i1-copy-implicit-def.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/i1-copy-implicit-def.ll
    llvm/trunk/test/CodeGen/AMDGPU/i1-copy-phi.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/i1-copy-phi.ll
    llvm/trunk/test/CodeGen/AMDGPU/i8-to-double-to-float.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll
    llvm/trunk/test/CodeGen/AMDGPU/icmp-select-sete-reverse-args.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
    llvm/trunk/test/CodeGen/AMDGPU/icmp64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/icmp64.ll
    llvm/trunk/test/CodeGen/AMDGPU/imm.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/imm.ll
    llvm/trunk/test/CodeGen/AMDGPU/indirect-addressing-si.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/indirect-addressing-si.ll
    llvm/trunk/test/CodeGen/AMDGPU/indirect-private-64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/indirect-private-64.ll
    llvm/trunk/test/CodeGen/AMDGPU/infinite-loop-evergreen.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/infinite-loop-evergreen.ll
    llvm/trunk/test/CodeGen/AMDGPU/infinite-loop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/infinite-loop.ll
    llvm/trunk/test/CodeGen/AMDGPU/inline-asm.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/inline-asm.ll
    llvm/trunk/test/CodeGen/AMDGPU/inline-calls.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/inline-calls.ll
    llvm/trunk/test/CodeGen/AMDGPU/input-mods.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/input-mods.ll
    llvm/trunk/test/CodeGen/AMDGPU/insert_subreg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/insert_subreg.ll
    llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll
    llvm/trunk/test/CodeGen/AMDGPU/jump-address.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/jump-address.ll
    llvm/trunk/test/CodeGen/AMDGPU/kcache-fold.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/kcache-fold.ll
    llvm/trunk/test/CodeGen/AMDGPU/kernel-args.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/kernel-args.ll
    llvm/trunk/test/CodeGen/AMDGPU/large-alloca.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/large-alloca.ll
    llvm/trunk/test/CodeGen/AMDGPU/large-constant-initializer.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll
    llvm/trunk/test/CodeGen/AMDGPU/lds-initializer.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lds-initializer.ll
    llvm/trunk/test/CodeGen/AMDGPU/lds-oqap-crash.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll
    llvm/trunk/test/CodeGen/AMDGPU/lds-output-queue.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lds-output-queue.ll
    llvm/trunk/test/CodeGen/AMDGPU/lds-size.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lds-size.ll
    llvm/trunk/test/CodeGen/AMDGPU/lds-zero-initializer.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll
    llvm/trunk/test/CodeGen/AMDGPU/legalizedag-bug-expand-setcc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll
    llvm/trunk/test/CodeGen/AMDGPU/lit.local.cfg
    llvm/trunk/test/CodeGen/AMDGPU/literals.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/literals.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.abs.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.global.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.barrier.local.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.i32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfe.u32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfi.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.bfm.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.brev.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cube.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.cvt_f32_ubyte.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fixup.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_fmas.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.flbit.i32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.flbit.i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imad24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imax.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imin.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.imul24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.kill.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.ldexp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.legacy.rsq.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.mul.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.mul.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rcp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.clamped.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.rsq.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.tex.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trig_preop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.trunc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umad24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umax.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umin.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.AMDGPU.umul24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.fs.interp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.gather4.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.gather4.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.getlod.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.getlod.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.image.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.image.sample.o.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.o.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.imageload.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.resinfo.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.resinfo.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample-masked.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sample-masked.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sample.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sample.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sampled.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sampled.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg-m0.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.sendmsg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.SI.tid.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.SI.tid.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.dp4.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.kilp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.amdgpu.kilp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgpu.lrp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.amdgpu.lrp.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.cos.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.cos.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.exp2.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.exp2.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.log2.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.log2.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.memcpy.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.pow.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.pow.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.rint.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.rint.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.rint.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.round.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.round.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.round.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.sin.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.sin.ll
    llvm/trunk/test/CodeGen/AMDGPU/llvm.sqrt.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/llvm.sqrt.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-i1.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/load-i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-input-fold.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/load-input-fold.ll
    llvm/trunk/test/CodeGen/AMDGPU/load.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/load.ll
    llvm/trunk/test/CodeGen/AMDGPU/load.vec.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/load.vec.ll
    llvm/trunk/test/CodeGen/AMDGPU/load64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/load64.ll
    llvm/trunk/test/CodeGen/AMDGPU/local-64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/local-64.ll
    llvm/trunk/test/CodeGen/AMDGPU/local-atomics.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/local-atomics.ll
    llvm/trunk/test/CodeGen/AMDGPU/local-atomics64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/local-atomics64.ll
    llvm/trunk/test/CodeGen/AMDGPU/local-memory-two-objects.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll
    llvm/trunk/test/CodeGen/AMDGPU/local-memory.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/local-memory.ll
    llvm/trunk/test/CodeGen/AMDGPU/loop-address.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/loop-address.ll
    llvm/trunk/test/CodeGen/AMDGPU/loop-idiom.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/loop-idiom.ll
    llvm/trunk/test/CodeGen/AMDGPU/lshl.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lshl.ll
    llvm/trunk/test/CodeGen/AMDGPU/lshr.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/lshr.ll
    llvm/trunk/test/CodeGen/AMDGPU/m0-spill.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/m0-spill.ll
    llvm/trunk/test/CodeGen/AMDGPU/mad-combine.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mad-combine.ll
    llvm/trunk/test/CodeGen/AMDGPU/mad-sub.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mad-sub.ll
    llvm/trunk/test/CodeGen/AMDGPU/mad_int24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mad_int24.ll
    llvm/trunk/test/CodeGen/AMDGPU/mad_uint24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mad_uint24.ll
    llvm/trunk/test/CodeGen/AMDGPU/madak.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/madak.ll
    llvm/trunk/test/CodeGen/AMDGPU/madmk.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/madmk.ll
    llvm/trunk/test/CodeGen/AMDGPU/max-literals.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/max-literals.ll
    llvm/trunk/test/CodeGen/AMDGPU/max.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/max.ll
    llvm/trunk/test/CodeGen/AMDGPU/max3.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/max3.ll
    llvm/trunk/test/CodeGen/AMDGPU/merge-stores.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/merge-stores.ll
    llvm/trunk/test/CodeGen/AMDGPU/min.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/min.ll
    llvm/trunk/test/CodeGen/AMDGPU/min3.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/min3.ll
    llvm/trunk/test/CodeGen/AMDGPU/missing-store.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/missing-store.ll
    llvm/trunk/test/CodeGen/AMDGPU/mubuf.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mubuf.ll
    llvm/trunk/test/CodeGen/AMDGPU/mul.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mul.ll
    llvm/trunk/test/CodeGen/AMDGPU/mul_int24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mul_int24.ll
    llvm/trunk/test/CodeGen/AMDGPU/mul_uint24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mul_uint24.ll
    llvm/trunk/test/CodeGen/AMDGPU/mulhu.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/mulhu.ll
    llvm/trunk/test/CodeGen/AMDGPU/no-initializer-constant-addrspace.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll
    llvm/trunk/test/CodeGen/AMDGPU/no-shrink-extloads.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll
    llvm/trunk/test/CodeGen/AMDGPU/operand-folding.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/operand-folding.ll
    llvm/trunk/test/CodeGen/AMDGPU/operand-spacing.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/operand-spacing.ll
    llvm/trunk/test/CodeGen/AMDGPU/or.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/or.ll
    llvm/trunk/test/CodeGen/AMDGPU/packetizer.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/packetizer.ll
    llvm/trunk/test/CodeGen/AMDGPU/parallelandifcollapse.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll
    llvm/trunk/test/CodeGen/AMDGPU/parallelorifcollapse.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll
    llvm/trunk/test/CodeGen/AMDGPU/predicate-dp4.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/predicate-dp4.ll
    llvm/trunk/test/CodeGen/AMDGPU/predicates.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/predicates.ll
    llvm/trunk/test/CodeGen/AMDGPU/private-memory-atomics.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/private-memory-atomics.ll
    llvm/trunk/test/CodeGen/AMDGPU/private-memory-broken.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/private-memory-broken.ll
    llvm/trunk/test/CodeGen/AMDGPU/private-memory.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/private-memory.ll
    llvm/trunk/test/CodeGen/AMDGPU/pv-packing.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/pv-packing.ll
    llvm/trunk/test/CodeGen/AMDGPU/pv.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/pv.ll
    llvm/trunk/test/CodeGen/AMDGPU/r600-encoding.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/r600-encoding.ll
    llvm/trunk/test/CodeGen/AMDGPU/r600-export-fix.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/r600-export-fix.ll
    llvm/trunk/test/CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll
    llvm/trunk/test/CodeGen/AMDGPU/r600cfg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/r600cfg.ll
    llvm/trunk/test/CodeGen/AMDGPU/reciprocal.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/reciprocal.ll
    llvm/trunk/test/CodeGen/AMDGPU/register-count-comments.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/register-count-comments.ll
    llvm/trunk/test/CodeGen/AMDGPU/reorder-stores.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/reorder-stores.ll
    llvm/trunk/test/CodeGen/AMDGPU/rotl.i64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/rotl.i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/rotl.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/rotl.ll
    llvm/trunk/test/CodeGen/AMDGPU/rotr.i64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/rotr.i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/rotr.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/rotr.ll
    llvm/trunk/test/CodeGen/AMDGPU/rsq.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/rsq.ll
    llvm/trunk/test/CodeGen/AMDGPU/rv7x0_count3.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/rv7x0_count3.ll
    llvm/trunk/test/CodeGen/AMDGPU/s_movk_i32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/s_movk_i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/saddo.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/saddo.ll
    llvm/trunk/test/CodeGen/AMDGPU/salu-to-valu.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/salu-to-valu.ll
    llvm/trunk/test/CodeGen/AMDGPU/scalar_to_vector.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested-if.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested-if.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-fs-loop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-global-loads.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-if-2.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-if-2.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-if.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-if.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-kernel-arg-loads.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop-failure.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-vs-if-nested-loop.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll
    llvm/trunk/test/CodeGen/AMDGPU/scratch-buffer.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/scratch-buffer.ll
    llvm/trunk/test/CodeGen/AMDGPU/sdiv.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sdiv.ll
    llvm/trunk/test/CodeGen/AMDGPU/sdivrem24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sdivrem24.ll
    llvm/trunk/test/CodeGen/AMDGPU/sdivrem64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sdivrem64.ll
    llvm/trunk/test/CodeGen/AMDGPU/select-i1.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/select-i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/select-vectors.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/select-vectors.ll
    llvm/trunk/test/CodeGen/AMDGPU/select.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/select.ll
    llvm/trunk/test/CodeGen/AMDGPU/select64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/select64.ll
    llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnd.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll
    llvm/trunk/test/CodeGen/AMDGPU/selectcc-cnde-int.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll
    llvm/trunk/test/CodeGen/AMDGPU/selectcc-icmp-select-float.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll
    llvm/trunk/test/CodeGen/AMDGPU/selectcc-opt.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/selectcc-opt.ll
    llvm/trunk/test/CodeGen/AMDGPU/selectcc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/selectcc.ll
    llvm/trunk/test/CodeGen/AMDGPU/set-dx10.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/set-dx10.ll
    llvm/trunk/test/CodeGen/AMDGPU/setcc-equivalent.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/setcc-equivalent.ll
    llvm/trunk/test/CodeGen/AMDGPU/setcc-opt.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/setcc-opt.ll
    llvm/trunk/test/CodeGen/AMDGPU/setcc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/setcc.ll
    llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/setcc64.ll
    llvm/trunk/test/CodeGen/AMDGPU/seto.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/seto.ll
    llvm/trunk/test/CodeGen/AMDGPU/setuo.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/setuo.ll
    llvm/trunk/test/CodeGen/AMDGPU/sext-eliminate.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sext-eliminate.ll
    llvm/trunk/test/CodeGen/AMDGPU/sext-in-reg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sext-in-reg.ll
    llvm/trunk/test/CodeGen/AMDGPU/sgpr-control-flow.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll
    llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy-duplicate-operand.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
    llvm/trunk/test/CodeGen/AMDGPU/sgpr-copy.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sgpr-copy.ll
    llvm/trunk/test/CodeGen/AMDGPU/shared-op-cycle.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/shared-op-cycle.ll
    llvm/trunk/test/CodeGen/AMDGPU/shl.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/shl.ll
    llvm/trunk/test/CodeGen/AMDGPU/shl_add_constant.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/shl_add_constant.ll
    llvm/trunk/test/CodeGen/AMDGPU/shl_add_ptr.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf-assertion.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-annotate-cf-assertion.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-annotate-cf.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-annotate-cf.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-lod-bias.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-lod-bias.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-sgpr-spill.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-spill-cf.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-spill-cf.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-triv-disjoint-mem-access.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
    llvm/trunk/test/CodeGen/AMDGPU/si-vector-hang.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/si-vector-hang.ll
    llvm/trunk/test/CodeGen/AMDGPU/sign_extend.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sign_extend.ll
    llvm/trunk/test/CodeGen/AMDGPU/simplify-demanded-bits-build-pair.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
    llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/sint_to_fp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sint_to_fp.ll
    llvm/trunk/test/CodeGen/AMDGPU/smrd.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/smrd.ll
    llvm/trunk/test/CodeGen/AMDGPU/split-scalar-i64-add.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll
    llvm/trunk/test/CodeGen/AMDGPU/sra.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sra.ll
    llvm/trunk/test/CodeGen/AMDGPU/srem.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/srem.ll
    llvm/trunk/test/CodeGen/AMDGPU/srl.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/srl.ll
    llvm/trunk/test/CodeGen/AMDGPU/ssubo.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/ssubo.ll
    llvm/trunk/test/CodeGen/AMDGPU/store-barrier.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/store-barrier.ll
    llvm/trunk/test/CodeGen/AMDGPU/store-v3i32.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/store-v3i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/store-v3i64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/store-v3i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/store-vector-ptrs.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/store-vector-ptrs.ll
    llvm/trunk/test/CodeGen/AMDGPU/store.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/store.ll
    llvm/trunk/test/CodeGen/AMDGPU/store.r600.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/store.r600.ll
    llvm/trunk/test/CodeGen/AMDGPU/structurize.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/structurize.ll
    llvm/trunk/test/CodeGen/AMDGPU/structurize1.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/structurize1.ll
    llvm/trunk/test/CodeGen/AMDGPU/sub.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/sub.ll
    llvm/trunk/test/CodeGen/AMDGPU/subreg-coalescer-crash.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/subreg-coalescer-crash.ll
    llvm/trunk/test/CodeGen/AMDGPU/subreg-eliminate-dead.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/subreg-eliminate-dead.ll
    llvm/trunk/test/CodeGen/AMDGPU/swizzle-export.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/swizzle-export.ll
    llvm/trunk/test/CodeGen/AMDGPU/tex-clause-antidep.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/tex-clause-antidep.ll
    llvm/trunk/test/CodeGen/AMDGPU/texture-input-merge.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/texture-input-merge.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc-cmp-constant.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc-store-f64-to-f16.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/trunc-store-f64-to-f16.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc-store-i1.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/trunc-store-i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc-vector-store-assertion-failure.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/trunc-vector-store-assertion-failure.ll
    llvm/trunk/test/CodeGen/AMDGPU/trunc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/trunc.ll
    llvm/trunk/test/CodeGen/AMDGPU/tti-unroll-prefs.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/tti-unroll-prefs.ll
    llvm/trunk/test/CodeGen/AMDGPU/uaddo.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/uaddo.ll
    llvm/trunk/test/CodeGen/AMDGPU/udiv.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/udiv.ll
    llvm/trunk/test/CodeGen/AMDGPU/udivrem.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/udivrem.ll
    llvm/trunk/test/CodeGen/AMDGPU/udivrem24.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/udivrem24.ll
    llvm/trunk/test/CodeGen/AMDGPU/udivrem64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/udivrem64.ll
    llvm/trunk/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/uint_to_fp.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/uint_to_fp.ll
    llvm/trunk/test/CodeGen/AMDGPU/unaligned-load-store.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll
    llvm/trunk/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
    llvm/trunk/test/CodeGen/AMDGPU/unroll.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/unroll.ll
    llvm/trunk/test/CodeGen/AMDGPU/unsupported-cc.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/unsupported-cc.ll
    llvm/trunk/test/CodeGen/AMDGPU/urecip.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/urecip.ll
    llvm/trunk/test/CodeGen/AMDGPU/urem.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/urem.ll
    llvm/trunk/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/use-sgpr-multiple-times.ll
    llvm/trunk/test/CodeGen/AMDGPU/usubo.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/usubo.ll
    llvm/trunk/test/CodeGen/AMDGPU/v1i64-kernel-arg.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/v1i64-kernel-arg.ll
    llvm/trunk/test/CodeGen/AMDGPU/v_cndmask.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/v_cndmask.ll
    llvm/trunk/test/CodeGen/AMDGPU/valu-i1.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/valu-i1.ll
    llvm/trunk/test/CodeGen/AMDGPU/vector-alloca.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vector-alloca.ll
    llvm/trunk/test/CodeGen/AMDGPU/vertex-fetch-encoding.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll
    llvm/trunk/test/CodeGen/AMDGPU/vop-shrink.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vop-shrink.ll
    llvm/trunk/test/CodeGen/AMDGPU/vselect.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vselect.ll
    llvm/trunk/test/CodeGen/AMDGPU/vselect64.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vselect64.ll
    llvm/trunk/test/CodeGen/AMDGPU/vtx-fetch-branch.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll
    llvm/trunk/test/CodeGen/AMDGPU/vtx-schedule.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/vtx-schedule.ll
    llvm/trunk/test/CodeGen/AMDGPU/wait.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/wait.ll
    llvm/trunk/test/CodeGen/AMDGPU/work-item-intrinsics.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/work-item-intrinsics.ll
    llvm/trunk/test/CodeGen/AMDGPU/wrong-transalu-pos-fix.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/wrong-transalu-pos-fix.ll
    llvm/trunk/test/CodeGen/AMDGPU/xor.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/xor.ll
    llvm/trunk/test/CodeGen/AMDGPU/zero_extend.ll
      - copied, changed from r239647, llvm/trunk/test/CodeGen/R600/zero_extend.ll
    llvm/trunk/test/MC/AMDGPU/
    llvm/trunk/test/MC/AMDGPU/ds-err.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/ds-err.s
    llvm/trunk/test/MC/AMDGPU/ds.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/ds.s
    llvm/trunk/test/MC/AMDGPU/flat.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/flat.s
    llvm/trunk/test/MC/AMDGPU/lit.local.cfg
    llvm/trunk/test/MC/AMDGPU/mubuf.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/mubuf.s
    llvm/trunk/test/MC/AMDGPU/smrd.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/smrd.s
    llvm/trunk/test/MC/AMDGPU/sop1-err.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/sop1-err.s
    llvm/trunk/test/MC/AMDGPU/sop1.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/sop1.s
    llvm/trunk/test/MC/AMDGPU/sop2.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/sop2.s
    llvm/trunk/test/MC/AMDGPU/sopc.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/sopc.s
    llvm/trunk/test/MC/AMDGPU/sopk.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/sopk.s
    llvm/trunk/test/MC/AMDGPU/sopp.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/sopp.s
    llvm/trunk/test/MC/AMDGPU/vop1.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/vop1.s
    llvm/trunk/test/MC/AMDGPU/vop2-err.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/vop2-err.s
    llvm/trunk/test/MC/AMDGPU/vop2.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/vop2.s
    llvm/trunk/test/MC/AMDGPU/vop3-errs.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/vop3-errs.s
    llvm/trunk/test/MC/AMDGPU/vop3.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/vop3.s
    llvm/trunk/test/MC/AMDGPU/vopc.s
      - copied, changed from r239647, llvm/trunk/test/MC/R600/vopc.s
Removed:
    llvm/trunk/docs/R600Usage.rst
    llvm/trunk/lib/Target/R600/AMDGPU.h
    llvm/trunk/lib/Target/R600/AMDGPU.td
    llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp
    llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp
    llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h
    llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td
    llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp
    llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h
    llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
    llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
    llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
    llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
    llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h
    llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td
    llvm/trunk/lib/Target/R600/AMDGPUInstructions.td
    llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp
    llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h
    llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td
    llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp
    llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h
    llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp
    llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h
    llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp
    llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp
    llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h
    llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td
    llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp
    llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h
    llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp
    llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h
    llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
    llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h
    llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp
    llvm/trunk/lib/Target/R600/AMDKernelCodeT.h
    llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
    llvm/trunk/lib/Target/R600/AsmParser/CMakeLists.txt
    llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt
    llvm/trunk/lib/Target/R600/AsmParser/Makefile
    llvm/trunk/lib/Target/R600/CIInstructions.td
    llvm/trunk/lib/Target/R600/CMakeLists.txt
    llvm/trunk/lib/Target/R600/CaymanInstructions.td
    llvm/trunk/lib/Target/R600/EvergreenInstructions.td
    llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
    llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
    llvm/trunk/lib/Target/R600/InstPrinter/CMakeLists.txt
    llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt
    llvm/trunk/lib/Target/R600/InstPrinter/Makefile
    llvm/trunk/lib/Target/R600/LLVMBuild.txt
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
    llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
    llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt
    llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt
    llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile
    llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
    llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
    llvm/trunk/lib/Target/R600/Makefile
    llvm/trunk/lib/Target/R600/Processors.td
    llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp
    llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp
    llvm/trunk/lib/Target/R600/R600Defines.h
    llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp
    llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp
    llvm/trunk/lib/Target/R600/R600ISelLowering.cpp
    llvm/trunk/lib/Target/R600/R600ISelLowering.h
    llvm/trunk/lib/Target/R600/R600InstrFormats.td
    llvm/trunk/lib/Target/R600/R600InstrInfo.cpp
    llvm/trunk/lib/Target/R600/R600InstrInfo.h
    llvm/trunk/lib/Target/R600/R600Instructions.td
    llvm/trunk/lib/Target/R600/R600Intrinsics.td
    llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp
    llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h
    llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp
    llvm/trunk/lib/Target/R600/R600MachineScheduler.h
    llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp
    llvm/trunk/lib/Target/R600/R600Packetizer.cpp
    llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp
    llvm/trunk/lib/Target/R600/R600RegisterInfo.h
    llvm/trunk/lib/Target/R600/R600RegisterInfo.td
    llvm/trunk/lib/Target/R600/R600Schedule.td
    llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp
    llvm/trunk/lib/Target/R600/R700Instructions.td
    llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp
    llvm/trunk/lib/Target/R600/SIDefines.h
    llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp
    llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp
    llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp
    llvm/trunk/lib/Target/R600/SIFoldOperands.cpp
    llvm/trunk/lib/Target/R600/SIISelLowering.cpp
    llvm/trunk/lib/Target/R600/SIISelLowering.h
    llvm/trunk/lib/Target/R600/SIInsertWaits.cpp
    llvm/trunk/lib/Target/R600/SIInstrFormats.td
    llvm/trunk/lib/Target/R600/SIInstrInfo.cpp
    llvm/trunk/lib/Target/R600/SIInstrInfo.h
    llvm/trunk/lib/Target/R600/SIInstrInfo.td
    llvm/trunk/lib/Target/R600/SIInstructions.td
    llvm/trunk/lib/Target/R600/SIIntrinsics.td
    llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp
    llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp
    llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp
    llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp
    llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h
    llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp
    llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp
    llvm/trunk/lib/Target/R600/SIRegisterInfo.h
    llvm/trunk/lib/Target/R600/SIRegisterInfo.td
    llvm/trunk/lib/Target/R600/SISchedule.td
    llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp
    llvm/trunk/lib/Target/R600/SITypeRewriter.cpp
    llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp
    llvm/trunk/lib/Target/R600/TargetInfo/CMakeLists.txt
    llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt
    llvm/trunk/lib/Target/R600/TargetInfo/Makefile
    llvm/trunk/lib/Target/R600/VIInstrFormats.td
    llvm/trunk/lib/Target/R600/VIInstructions.td
    llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll
    llvm/trunk/test/CodeGen/R600/README
    llvm/trunk/test/CodeGen/R600/add-debug.ll
    llvm/trunk/test/CodeGen/R600/add.ll
    llvm/trunk/test/CodeGen/R600/add_i64.ll
    llvm/trunk/test/CodeGen/R600/address-space.ll
    llvm/trunk/test/CodeGen/R600/and.ll
    llvm/trunk/test/CodeGen/R600/anyext.ll
    llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll
    llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll
    llvm/trunk/test/CodeGen/R600/atomic_cmp_swap_local.ll
    llvm/trunk/test/CodeGen/R600/atomic_load_add.ll
    llvm/trunk/test/CodeGen/R600/atomic_load_sub.ll
    llvm/trunk/test/CodeGen/R600/basic-branch.ll
    llvm/trunk/test/CodeGen/R600/basic-loop.ll
    llvm/trunk/test/CodeGen/R600/bfe_uint.ll
    llvm/trunk/test/CodeGen/R600/bfi_int.ll
    llvm/trunk/test/CodeGen/R600/big_alu.ll
    llvm/trunk/test/CodeGen/R600/bitcast.ll
    llvm/trunk/test/CodeGen/R600/bswap.ll
    llvm/trunk/test/CodeGen/R600/build_vector.ll
    llvm/trunk/test/CodeGen/R600/call.ll
    llvm/trunk/test/CodeGen/R600/call_fs.ll
    llvm/trunk/test/CodeGen/R600/cayman-loop-bug.ll
    llvm/trunk/test/CodeGen/R600/cf-stack-bug.ll
    llvm/trunk/test/CodeGen/R600/cf_end.ll
    llvm/trunk/test/CodeGen/R600/cgp-addressing-modes.ll
    llvm/trunk/test/CodeGen/R600/coalescer_remat.ll
    llvm/trunk/test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
    llvm/trunk/test/CodeGen/R600/combine_vloads.ll
    llvm/trunk/test/CodeGen/R600/commute-compares.ll
    llvm/trunk/test/CodeGen/R600/commute_modifiers.ll
    llvm/trunk/test/CodeGen/R600/complex-folding.ll
    llvm/trunk/test/CodeGen/R600/concat_vectors.ll
    llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll
    llvm/trunk/test/CodeGen/R600/copy-to-reg.ll
    llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll
    llvm/trunk/test/CodeGen/R600/ctpop.ll
    llvm/trunk/test/CodeGen/R600/ctpop64.ll
    llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll
    llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll
    llvm/trunk/test/CodeGen/R600/cvt_flr_i32_f32.ll
    llvm/trunk/test/CodeGen/R600/cvt_rpi_i32_f32.ll
    llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
    llvm/trunk/test/CodeGen/R600/debug.ll
    llvm/trunk/test/CodeGen/R600/default-fp-mode.ll
    llvm/trunk/test/CodeGen/R600/disconnected-predset-break-bug.ll
    llvm/trunk/test/CodeGen/R600/dot4-folding.ll
    llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
    llvm/trunk/test/CodeGen/R600/ds_read2.ll
    llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll
    llvm/trunk/test/CodeGen/R600/ds_read2st64.ll
    llvm/trunk/test/CodeGen/R600/ds_write2.ll
    llvm/trunk/test/CodeGen/R600/ds_write2st64.ll
    llvm/trunk/test/CodeGen/R600/elf.ll
    llvm/trunk/test/CodeGen/R600/elf.r600.ll
    llvm/trunk/test/CodeGen/R600/empty-function.ll
    llvm/trunk/test/CodeGen/R600/endcf-loop-header.ll
    llvm/trunk/test/CodeGen/R600/extload-private.ll
    llvm/trunk/test/CodeGen/R600/extload.ll
    llvm/trunk/test/CodeGen/R600/extract_vector_elt_i16.ll
    llvm/trunk/test/CodeGen/R600/fabs.f64.ll
    llvm/trunk/test/CodeGen/R600/fabs.ll
    llvm/trunk/test/CodeGen/R600/fadd.ll
    llvm/trunk/test/CodeGen/R600/fadd64.ll
    llvm/trunk/test/CodeGen/R600/fceil.ll
    llvm/trunk/test/CodeGen/R600/fceil64.ll
    llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll
    llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll
    llvm/trunk/test/CodeGen/R600/fcmp.ll
    llvm/trunk/test/CodeGen/R600/fcmp64.ll
    llvm/trunk/test/CodeGen/R600/fconst64.ll
    llvm/trunk/test/CodeGen/R600/fcopysign.f32.ll
    llvm/trunk/test/CodeGen/R600/fcopysign.f64.ll
    llvm/trunk/test/CodeGen/R600/fdiv.f64.ll
    llvm/trunk/test/CodeGen/R600/fdiv.ll
    llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll
    llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll
    llvm/trunk/test/CodeGen/R600/ffloor.f64.ll
    llvm/trunk/test/CodeGen/R600/ffloor.ll
    llvm/trunk/test/CodeGen/R600/flat-address-space.ll
    llvm/trunk/test/CodeGen/R600/floor.ll
    llvm/trunk/test/CodeGen/R600/fma-combine.ll
    llvm/trunk/test/CodeGen/R600/fma.f64.ll
    llvm/trunk/test/CodeGen/R600/fma.ll
    llvm/trunk/test/CodeGen/R600/fmad.ll
    llvm/trunk/test/CodeGen/R600/fmax.ll
    llvm/trunk/test/CodeGen/R600/fmax3.f64.ll
    llvm/trunk/test/CodeGen/R600/fmax3.ll
    llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll
    llvm/trunk/test/CodeGen/R600/fmax_legacy.ll
    llvm/trunk/test/CodeGen/R600/fmaxnum.f64.ll
    llvm/trunk/test/CodeGen/R600/fmaxnum.ll
    llvm/trunk/test/CodeGen/R600/fmin.ll
    llvm/trunk/test/CodeGen/R600/fmin3.ll
    llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll
    llvm/trunk/test/CodeGen/R600/fmin_legacy.ll
    llvm/trunk/test/CodeGen/R600/fminnum.f64.ll
    llvm/trunk/test/CodeGen/R600/fminnum.ll
    llvm/trunk/test/CodeGen/R600/fmul.ll
    llvm/trunk/test/CodeGen/R600/fmul64.ll
    llvm/trunk/test/CodeGen/R600/fmuladd.ll
    llvm/trunk/test/CodeGen/R600/fnearbyint.ll
    llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll
    llvm/trunk/test/CodeGen/R600/fneg-fabs.ll
    llvm/trunk/test/CodeGen/R600/fneg.f64.ll
    llvm/trunk/test/CodeGen/R600/fneg.ll
    llvm/trunk/test/CodeGen/R600/fp-classify.ll
    llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll
    llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll
    llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll
    llvm/trunk/test/CodeGen/R600/fp_to_sint.ll
    llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll
    llvm/trunk/test/CodeGen/R600/fp_to_uint.ll
    llvm/trunk/test/CodeGen/R600/fpext.ll
    llvm/trunk/test/CodeGen/R600/fptrunc.ll
    llvm/trunk/test/CodeGen/R600/frem.ll
    llvm/trunk/test/CodeGen/R600/fsqrt.ll
    llvm/trunk/test/CodeGen/R600/fsub.ll
    llvm/trunk/test/CodeGen/R600/fsub64.ll
    llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll
    llvm/trunk/test/CodeGen/R600/ftrunc.ll
    llvm/trunk/test/CodeGen/R600/gep-address-space.ll
    llvm/trunk/test/CodeGen/R600/global-directive.ll
    llvm/trunk/test/CodeGen/R600/global-extload-i1.ll
    llvm/trunk/test/CodeGen/R600/global-extload-i16.ll
    llvm/trunk/test/CodeGen/R600/global-extload-i32.ll
    llvm/trunk/test/CodeGen/R600/global-extload-i8.ll
    llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll
    llvm/trunk/test/CodeGen/R600/global_atomics.ll
    llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll
    llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll
    llvm/trunk/test/CodeGen/R600/half.ll
    llvm/trunk/test/CodeGen/R600/hsa.ll
    llvm/trunk/test/CodeGen/R600/i1-copy-implicit-def.ll
    llvm/trunk/test/CodeGen/R600/i1-copy-phi.ll
    llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll
    llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
    llvm/trunk/test/CodeGen/R600/icmp64.ll
    llvm/trunk/test/CodeGen/R600/imm.ll
    llvm/trunk/test/CodeGen/R600/indirect-addressing-si.ll
    llvm/trunk/test/CodeGen/R600/indirect-private-64.ll
    llvm/trunk/test/CodeGen/R600/infinite-loop-evergreen.ll
    llvm/trunk/test/CodeGen/R600/infinite-loop.ll
    llvm/trunk/test/CodeGen/R600/inline-asm.ll
    llvm/trunk/test/CodeGen/R600/inline-calls.ll
    llvm/trunk/test/CodeGen/R600/input-mods.ll
    llvm/trunk/test/CodeGen/R600/insert_subreg.ll
    llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll
    llvm/trunk/test/CodeGen/R600/jump-address.ll
    llvm/trunk/test/CodeGen/R600/kcache-fold.ll
    llvm/trunk/test/CodeGen/R600/kernel-args.ll
    llvm/trunk/test/CodeGen/R600/large-alloca.ll
    llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll
    llvm/trunk/test/CodeGen/R600/lds-initializer.ll
    llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll
    llvm/trunk/test/CodeGen/R600/lds-output-queue.ll
    llvm/trunk/test/CodeGen/R600/lds-size.ll
    llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll
    llvm/trunk/test/CodeGen/R600/legalizedag-bug-expand-setcc.ll
    llvm/trunk/test/CodeGen/R600/lit.local.cfg
    llvm/trunk/test/CodeGen/R600/literals.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfi.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfm.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.clamp.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fixup.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.flbit.i32.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.f64.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imad24.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imul24.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.kill.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.ldexp.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.legacy.rsq.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.mul.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.f64.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rcp.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.f64.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.clamped.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.rsq.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trunc.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll
    llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umul24.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.fs.interp.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.gather4.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.getlod.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.image.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.image.sample.o.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.resinfo.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.sample-masked.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.sample.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.sampled.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg-m0.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.sendmsg.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.tbuffer.store.ll
    llvm/trunk/test/CodeGen/R600/llvm.SI.tid.ll
    llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll
    llvm/trunk/test/CodeGen/R600/llvm.amdgpu.kilp.ll
    llvm/trunk/test/CodeGen/R600/llvm.amdgpu.lrp.ll
    llvm/trunk/test/CodeGen/R600/llvm.cos.ll
    llvm/trunk/test/CodeGen/R600/llvm.exp2.ll
    llvm/trunk/test/CodeGen/R600/llvm.log2.ll
    llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll
    llvm/trunk/test/CodeGen/R600/llvm.pow.ll
    llvm/trunk/test/CodeGen/R600/llvm.rint.f64.ll
    llvm/trunk/test/CodeGen/R600/llvm.rint.ll
    llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll
    llvm/trunk/test/CodeGen/R600/llvm.round.ll
    llvm/trunk/test/CodeGen/R600/llvm.sin.ll
    llvm/trunk/test/CodeGen/R600/llvm.sqrt.ll
    llvm/trunk/test/CodeGen/R600/load-i1.ll
    llvm/trunk/test/CodeGen/R600/load-input-fold.ll
    llvm/trunk/test/CodeGen/R600/load.ll
    llvm/trunk/test/CodeGen/R600/load.vec.ll
    llvm/trunk/test/CodeGen/R600/load64.ll
    llvm/trunk/test/CodeGen/R600/local-64.ll
    llvm/trunk/test/CodeGen/R600/local-atomics.ll
    llvm/trunk/test/CodeGen/R600/local-atomics64.ll
    llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll
    llvm/trunk/test/CodeGen/R600/local-memory.ll
    llvm/trunk/test/CodeGen/R600/loop-address.ll
    llvm/trunk/test/CodeGen/R600/loop-idiom.ll
    llvm/trunk/test/CodeGen/R600/lshl.ll
    llvm/trunk/test/CodeGen/R600/lshr.ll
    llvm/trunk/test/CodeGen/R600/m0-spill.ll
    llvm/trunk/test/CodeGen/R600/mad-combine.ll
    llvm/trunk/test/CodeGen/R600/mad-sub.ll
    llvm/trunk/test/CodeGen/R600/mad_int24.ll
    llvm/trunk/test/CodeGen/R600/mad_uint24.ll
    llvm/trunk/test/CodeGen/R600/madak.ll
    llvm/trunk/test/CodeGen/R600/madmk.ll
    llvm/trunk/test/CodeGen/R600/max-literals.ll
    llvm/trunk/test/CodeGen/R600/max.ll
    llvm/trunk/test/CodeGen/R600/max3.ll
    llvm/trunk/test/CodeGen/R600/merge-stores.ll
    llvm/trunk/test/CodeGen/R600/min.ll
    llvm/trunk/test/CodeGen/R600/min3.ll
    llvm/trunk/test/CodeGen/R600/missing-store.ll
    llvm/trunk/test/CodeGen/R600/mubuf.ll
    llvm/trunk/test/CodeGen/R600/mul.ll
    llvm/trunk/test/CodeGen/R600/mul_int24.ll
    llvm/trunk/test/CodeGen/R600/mul_uint24.ll
    llvm/trunk/test/CodeGen/R600/mulhu.ll
    llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll
    llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll
    llvm/trunk/test/CodeGen/R600/operand-folding.ll
    llvm/trunk/test/CodeGen/R600/operand-spacing.ll
    llvm/trunk/test/CodeGen/R600/or.ll
    llvm/trunk/test/CodeGen/R600/packetizer.ll
    llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll
    llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll
    llvm/trunk/test/CodeGen/R600/predicate-dp4.ll
    llvm/trunk/test/CodeGen/R600/predicates.ll
    llvm/trunk/test/CodeGen/R600/private-memory-atomics.ll
    llvm/trunk/test/CodeGen/R600/private-memory-broken.ll
    llvm/trunk/test/CodeGen/R600/private-memory.ll
    llvm/trunk/test/CodeGen/R600/pv-packing.ll
    llvm/trunk/test/CodeGen/R600/pv.ll
    llvm/trunk/test/CodeGen/R600/r600-encoding.ll
    llvm/trunk/test/CodeGen/R600/r600-export-fix.ll
    llvm/trunk/test/CodeGen/R600/r600-infinite-loop-bug-while-reorganizing-vector.ll
    llvm/trunk/test/CodeGen/R600/r600cfg.ll
    llvm/trunk/test/CodeGen/R600/reciprocal.ll
    llvm/trunk/test/CodeGen/R600/register-count-comments.ll
    llvm/trunk/test/CodeGen/R600/reorder-stores.ll
    llvm/trunk/test/CodeGen/R600/rotl.i64.ll
    llvm/trunk/test/CodeGen/R600/rotl.ll
    llvm/trunk/test/CodeGen/R600/rotr.i64.ll
    llvm/trunk/test/CodeGen/R600/rotr.ll
    llvm/trunk/test/CodeGen/R600/rsq.ll
    llvm/trunk/test/CodeGen/R600/rv7x0_count3.ll
    llvm/trunk/test/CodeGen/R600/s_movk_i32.ll
    llvm/trunk/test/CodeGen/R600/saddo.ll
    llvm/trunk/test/CodeGen/R600/salu-to-valu.ll
    llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll
    llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested-if.ll
    llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll
    llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll
    llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll
    llvm/trunk/test/CodeGen/R600/schedule-if-2.ll
    llvm/trunk/test/CodeGen/R600/schedule-if.ll
    llvm/trunk/test/CodeGen/R600/schedule-kernel-arg-loads.ll
    llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
    llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll
    llvm/trunk/test/CodeGen/R600/scratch-buffer.ll
    llvm/trunk/test/CodeGen/R600/sdiv.ll
    llvm/trunk/test/CodeGen/R600/sdivrem24.ll
    llvm/trunk/test/CodeGen/R600/sdivrem64.ll
    llvm/trunk/test/CodeGen/R600/select-i1.ll
    llvm/trunk/test/CodeGen/R600/select-vectors.ll
    llvm/trunk/test/CodeGen/R600/select.ll
    llvm/trunk/test/CodeGen/R600/select64.ll
    llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll
    llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll
    llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll
    llvm/trunk/test/CodeGen/R600/selectcc-opt.ll
    llvm/trunk/test/CodeGen/R600/selectcc.ll
    llvm/trunk/test/CodeGen/R600/set-dx10.ll
    llvm/trunk/test/CodeGen/R600/setcc-equivalent.ll
    llvm/trunk/test/CodeGen/R600/setcc-opt.ll
    llvm/trunk/test/CodeGen/R600/setcc.ll
    llvm/trunk/test/CodeGen/R600/setcc64.ll
    llvm/trunk/test/CodeGen/R600/seto.ll
    llvm/trunk/test/CodeGen/R600/setuo.ll
    llvm/trunk/test/CodeGen/R600/sext-eliminate.ll
    llvm/trunk/test/CodeGen/R600/sext-in-reg.ll
    llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll
    llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
    llvm/trunk/test/CodeGen/R600/sgpr-copy.ll
    llvm/trunk/test/CodeGen/R600/shared-op-cycle.ll
    llvm/trunk/test/CodeGen/R600/shl.ll
    llvm/trunk/test/CodeGen/R600/shl_add_constant.ll
    llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll
    llvm/trunk/test/CodeGen/R600/si-annotate-cf-assertion.ll
    llvm/trunk/test/CodeGen/R600/si-annotate-cf.ll
    llvm/trunk/test/CodeGen/R600/si-lod-bias.ll
    llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll
    llvm/trunk/test/CodeGen/R600/si-spill-cf.ll
    llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
    llvm/trunk/test/CodeGen/R600/si-vector-hang.ll
    llvm/trunk/test/CodeGen/R600/sign_extend.ll
    llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
    llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll
    llvm/trunk/test/CodeGen/R600/sint_to_fp.ll
    llvm/trunk/test/CodeGen/R600/smrd.ll
    llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll
    llvm/trunk/test/CodeGen/R600/sra.ll
    llvm/trunk/test/CodeGen/R600/srem.ll
    llvm/trunk/test/CodeGen/R600/srl.ll
    llvm/trunk/test/CodeGen/R600/ssubo.ll
    llvm/trunk/test/CodeGen/R600/store-barrier.ll
    llvm/trunk/test/CodeGen/R600/store-v3i32.ll
    llvm/trunk/test/CodeGen/R600/store-v3i64.ll
    llvm/trunk/test/CodeGen/R600/store-vector-ptrs.ll
    llvm/trunk/test/CodeGen/R600/store.ll
    llvm/trunk/test/CodeGen/R600/store.r600.ll
    llvm/trunk/test/CodeGen/R600/structurize.ll
    llvm/trunk/test/CodeGen/R600/structurize1.ll
    llvm/trunk/test/CodeGen/R600/sub.ll
    llvm/trunk/test/CodeGen/R600/subreg-coalescer-crash.ll
    llvm/trunk/test/CodeGen/R600/subreg-eliminate-dead.ll
    llvm/trunk/test/CodeGen/R600/swizzle-export.ll
    llvm/trunk/test/CodeGen/R600/tex-clause-antidep.ll
    llvm/trunk/test/CodeGen/R600/texture-input-merge.ll
    llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll
    llvm/trunk/test/CodeGen/R600/trunc-store-f64-to-f16.ll
    llvm/trunk/test/CodeGen/R600/trunc-store-i1.ll
    llvm/trunk/test/CodeGen/R600/trunc-vector-store-assertion-failure.ll
    llvm/trunk/test/CodeGen/R600/trunc.ll
    llvm/trunk/test/CodeGen/R600/tti-unroll-prefs.ll
    llvm/trunk/test/CodeGen/R600/uaddo.ll
    llvm/trunk/test/CodeGen/R600/udiv.ll
    llvm/trunk/test/CodeGen/R600/udivrem.ll
    llvm/trunk/test/CodeGen/R600/udivrem24.ll
    llvm/trunk/test/CodeGen/R600/udivrem64.ll
    llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll
    llvm/trunk/test/CodeGen/R600/uint_to_fp.ll
    llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll
    llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
    llvm/trunk/test/CodeGen/R600/unroll.ll
    llvm/trunk/test/CodeGen/R600/unsupported-cc.ll
    llvm/trunk/test/CodeGen/R600/urecip.ll
    llvm/trunk/test/CodeGen/R600/urem.ll
    llvm/trunk/test/CodeGen/R600/use-sgpr-multiple-times.ll
    llvm/trunk/test/CodeGen/R600/usubo.ll
    llvm/trunk/test/CodeGen/R600/v1i64-kernel-arg.ll
    llvm/trunk/test/CodeGen/R600/v_cndmask.ll
    llvm/trunk/test/CodeGen/R600/valu-i1.ll
    llvm/trunk/test/CodeGen/R600/vector-alloca.ll
    llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll
    llvm/trunk/test/CodeGen/R600/vop-shrink.ll
    llvm/trunk/test/CodeGen/R600/vselect.ll
    llvm/trunk/test/CodeGen/R600/vselect64.ll
    llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll
    llvm/trunk/test/CodeGen/R600/vtx-schedule.ll
    llvm/trunk/test/CodeGen/R600/wait.ll
    llvm/trunk/test/CodeGen/R600/work-item-intrinsics.ll
    llvm/trunk/test/CodeGen/R600/wrong-transalu-pos-fix.ll
    llvm/trunk/test/CodeGen/R600/xor.ll
    llvm/trunk/test/CodeGen/R600/zero_extend.ll
    llvm/trunk/test/MC/R600/ds-err.s
    llvm/trunk/test/MC/R600/ds.s
    llvm/trunk/test/MC/R600/flat.s
    llvm/trunk/test/MC/R600/lit.local.cfg
    llvm/trunk/test/MC/R600/mubuf.s
    llvm/trunk/test/MC/R600/smrd.s
    llvm/trunk/test/MC/R600/sop1-err.s
    llvm/trunk/test/MC/R600/sop1.s
    llvm/trunk/test/MC/R600/sop2.s
    llvm/trunk/test/MC/R600/sopc.s
    llvm/trunk/test/MC/R600/sopk.s
    llvm/trunk/test/MC/R600/sopp.s
    llvm/trunk/test/MC/R600/vop1.s
    llvm/trunk/test/MC/R600/vop2-err.s
    llvm/trunk/test/MC/R600/vop2.s
    llvm/trunk/test/MC/R600/vop3-errs.s
    llvm/trunk/test/MC/R600/vop3.s
    llvm/trunk/test/MC/R600/vopc.s
Modified:
    llvm/trunk/CMakeLists.txt
    llvm/trunk/autoconf/configure.ac
    llvm/trunk/configure
    llvm/trunk/docs/CompilerWriterInfo.rst
    llvm/trunk/docs/GettingStarted.rst
    llvm/trunk/docs/index.rst
    llvm/trunk/lib/Target/LLVMBuild.txt

Modified: llvm/trunk/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/CMakeLists.txt?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/CMakeLists.txt (original)
+++ llvm/trunk/CMakeLists.txt Fri Jun 12 22:28:10 2015
@@ -176,6 +176,7 @@ set(LLVM_INCLUDE_DIR ${CMAKE_CURRENT_BIN
 
 set(LLVM_ALL_TARGETS
   AArch64
+  AMDGPU
   ARM
   BPF
   CppBackend
@@ -184,7 +185,6 @@ set(LLVM_ALL_TARGETS
   MSP430
   NVPTX
   PowerPC
-  R600
   Sparc
   SystemZ
   X86

Modified: llvm/trunk/autoconf/configure.ac
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/autoconf/configure.ac?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/autoconf/configure.ac (original)
+++ llvm/trunk/autoconf/configure.ac Fri Jun 12 22:28:10 2015
@@ -1097,7 +1097,7 @@ if test "$llvm_cv_enable_crash_overrides
 fi
 
 dnl List all possible targets
-ALL_TARGETS="X86 Sparc PowerPC ARM AArch64 Mips XCore MSP430 CppBackend NVPTX Hexagon SystemZ R600 BPF"
+ALL_TARGETS="X86 Sparc PowerPC ARM AArch64 Mips XCore MSP430 CppBackend NVPTX Hexagon SystemZ AMDGPU BPF"
 AC_SUBST(ALL_TARGETS,$ALL_TARGETS)
 
 dnl Allow specific targets to be specified for building (or not)
@@ -1132,7 +1132,8 @@ case "$enableval" in
         hexagon)  TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
         nvptx)    TARGETS_TO_BUILD="NVPTX $TARGETS_TO_BUILD" ;;
         systemz)  TARGETS_TO_BUILD="SystemZ $TARGETS_TO_BUILD" ;;
-        r600)     TARGETS_TO_BUILD="R600 $TARGETS_TO_BUILD" ;;
+        amdgpu)  ;&
+        r600)     TARGETS_TO_BUILD="AMDGPU $TARGETS_TO_BUILD" ;;
         host) case "$llvm_cv_target_arch" in
             x86)         TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
             x86_64)      TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;

Modified: llvm/trunk/configure
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/configure?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/configure (original)
+++ llvm/trunk/configure Fri Jun 12 22:28:10 2015
@@ -5628,7 +5628,7 @@ _ACEOF
 
 fi
 
-ALL_TARGETS="X86 Sparc PowerPC ARM AArch64 Mips XCore MSP430 CppBackend NVPTX Hexagon SystemZ R600 BPF"
+ALL_TARGETS="X86 Sparc PowerPC ARM AArch64 Mips XCore MSP430 CppBackend NVPTX Hexagon SystemZ AMDGPU BPF"
 ALL_TARGETS=$ALL_TARGETS
 
 
@@ -5665,7 +5665,8 @@ case "$enableval" in
         hexagon)  TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
         nvptx)    TARGETS_TO_BUILD="NVPTX $TARGETS_TO_BUILD" ;;
         systemz)  TARGETS_TO_BUILD="SystemZ $TARGETS_TO_BUILD" ;;
-        r600)     TARGETS_TO_BUILD="R600 $TARGETS_TO_BUILD" ;;
+        amdgpu)  ;&
+        r600)     TARGETS_TO_BUILD="AMDGPU $TARGETS_TO_BUILD" ;;
         host) case "$llvm_cv_target_arch" in
             x86)         TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
             x86_64)      TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;

Copied: llvm/trunk/docs/AMDGPUUsage.rst (from r239647, llvm/trunk/docs/R600Usage.rst)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/AMDGPUUsage.rst?p2=llvm/trunk/docs/AMDGPUUsage.rst&p1=llvm/trunk/docs/R600Usage.rst&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/docs/R600Usage.rst (original)
+++ llvm/trunk/docs/AMDGPUUsage.rst Fri Jun 12 22:28:10 2015
@@ -1,11 +1,11 @@
-============================
-User Guide for R600 Back-end
-============================
+==============================
+User Guide for AMDGPU Back-end
+==============================
 
 Introduction
 ============
 
-The R600 back-end provides ISA code generation for AMD GPUs, starting with
+The AMDGPU back-end provides ISA code generation for AMD GPUs, starting with
 the R600 family up until the current Volcanic Islands (GCN Gen 3).
 
 
@@ -14,7 +14,7 @@ Assembler
 
 The assembler is currently considered experimental.
 
-For syntax examples look in test/MC/R600.
+For syntax examples look in test/MC/AMDGPU.
 
 Below some of the currently supported features (modulo bugs).  These
 all apply to the Southern Islands ISA, Sea Islands and Volcanic Islands

Modified: llvm/trunk/docs/CompilerWriterInfo.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/CompilerWriterInfo.rst?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/docs/CompilerWriterInfo.rst (original)
+++ llvm/trunk/docs/CompilerWriterInfo.rst Fri Jun 12 22:28:10 2015
@@ -68,8 +68,8 @@ Other documents, collections, notes
 * `PowerPC64 alignment of long doubles (from GCC) <http://gcc.gnu.org/ml/gcc-patches/2003-09/msg00997.html>`_
 * `Long branch stubs for powerpc64-linux (from binutils) <http://sources.redhat.com/ml/binutils/2002-04/msg00573.html>`_
 
-R600
-----
+AMDGPU
+------
 
 * `AMD R6xx shader ISA <http://developer.amd.com/wordpress/media/2012/10/R600_Instruction_Set_Architecture.pdf>`_
 * `AMD R7xx shader ISA <http://developer.amd.com/wordpress/media/2012/10/R700-Family_Instruction_Set_Architecture.pdf>`_

Modified: llvm/trunk/docs/GettingStarted.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/GettingStarted.rst?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/docs/GettingStarted.rst (original)
+++ llvm/trunk/docs/GettingStarted.rst Fri Jun 12 22:28:10 2015
@@ -711,7 +711,7 @@ used by people developing LLVM.
 |                         | as ``LLVM_ALL_TARGETS``, and can be set to include |
 |                         | out-of-tree targets. The default value includes:   |
 |                         | ``AArch64, ARM, CppBackend, Hexagon,               |
-|                         | Mips, MSP430, NVPTX, PowerPC, R600, Sparc,         |
+|                         | Mips, MSP430, NVPTX, PowerPC, AMDGPU, Sparc,       |
 |                         | SystemZ, X86, XCore``.                             |
 +-------------------------+----------------------------------------------------+
 | LLVM_ENABLE_DOXYGEN     | Build doxygen-based documentation from the source  |

Removed: llvm/trunk/docs/R600Usage.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/R600Usage.rst?rev=239656&view=auto
==============================================================================
--- llvm/trunk/docs/R600Usage.rst (original)
+++ llvm/trunk/docs/R600Usage.rst (removed)
@@ -1,94 +0,0 @@
-============================
-User Guide for R600 Back-end
-============================
-
-Introduction
-============
-
-The R600 back-end provides ISA code generation for AMD GPUs, starting with
-the R600 family up until the current Volcanic Islands (GCN Gen 3).
-
-
-Assembler
-=========
-
-The assembler is currently considered experimental.
-
-For syntax examples look in test/MC/R600.
-
-Below some of the currently supported features (modulo bugs).  These
-all apply to the Southern Islands ISA, Sea Islands and Volcanic Islands
-are also supported but may be missing some instructions and have more bugs:
-
-DS Instructions
----------------
-All DS instructions are supported.
-
-FLAT Instructions
-------------------
-These instructions are only present in the Sea Islands and Volcanic Islands
-instruction set.  All FLAT instructions are supported for these architectures
-
-MUBUF Instructions
-------------------
-All non-atomic MUBUF instructions are supported.
-
-SMRD Instructions
------------------
-Only the s_load_dword* SMRD instructions are supported.
-
-SOP1 Instructions
------------------
-All SOP1 instructions are supported.
-
-SOP2 Instructions
------------------
-All SOP2 instructions are supported.
-
-SOPC Instructions
------------------
-All SOPC instructions are supported.
-
-SOPP Instructions
------------------
-
-Unless otherwise mentioned, all SOPP instructions that have one or more
-operands accept integer operands only.  No verification is performed
-on the operands, so it is up to the programmer to be familiar with the
-range or acceptable values.
-
-s_waitcnt
-^^^^^^^^^
-
-s_waitcnt accepts named arguments to specify which memory counter(s) to
-wait for.
-
-.. code-block:: nasm
-
-   // Wait for all counters to be 0
-   s_waitcnt 0
-
-   // Equivalent to s_waitcnt 0.  Counter names can also be delimited by
-   // '&' or ','.
-   s_waitcnt vmcnt(0) expcnt(0) lgkcmt(0)
-
-   // Wait for vmcnt counter to be 1.
-   s_waitcnt vmcnt(1)
-
-VOP1, VOP2, VOP3, VOPC Instructions
------------------------------------
-
-All 32-bit and 64-bit encodings should work.
-
-The assembler will automatically detect which encoding size to use for
-VOP1, VOP2, and VOPC instructions based on the operands.  If you want to force
-a specific encoding size, you can add an _e32 (for 32-bit encoding) or
-_e64 (for 64-bit encoding) suffix to the instruction.  Most, but not all
-instructions support an explicit suffix.  These are all valid assembly
-strings:
-
-.. code-block:: nasm
-
-   v_mul_i32_i24 v1, v2, v3
-   v_mul_i32_i24_e32 v1, v2, v3
-   v_mul_i32_i24_e64 v1, v2, v3

Modified: llvm/trunk/docs/index.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/index.rst?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/docs/index.rst (original)
+++ llvm/trunk/docs/index.rst Fri Jun 12 22:28:10 2015
@@ -252,7 +252,7 @@ For API clients and LLVM developers.
    WritingAnLLVMPass
    HowToUseAttributes
    NVPTXUsage
-   R600Usage
+   AMDGPUUsage
    StackMaps
    InAlloca
    BigEndianNEON
@@ -338,8 +338,8 @@ For API clients and LLVM developers.
 :doc:`NVPTXUsage`
    This document describes using the NVPTX back-end to compile GPU kernels.
 
-:doc:`R600Usage`
-   This document describes how to use the R600 back-end.
+:doc:`AMDGPUUsage`
+   This document describes how to use the AMDGPU back-end.
 
 :doc:`StackMaps`
   LLVM support for mapping instruction addresses to the location of

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPU.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPU.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPU.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPU.h&p1=llvm/trunk/lib/Target/R600/AMDGPU.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPU.td (from r239647, llvm/trunk/lib/Target/R600/AMDGPU.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPU.td?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPU.td&p1=llvm/trunk/lib/Target/R600/AMDGPU.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp Fri Jun 12 22:28:10 2015
@@ -80,7 +80,7 @@ createAMDGPUAsmPrinterPass(TargetMachine
   return new AMDGPUAsmPrinter(tm, std::move(Streamer));
 }
 
-extern "C" void LLVMInitializeR600AsmPrinter() {
+extern "C" void LLVMInitializeAMDGPUAsmPrinter() {
   TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
   TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
 }

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUAsmPrinter.h&p1=llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUCallingConv.td (from r239647, llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUCallingConv.td?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUCallingConv.td&p1=llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUFrameLowering.h&p1=llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.h&p1=llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.h&p1=llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td (from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUInstrInfo.td&p1=llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td (from r239647, llvm/trunk/lib/Target/R600/AMDGPUInstructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td&p1=llvm/trunk/lib/Target/R600/AMDGPUInstructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.h&p1=llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsics.td (from r239647, llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsics.td?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUIntrinsics.td&p1=llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUMCInstLower.h&p1=llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineFunction.h&p1=llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.h&p1=llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.td (from r239647, llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.td?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPURegisterInfo.td&p1=llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h&p1=llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Fri Jun 12 22:28:10 2015
@@ -37,7 +37,7 @@
 
 using namespace llvm;
 
-extern "C" void LLVMInitializeR600Target() {
+extern "C" void LLVMInitializeAMDGPUTarget() {
   // Register the target
   RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
   RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.h&p1=llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp&p1=llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h (from r239647, llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h&p1=llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp (from r239647, llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AMDILCFGStructurizer.cpp&p1=llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AMDKernelCodeT.h (from r239647, llvm/trunk/lib/Target/R600/AMDKernelCodeT.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDKernelCodeT.h?p2=llvm/trunk/lib/Target/AMDGPU/AMDKernelCodeT.h&p1=llvm/trunk/lib/Target/R600/AMDKernelCodeT.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (from r239647, llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp?p2=llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp&p1=llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp Fri Jun 12 22:28:10 2015
@@ -1369,7 +1369,7 @@ void AMDGPUAsmParser::cvtVOP3(MCInst &In
 }
 
 /// Force static initialization.
-extern "C" void LLVMInitializeR600AsmParser() {
+extern "C" void LLVMInitializeAMDGPUAsmParser() {
   RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
   RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
 }

Added: llvm/trunk/lib/Target/AMDGPU/AsmParser/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AsmParser/CMakeLists.txt?rev=239657&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AsmParser/CMakeLists.txt (added)
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/CMakeLists.txt Fri Jun 12 22:28:10 2015
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMAMDGPUAsmParser
+  AMDGPUAsmParser.cpp
+  )

Copied: llvm/trunk/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt (from r239647, llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt?p2=llvm/trunk/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt&p1=llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/LLVMBuild.txt Fri Jun 12 22:28:10 2015
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/AsmParser/LLVMBuild.txt -------------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/AsmParser/LLVMBuild.txt -------------*- Conf -*--===;
 ;
 ;                     The LLVM Compiler Infrastructure
 ;
@@ -17,7 +17,7 @@
 
 [component_0]
 type = Library
-name = R600AsmParser
-parent = R600
-required_libraries = MC MCParser R600Desc R600Info Support
-add_to_library_groups = R600
+name = AMDGPUAsmParser
+parent = AMDGPU
+required_libraries = MC MCParser AMDGPUDesc AMDGPUInfo Support
+add_to_library_groups = AMDGPU

Copied: llvm/trunk/lib/Target/AMDGPU/AsmParser/Makefile (from r239647, llvm/trunk/lib/Target/R600/AsmParser/Makefile)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AsmParser/Makefile?p2=llvm/trunk/lib/Target/AMDGPU/AsmParser/Makefile&p1=llvm/trunk/lib/Target/R600/AsmParser/Makefile&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/CIInstructions.td (from r239647, llvm/trunk/lib/Target/R600/CIInstructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/CIInstructions.td?p2=llvm/trunk/lib/Target/AMDGPU/CIInstructions.td&p1=llvm/trunk/lib/Target/R600/CIInstructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt (from r239647, llvm/trunk/lib/Target/R600/CMakeLists.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt?p2=llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt&p1=llvm/trunk/lib/Target/R600/CMakeLists.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt Fri Jun 12 22:28:10 2015
@@ -12,7 +12,7 @@ tablegen(LLVM AMDGPUGenAsmWriter.inc -ge
 tablegen(LLVM AMDGPUGenAsmMatcher.inc -gen-asm-matcher)
 add_public_tablegen_target(AMDGPUCommonTableGen)
 
-add_llvm_target(R600CodeGen
+add_llvm_target(AMDGPUCodeGen
   AMDILCFGStructurizer.cpp
   AMDGPUAlwaysInlinePass.cpp
   AMDGPUAsmPrinter.cpp

Copied: llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td (from r239647, llvm/trunk/lib/Target/R600/CaymanInstructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td?p2=llvm/trunk/lib/Target/AMDGPU/CaymanInstructions.td&p1=llvm/trunk/lib/Target/R600/CaymanInstructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td (from r239647, llvm/trunk/lib/Target/R600/EvergreenInstructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td?p2=llvm/trunk/lib/Target/AMDGPU/EvergreenInstructions.td&p1=llvm/trunk/lib/Target/R600/EvergreenInstructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp (from r239647, llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp&p1=llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h (from r239647, llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h?p2=llvm/trunk/lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h&p1=llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Added: llvm/trunk/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt?rev=239657&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt (added)
+++ llvm/trunk/lib/Target/AMDGPU/InstPrinter/CMakeLists.txt Fri Jun 12 22:28:10 2015
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMAMDGPUAsmPrinter
+  AMDGPUInstPrinter.cpp
+  )

Copied: llvm/trunk/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt (from r239647, llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt?p2=llvm/trunk/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt&p1=llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt Fri Jun 12 22:28:10 2015
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/InstPrinter/LLVMBuild.txt -----------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/InstPrinter/LLVMBuild.txt -----------*- Conf -*--===;
 ;
 ;                     The LLVM Compiler Infrastructure
 ;
@@ -17,8 +17,8 @@
 
 [component_0]
 type = Library
-name = R600AsmPrinter
-parent = R600
+name = AMDGPUAsmPrinter
+parent = AMDGPU
 required_libraries = MC Support
-add_to_library_groups = R600
+add_to_library_groups = AMDGPU
 

Copied: llvm/trunk/lib/Target/AMDGPU/InstPrinter/Makefile (from r239647, llvm/trunk/lib/Target/R600/InstPrinter/Makefile)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/InstPrinter/Makefile?p2=llvm/trunk/lib/Target/AMDGPU/InstPrinter/Makefile&p1=llvm/trunk/lib/Target/R600/InstPrinter/Makefile&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/LLVMBuild.txt (from r239647, llvm/trunk/lib/Target/R600/LLVMBuild.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/LLVMBuild.txt?p2=llvm/trunk/lib/Target/AMDGPU/LLVMBuild.txt&p1=llvm/trunk/lib/Target/R600/LLVMBuild.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/LLVMBuild.txt Fri Jun 12 22:28:10 2015
@@ -20,14 +20,14 @@ subdirectories = AsmParser InstPrinter M
 
 [component_0]
 type = TargetGroup
-name = R600
+name = AMDGPU
 parent = Target
 has_asmparser = 1
 has_asmprinter = 1
 
 [component_1]
 type = Library
-name = R600CodeGen
-parent = R600
-required_libraries = Analysis AsmPrinter CodeGen Core IPO MC R600AsmParser R600AsmPrinter R600Desc R600Info Scalar SelectionDAG Support Target TransformUtils
-add_to_library_groups = R600
+name = AMDGPUCodeGen
+parent = AMDGPU
+required_libraries = Analysis AsmPrinter CodeGen Core IPO MC AMDGPUAsmParser AMDGPUAsmPrinter AMDGPUDesc AMDGPUInfo Scalar SelectionDAG Support Target TransformUtils
+add_to_library_groups = AMDGPU

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUELFObjectWriter.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUFixupKinds.h&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCAsmInfo.h&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCCodeEmitter.h&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp Fri Jun 12 22:28:10 2015
@@ -72,7 +72,7 @@ static MCInstPrinter *createAMDGPUMCInst
   return new AMDGPUInstPrinter(MAI, MII, MRI);
 }
 
-extern "C" void LLVMInitializeR600TargetMC() {
+extern "C" void LLVMInitializeAMDGPUTargetMC() {
   for (Target *T : {&TheAMDGPUTarget, &TheGCNTarget}) {
     RegisterMCAsmInfo<AMDGPUMCAsmInfo> X(*T);
 

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.h&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/CMakeLists.txt Fri Jun 12 22:28:10 2015
@@ -1,5 +1,5 @@
 
-add_llvm_library(LLVMR600Desc
+add_llvm_library(LLVMAMDGPUDesc
   AMDGPUAsmBackend.cpp
   AMDGPUELFObjectWriter.cpp
   AMDGPUMCCodeEmitter.cpp

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt Fri Jun 12 22:28:10 2015
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/MCTargetDesc/LLVMBuild.txt -------------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/MCTargetDesc/LLVMBuild.txt -------------*- Conf -*--===;
 ;
 ;                     The LLVM Compiler Infrastructure
 ;
@@ -17,7 +17,7 @@
 
 [component_0]
 type = Library
-name = R600Desc
-parent = R600
-required_libraries = MC R600AsmPrinter R600Info Support
-add_to_library_groups = R600
+name = AMDGPUDesc
+parent = AMDGPU
+required_libraries = MC AMDGPUAsmPrinter AMDGPUInfo Support
+add_to_library_groups = AMDGPU

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/Makefile (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/Makefile?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/Makefile&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/R600MCCodeEmitter.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp (from r239647, llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp&p1=llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/Makefile (from r239647, llvm/trunk/lib/Target/R600/Makefile)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/Makefile?p2=llvm/trunk/lib/Target/AMDGPU/Makefile&p1=llvm/trunk/lib/Target/R600/Makefile&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/Processors.td (from r239647, llvm/trunk/lib/Target/R600/Processors.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/Processors.td?p2=llvm/trunk/lib/Target/AMDGPU/Processors.td&p1=llvm/trunk/lib/Target/R600/Processors.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp (from r239647, llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600ClauseMergePass.cpp&p1=llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp (from r239647, llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600ControlFlowFinalizer.cpp&p1=llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600Defines.h (from r239647, llvm/trunk/lib/Target/R600/R600Defines.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Defines.h?p2=llvm/trunk/lib/Target/AMDGPU/R600Defines.h&p1=llvm/trunk/lib/Target/R600/R600Defines.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp (from r239647, llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600EmitClauseMarkers.cpp&p1=llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp (from r239647, llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600ExpandSpecialInstrs.cpp&p1=llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp (from r239647, llvm/trunk/lib/Target/R600/R600ISelLowering.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp&p1=llvm/trunk/lib/Target/R600/R600ISelLowering.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.h (from r239647, llvm/trunk/lib/Target/R600/R600ISelLowering.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.h?p2=llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.h&p1=llvm/trunk/lib/Target/R600/R600ISelLowering.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600InstrFormats.td (from r239647, llvm/trunk/lib/Target/R600/R600InstrFormats.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600InstrFormats.td?p2=llvm/trunk/lib/Target/AMDGPU/R600InstrFormats.td&p1=llvm/trunk/lib/Target/R600/R600InstrFormats.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/R600InstrInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.cpp&p1=llvm/trunk/lib/Target/R600/R600InstrInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.h (from r239647, llvm/trunk/lib/Target/R600/R600InstrInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/R600InstrInfo.h&p1=llvm/trunk/lib/Target/R600/R600InstrInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600Instructions.td (from r239647, llvm/trunk/lib/Target/R600/R600Instructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Instructions.td?p2=llvm/trunk/lib/Target/AMDGPU/R600Instructions.td&p1=llvm/trunk/lib/Target/R600/R600Instructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600Intrinsics.td (from r239647, llvm/trunk/lib/Target/R600/R600Intrinsics.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Intrinsics.td?p2=llvm/trunk/lib/Target/AMDGPU/R600Intrinsics.td&p1=llvm/trunk/lib/Target/R600/R600Intrinsics.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.cpp&p1=llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.h (from r239647, llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/R600MachineFunctionInfo.h&p1=llvm/trunk/lib/Target/R600/R600MachineFunctionInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.cpp (from r239647, llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.cpp&p1=llvm/trunk/lib/Target/R600/R600MachineScheduler.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.h (from r239647, llvm/trunk/lib/Target/R600/R600MachineScheduler.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.h?p2=llvm/trunk/lib/Target/AMDGPU/R600MachineScheduler.h&p1=llvm/trunk/lib/Target/R600/R600MachineScheduler.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp (from r239647, llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp&p1=llvm/trunk/lib/Target/R600/R600OptimizeVectorRegisters.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600Packetizer.cpp (from r239647, llvm/trunk/lib/Target/R600/R600Packetizer.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Packetizer.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600Packetizer.cpp&p1=llvm/trunk/lib/Target/R600/R600Packetizer.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.cpp&p1=llvm/trunk/lib/Target/R600/R600RegisterInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.h (from r239647, llvm/trunk/lib/Target/R600/R600RegisterInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.h&p1=llvm/trunk/lib/Target/R600/R600RegisterInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.td (from r239647, llvm/trunk/lib/Target/R600/R600RegisterInfo.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.td?p2=llvm/trunk/lib/Target/AMDGPU/R600RegisterInfo.td&p1=llvm/trunk/lib/Target/R600/R600RegisterInfo.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600Schedule.td (from r239647, llvm/trunk/lib/Target/R600/R600Schedule.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600Schedule.td?p2=llvm/trunk/lib/Target/AMDGPU/R600Schedule.td&p1=llvm/trunk/lib/Target/R600/R600Schedule.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp (from r239647, llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp?p2=llvm/trunk/lib/Target/AMDGPU/R600TextureIntrinsicsReplacer.cpp&p1=llvm/trunk/lib/Target/R600/R600TextureIntrinsicsReplacer.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/R700Instructions.td (from r239647, llvm/trunk/lib/Target/R600/R700Instructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R700Instructions.td?p2=llvm/trunk/lib/Target/AMDGPU/R700Instructions.td&p1=llvm/trunk/lib/Target/R600/R700Instructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp (from r239647, llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp&p1=llvm/trunk/lib/Target/R600/SIAnnotateControlFlow.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIDefines.h (from r239647, llvm/trunk/lib/Target/R600/SIDefines.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIDefines.h?p2=llvm/trunk/lib/Target/AMDGPU/SIDefines.h&p1=llvm/trunk/lib/Target/R600/SIDefines.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp (from r239647, llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIFixControlFlowLiveIntervals.cpp&p1=llvm/trunk/lib/Target/R600/SIFixControlFlowLiveIntervals.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp (from r239647, llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIFixSGPRCopies.cpp&p1=llvm/trunk/lib/Target/R600/SIFixSGPRCopies.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp (from r239647, llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIFixSGPRLiveRanges.cpp&p1=llvm/trunk/lib/Target/R600/SIFixSGPRLiveRanges.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp (from r239647, llvm/trunk/lib/Target/R600/SIFoldOperands.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIFoldOperands.cpp&p1=llvm/trunk/lib/Target/R600/SIFoldOperands.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (from r239647, llvm/trunk/lib/Target/R600/SIISelLowering.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp&p1=llvm/trunk/lib/Target/R600/SIISelLowering.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h (from r239647, llvm/trunk/lib/Target/R600/SIISelLowering.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h?p2=llvm/trunk/lib/Target/AMDGPU/SIISelLowering.h&p1=llvm/trunk/lib/Target/R600/SIISelLowering.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp (from r239647, llvm/trunk/lib/Target/R600/SIInsertWaits.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIInsertWaits.cpp&p1=llvm/trunk/lib/Target/R600/SIInsertWaits.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td (from r239647, llvm/trunk/lib/Target/R600/SIInstrFormats.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td?p2=llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td&p1=llvm/trunk/lib/Target/R600/SIInstrFormats.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/SIInstrInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp&p1=llvm/trunk/lib/Target/R600/SIInstrInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h (from r239647, llvm/trunk/lib/Target/R600/SIInstrInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.h&p1=llvm/trunk/lib/Target/R600/SIInstrInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (from r239647, llvm/trunk/lib/Target/R600/SIInstrInfo.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?p2=llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td&p1=llvm/trunk/lib/Target/R600/SIInstrInfo.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (from r239647, llvm/trunk/lib/Target/R600/SIInstructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstructions.td?p2=llvm/trunk/lib/Target/AMDGPU/SIInstructions.td&p1=llvm/trunk/lib/Target/R600/SIInstructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIIntrinsics.td (from r239647, llvm/trunk/lib/Target/R600/SIIntrinsics.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIIntrinsics.td?p2=llvm/trunk/lib/Target/AMDGPU/SIIntrinsics.td&p1=llvm/trunk/lib/Target/R600/SIIntrinsics.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp (from r239647, llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp&p1=llvm/trunk/lib/Target/R600/SILoadStoreOptimizer.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp (from r239647, llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp&p1=llvm/trunk/lib/Target/R600/SILowerControlFlow.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SILowerI1Copies.cpp (from r239647, llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SILowerI1Copies.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SILowerI1Copies.cpp&p1=llvm/trunk/lib/Target/R600/SILowerI1Copies.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp&p1=llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h (from r239647, llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/SIMachineFunctionInfo.h&p1=llvm/trunk/lib/Target/R600/SIMachineFunctionInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIPrepareScratchRegs.cpp (from r239647, llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIPrepareScratchRegs.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIPrepareScratchRegs.cpp&p1=llvm/trunk/lib/Target/R600/SIPrepareScratchRegs.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp&p1=llvm/trunk/lib/Target/R600/SIRegisterInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h (from r239647, llvm/trunk/lib/Target/R600/SIRegisterInfo.h)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h?p2=llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h&p1=llvm/trunk/lib/Target/R600/SIRegisterInfo.h&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td (from r239647, llvm/trunk/lib/Target/R600/SIRegisterInfo.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td?p2=llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.td&p1=llvm/trunk/lib/Target/R600/SIRegisterInfo.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SISchedule.td (from r239647, llvm/trunk/lib/Target/R600/SISchedule.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SISchedule.td?p2=llvm/trunk/lib/Target/AMDGPU/SISchedule.td&p1=llvm/trunk/lib/Target/R600/SISchedule.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp (from r239647, llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SIShrinkInstructions.cpp&p1=llvm/trunk/lib/Target/R600/SIShrinkInstructions.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/SITypeRewriter.cpp (from r239647, llvm/trunk/lib/Target/R600/SITypeRewriter.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SITypeRewriter.cpp?p2=llvm/trunk/lib/Target/AMDGPU/SITypeRewriter.cpp&p1=llvm/trunk/lib/Target/R600/SITypeRewriter.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp (from r239647, llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp?p2=llvm/trunk/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp&p1=llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/TargetInfo/AMDGPUTargetInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/TargetInfo/AMDGPUTargetInfo.cpp Fri Jun 12 22:28:10 2015
@@ -23,7 +23,7 @@ Target llvm::TheAMDGPUTarget;
 Target llvm::TheGCNTarget;
 
 /// \brief Extern function to initialize the targets for the AMDGPU backend
-extern "C" void LLVMInitializeR600TargetInfo() {
+extern "C" void LLVMInitializeAMDGPUTargetInfo() {
   RegisterTarget<Triple::r600, false>
     R600(TheAMDGPUTarget, "r600", "AMD GPUs HD2XXX-HD6XXX");
   RegisterTarget<Triple::amdgcn, false> GCN(TheGCNTarget, "amdgcn", "AMD GCN GPUs");

Added: llvm/trunk/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt?rev=239657&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt (added)
+++ llvm/trunk/lib/Target/AMDGPU/TargetInfo/CMakeLists.txt Fri Jun 12 22:28:10 2015
@@ -0,0 +1,3 @@
+add_llvm_library(LLVMAMDGPUInfo
+  AMDGPUTargetInfo.cpp
+  )

Copied: llvm/trunk/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt (from r239647, llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt?p2=llvm/trunk/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt&p1=llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/TargetInfo/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt Fri Jun 12 22:28:10 2015
@@ -1,4 +1,4 @@
-;===- ./lib/Target/R600/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
+;===- ./lib/Target/AMDGPU/TargetInfo/LLVMBuild.txt --------------*- Conf -*--===;
 ;
 ;                     The LLVM Compiler Infrastructure
 ;
@@ -17,7 +17,7 @@
 
 [component_0]
 type = Library
-name = R600Info
-parent = R600
+name = AMDGPUInfo
+parent = AMDGPU
 required_libraries = Support
-add_to_library_groups = R600
+add_to_library_groups = AMDGPU

Copied: llvm/trunk/lib/Target/AMDGPU/TargetInfo/Makefile (from r239647, llvm/trunk/lib/Target/R600/TargetInfo/Makefile)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/TargetInfo/Makefile?p2=llvm/trunk/lib/Target/AMDGPU/TargetInfo/Makefile&p1=llvm/trunk/lib/Target/R600/TargetInfo/Makefile&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/VIInstrFormats.td (from r239647, llvm/trunk/lib/Target/R600/VIInstrFormats.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VIInstrFormats.td?p2=llvm/trunk/lib/Target/AMDGPU/VIInstrFormats.td&p1=llvm/trunk/lib/Target/R600/VIInstrFormats.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Copied: llvm/trunk/lib/Target/AMDGPU/VIInstructions.td (from r239647, llvm/trunk/lib/Target/R600/VIInstructions.td)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VIInstructions.td?p2=llvm/trunk/lib/Target/AMDGPU/VIInstructions.td&p1=llvm/trunk/lib/Target/R600/VIInstructions.td&r1=239647&r2=239657&rev=239657&view=diff
==============================================================================
    (empty)

Modified: llvm/trunk/lib/Target/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/LLVMBuild.txt?rev=239657&r1=239656&r2=239657&view=diff
==============================================================================
--- llvm/trunk/lib/Target/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/LLVMBuild.txt Fri Jun 12 22:28:10 2015
@@ -19,6 +19,7 @@
 ; will typically require only insertion of a line.
 [common]
 subdirectories =
+ AMDGPU
  ARM
  AArch64
  BPF
@@ -28,7 +29,6 @@ subdirectories =
  NVPTX
  Mips
  PowerPC
- R600
  Sparc
  SystemZ
  X86

Removed: llvm/trunk/lib/Target/R600/AMDGPU.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPU.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPU.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPU.h (removed)
@@ -1,148 +0,0 @@
-//===-- AMDGPU.h - MachineFunction passes hw codegen --------------*- C++ -*-=//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPU_H
-#define LLVM_LIB_TARGET_R600_AMDGPU_H
-
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/TargetMachine.h"
-
-namespace llvm {
-
-class AMDGPUInstrPrinter;
-class AMDGPUSubtarget;
-class AMDGPUTargetMachine;
-class FunctionPass;
-class MCAsmInfo;
-class raw_ostream;
-class Target;
-class TargetMachine;
-
-// R600 Passes
-FunctionPass *createR600VectorRegMerger(TargetMachine &tm);
-FunctionPass *createR600TextureIntrinsicsReplacer();
-FunctionPass *createR600ExpandSpecialInstrsPass(TargetMachine &tm);
-FunctionPass *createR600EmitClauseMarkers();
-FunctionPass *createR600ClauseMergePass(TargetMachine &tm);
-FunctionPass *createR600Packetizer(TargetMachine &tm);
-FunctionPass *createR600ControlFlowFinalizer(TargetMachine &tm);
-FunctionPass *createAMDGPUCFGStructurizerPass();
-
-// SI Passes
-FunctionPass *createSITypeRewriter();
-FunctionPass *createSIAnnotateControlFlowPass();
-FunctionPass *createSIFoldOperandsPass();
-FunctionPass *createSILowerI1CopiesPass();
-FunctionPass *createSIShrinkInstructionsPass();
-FunctionPass *createSILoadStoreOptimizerPass(TargetMachine &tm);
-FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
-FunctionPass *createSIFixControlFlowLiveIntervalsPass();
-FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
-FunctionPass *createSIFixSGPRLiveRangesPass();
-FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
-FunctionPass *createSIInsertWaits(TargetMachine &tm);
-FunctionPass *createSIPrepareScratchRegs();
-
-void initializeSIFoldOperandsPass(PassRegistry &);
-extern char &SIFoldOperandsID;
-
-void initializeSILowerI1CopiesPass(PassRegistry &);
-extern char &SILowerI1CopiesID;
-
-void initializeSILoadStoreOptimizerPass(PassRegistry &);
-extern char &SILoadStoreOptimizerID;
-
-// Passes common to R600 and SI
-FunctionPass *createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST);
-Pass *createAMDGPUStructurizeCFGPass();
-FunctionPass *createAMDGPUISelDag(TargetMachine &tm);
-ModulePass *createAMDGPUAlwaysInlinePass();
-
-void initializeSIFixControlFlowLiveIntervalsPass(PassRegistry&);
-extern char &SIFixControlFlowLiveIntervalsID;
-
-void initializeSIFixSGPRLiveRangesPass(PassRegistry&);
-extern char &SIFixSGPRLiveRangesID;
-
-
-extern Target TheAMDGPUTarget;
-extern Target TheGCNTarget;
-
-namespace AMDGPU {
-enum TargetIndex {
-  TI_CONSTDATA_START,
-  TI_SCRATCH_RSRC_DWORD0,
-  TI_SCRATCH_RSRC_DWORD1,
-  TI_SCRATCH_RSRC_DWORD2,
-  TI_SCRATCH_RSRC_DWORD3
-};
-}
-
-#define END_OF_TEXT_LABEL_NAME "EndOfTextLabel"
-
-} // End namespace llvm
-
-namespace ShaderType {
-  enum Type {
-    PIXEL = 0,
-    VERTEX = 1,
-    GEOMETRY = 2,
-    COMPUTE = 3
-  };
-}
-
-/// OpenCL uses address spaces to differentiate between
-/// various memory regions on the hardware. On the CPU
-/// all of the address spaces point to the same memory,
-/// however on the GPU, each address space points to
-/// a separate piece of memory that is unique from other
-/// memory locations.
-namespace AMDGPUAS {
-enum AddressSpaces : unsigned {
-  PRIVATE_ADDRESS  = 0, ///< Address space for private memory.
-  GLOBAL_ADDRESS   = 1, ///< Address space for global memory (RAT0, VTX0).
-  CONSTANT_ADDRESS = 2, ///< Address space for constant memory
-  LOCAL_ADDRESS    = 3, ///< Address space for local memory.
-  FLAT_ADDRESS     = 4, ///< Address space for flat memory.
-  REGION_ADDRESS   = 5, ///< Address space for region memory.
-  PARAM_D_ADDRESS  = 6, ///< Address space for direct addressable parameter memory (CONST0)
-  PARAM_I_ADDRESS  = 7, ///< Address space for indirect addressable parameter memory (VTX1)
-
-  // Do not re-order the CONSTANT_BUFFER_* enums.  Several places depend on this
-  // order to be able to dynamically index a constant buffer, for example:
-  //
-  // ConstantBufferAS = CONSTANT_BUFFER_0 + CBIdx
-
-  CONSTANT_BUFFER_0 = 8,
-  CONSTANT_BUFFER_1 = 9,
-  CONSTANT_BUFFER_2 = 10,
-  CONSTANT_BUFFER_3 = 11,
-  CONSTANT_BUFFER_4 = 12,
-  CONSTANT_BUFFER_5 = 13,
-  CONSTANT_BUFFER_6 = 14,
-  CONSTANT_BUFFER_7 = 15,
-  CONSTANT_BUFFER_8 = 16,
-  CONSTANT_BUFFER_9 = 17,
-  CONSTANT_BUFFER_10 = 18,
-  CONSTANT_BUFFER_11 = 19,
-  CONSTANT_BUFFER_12 = 20,
-  CONSTANT_BUFFER_13 = 21,
-  CONSTANT_BUFFER_14 = 22,
-  CONSTANT_BUFFER_15 = 23,
-  ADDRESS_NONE = 24, ///< Address space for unknown memory.
-  LAST_ADDRESS = ADDRESS_NONE,
-
-  // Some places use this if the address space can't be determined.
-  UNKNOWN_ADDRESS_SPACE = ~0u
-};
-
-} // namespace AMDGPUAS
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPU.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPU.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPU.td (original)
+++ llvm/trunk/lib/Target/R600/AMDGPU.td (removed)
@@ -1,266 +0,0 @@
-//===-- AMDGPU.td - AMDGPU Tablegen files ------------------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-include "llvm/Target/Target.td"
-
-//===----------------------------------------------------------------------===//
-// Subtarget Features
-//===----------------------------------------------------------------------===//
-
-// Debugging Features
-
-def FeatureDumpCode : SubtargetFeature <"DumpCode",
-        "DumpCode",
-        "true",
-        "Dump MachineInstrs in the CodeEmitter">;
-
-def FeatureDumpCodeLower : SubtargetFeature <"dumpcode",
-        "DumpCode",
-        "true",
-        "Dump MachineInstrs in the CodeEmitter">;
-
-def FeatureIRStructurizer : SubtargetFeature <"disable-irstructurizer",
-        "EnableIRStructurizer",
-        "false",
-        "Disable IR Structurizer">;
-
-def FeaturePromoteAlloca : SubtargetFeature <"promote-alloca",
-        "EnablePromoteAlloca",
-        "true",
-        "Enable promote alloca pass">;
-
-// Target features
-
-def FeatureIfCvt : SubtargetFeature <"disable-ifcvt",
-        "EnableIfCvt",
-        "false",
-        "Disable the if conversion pass">;
-
-def FeatureFP64 : SubtargetFeature<"fp64",
-        "FP64",
-        "true",
-        "Enable double precision operations">;
-
-def FeatureFP64Denormals : SubtargetFeature<"fp64-denormals",
-        "FP64Denormals",
-        "true",
-        "Enable double precision denormal handling",
-        [FeatureFP64]>;
-
-def FeatureFastFMAF32 : SubtargetFeature<"fast-fmaf",
-        "FastFMAF32",
-        "true",
-        "Assuming f32 fma is at least as fast as mul + add",
-        []>;
-
-// Some instructions do not support denormals despite this flag. Using
-// fp32 denormals also causes instructions to run at the double
-// precision rate for the device.
-def FeatureFP32Denormals : SubtargetFeature<"fp32-denormals",
-        "FP32Denormals",
-        "true",
-        "Enable single precision denormal handling">;
-
-def Feature64BitPtr : SubtargetFeature<"64BitPtr",
-        "Is64bit",
-        "true",
-        "Specify if 64-bit addressing should be used">;
-
-def FeatureR600ALUInst : SubtargetFeature<"R600ALUInst",
-        "R600ALUInst",
-        "false",
-        "Older version of ALU instructions encoding">;
-
-def FeatureVertexCache : SubtargetFeature<"HasVertexCache",
-        "HasVertexCache",
-        "true",
-        "Specify use of dedicated vertex cache">;
-
-def FeatureCaymanISA : SubtargetFeature<"caymanISA",
-        "CaymanISA",
-        "true",
-        "Use Cayman ISA">;
-
-def FeatureCFALUBug : SubtargetFeature<"cfalubug",
-        "CFALUBug",
-        "true",
-        "GPU has CF_ALU bug">;
-
-// XXX - This should probably be removed once enabled by default
-def FeatureEnableLoadStoreOpt : SubtargetFeature <"load-store-opt",
-        "EnableLoadStoreOpt",
-        "true",
-        "Enable SI load/store optimizer pass">;
-
-def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space",
-        "FlatAddressSpace",
-        "true",
-        "Support flat address space">;
-
-def FeatureVGPRSpilling : SubtargetFeature<"vgpr-spilling",
-        "EnableVGPRSpilling",
-        "true",
-        "Enable spilling of VGPRs to scratch memory">;
-
-def FeatureSGPRInitBug : SubtargetFeature<"sgpr-init-bug",
-        "SGPRInitBug",
-        "true",
-        "VI SGPR initilization bug requiring a fixed SGPR allocation size">;
-
-class SubtargetFeatureFetchLimit <string Value> :
-                          SubtargetFeature <"fetch"#Value,
-        "TexVTXClauseSize",
-        Value,
-        "Limit the maximum number of fetches in a clause to "#Value>;
-
-def FeatureFetchLimit8 : SubtargetFeatureFetchLimit <"8">;
-def FeatureFetchLimit16 : SubtargetFeatureFetchLimit <"16">;
-
-class SubtargetFeatureWavefrontSize <int Value> : SubtargetFeature<
-        "wavefrontsize"#Value,
-        "WavefrontSize",
-        !cast<string>(Value),
-        "The number of threads per wavefront">;
-
-def FeatureWavefrontSize16 : SubtargetFeatureWavefrontSize<16>;
-def FeatureWavefrontSize32 : SubtargetFeatureWavefrontSize<32>;
-def FeatureWavefrontSize64 : SubtargetFeatureWavefrontSize<64>;
-
-class SubtargetFeatureLDSBankCount <int Value> : SubtargetFeature <
-      "ldsbankcount"#Value,
-      "LDSBankCount",
-      !cast<string>(Value),
-      "The number of LDS banks per compute unit.">;
-
-def FeatureLDSBankCount16 : SubtargetFeatureLDSBankCount<16>;
-def FeatureLDSBankCount32 : SubtargetFeatureLDSBankCount<32>;
-
-class SubtargetFeatureLocalMemorySize <int Value> : SubtargetFeature<
-        "localmemorysize"#Value,
-        "LocalMemorySize",
-        !cast<string>(Value),
-        "The size of local memory in bytes">;
-
-def FeatureGCN : SubtargetFeature<"gcn",
-        "IsGCN",
-        "true",
-        "GCN or newer GPU">;
-
-def FeatureGCN1Encoding : SubtargetFeature<"gcn1-encoding",
-        "GCN1Encoding",
-        "true",
-        "Encoding format for SI and CI">;
-
-def FeatureGCN3Encoding : SubtargetFeature<"gcn3-encoding",
-        "GCN3Encoding",
-        "true",
-        "Encoding format for VI">;
-
-def FeatureCIInsts : SubtargetFeature<"ci-insts",
-        "CIInsts",
-        "true",
-        "Additional intstructions for CI+">;
-
-// Dummy feature used to disable assembler instructions.
-def FeatureDisable : SubtargetFeature<"",
-                                      "FeatureDisable","true",
-                                      "Dummy feature to disable assembler"
-                                      " instructions">;
-
-class SubtargetFeatureGeneration <string Value,
-                                  list<SubtargetFeature> Implies> :
-        SubtargetFeature <Value, "Gen", "AMDGPUSubtarget::"#Value,
-                          Value#" GPU generation", Implies>;
-
-def FeatureLocalMemorySize0 : SubtargetFeatureLocalMemorySize<0>;
-def FeatureLocalMemorySize32768 : SubtargetFeatureLocalMemorySize<32768>;
-def FeatureLocalMemorySize65536 : SubtargetFeatureLocalMemorySize<65536>;
-
-def FeatureR600 : SubtargetFeatureGeneration<"R600",
-        [FeatureR600ALUInst, FeatureFetchLimit8, FeatureLocalMemorySize0]>;
-
-def FeatureR700 : SubtargetFeatureGeneration<"R700",
-        [FeatureFetchLimit16, FeatureLocalMemorySize0]>;
-
-def FeatureEvergreen : SubtargetFeatureGeneration<"EVERGREEN",
-        [FeatureFetchLimit16, FeatureLocalMemorySize32768]>;
-
-def FeatureNorthernIslands : SubtargetFeatureGeneration<"NORTHERN_ISLANDS",
-        [FeatureFetchLimit16, FeatureWavefrontSize64,
-         FeatureLocalMemorySize32768]
->;
-
-def FeatureSouthernIslands : SubtargetFeatureGeneration<"SOUTHERN_ISLANDS",
-        [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize32768,
-         FeatureWavefrontSize64, FeatureGCN, FeatureGCN1Encoding,
-         FeatureLDSBankCount32]>;
-
-def FeatureSeaIslands : SubtargetFeatureGeneration<"SEA_ISLANDS",
-        [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
-         FeatureWavefrontSize64, FeatureGCN, FeatureFlatAddressSpace,
-         FeatureGCN1Encoding, FeatureCIInsts]>;
-
-def FeatureVolcanicIslands : SubtargetFeatureGeneration<"VOLCANIC_ISLANDS",
-        [Feature64BitPtr, FeatureFP64, FeatureLocalMemorySize65536,
-         FeatureWavefrontSize64, FeatureFlatAddressSpace, FeatureGCN,
-         FeatureGCN3Encoding, FeatureCIInsts, FeatureLDSBankCount32]>;
-
-//===----------------------------------------------------------------------===//
-
-def AMDGPUInstrInfo : InstrInfo {
-  let guessInstructionProperties = 1;
-  let noNamedPositionallyEncodedOperands = 1;
-}
-
-def AMDGPUAsmParser : AsmParser {
-  // Some of the R600 registers have the same name, so this crashes.
-  // For example T0_XYZW and T0_XY both have the asm name T0.
-  let ShouldEmitMatchRegisterName = 0;
-}
-
-def AMDGPU : Target {
-  // Pull in Instruction Info:
-  let InstructionSet = AMDGPUInstrInfo;
-  let AssemblyParsers = [AMDGPUAsmParser];
-}
-
-// Dummy Instruction itineraries for pseudo instructions
-def ALU_NULL : FuncUnit;
-def NullALU : InstrItinClass;
-
-//===----------------------------------------------------------------------===//
-// Predicate helper class
-//===----------------------------------------------------------------------===//
-
-def TruePredicate : Predicate<"true">;
-def isSICI : Predicate<
-  "Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS ||"
-  "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS"
->, AssemblerPredicate<"FeatureGCN1Encoding">;
-
-class PredicateControl {
-  Predicate SubtargetPredicate;
-  Predicate SIAssemblerPredicate = isSICI;
-  list<Predicate> AssemblerPredicates = [];
-  Predicate AssemblerPredicate = TruePredicate;
-  list<Predicate> OtherPredicates = [];
-  list<Predicate> Predicates = !listconcat([SubtargetPredicate, AssemblerPredicate],
-                                            AssemblerPredicates,
-                                            OtherPredicates);
-}
-
-// Include AMDGPU TD files
-include "R600Schedule.td"
-include "SISchedule.td"
-include "Processors.td"
-include "AMDGPUInstrInfo.td"
-include "AMDGPUIntrinsics.td"
-include "AMDGPURegisterInfo.td"
-include "AMDGPUInstructions.td"
-include "AMDGPUCallingConv.td"

Removed: llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUAlwaysInlinePass.cpp (removed)
@@ -1,67 +0,0 @@
-//===-- AMDGPUAlwaysInlinePass.cpp - Promote Allocas ----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass marks all internal functions as always_inline and creates
-/// duplicates of all other functions and marks the duplicates as always_inline.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Transforms/Utils/Cloning.h"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUAlwaysInline : public ModulePass {
-
-  static char ID;
-
-public:
-  AMDGPUAlwaysInline() : ModulePass(ID) { }
-  bool runOnModule(Module &M) override;
-  const char *getPassName() const override { return "AMDGPU Always Inline Pass"; }
-};
-
-} // End anonymous namespace
-
-char AMDGPUAlwaysInline::ID = 0;
-
-bool AMDGPUAlwaysInline::runOnModule(Module &M) {
-
-  std::vector<Function*> FuncsToClone;
-  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
-    Function &F = *I;
-    if (!F.hasLocalLinkage() && !F.isDeclaration() && !F.use_empty() &&
-        !F.hasFnAttribute(Attribute::NoInline))
-      FuncsToClone.push_back(&F);
-  }
-
-  for (Function *F : FuncsToClone) {
-    ValueToValueMapTy VMap;
-    Function *NewFunc = CloneFunction(F, VMap, false);
-    NewFunc->setLinkage(GlobalValue::InternalLinkage);
-    F->getParent()->getFunctionList().push_back(NewFunc);
-    F->replaceAllUsesWith(NewFunc);
-  }
-
-  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
-    Function &F = *I;
-    if (F.hasLocalLinkage() && !F.hasFnAttribute(Attribute::NoInline)) {
-      F.addFnAttr(Attribute::AlwaysInline);
-    }
-  }
-  return false;
-}
-
-ModulePass *llvm::createAMDGPUAlwaysInlinePass() {
-  return new AMDGPUAlwaysInline();
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.cpp (removed)
@@ -1,600 +0,0 @@
-//===-- AMDGPUAsmPrinter.cpp - AMDGPU Assebly printer  --------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-///
-/// The AMDGPUAsmPrinter is used to print both assembly string and also binary
-/// code.  When passed an MCAsmStreamer it prints assembly and when passed
-/// an MCObjectStreamer it outputs binary code.
-//
-//===----------------------------------------------------------------------===//
-//
-
-#include "AMDGPUAsmPrinter.h"
-#include "InstPrinter/AMDGPUInstPrinter.h"
-#include "AMDGPU.h"
-#include "AMDKernelCodeT.h"
-#include "AMDGPUSubtarget.h"
-#include "R600Defines.h"
-#include "R600MachineFunctionInfo.h"
-#include "R600RegisterInfo.h"
-#include "SIDefines.h"
-#include "SIMachineFunctionInfo.h"
-#include "SIRegisterInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/ELF.h"
-#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/TargetLoweringObjectFile.h"
-
-using namespace llvm;
-
-// TODO: This should get the default rounding mode from the kernel. We just set
-// the default here, but this could change if the OpenCL rounding mode pragmas
-// are used.
-//
-// The denormal mode here should match what is reported by the OpenCL runtime
-// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
-// can also be override to flush with the -cl-denorms-are-zero compiler flag.
-//
-// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
-// precision, and leaves single precision to flush all and does not report
-// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
-// CL_FP_DENORM for both.
-//
-// FIXME: It seems some instructions do not support single precision denormals
-// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, rsq_*f32, sqrt_f32,
-// and sin_f32, cos_f32 on most parts).
-
-// We want to use these instructions, and using fp32 denormals also causes
-// instructions to run at the double precision rate for the device so it's
-// probably best to just report no single precision denormals.
-static uint32_t getFPMode(const MachineFunction &F) {
-  const AMDGPUSubtarget& ST = F.getSubtarget<AMDGPUSubtarget>();
-  // TODO: Is there any real use for the flush in only / flush out only modes?
-
-  uint32_t FP32Denormals =
-    ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;
-
-  uint32_t FP64Denormals =
-    ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;
-
-  return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
-         FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
-         FP_DENORM_MODE_SP(FP32Denormals) |
-         FP_DENORM_MODE_DP(FP64Denormals);
-}
-
-static AsmPrinter *
-createAMDGPUAsmPrinterPass(TargetMachine &tm,
-                           std::unique_ptr<MCStreamer> &&Streamer) {
-  return new AMDGPUAsmPrinter(tm, std::move(Streamer));
-}
-
-extern "C" void LLVMInitializeR600AsmPrinter() {
-  TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
-  TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
-}
-
-AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
-                                   std::unique_ptr<MCStreamer> Streamer)
-    : AsmPrinter(TM, std::move(Streamer)) {}
-
-void AMDGPUAsmPrinter::EmitEndOfAsmFile(Module &M) {
-
-  // This label is used to mark the end of the .text section.
-  const TargetLoweringObjectFile &TLOF = getObjFileLowering();
-  OutStreamer->SwitchSection(TLOF.getTextSection());
-  MCSymbol *EndOfTextLabel =
-      OutContext.getOrCreateSymbol(StringRef(END_OF_TEXT_LABEL_NAME));
-  OutStreamer->EmitLabel(EndOfTextLabel);
-}
-
-bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
-
-  // The starting address of all shader programs must be 256 bytes aligned.
-  MF.setAlignment(8);
-
-  SetupMachineFunction(MF);
-
-  MCContext &Context = getObjFileLowering().getContext();
-  MCSectionELF *ConfigSection =
-      Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
-  OutStreamer->SwitchSection(ConfigSection);
-
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
-  SIProgramInfo KernelInfo;
-  if (STM.isAmdHsaOS()) {
-    getSIProgramInfo(KernelInfo, MF);
-    EmitAmdKernelCodeT(MF, KernelInfo);
-    OutStreamer->EmitCodeAlignment(2 << (MF.getAlignment() - 1));
-  } else if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-    getSIProgramInfo(KernelInfo, MF);
-    EmitProgramInfoSI(MF, KernelInfo);
-  } else {
-    EmitProgramInfoR600(MF);
-  }
-
-  DisasmLines.clear();
-  HexLines.clear();
-  DisasmLineMaxLen = 0;
-
-  EmitFunctionBody();
-
-  if (isVerbose()) {
-    MCSectionELF *CommentSection =
-        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
-    OutStreamer->SwitchSection(CommentSection);
-
-    if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-      OutStreamer->emitRawComment(" Kernel info:", false);
-      OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen),
-                                  false);
-      OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR),
-                                  false);
-      OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR),
-                                  false);
-      OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode),
-                                  false);
-      OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode),
-                                  false);
-      OutStreamer->emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize),
-                                  false);
-    } else {
-      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
-      OutStreamer->emitRawComment(
-        Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->StackSize)));
-    }
-  }
-
-  if (STM.dumpCode()) {
-
-    OutStreamer->SwitchSection(
-        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0));
-
-    for (size_t i = 0; i < DisasmLines.size(); ++i) {
-      std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
-      Comment += " ; " + HexLines[i] + "\n";
-
-      OutStreamer->EmitBytes(StringRef(DisasmLines[i]));
-      OutStreamer->EmitBytes(StringRef(Comment));
-    }
-  }
-
-  return false;
-}
-
-void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
-  unsigned MaxGPR = 0;
-  bool killPixel = false;
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
-  const R600RegisterInfo *RI =
-      static_cast<const R600RegisterInfo *>(STM.getRegisterInfo());
-  const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
-
-  for (const MachineBasicBlock &MBB : MF) {
-    for (const MachineInstr &MI : MBB) {
-      if (MI.getOpcode() == AMDGPU::KILLGT)
-        killPixel = true;
-      unsigned numOperands = MI.getNumOperands();
-      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
-        const MachineOperand &MO = MI.getOperand(op_idx);
-        if (!MO.isReg())
-          continue;
-        unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff;
-
-        // Register with value > 127 aren't GPR
-        if (HWReg > 127)
-          continue;
-        MaxGPR = std::max(MaxGPR, HWReg);
-      }
-    }
-  }
-
-  unsigned RsrcReg;
-  if (STM.getGeneration() >= AMDGPUSubtarget::EVERGREEN) {
-    // Evergreen / Northern Islands
-    switch (MFI->getShaderType()) {
-    default: // Fall through
-    case ShaderType::COMPUTE:  RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
-    case ShaderType::GEOMETRY: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
-    case ShaderType::PIXEL:    RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
-    case ShaderType::VERTEX:   RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
-    }
-  } else {
-    // R600 / R700
-    switch (MFI->getShaderType()) {
-    default: // Fall through
-    case ShaderType::GEOMETRY: // Fall through
-    case ShaderType::COMPUTE:  // Fall through
-    case ShaderType::VERTEX:   RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
-    case ShaderType::PIXEL:    RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
-    }
-  }
-
-  OutStreamer->EmitIntValue(RsrcReg, 4);
-  OutStreamer->EmitIntValue(S_NUM_GPRS(MaxGPR + 1) |
-                           S_STACK_SIZE(MFI->StackSize), 4);
-  OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
-  OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
-
-  if (MFI->getShaderType() == ShaderType::COMPUTE) {
-    OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
-    OutStreamer->EmitIntValue(RoundUpToAlignment(MFI->LDSSize, 4) >> 2, 4);
-  }
-}
-
-void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
-                                        const MachineFunction &MF) const {
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
-  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-  uint64_t CodeSize = 0;
-  unsigned MaxSGPR = 0;
-  unsigned MaxVGPR = 0;
-  bool VCCUsed = false;
-  bool FlatUsed = false;
-  const SIRegisterInfo *RI =
-      static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
-
-  for (const MachineBasicBlock &MBB : MF) {
-    for (const MachineInstr &MI : MBB) {
-      // TODO: CodeSize should account for multiple functions.
-      CodeSize += MI.getDesc().Size;
-
-      unsigned numOperands = MI.getNumOperands();
-      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
-        const MachineOperand &MO = MI.getOperand(op_idx);
-        unsigned width = 0;
-        bool isSGPR = false;
-
-        if (!MO.isReg()) {
-          continue;
-        }
-        unsigned reg = MO.getReg();
-        if (reg == AMDGPU::VCC || reg == AMDGPU::VCC_LO ||
-	    reg == AMDGPU::VCC_HI) {
-          VCCUsed = true;
-          continue;
-        } else if (reg == AMDGPU::FLAT_SCR ||
-                   reg == AMDGPU::FLAT_SCR_LO ||
-                   reg == AMDGPU::FLAT_SCR_HI) {
-          FlatUsed = true;
-          continue;
-        }
-
-        switch (reg) {
-        default: break;
-        case AMDGPU::SCC:
-        case AMDGPU::EXEC:
-        case AMDGPU::M0:
-          continue;
-        }
-
-        if (AMDGPU::SReg_32RegClass.contains(reg)) {
-          isSGPR = true;
-          width = 1;
-        } else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
-          isSGPR = false;
-          width = 1;
-        } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
-          isSGPR = true;
-          width = 2;
-        } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
-          isSGPR = false;
-          width = 2;
-        } else if (AMDGPU::VReg_96RegClass.contains(reg)) {
-          isSGPR = false;
-          width = 3;
-        } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
-          isSGPR = true;
-          width = 4;
-        } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
-          isSGPR = false;
-          width = 4;
-        } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
-          isSGPR = true;
-          width = 8;
-        } else if (AMDGPU::VReg_256RegClass.contains(reg)) {
-          isSGPR = false;
-          width = 8;
-        } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
-          isSGPR = true;
-          width = 16;
-        } else if (AMDGPU::VReg_512RegClass.contains(reg)) {
-          isSGPR = false;
-          width = 16;
-        } else {
-          llvm_unreachable("Unknown register class");
-        }
-        unsigned hwReg = RI->getEncodingValue(reg) & 0xff;
-        unsigned maxUsed = hwReg + width - 1;
-        if (isSGPR) {
-          MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR;
-        } else {
-          MaxVGPR = maxUsed > MaxVGPR ? maxUsed : MaxVGPR;
-        }
-      }
-    }
-  }
-
-  if (VCCUsed)
-    MaxSGPR += 2;
-
-  if (FlatUsed)
-    MaxSGPR += 2;
-
-  // We found the maximum register index. They start at 0, so add one to get the
-  // number of registers.
-  ProgInfo.NumVGPR = MaxVGPR + 1;
-  ProgInfo.NumSGPR = MaxSGPR + 1;
-
-  if (STM.hasSGPRInitBug()) {
-    if (ProgInfo.NumSGPR > AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG)
-      llvm_unreachable("Too many SGPRs used with the SGPR init bug");
-
-    ProgInfo.NumSGPR = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
-  }
-
-  ProgInfo.VGPRBlocks = (ProgInfo.NumVGPR - 1) / 4;
-  ProgInfo.SGPRBlocks = (ProgInfo.NumSGPR - 1) / 8;
-  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
-  // register.
-  ProgInfo.FloatMode = getFPMode(MF);
-
-  // XXX: Not quite sure what this does, but sc seems to unset this.
-  ProgInfo.IEEEMode = 0;
-
-  // Do not clamp NAN to 0.
-  ProgInfo.DX10Clamp = 0;
-
-  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
-  ProgInfo.ScratchSize = FrameInfo->estimateStackSize(MF);
-
-  ProgInfo.FlatUsed = FlatUsed;
-  ProgInfo.VCCUsed = VCCUsed;
-  ProgInfo.CodeLen = CodeSize;
-
-  unsigned LDSAlignShift;
-  if (STM.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
-    // LDS is allocated in 64 dword blocks.
-    LDSAlignShift = 8;
-  } else {
-    // LDS is allocated in 128 dword blocks.
-    LDSAlignShift = 9;
-  }
-
-  unsigned LDSSpillSize = MFI->LDSWaveSpillSize *
-                          MFI->getMaximumWorkGroupSize(MF);
-
-  ProgInfo.LDSSize = MFI->LDSSize + LDSSpillSize;
-  ProgInfo.LDSBlocks =
-     RoundUpToAlignment(ProgInfo.LDSSize, 1 << LDSAlignShift) >> LDSAlignShift;
-
-  // Scratch is allocated in 256 dword blocks.
-  unsigned ScratchAlignShift = 10;
-  // We need to program the hardware with the amount of scratch memory that
-  // is used by the entire wave.  ProgInfo.ScratchSize is the amount of
-  // scratch memory used per thread.
-  ProgInfo.ScratchBlocks =
-    RoundUpToAlignment(ProgInfo.ScratchSize * STM.getWavefrontSize(),
-                       1 << ScratchAlignShift) >> ScratchAlignShift;
-
-  ProgInfo.ComputePGMRSrc1 =
-      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
-      S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
-      S_00B848_PRIORITY(ProgInfo.Priority) |
-      S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
-      S_00B848_PRIV(ProgInfo.Priv) |
-      S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
-      S_00B848_IEEE_MODE(ProgInfo.DebugMode) |
-      S_00B848_IEEE_MODE(ProgInfo.IEEEMode);
-
-  ProgInfo.ComputePGMRSrc2 =
-      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
-      S_00B84C_USER_SGPR(MFI->NumUserSGPRs) |
-      S_00B84C_TGID_X_EN(1) |
-      S_00B84C_TGID_Y_EN(1) |
-      S_00B84C_TGID_Z_EN(1) |
-      S_00B84C_TG_SIZE_EN(1) |
-      S_00B84C_TIDIG_COMP_CNT(2) |
-      S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks);
-}
-
-static unsigned getRsrcReg(unsigned ShaderType) {
-  switch (ShaderType) {
-  default: // Fall through
-  case ShaderType::COMPUTE:  return R_00B848_COMPUTE_PGM_RSRC1;
-  case ShaderType::GEOMETRY: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
-  case ShaderType::PIXEL:    return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
-  case ShaderType::VERTEX:   return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
-  }
-}
-
-void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
-                                         const SIProgramInfo &KernelInfo) {
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
-  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-  unsigned RsrcReg = getRsrcReg(MFI->getShaderType());
-
-  if (MFI->getShaderType() == ShaderType::COMPUTE) {
-    OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
-
-    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc1, 4);
-
-    OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
-    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc2, 4);
-
-    OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
-    OutStreamer->EmitIntValue(S_00B860_WAVESIZE(KernelInfo.ScratchBlocks), 4);
-
-    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
-    // 0" comment but I don't see a corresponding field in the register spec.
-  } else {
-    OutStreamer->EmitIntValue(RsrcReg, 4);
-    OutStreamer->EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) |
-                              S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4);
-    if (STM.isVGPRSpillingEnabled(MFI)) {
-      OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
-      OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4);
-    }
-  }
-
-  if (MFI->getShaderType() == ShaderType::PIXEL) {
-    OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
-    OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4);
-    OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
-    OutStreamer->EmitIntValue(MFI->PSInputAddr, 4);
-  }
-}
-
-void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
-                                        const SIProgramInfo &KernelInfo) const {
-  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
-  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
-  amd_kernel_code_t header;
-
-  memset(&header, 0, sizeof(header));
-
-  header.amd_code_version_major = AMD_CODE_VERSION_MAJOR;
-  header.amd_code_version_minor = AMD_CODE_VERSION_MINOR;
-
-  header.struct_byte_size = sizeof(amd_kernel_code_t);
-
-  header.target_chip = STM.getAmdKernelCodeChipID();
-
-  header.kernel_code_entry_byte_offset = (1ULL << MF.getAlignment());
-
-  header.compute_pgm_resource_registers =
-      KernelInfo.ComputePGMRSrc1 |
-      (KernelInfo.ComputePGMRSrc2 << 32);
-
-  // Code Properties:
-  header.code_properties = AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR |
-                           AMD_CODE_PROPERTY_IS_PTR64;
-
-  if (KernelInfo.FlatUsed)
-    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;
-
-  if (KernelInfo.ScratchBlocks)
-    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE;
-
-  header.workitem_private_segment_byte_size = KernelInfo.ScratchSize;
-  header.workgroup_group_segment_byte_size = KernelInfo.LDSSize;
-
-  // MFI->ABIArgOffset is the number of bytes for the kernel arguments
-  // plus 36.  36 is the number of bytes reserved at the begining of the
-  // input buffer to store work-group size information.
-  // FIXME: We should be adding the size of the implicit arguments
-  // to this value.
-  header.kernarg_segment_byte_size = MFI->ABIArgOffset;
-
-  header.wavefront_sgpr_count = KernelInfo.NumSGPR;
-  header.workitem_vgpr_count = KernelInfo.NumVGPR;
-
-  // FIXME: What values do I put for these alignments
-  header.kernarg_segment_alignment = 0;
-  header.group_segment_alignment = 0;
-  header.private_segment_alignment = 0;
-
-  header.code_type = 1; // HSA_EXT_CODE_KERNEL
-
-  header.wavefront_size = STM.getWavefrontSize();
-
-  MCSectionELF *VersionSection =
-      OutContext.getELFSection(".hsa.version", ELF::SHT_PROGBITS, 0);
-  OutStreamer->SwitchSection(VersionSection);
-  OutStreamer->EmitBytes(Twine("HSA Code Unit:" +
-                         Twine(header.hsail_version_major) + "." +
-                         Twine(header.hsail_version_minor) + ":" +
-                         "AMD:" +
-                         Twine(header.amd_code_version_major) + "." +
-                         Twine(header.amd_code_version_minor) +  ":" +
-                         "GFX8.1:0").str());
-
-  OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
-
-  if (isVerbose()) {
-    OutStreamer->emitRawComment("amd_code_version_major = " +
-                                Twine(header.amd_code_version_major), false);
-    OutStreamer->emitRawComment("amd_code_version_minor = " +
-                                Twine(header.amd_code_version_minor), false);
-    OutStreamer->emitRawComment("struct_byte_size = " +
-                                Twine(header.struct_byte_size), false);
-    OutStreamer->emitRawComment("target_chip = " +
-                                Twine(header.target_chip), false);
-    OutStreamer->emitRawComment(" compute_pgm_rsrc1: " +
-                                Twine::utohexstr(KernelInfo.ComputePGMRSrc1),
-                                false);
-    OutStreamer->emitRawComment(" compute_pgm_rsrc2: " +
-                                Twine::utohexstr(KernelInfo.ComputePGMRSrc2),
-                                false);
-    OutStreamer->emitRawComment("enable_sgpr_private_segment_buffer = " +
-      Twine((bool)(header.code_properties &
-                   AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE)), false);
-    OutStreamer->emitRawComment("enable_sgpr_kernarg_segment_ptr = " +
-      Twine((bool)(header.code_properties &
-                   AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)), false);
-    OutStreamer->emitRawComment("private_element_size = 2 ", false);
-    OutStreamer->emitRawComment("is_ptr64 = " +
-        Twine((bool)(header.code_properties & AMD_CODE_PROPERTY_IS_PTR64)), false);
-    OutStreamer->emitRawComment("workitem_private_segment_byte_size = " +
-                                Twine(header.workitem_private_segment_byte_size),
-                                false);
-    OutStreamer->emitRawComment("workgroup_group_segment_byte_size = " +
-                                Twine(header.workgroup_group_segment_byte_size),
-                                false);
-    OutStreamer->emitRawComment("gds_segment_byte_size = " +
-                                Twine(header.gds_segment_byte_size), false);
-    OutStreamer->emitRawComment("kernarg_segment_byte_size = " +
-                                Twine(header.kernarg_segment_byte_size), false);
-    OutStreamer->emitRawComment("wavefront_sgpr_count = " +
-                                Twine(header.wavefront_sgpr_count), false);
-    OutStreamer->emitRawComment("workitem_vgpr_count = " +
-                                Twine(header.workitem_vgpr_count), false);
-    OutStreamer->emitRawComment("code_type = " + Twine(header.code_type), false);
-    OutStreamer->emitRawComment("wavefront_size = " +
-                                Twine((int)header.wavefront_size), false);
-    OutStreamer->emitRawComment("optimization_level = " +
-                                Twine(header.optimization_level), false);
-    OutStreamer->emitRawComment("hsail_profile = " +
-                                Twine(header.hsail_profile), false);
-    OutStreamer->emitRawComment("hsail_machine_model = " +
-                                Twine(header.hsail_machine_model), false);
-    OutStreamer->emitRawComment("hsail_version_major = " +
-                                Twine(header.hsail_version_major), false);
-    OutStreamer->emitRawComment("hsail_version_minor = " +
-                                Twine(header.hsail_version_minor), false);
-  }
-
-  OutStreamer->EmitBytes(StringRef((char*)&header, sizeof(header)));
-}
-
-bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
-                                       unsigned AsmVariant,
-                                       const char *ExtraCode, raw_ostream &O) {
-  if (ExtraCode && ExtraCode[0]) {
-    if (ExtraCode[1] != 0)
-      return true; // Unknown modifier.
-
-    switch (ExtraCode[0]) {
-    default:
-      // See if this is a generic print operand
-      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
-    case 'r':
-      break;
-    }
-  }
-
-  AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), O,
-                   *TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo());
-  return false;
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUAsmPrinter.h (removed)
@@ -1,113 +0,0 @@
-//===-- AMDGPUAsmPrinter.h - Print AMDGPU assembly code ---------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief AMDGPU Assembly printer class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUASMPRINTER_H
-#define LLVM_LIB_TARGET_R600_AMDGPUASMPRINTER_H
-
-#include "llvm/CodeGen/AsmPrinter.h"
-#include <vector>
-
-namespace llvm {
-
-class AMDGPUAsmPrinter : public AsmPrinter {
-private:
-  struct SIProgramInfo {
-    SIProgramInfo() :
-      VGPRBlocks(0),
-      SGPRBlocks(0),
-      Priority(0),
-      FloatMode(0),
-      Priv(0),
-      DX10Clamp(0),
-      DebugMode(0),
-      IEEEMode(0),
-      ScratchSize(0),
-      ComputePGMRSrc1(0),
-      LDSBlocks(0),
-      ScratchBlocks(0),
-      ComputePGMRSrc2(0),
-      NumVGPR(0),
-      NumSGPR(0),
-      FlatUsed(false),
-      VCCUsed(false),
-      CodeLen(0) {}
-
-    // Fields set in PGM_RSRC1 pm4 packet.
-    uint32_t VGPRBlocks;
-    uint32_t SGPRBlocks;
-    uint32_t Priority;
-    uint32_t FloatMode;
-    uint32_t Priv;
-    uint32_t DX10Clamp;
-    uint32_t DebugMode;
-    uint32_t IEEEMode;
-    uint32_t ScratchSize;
-
-    uint64_t ComputePGMRSrc1;
-
-    // Fields set in PGM_RSRC2 pm4 packet.
-    uint32_t LDSBlocks;
-    uint32_t ScratchBlocks;
-
-    uint64_t ComputePGMRSrc2;
-
-    uint32_t NumVGPR;
-    uint32_t NumSGPR;
-    uint32_t LDSSize;
-    bool FlatUsed;
-
-    // Bonus information for debugging.
-    bool VCCUsed;
-    uint64_t CodeLen;
-  };
-
-  void getSIProgramInfo(SIProgramInfo &Out, const MachineFunction &MF) const;
-  void findNumUsedRegistersSI(const MachineFunction &MF,
-                              unsigned &NumSGPR,
-                              unsigned &NumVGPR) const;
-
-  /// \brief Emit register usage information so that the GPU driver
-  /// can correctly setup the GPU state.
-  void EmitProgramInfoR600(const MachineFunction &MF);
-  void EmitProgramInfoSI(const MachineFunction &MF, const SIProgramInfo &KernelInfo);
-  void EmitAmdKernelCodeT(const MachineFunction &MF,
-                          const SIProgramInfo &KernelInfo) const;
-
-public:
-  explicit AMDGPUAsmPrinter(TargetMachine &TM,
-                            std::unique_ptr<MCStreamer> Streamer);
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "AMDGPU Assembly Printer";
-  }
-
-  /// Implemented in AMDGPUMCInstLower.cpp
-  void EmitInstruction(const MachineInstr *MI) override;
-
-  void EmitEndOfAsmFile(Module &M) override;
-
-  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
-                       unsigned AsmVariant, const char *ExtraCode,
-                       raw_ostream &O) override;
-
-protected:
-  std::vector<std::string> DisasmLines, HexLines;
-  size_t DisasmLineMaxLen;
-};
-
-} // End anonymous llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUCallingConv.td (removed)
@@ -1,82 +0,0 @@
-//===---- AMDCallingConv.td - Calling Conventions for Radeon GPUs ---------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This describes the calling conventions for the AMD Radeon GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-// Inversion of CCIfInReg
-class CCIfNotInReg<CCAction A> : CCIf<"!ArgFlags.isInReg()", A> {}
-
-// Calling convention for SI
-def CC_SI : CallingConv<[
-
-  CCIfInReg<CCIfType<[f32, i32] , CCAssignToReg<[
-    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
-    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
-    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21
-  ]>>>,
-
-  CCIfInReg<CCIfType<[i64] , CCAssignToRegWithShadow<
-    [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
-    [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
-  >>>,
-
-  CCIfNotInReg<CCIfType<[f32, i32] , CCAssignToReg<[
-    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
-    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
-    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
-    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31
-  ]>>>,
-
-  CCIfByVal<CCIfType<[i64] , CCAssignToRegWithShadow<
-    [ SGPR0, SGPR2, SGPR4, SGPR6, SGPR8, SGPR10, SGPR12, SGPR14 ],
-    [ SGPR1, SGPR3, SGPR5, SGPR7, SGPR9, SGPR11, SGPR13, SGPR15 ]
-  >>>
-
-]>;
-
-// Calling convention for R600
-def CC_R600 : CallingConv<[
-  CCIfInReg<CCIfType<[v4f32, v4i32] , CCAssignToReg<[
-    T0_XYZW, T1_XYZW, T2_XYZW, T3_XYZW, T4_XYZW, T5_XYZW, T6_XYZW, T7_XYZW,
-    T8_XYZW, T9_XYZW, T10_XYZW, T11_XYZW, T12_XYZW, T13_XYZW, T14_XYZW, T15_XYZW,
-    T16_XYZW, T17_XYZW, T18_XYZW, T19_XYZW, T20_XYZW, T21_XYZW, T22_XYZW,
-    T23_XYZW, T24_XYZW, T25_XYZW, T26_XYZW, T27_XYZW, T28_XYZW, T29_XYZW,
-    T30_XYZW, T31_XYZW, T32_XYZW
-  ]>>>
-]>;
-
-// Calling convention for compute kernels
-def CC_AMDGPU_Kernel : CallingConv<[
-  CCCustom<"allocateStack">
-]>;
-
-def CC_AMDGPU : CallingConv<[
-  CCIf<"static_cast<const AMDGPUSubtarget&>"
-        "(State.getMachineFunction().getSubtarget()).getGeneration() >="
-          "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
-        "State.getMachineFunction().getInfo<SIMachineFunctionInfo>()"
-         "->getShaderType() == ShaderType::COMPUTE",
-       CCDelegateTo<CC_AMDGPU_Kernel>>,
-  CCIf<"static_cast<const AMDGPUSubtarget&>"
-        "(State.getMachineFunction().getSubtarget()).getGeneration() < "
-          "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
-         "State.getMachineFunction().getInfo<R600MachineFunctionInfo>()"
-          "->getShaderType() == ShaderType::COMPUTE",
-        CCDelegateTo<CC_AMDGPU_Kernel>>,
-   CCIf<"static_cast<const AMDGPUSubtarget&>"
-         "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
-           "AMDGPUSubtarget::SOUTHERN_ISLANDS",
-        CCDelegateTo<CC_SI>>,
-   CCIf<"static_cast<const AMDGPUSubtarget&>"
-          "(State.getMachineFunction().getSubtarget()).getGeneration() < "
-            "AMDGPUSubtarget::SOUTHERN_ISLANDS",
-        CCDelegateTo<CC_R600>>
-]>;

Removed: llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.cpp (removed)
@@ -1,112 +0,0 @@
-//===----------------------- AMDGPUFrameLowering.cpp ----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-// Interface to describe a layout of a stack frame on an AMDIL target machine
-//
-//===----------------------------------------------------------------------===//
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPURegisterInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Instructions.h"
-
-using namespace llvm;
-AMDGPUFrameLowering::AMDGPUFrameLowering(StackDirection D, unsigned StackAl,
-    int LAO, unsigned TransAl)
-  : TargetFrameLowering(D, StackAl, LAO, TransAl) { }
-
-AMDGPUFrameLowering::~AMDGPUFrameLowering() { }
-
-unsigned AMDGPUFrameLowering::getStackWidth(const MachineFunction &MF) const {
-
-  // XXX: Hardcoding to 1 for now.
-  //
-  // I think the StackWidth should be stored as metadata associated with the
-  // MachineFunction.  This metadata can either be added by a frontend, or
-  // calculated by a R600 specific LLVM IR pass.
-  //
-  // The StackWidth determines how stack objects are laid out in memory.
-  // For a vector stack variable, like: int4 stack[2], the data will be stored
-  // in the following ways depending on the StackWidth.
-  //
-  // StackWidth = 1:
-  //
-  // T0.X = stack[0].x
-  // T1.X = stack[0].y
-  // T2.X = stack[0].z
-  // T3.X = stack[0].w
-  // T4.X = stack[1].x
-  // T5.X = stack[1].y
-  // T6.X = stack[1].z
-  // T7.X = stack[1].w
-  //
-  // StackWidth = 2:
-  //
-  // T0.X = stack[0].x
-  // T0.Y = stack[0].y
-  // T1.X = stack[0].z
-  // T1.Y = stack[0].w
-  // T2.X = stack[1].x
-  // T2.Y = stack[1].y
-  // T3.X = stack[1].z
-  // T3.Y = stack[1].w
-  // 
-  // StackWidth = 4:
-  // T0.X = stack[0].x
-  // T0.Y = stack[0].y
-  // T0.Z = stack[0].z
-  // T0.W = stack[0].w
-  // T1.X = stack[1].x
-  // T1.Y = stack[1].y
-  // T1.Z = stack[1].z
-  // T1.W = stack[1].w
-  return 1;
-}
-
-/// \returns The number of registers allocated for \p FI.
-int AMDGPUFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
-                                         int FI) const {
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
-  // Start the offset at 2 so we don't overwrite work group information.
-  // XXX: We should only do this when the shader actually uses this
-  // information.
-  unsigned OffsetBytes = 2 * (getStackWidth(MF) * 4);
-  int UpperBound = FI == -1 ? MFI->getNumObjects() : FI;
-
-  for (int i = MFI->getObjectIndexBegin(); i < UpperBound; ++i) {
-    OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(i));
-    OffsetBytes += MFI->getObjectSize(i);
-    // Each register holds 4 bytes, so we must always align the offset to at
-    // least 4 bytes, so that 2 frame objects won't share the same register.
-    OffsetBytes = RoundUpToAlignment(OffsetBytes, 4);
-  }
-
-  if (FI != -1)
-    OffsetBytes = RoundUpToAlignment(OffsetBytes, MFI->getObjectAlignment(FI));
-
-  return OffsetBytes / (getStackWidth(MF) * 4);
-}
-
-const TargetFrameLowering::SpillSlot *
-AMDGPUFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
-  NumEntries = 0;
-  return nullptr;
-}
-void AMDGPUFrameLowering::emitPrologue(MachineFunction &MF,
-                                       MachineBasicBlock &MBB) const {}
-void
-AMDGPUFrameLowering::emitEpilogue(MachineFunction &MF,
-                                  MachineBasicBlock &MBB) const {
-}
-
-bool
-AMDGPUFrameLowering::hasFP(const MachineFunction &MF) const {
-  return false;
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUFrameLowering.h (removed)
@@ -1,45 +0,0 @@
-//===--------------------- AMDGPUFrameLowering.h ----------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface to describe a layout of a stack frame on a AMDIL target
-/// machine.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUFRAMELOWERING_H
-#define LLVM_LIB_TARGET_R600_AMDGPUFRAMELOWERING_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/Target/TargetFrameLowering.h"
-
-namespace llvm {
-
-/// \brief Information about the stack frame layout on the AMDGPU targets.
-///
-/// It holds the direction of the stack growth, the known stack alignment on
-/// entry to each function, and the offset to the locals area.
-/// See TargetFrameInfo for more comments.
-class AMDGPUFrameLowering : public TargetFrameLowering {
-public:
-  AMDGPUFrameLowering(StackDirection D, unsigned StackAl, int LAO,
-                      unsigned TransAl = 1);
-  virtual ~AMDGPUFrameLowering();
-
-  /// \returns The number of 32-bit sub-registers that are used when storing
-  /// values to the stack.
-  unsigned getStackWidth(const MachineFunction &MF) const;
-  int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
-  const SpillSlot *
-    getCalleeSavedSpillSlots(unsigned &NumEntries) const override;
-  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
-  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
-  bool hasFP(const MachineFunction &MF) const override;
-};
-} // namespace llvm
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUISelDAGToDAG.cpp (removed)
@@ -1,1371 +0,0 @@
-//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Defines an instruction selector for the AMDGPU target.
-//
-//===----------------------------------------------------------------------===//
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUISelLowering.h" // For AMDGPUISD
-#include "AMDGPURegisterInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "R600InstrInfo.h"
-#include "SIDefines.h"
-#include "SIISelLowering.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/FunctionLoweringInfo.h"
-#include "llvm/CodeGen/PseudoSourceValue.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/IR/Function.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-// Instruction Selector Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-/// AMDGPU specific code to select AMDGPU machine instructions for
-/// SelectionDAG operations.
-class AMDGPUDAGToDAGISel : public SelectionDAGISel {
-  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
-  // make the right decision when generating code for different targets.
-  const AMDGPUSubtarget *Subtarget;
-public:
-  AMDGPUDAGToDAGISel(TargetMachine &TM);
-  virtual ~AMDGPUDAGToDAGISel();
-  bool runOnMachineFunction(MachineFunction &MF) override;
-  SDNode *Select(SDNode *N) override;
-  const char *getPassName() const override;
-  void PostprocessISelDAG() override;
-
-private:
-  bool isInlineImmediate(SDNode *N) const;
-  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
-                   const R600InstrInfo *TII);
-  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
-  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
-
-  // Complex pattern selectors
-  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
-  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
-  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
-
-  static bool checkType(const Value *ptr, unsigned int addrspace);
-  static bool checkPrivateAddress(const MachineMemOperand *Op);
-
-  static bool isGlobalStore(const StoreSDNode *N);
-  static bool isFlatStore(const StoreSDNode *N);
-  static bool isPrivateStore(const StoreSDNode *N);
-  static bool isLocalStore(const StoreSDNode *N);
-  static bool isRegionStore(const StoreSDNode *N);
-
-  bool isCPLoad(const LoadSDNode *N) const;
-  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
-  bool isGlobalLoad(const LoadSDNode *N) const;
-  bool isFlatLoad(const LoadSDNode *N) const;
-  bool isParamLoad(const LoadSDNode *N) const;
-  bool isPrivateLoad(const LoadSDNode *N) const;
-  bool isLocalLoad(const LoadSDNode *N) const;
-  bool isRegionLoad(const LoadSDNode *N) const;
-
-  SDNode *glueCopyToM0(SDNode *N) const;
-
-  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
-  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
-  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
-                                       SDValue& Offset);
-  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
-  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
-  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
-                       unsigned OffsetBits) const;
-  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
-  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
-                                 SDValue &Offset1) const;
-  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
-                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
-                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
-                   SDValue &TFE) const;
-  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
-                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
-                         SDValue &SLC, SDValue &TFE) const;
-  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
-                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
-                         SDValue &SLC) const;
-  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
-                          SDValue &SOffset, SDValue &ImmOffset) const;
-  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
-                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
-                         SDValue &TFE) const;
-  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
-                         SDValue &Offset, SDValue &GLC) const;
-  SDNode *SelectAddrSpaceCast(SDNode *N);
-  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
-  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
-                       SDValue &Clamp, SDValue &Omod) const;
-
-  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
-                            SDValue &Omod) const;
-  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
-                                 SDValue &Clamp,
-                                 SDValue &Omod) const;
-
-  SDNode *SelectADD_SUB_I64(SDNode *N);
-  SDNode *SelectDIV_SCALE(SDNode *N);
-
-  SDNode *getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
-                   uint32_t Offset, uint32_t Width);
-  SDNode *SelectS_BFEFromShifts(SDNode *N);
-  SDNode *SelectS_BFE(SDNode *N);
-
-  // Include the pieces autogenerated from the target description.
-#include "AMDGPUGenDAGISel.inc"
-};
-}  // end anonymous namespace
-
-/// \brief This pass converts a legalized DAG into a AMDGPU-specific
-// DAG, ready for instruction scheduling.
-FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
-  return new AMDGPUDAGToDAGISel(TM);
-}
-
-AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
-    : SelectionDAGISel(TM) {}
-
-bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
-  Subtarget = &static_cast<const AMDGPUSubtarget &>(MF.getSubtarget());
-  return SelectionDAGISel::runOnMachineFunction(MF);
-}
-
-AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
-}
-
-bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
-  const SITargetLowering *TL
-      = static_cast<const SITargetLowering *>(getTargetLowering());
-  return TL->analyzeImmediate(N) == 0;
-}
-
-/// \brief Determine the register class for \p OpNo
-/// \returns The register class of the virtual register that will be used for
-/// the given operand number \p OpNo or NULL if the register class cannot be
-/// determined.
-const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
-                                                          unsigned OpNo) const {
-  if (!N->isMachineOpcode())
-    return nullptr;
-
-  switch (N->getMachineOpcode()) {
-  default: {
-    const MCInstrDesc &Desc =
-        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
-    unsigned OpIdx = Desc.getNumDefs() + OpNo;
-    if (OpIdx >= Desc.getNumOperands())
-      return nullptr;
-    int RegClass = Desc.OpInfo[OpIdx].RegClass;
-    if (RegClass == -1)
-      return nullptr;
-
-    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
-  }
-  case AMDGPU::REG_SEQUENCE: {
-    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
-    const TargetRegisterClass *SuperRC =
-        Subtarget->getRegisterInfo()->getRegClass(RCID);
-
-    SDValue SubRegOp = N->getOperand(OpNo + 1);
-    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
-    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
-                                                              SubRegIdx);
-  }
-  }
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRParam(
-  SDValue Addr, SDValue& R1, SDValue& R2) {
-
-  if (Addr.getOpcode() == ISD::FrameIndex) {
-    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
-      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
-    } else {
-      R1 = Addr;
-      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
-    }
-  } else if (Addr.getOpcode() == ISD::ADD) {
-    R1 = Addr.getOperand(0);
-    R2 = Addr.getOperand(1);
-  } else {
-    R1 = Addr;
-    R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
-  }
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
-  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
-      Addr.getOpcode() == ISD::TargetGlobalAddress) {
-    return false;
-  }
-  return SelectADDRParam(Addr, R1, R2);
-}
-
-
-bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
-  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
-      Addr.getOpcode() == ISD::TargetGlobalAddress) {
-    return false;
-  }
-
-  if (Addr.getOpcode() == ISD::FrameIndex) {
-    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
-      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
-      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
-    } else {
-      R1 = Addr;
-      R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
-    }
-  } else if (Addr.getOpcode() == ISD::ADD) {
-    R1 = Addr.getOperand(0);
-    R2 = Addr.getOperand(1);
-  } else {
-    R1 = Addr;
-    R2 = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i64);
-  }
-  return true;
-}
-
-SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N) const {
-  if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
-      !checkType(cast<MemSDNode>(N)->getMemOperand()->getValue(),
-                 AMDGPUAS::LOCAL_ADDRESS))
-    return N;
-
-  const SITargetLowering& Lowering =
-      *static_cast<const SITargetLowering*>(getTargetLowering());
-
-  // Write max value to m0 before each load operation
-
-  SDValue M0 = Lowering.copyToM0(*CurDAG, CurDAG->getEntryNode(), SDLoc(N),
-                                 CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
-
-  SDValue Glue = M0.getValue(1);
-
-  SmallVector <SDValue, 8> Ops;
-  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
-     Ops.push_back(N->getOperand(i));
-  }
-  Ops.push_back(Glue);
-  CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
-
-  return N;
-}
-
-SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
-  unsigned int Opc = N->getOpcode();
-  if (N->isMachineOpcode()) {
-    N->setNodeId(-1);
-    return nullptr;   // Already selected.
-  }
-
-  if (isa<AtomicSDNode>(N))
-    N = glueCopyToM0(N);
-
-  switch (Opc) {
-  default: break;
-  // We are selecting i64 ADD here instead of custom lower it during
-  // DAG legalization, so we can fold some i64 ADDs used for address
-  // calculation into the LOAD and STORE instructions.
-  case ISD::ADD:
-  case ISD::SUB: {
-    if (N->getValueType(0) != MVT::i64 ||
-        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
-      break;
-
-    return SelectADD_SUB_I64(N);
-  }
-  case ISD::SCALAR_TO_VECTOR:
-  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
-  case ISD::BUILD_VECTOR: {
-    unsigned RegClassID;
-    const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
-    EVT VT = N->getValueType(0);
-    unsigned NumVectorElts = VT.getVectorNumElements();
-    EVT EltVT = VT.getVectorElementType();
-    assert(EltVT.bitsEq(MVT::i32));
-    if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-      bool UseVReg = true;
-      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
-                                                    U != E; ++U) {
-        if (!U->isMachineOpcode()) {
-          continue;
-        }
-        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
-        if (!RC) {
-          continue;
-        }
-        if (static_cast<const SIRegisterInfo *>(TRI)->isSGPRClass(RC)) {
-          UseVReg = false;
-        }
-      }
-      switch(NumVectorElts) {
-      case 1: RegClassID = UseVReg ? AMDGPU::VGPR_32RegClassID :
-                                     AMDGPU::SReg_32RegClassID;
-        break;
-      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
-                                     AMDGPU::SReg_64RegClassID;
-        break;
-      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
-                                     AMDGPU::SReg_128RegClassID;
-        break;
-      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
-                                     AMDGPU::SReg_256RegClassID;
-        break;
-      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
-                                      AMDGPU::SReg_512RegClassID;
-        break;
-      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
-      }
-    } else {
-      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
-      // that adds a 128 bits reg copy when going through TwoAddressInstructions
-      // pass. We want to avoid 128 bits copies as much as possible because they
-      // can't be bundled by our scheduler.
-      switch(NumVectorElts) {
-      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
-      case 4:
-        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
-          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
-        else
-          RegClassID = AMDGPU::R600_Reg128RegClassID;
-        break;
-      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
-      }
-    }
-
-    SDLoc DL(N);
-    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
-
-    if (NumVectorElts == 1) {
-      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
-                                  N->getOperand(0), RegClass);
-    }
-
-    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
-                                  "supported yet");
-    // 16 = Max Num Vector Elements
-    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
-    // 1 = Vector Register Class
-    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
-
-    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
-    bool IsRegSeq = true;
-    unsigned NOps = N->getNumOperands();
-    for (unsigned i = 0; i < NOps; i++) {
-      // XXX: Why is this here?
-      if (isa<RegisterSDNode>(N->getOperand(i))) {
-        IsRegSeq = false;
-        break;
-      }
-      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
-      RegSeqArgs[1 + (2 * i) + 1] =
-              CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL,
-                                        MVT::i32);
-    }
-
-    if (NOps != NumVectorElts) {
-      // Fill in the missing undef elements if this was a scalar_to_vector.
-      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
-
-      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
-                                                     DL, EltVT);
-      for (unsigned i = NOps; i < NumVectorElts; ++i) {
-        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
-        RegSeqArgs[1 + (2 * i) + 1] =
-          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL, MVT::i32);
-      }
-    }
-
-    if (!IsRegSeq)
-      break;
-    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
-                                RegSeqArgs);
-  }
-  case ISD::BUILD_PAIR: {
-    SDValue RC, SubReg0, SubReg1;
-    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-      break;
-    }
-    SDLoc DL(N);
-    if (N->getValueType(0) == MVT::i128) {
-      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
-      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
-      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
-    } else if (N->getValueType(0) == MVT::i64) {
-      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
-      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
-      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
-    } else {
-      llvm_unreachable("Unhandled value type for BUILD_PAIR");
-    }
-    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
-                            N->getOperand(1), SubReg1 };
-    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
-                                  DL, N->getValueType(0), Ops);
-  }
-
-  case ISD::Constant:
-  case ISD::ConstantFP: {
-    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
-        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
-      break;
-
-    uint64_t Imm;
-    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
-      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
-    else {
-      ConstantSDNode *C = cast<ConstantSDNode>(N);
-      Imm = C->getZExtValue();
-    }
-
-    SDLoc DL(N);
-    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
-                                CurDAG->getConstant(Imm & 0xFFFFFFFF, DL,
-                                                    MVT::i32));
-    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
-                                CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
-    const SDValue Ops[] = {
-      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
-      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
-      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
-    };
-
-    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
-                                  N->getValueType(0), Ops);
-  }
-
-  case ISD::LOAD: {
-    LoadSDNode *LD = cast<LoadSDNode>(N);
-    SDLoc SL(N);
-    EVT VT = N->getValueType(0);
-
-    if (VT != MVT::i64 || LD->getExtensionType() != ISD::NON_EXTLOAD) {
-      N = glueCopyToM0(N);
-      break;
-    }
-
-    // To simplify the TableGen patterns, we replace all i64 loads with
-    // v2i32 loads.  Alternatively, we could promote i64 loads to v2i32
-    // during DAG legalization; however, some places in the DAG legalizer
-    // (e.g. ExpandUnalignedLoad) assume that i64 loads are legal whenever
-    // i64 is a legal type, so doing this promotion early can cause problems.
-
-    SDValue NewLoad = CurDAG->getLoad(MVT::v2i32, SDLoc(N), LD->getChain(),
-                                      LD->getBasePtr(), LD->getMemOperand());
-    SDValue BitCast = CurDAG->getNode(ISD::BITCAST, SL,
-                                      MVT::i64, NewLoad);
-    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLoad.getValue(1));
-    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), BitCast);
-    SDNode *Load = glueCopyToM0(NewLoad.getNode());
-    SelectCode(Load);
-    N = BitCast.getNode();
-    break;
-  }
-
-  case ISD::STORE: {
-    // Handle i64 stores here for the same reason mentioned above for loads.
-    StoreSDNode *ST = cast<StoreSDNode>(N);
-    SDValue Value = ST->getValue();
-    if (Value.getValueType() == MVT::i64 && !ST->isTruncatingStore()) {
-
-      SDValue NewValue = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
-                                        MVT::v2i32, Value);
-      SDValue NewStore = CurDAG->getStore(ST->getChain(), SDLoc(N), NewValue,
-                                          ST->getBasePtr(), ST->getMemOperand());
-
-      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewStore);
-
-      if (NewValue.getOpcode() == ISD::BITCAST) {
-        Select(NewStore.getNode());
-        return SelectCode(NewValue.getNode());
-      }
-
-      // getNode() may fold the bitcast if its input was another bitcast.  If that
-      // happens we should only select the new store.
-      N = NewStore.getNode();
-    }
-
-    N = glueCopyToM0(N);
-    break;
-  }
-
-  case AMDGPUISD::REGISTER_LOAD: {
-    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
-      break;
-    SDValue Addr, Offset;
-
-    SDLoc DL(N);
-    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
-    const SDValue Ops[] = {
-      Addr,
-      Offset,
-      CurDAG->getTargetConstant(0, DL, MVT::i32),
-      N->getOperand(0),
-    };
-    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, DL,
-                                  CurDAG->getVTList(MVT::i32, MVT::i64,
-                                                    MVT::Other),
-                                  Ops);
-  }
-  case AMDGPUISD::REGISTER_STORE: {
-    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
-      break;
-    SDValue Addr, Offset;
-    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
-    SDLoc DL(N);
-    const SDValue Ops[] = {
-      N->getOperand(1),
-      Addr,
-      Offset,
-      CurDAG->getTargetConstant(0, DL, MVT::i32),
-      N->getOperand(0),
-    };
-    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, DL,
-                                        CurDAG->getVTList(MVT::Other),
-                                        Ops);
-  }
-
-  case AMDGPUISD::BFE_I32:
-  case AMDGPUISD::BFE_U32: {
-    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
-      break;
-
-    // There is a scalar version available, but unlike the vector version which
-    // has a separate operand for the offset and width, the scalar version packs
-    // the width and offset into a single operand. Try to move to the scalar
-    // version if the offsets are constant, so that we can try to keep extended
-    // loads of kernel arguments in SGPRs.
-
-    // TODO: Technically we could try to pattern match scalar bitshifts of
-    // dynamic values, but it's probably not useful.
-    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
-    if (!Offset)
-      break;
-
-    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
-    if (!Width)
-      break;
-
-    bool Signed = Opc == AMDGPUISD::BFE_I32;
-
-    uint32_t OffsetVal = Offset->getZExtValue();
-    uint32_t WidthVal = Width->getZExtValue();
-
-    return getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32, SDLoc(N),
-                    N->getOperand(0), OffsetVal, WidthVal);
-
-  }
-  case AMDGPUISD::DIV_SCALE: {
-    return SelectDIV_SCALE(N);
-  }
-  case ISD::CopyToReg: {
-    const SITargetLowering& Lowering =
-      *static_cast<const SITargetLowering*>(getTargetLowering());
-    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
-    break;
-  }
-  case ISD::ADDRSPACECAST:
-    return SelectAddrSpaceCast(N);
-  case ISD::AND:
-  case ISD::SRL:
-  case ISD::SRA:
-    if (N->getValueType(0) != MVT::i32 ||
-        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
-      break;
-
-    return SelectS_BFE(N);
-  }
-
-  return SelectCode(N);
-}
-
-
-bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
-  assert(AS != 0 && "Use checkPrivateAddress instead.");
-  if (!Ptr)
-    return false;
-
-  return Ptr->getType()->getPointerAddressSpace() == AS;
-}
-
-bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
-  if (Op->getPseudoValue())
-    return true;
-
-  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
-    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
-
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
-  const Value *MemVal = N->getMemOperand()->getValue();
-  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
-          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
-          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
-}
-
-bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
-  const Value *MemVal = N->getMemOperand()->getValue();
-  if (CbId == -1)
-    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);
-
-  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
-}
-
-bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
-  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS)
-    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
-        N->getMemoryVT().bitsLT(MVT::i32))
-      return true;
-
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isLocalLoad(const  LoadSDNode *N) const {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isFlatLoad(const  LoadSDNode *N) const {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isRegionLoad(const  LoadSDNode *N) const {
-  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
-}
-
-bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
-  MachineMemOperand *MMO = N->getMemOperand();
-  if (checkPrivateAddress(N->getMemOperand())) {
-    if (MMO) {
-      const PseudoSourceValue *PSV = MMO->getPseudoValue();
-      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
-  if (checkPrivateAddress(N->getMemOperand())) {
-    // Check to make sure we are not a constant pool load or a constant load
-    // that is marked as a private load
-    if (isCPLoad(N) || isConstantLoad(N, -1)) {
-      return false;
-    }
-  }
-
-  const Value *MemVal = N->getMemOperand()->getValue();
-  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
-      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
-      !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) &&
-      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
-      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
-      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
-      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
-    return true;
-  }
-  return false;
-}
-
-const char *AMDGPUDAGToDAGISel::getPassName() const {
-  return "AMDGPU DAG->DAG Pattern Instruction Selection";
-}
-
-#ifdef DEBUGTMP
-#undef INT64_C
-#endif
-#undef DEBUGTMP
-
-//===----------------------------------------------------------------------===//
-// Complex Patterns
-//===----------------------------------------------------------------------===//
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
-                                                         SDValue& IntPtr) {
-  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
-    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
-                                       true);
-    return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
-    SDValue& BaseReg, SDValue &Offset) {
-  if (!isa<ConstantSDNode>(Addr)) {
-    BaseReg = Addr;
-    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
-    return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
-                                           SDValue &Offset) {
-  ConstantSDNode *IMMOffset;
-
-  if (Addr.getOpcode() == ISD::ADD
-      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
-      && isInt<16>(IMMOffset->getZExtValue())) {
-
-      Base = Addr.getOperand(0);
-      Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
-                                         MVT::i32);
-      return true;
-  // If the pointer address is constant, we can move it to the offset field.
-  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
-             && isInt<16>(IMMOffset->getZExtValue())) {
-    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
-                                  SDLoc(CurDAG->getEntryNode()),
-                                  AMDGPU::ZERO, MVT::i32);
-    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
-                                       MVT::i32);
-    return true;
-  }
-
-  // Default case, no offset
-  Base = Addr;
-  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
-                                            SDValue &Offset) {
-  ConstantSDNode *C;
-  SDLoc DL(Addr);
-
-  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
-    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
-    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
-  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
-            (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
-    Base = Addr.getOperand(0);
-    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
-  } else {
-    Base = Addr;
-    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
-  }
-
-  return true;
-}
-
-SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
-  SDLoc DL(N);
-  SDValue LHS = N->getOperand(0);
-  SDValue RHS = N->getOperand(1);
-
-  bool IsAdd = (N->getOpcode() == ISD::ADD);
-
-  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
-  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
-
-  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
-                                       DL, MVT::i32, LHS, Sub0);
-  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
-                                       DL, MVT::i32, LHS, Sub1);
-
-  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
-                                       DL, MVT::i32, RHS, Sub0);
-  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
-                                       DL, MVT::i32, RHS, Sub1);
-
-  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
-  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
-
-
-  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
-  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
-
-  SDNode *AddLo = CurDAG->getMachineNode( Opc, DL, VTList, AddLoArgs);
-  SDValue Carry(AddLo, 1);
-  SDNode *AddHi
-    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
-                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);
-
-  SDValue Args[5] = {
-    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
-    SDValue(AddLo,0),
-    Sub0,
-    SDValue(AddHi,0),
-    Sub1,
-  };
-  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
-}
-
-// We need to handle this here because tablegen doesn't support matching
-// instructions with multiple outputs.
-SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
-  SDLoc SL(N);
-  EVT VT = N->getValueType(0);
-
-  assert(VT == MVT::f32 || VT == MVT::f64);
-
-  unsigned Opc
-    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
-
-  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
-  SDValue Ops[8];
-
-  SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
-  SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
-  SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
-  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
-}
-
-bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
-                                         unsigned OffsetBits) const {
-  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
-      (OffsetBits == 8 && !isUInt<8>(Offset)))
-    return false;
-
-  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
-    return true;
-
-  // On Southern Islands instruction with a negative base value and an offset
-  // don't seem to work.
-  return CurDAG->SignBitIsZero(Base);
-}
-
-bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
-                                              SDValue &Offset) const {
-  if (CurDAG->isBaseWithConstantOffset(Addr)) {
-    SDValue N0 = Addr.getOperand(0);
-    SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
-    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
-      // (add n0, c0)
-      Base = N0;
-      Offset = N1;
-      return true;
-    }
-  }
-
-  SDLoc DL(Addr);
-
-  // If we have a constant address, prefer to put the constant into the
-  // offset. This can save moves to load the constant address since multiple
-  // operations can share the zero base address register, and enables merging
-  // into read2 / write2 instructions.
-  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
-    if (isUInt<16>(CAddr->getZExtValue())) {
-      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
-      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
-                                 DL, MVT::i32, Zero);
-      Base = SDValue(MovZero, 0);
-      Offset = Addr;
-      return true;
-    }
-  }
-
-  // default case
-  Base = Addr;
-  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
-                                                   SDValue &Offset0,
-                                                   SDValue &Offset1) const {
-  SDLoc DL(Addr);
-
-  if (CurDAG->isBaseWithConstantOffset(Addr)) {
-    SDValue N0 = Addr.getOperand(0);
-    SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
-    unsigned DWordOffset0 = C1->getZExtValue() / 4;
-    unsigned DWordOffset1 = DWordOffset0 + 1;
-    // (add n0, c0)
-    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
-      Base = N0;
-      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
-      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
-      return true;
-    }
-  }
-
-  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
-    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
-    unsigned DWordOffset1 = DWordOffset0 + 1;
-    assert(4 * DWordOffset0 == CAddr->getZExtValue());
-
-    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
-      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
-      MachineSDNode *MovZero
-        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
-                                 DL, MVT::i32, Zero);
-      Base = SDValue(MovZero, 0);
-      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
-      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
-      return true;
-    }
-  }
-
-  // default case
-  Base = Addr;
-  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
-  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
-  return true;
-}
-
-static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
-  return isUInt<12>(Imm->getZExtValue());
-}
-
-void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
-                                     SDValue &VAddr, SDValue &SOffset,
-                                     SDValue &Offset, SDValue &Offen,
-                                     SDValue &Idxen, SDValue &Addr64,
-                                     SDValue &GLC, SDValue &SLC,
-                                     SDValue &TFE) const {
-  SDLoc DL(Addr);
-
-  GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
-  SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
-  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
-
-  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
-  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
-  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
-  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
-
-  if (CurDAG->isBaseWithConstantOffset(Addr)) {
-    SDValue N0 = Addr.getOperand(0);
-    SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
-
-    if (N0.getOpcode() == ISD::ADD) {
-      // (add (add N2, N3), C1) -> addr64
-      SDValue N2 = N0.getOperand(0);
-      SDValue N3 = N0.getOperand(1);
-      Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
-      Ptr = N2;
-      VAddr = N3;
-    } else {
-
-      // (add N0, C1) -> offset
-      VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
-      Ptr = N0;
-    }
-
-    if (isLegalMUBUFImmOffset(C1)) {
-        Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
-        return;
-    } else if (isUInt<32>(C1->getZExtValue())) {
-      // Illegal offset, store it in soffset.
-      Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
-      SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
-                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
-                        0);
-      return;
-    }
-  }
-
-  if (Addr.getOpcode() == ISD::ADD) {
-    // (add N0, N1) -> addr64
-    SDValue N0 = Addr.getOperand(0);
-    SDValue N1 = Addr.getOperand(1);
-    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
-    Ptr = N0;
-    VAddr = N1;
-    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
-    return;
-  }
-
-  // default case -> offset
-  VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
-  Ptr = Addr;
-  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
-
-}
-
-bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
-                                           SDValue &VAddr, SDValue &SOffset,
-                                           SDValue &Offset, SDValue &GLC,
-                                           SDValue &SLC, SDValue &TFE) const {
-  SDValue Ptr, Offen, Idxen, Addr64;
-
-  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
-              GLC, SLC, TFE);
-
-  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
-  if (C->getSExtValue()) {
-    SDLoc DL(Addr);
-
-    const SITargetLowering& Lowering =
-      *static_cast<const SITargetLowering*>(getTargetLowering());
-
-    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
-    return true;
-  }
-
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
-                                           SDValue &VAddr, SDValue &SOffset,
-					   SDValue &Offset,
-					   SDValue &SLC) const {
-  SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
-  SDValue GLC, TFE;
-
-  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
-}
-
-bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
-                                            SDValue &VAddr, SDValue &SOffset,
-                                            SDValue &ImmOffset) const {
-
-  SDLoc DL(Addr);
-  MachineFunction &MF = CurDAG->getMachineFunction();
-  const SIRegisterInfo *TRI =
-      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  const SITargetLowering& Lowering =
-    *static_cast<const SITargetLowering*>(getTargetLowering());
-
-  unsigned ScratchOffsetReg =
-      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
-  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
-                                ScratchOffsetReg, MVT::i32);
-  SDValue Sym0 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD0", MVT::i32);
-  SDValue ScratchRsrcDword0 =
-      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym0), 0);
-
-  SDValue Sym1 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD1", MVT::i32);
-  SDValue ScratchRsrcDword1 =
-      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym1), 0);
-
-  const SDValue RsrcOps[] = {
-      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
-      ScratchRsrcDword0,
-      CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
-      ScratchRsrcDword1,
-      CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
-  };
-  SDValue ScratchPtr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
-                                              MVT::v2i32, RsrcOps), 0);
-  Rsrc = SDValue(Lowering.buildScratchRSRC(*CurDAG, DL, ScratchPtr), 0);
-  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
-      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);
-
-  // (add n0, c1)
-  if (CurDAG->isBaseWithConstantOffset(Addr)) {
-    SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
-
-    if (isLegalMUBUFImmOffset(C1)) {
-      VAddr = Addr.getOperand(0);
-      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
-      return true;
-    }
-  }
-
-  // (node)
-  VAddr = Addr;
-  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
-                                           SDValue &SOffset, SDValue &Offset,
-                                           SDValue &GLC, SDValue &SLC,
-                                           SDValue &TFE) const {
-  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
-  const SIInstrInfo *TII =
-    static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
-
-  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
-              GLC, SLC, TFE);
-
-  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
-      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
-      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
-    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
-                    APInt::getAllOnesValue(32).getZExtValue(); // Size
-    SDLoc DL(Addr);
-
-    const SITargetLowering& Lowering =
-      *static_cast<const SITargetLowering*>(getTargetLowering());
-
-    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
-    return true;
-  }
-  return false;
-}
-
-bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
-                                           SDValue &Soffset, SDValue &Offset,
-                                           SDValue &GLC) const {
-  SDValue SLC, TFE;
-
-  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
-}
-
-// FIXME: This is incorrect and only enough to be able to compile.
-SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
-  AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
-  SDLoc DL(N);
-
-  assert(Subtarget->hasFlatAddressSpace() &&
-         "addrspacecast only supported with flat address space!");
-
-  assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
-          ASC->getDestAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) &&
-         "Cannot cast address space to / from constant address!");
-
-  assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
-          ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
-         "Can only cast to / from flat address space!");
-
-  // The flat instructions read the address as the index of the VGPR holding the
-  // address, so casting should just be reinterpreting the base VGPR, so just
-  // insert trunc / bitcast / zext.
-
-  SDValue Src = ASC->getOperand(0);
-  EVT DestVT = ASC->getValueType(0);
-  EVT SrcVT = Src.getValueType();
-
-  unsigned SrcSize = SrcVT.getSizeInBits();
-  unsigned DestSize = DestVT.getSizeInBits();
-
-  if (SrcSize > DestSize) {
-    assert(SrcSize == 64 && DestSize == 32);
-    return CurDAG->getMachineNode(
-      TargetOpcode::EXTRACT_SUBREG,
-      DL,
-      DestVT,
-      Src,
-      CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32));
-  }
-
-
-  if (DestSize > SrcSize) {
-    assert(SrcSize == 32 && DestSize == 64);
-
-    // FIXME: This is probably wrong, we should never be defining
-    // a register class with both VGPRs and SGPRs
-    SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, DL,
-                                           MVT::i32);
-
-    const SDValue Ops[] = {
-      RC,
-      Src,
-      CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
-      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
-                                     CurDAG->getConstant(0, DL, MVT::i32)), 0),
-      CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
-    };
-
-    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
-                                  DL, N->getValueType(0), Ops);
-  }
-
-  assert(SrcSize == 64 && DestSize == 64);
-  return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
-}
-
-SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
-                                     uint32_t Offset, uint32_t Width) {
-  // Transformation function, pack the offset and width of a BFE into
-  // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
-  // source, bits [5:0] contain the offset and bits [22:16] the width.
-  uint32_t PackedVal = Offset | (Width << 16);
-  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
-
-  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
-}
-
-SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
-  // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
-  // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
-  // Predicate: 0 < b <= c < 32
-
-  const SDValue &Shl = N->getOperand(0);
-  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
-
-  if (B && C) {
-    uint32_t BVal = B->getZExtValue();
-    uint32_t CVal = C->getZExtValue();
-
-    if (0 < BVal && BVal <= CVal && CVal < 32) {
-      bool Signed = N->getOpcode() == ISD::SRA;
-      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
-
-      return getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0),
-                      CVal - BVal, 32 - CVal);
-    }
-  }
-  return SelectCode(N);
-}
-
-SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
-  switch (N->getOpcode()) {
-  case ISD::AND:
-    if (N->getOperand(0).getOpcode() == ISD::SRL) {
-      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
-      // Predicate: isMask(mask)
-      const SDValue &Srl = N->getOperand(0);
-      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
-      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
-
-      if (Shift && Mask) {
-        uint32_t ShiftVal = Shift->getZExtValue();
-        uint32_t MaskVal = Mask->getZExtValue();
-
-        if (isMask_32(MaskVal)) {
-          uint32_t WidthVal = countPopulation(MaskVal);
-
-          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), Srl.getOperand(0),
-                          ShiftVal, WidthVal);
-        }
-      }
-    }
-    break;
-  case ISD::SRL:
-    if (N->getOperand(0).getOpcode() == ISD::AND) {
-      // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
-      // Predicate: isMask(mask >> b)
-      const SDValue &And = N->getOperand(0);
-      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
-      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
-
-      if (Shift && Mask) {
-        uint32_t ShiftVal = Shift->getZExtValue();
-        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
-
-        if (isMask_32(MaskVal)) {
-          uint32_t WidthVal = countPopulation(MaskVal);
-
-          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), And.getOperand(0),
-                          ShiftVal, WidthVal);
-        }
-      }
-    } else if (N->getOperand(0).getOpcode() == ISD::SHL)
-      return SelectS_BFEFromShifts(N);
-    break;
-  case ISD::SRA:
-    if (N->getOperand(0).getOpcode() == ISD::SHL)
-      return SelectS_BFEFromShifts(N);
-    break;
-  }
-
-  return SelectCode(N);
-}
-
-bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
-                                        SDValue &SrcMods) const {
-
-  unsigned Mods = 0;
-
-  Src = In;
-
-  if (Src.getOpcode() == ISD::FNEG) {
-    Mods |= SISrcMods::NEG;
-    Src = Src.getOperand(0);
-  }
-
-  if (Src.getOpcode() == ISD::FABS) {
-    Mods |= SISrcMods::ABS;
-    Src = Src.getOperand(0);
-  }
-
-  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
-
-  return true;
-}
-
-bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
-                                         SDValue &SrcMods, SDValue &Clamp,
-                                         SDValue &Omod) const {
-  SDLoc DL(In);
-  // FIXME: Handle Clamp and Omod
-  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i32);
-  Omod = CurDAG->getTargetConstant(0, DL, MVT::i32);
-
-  return SelectVOP3Mods(In, Src, SrcMods);
-}
-
-bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
-                                              SDValue &SrcMods,
-                                              SDValue &Omod) const {
-  // FIXME: Handle Omod
-  Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
-
-  return SelectVOP3Mods(In, Src, SrcMods);
-}
-
-bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
-                                                   SDValue &SrcMods,
-                                                   SDValue &Clamp,
-                                                   SDValue &Omod) const {
-  Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
-  return SelectVOP3Mods(In, Src, SrcMods);
-}
-
-void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
-  const AMDGPUTargetLowering& Lowering =
-    *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
-  bool IsModified = false;
-  do {
-    IsModified = false;
-    // Go over all selected nodes and try to fold them a bit more
-    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
-         E = CurDAG->allnodes_end(); I != E; ++I) {
-
-      SDNode *Node = I;
-
-      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
-      if (!MachineNode)
-        continue;
-
-      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
-      if (ResNode != Node) {
-        ReplaceUses(Node, ResNode);
-        IsModified = true;
-      }
-    }
-    CurDAG->RemoveDeadNodes();
-  } while (IsModified);
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUISelLowering.cpp (removed)
@@ -1,2866 +0,0 @@
-//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief This is the parent TargetLowering class for hardware code gen
-/// targets.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUISelLowering.h"
-#include "AMDGPU.h"
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUIntrinsicInfo.h"
-#include "AMDGPURegisterInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "R600MachineFunctionInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DiagnosticInfo.h"
-#include "llvm/IR/DiagnosticPrinter.h"
-
-using namespace llvm;
-
-namespace {
-
-/// Diagnostic information for unimplemented or unsupported feature reporting.
-class DiagnosticInfoUnsupported : public DiagnosticInfo {
-private:
-  const Twine &Description;
-  const Function &Fn;
-
-  static int KindID;
-
-  static int getKindID() {
-    if (KindID == 0)
-      KindID = llvm::getNextAvailablePluginDiagnosticKind();
-    return KindID;
-  }
-
-public:
-  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
-                          DiagnosticSeverity Severity = DS_Error)
-    : DiagnosticInfo(getKindID(), Severity),
-      Description(Desc),
-      Fn(Fn) { }
-
-  const Function &getFunction() const { return Fn; }
-  const Twine &getDescription() const { return Description; }
-
-  void print(DiagnosticPrinter &DP) const override {
-    DP << "unsupported " << getDescription() << " in " << Fn.getName();
-  }
-
-  static bool classof(const DiagnosticInfo *DI) {
-    return DI->getKind() == getKindID();
-  }
-};
-
-int DiagnosticInfoUnsupported::KindID = 0;
-}
-
-
-static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
-                      CCValAssign::LocInfo LocInfo,
-                      ISD::ArgFlagsTy ArgFlags, CCState &State) {
-  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
-                                        ArgFlags.getOrigAlign());
-  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
-
-  return true;
-}
-
-#include "AMDGPUGenCallingConv.inc"
-
-// Find a larger type to do a load / store of a vector with.
-EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
-  unsigned StoreSize = VT.getStoreSizeInBits();
-  if (StoreSize <= 32)
-    return EVT::getIntegerVT(Ctx, StoreSize);
-
-  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
-  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
-}
-
-// Type for a vector that will be loaded to.
-EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
-  unsigned StoreSize = VT.getStoreSizeInBits();
-  if (StoreSize <= 32)
-    return EVT::getIntegerVT(Ctx, 32);
-
-  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
-}
-
-AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
-                                           const AMDGPUSubtarget &STI)
-    : TargetLowering(TM), Subtarget(&STI) {
-  setOperationAction(ISD::Constant, MVT::i32, Legal);
-  setOperationAction(ISD::Constant, MVT::i64, Legal);
-  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
-  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
-
-  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
-  setOperationAction(ISD::BRIND, MVT::Other, Expand);
-
-  // We need to custom lower some of the intrinsics
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-
-  // Library functions.  These default to Expand, but we have instructions
-  // for them.
-  setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
-  setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
-  setOperationAction(ISD::FPOW,   MVT::f32, Legal);
-  setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
-  setOperationAction(ISD::FABS,   MVT::f32, Legal);
-  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
-  setOperationAction(ISD::FRINT,  MVT::f32, Legal);
-  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
-  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
-  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
-
-  setOperationAction(ISD::FROUND, MVT::f32, Custom);
-  setOperationAction(ISD::FROUND, MVT::f64, Custom);
-
-  setOperationAction(ISD::FREM, MVT::f32, Custom);
-  setOperationAction(ISD::FREM, MVT::f64, Custom);
-
-  // v_mad_f32 does not support denormals according to some sources.
-  if (!Subtarget->hasFP32Denormals())
-    setOperationAction(ISD::FMAD, MVT::f32, Legal);
-
-  // Expand to fneg + fadd.
-  setOperationAction(ISD::FSUB, MVT::f64, Expand);
-
-  // Lower floating point store/load to integer store/load to reduce the number
-  // of patterns in tablegen.
-  setOperationAction(ISD::STORE, MVT::f32, Promote);
-  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
-
-  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
-  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
-
-  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
-  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
-
-  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
-  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
-
-  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
-  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
-
-  setOperationAction(ISD::STORE, MVT::f64, Promote);
-  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
-
-  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
-  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
-
-  // Custom lowering of vector stores is required for local address space
-  // stores.
-  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
-
-  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
-  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
-  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
-
-  // XXX: This can be change to Custom, once ExpandVectorStores can
-  // handle 64-bit stores.
-  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
-
-  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
-  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
-  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
-  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
-  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
-
-
-  setOperationAction(ISD::LOAD, MVT::f32, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
-
-  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
-
-  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
-
-  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
-
-  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
-
-  setOperationAction(ISD::LOAD, MVT::f64, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
-
-  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
-  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);
-
-  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
-  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
-  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
-  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
-  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
-  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
-  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
-  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
-  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
-  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
-
-  // There are no 64-bit extloads. These should be done as a 32-bit extload and
-  // an extension to 64-bit.
-  for (MVT VT : MVT::integer_valuetypes()) {
-    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
-    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
-    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
-  }
-
-  for (MVT VT : MVT::integer_vector_valuetypes()) {
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
-  }
-
-  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
-
-  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
-    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
-    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
-    setOperationAction(ISD::FRINT, MVT::f64, Custom);
-    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
-  }
-
-  if (!Subtarget->hasBFI()) {
-    // fcopysign can be done in a single instruction with BFI.
-    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
-    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
-  }
-
-  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
-
-  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
-
-  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
-  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
-
-  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
-  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
-  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
-  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
-
-  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
-  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
-
-  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
-  for (MVT VT : ScalarIntVTs) {
-    setOperationAction(ISD::SREM, VT, Expand);
-    setOperationAction(ISD::SDIV, VT, Expand);
-
-    // GPU does not have divrem function for signed or unsigned.
-    setOperationAction(ISD::SDIVREM, VT, Custom);
-    setOperationAction(ISD::UDIVREM, VT, Custom);
-
-    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
-    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
-    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
-
-    setOperationAction(ISD::BSWAP, VT, Expand);
-    setOperationAction(ISD::CTTZ, VT, Expand);
-    setOperationAction(ISD::CTLZ, VT, Expand);
-  }
-
-  if (!Subtarget->hasBCNT(32))
-    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
-
-  if (!Subtarget->hasBCNT(64))
-    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
-
-  // The hardware supports 32-bit ROTR, but not ROTL.
-  setOperationAction(ISD::ROTL, MVT::i32, Expand);
-  setOperationAction(ISD::ROTL, MVT::i64, Expand);
-  setOperationAction(ISD::ROTR, MVT::i64, Expand);
-
-  setOperationAction(ISD::MUL, MVT::i64, Expand);
-  setOperationAction(ISD::MULHU, MVT::i64, Expand);
-  setOperationAction(ISD::MULHS, MVT::i64, Expand);
-  setOperationAction(ISD::UDIV, MVT::i32, Expand);
-  setOperationAction(ISD::UREM, MVT::i32, Expand);
-  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
-  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
-  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
-
-  setOperationAction(ISD::SMIN, MVT::i32, Legal);
-  setOperationAction(ISD::UMIN, MVT::i32, Legal);
-  setOperationAction(ISD::SMAX, MVT::i32, Legal);
-  setOperationAction(ISD::UMAX, MVT::i32, Legal);
-
-  if (!Subtarget->hasFFBH())
-    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
-
-  if (!Subtarget->hasFFBL())
-    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
-
-  static const MVT::SimpleValueType VectorIntTypes[] = {
-    MVT::v2i32, MVT::v4i32
-  };
-
-  for (MVT VT : VectorIntTypes) {
-    // Expand the following operations for the current type by default.
-    setOperationAction(ISD::ADD,  VT, Expand);
-    setOperationAction(ISD::AND,  VT, Expand);
-    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
-    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
-    setOperationAction(ISD::MUL,  VT, Expand);
-    setOperationAction(ISD::OR,   VT, Expand);
-    setOperationAction(ISD::SHL,  VT, Expand);
-    setOperationAction(ISD::SRA,  VT, Expand);
-    setOperationAction(ISD::SRL,  VT, Expand);
-    setOperationAction(ISD::ROTL, VT, Expand);
-    setOperationAction(ISD::ROTR, VT, Expand);
-    setOperationAction(ISD::SUB,  VT, Expand);
-    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
-    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
-    setOperationAction(ISD::SDIV, VT, Expand);
-    setOperationAction(ISD::UDIV, VT, Expand);
-    setOperationAction(ISD::SREM, VT, Expand);
-    setOperationAction(ISD::UREM, VT, Expand);
-    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
-    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
-    setOperationAction(ISD::SDIVREM, VT, Custom);
-    setOperationAction(ISD::UDIVREM, VT, Custom);
-    setOperationAction(ISD::ADDC, VT, Expand);
-    setOperationAction(ISD::SUBC, VT, Expand);
-    setOperationAction(ISD::ADDE, VT, Expand);
-    setOperationAction(ISD::SUBE, VT, Expand);
-    setOperationAction(ISD::SELECT, VT, Expand);
-    setOperationAction(ISD::VSELECT, VT, Expand);
-    setOperationAction(ISD::SELECT_CC, VT, Expand);
-    setOperationAction(ISD::XOR,  VT, Expand);
-    setOperationAction(ISD::BSWAP, VT, Expand);
-    setOperationAction(ISD::CTPOP, VT, Expand);
-    setOperationAction(ISD::CTTZ, VT, Expand);
-    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
-    setOperationAction(ISD::CTLZ, VT, Expand);
-    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
-    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
-  }
-
-  static const MVT::SimpleValueType FloatVectorTypes[] = {
-    MVT::v2f32, MVT::v4f32
-  };
-
-  for (MVT VT : FloatVectorTypes) {
-    setOperationAction(ISD::FABS, VT, Expand);
-    setOperationAction(ISD::FMINNUM, VT, Expand);
-    setOperationAction(ISD::FMAXNUM, VT, Expand);
-    setOperationAction(ISD::FADD, VT, Expand);
-    setOperationAction(ISD::FCEIL, VT, Expand);
-    setOperationAction(ISD::FCOS, VT, Expand);
-    setOperationAction(ISD::FDIV, VT, Expand);
-    setOperationAction(ISD::FEXP2, VT, Expand);
-    setOperationAction(ISD::FLOG2, VT, Expand);
-    setOperationAction(ISD::FREM, VT, Expand);
-    setOperationAction(ISD::FPOW, VT, Expand);
-    setOperationAction(ISD::FFLOOR, VT, Expand);
-    setOperationAction(ISD::FTRUNC, VT, Expand);
-    setOperationAction(ISD::FMUL, VT, Expand);
-    setOperationAction(ISD::FMA, VT, Expand);
-    setOperationAction(ISD::FRINT, VT, Expand);
-    setOperationAction(ISD::FNEARBYINT, VT, Expand);
-    setOperationAction(ISD::FSQRT, VT, Expand);
-    setOperationAction(ISD::FSIN, VT, Expand);
-    setOperationAction(ISD::FSUB, VT, Expand);
-    setOperationAction(ISD::FNEG, VT, Expand);
-    setOperationAction(ISD::SELECT, VT, Expand);
-    setOperationAction(ISD::VSELECT, VT, Expand);
-    setOperationAction(ISD::SELECT_CC, VT, Expand);
-    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
-    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
-  }
-
-  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
-  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
-
-  setTargetDAGCombine(ISD::MUL);
-  setTargetDAGCombine(ISD::SELECT);
-  setTargetDAGCombine(ISD::SELECT_CC);
-  setTargetDAGCombine(ISD::STORE);
-
-  setTargetDAGCombine(ISD::FADD);
-  setTargetDAGCombine(ISD::FSUB);
-
-  setBooleanContents(ZeroOrNegativeOneBooleanContent);
-  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
-
-  setSchedulingPreference(Sched::RegPressure);
-  setJumpIsExpensive(true);
-
-  // SI at least has hardware support for floating point exceptions, but no way
-  // of using or handling them is implemented. They are also optional in OpenCL
-  // (Section 7.3)
-  setHasFloatingPointExceptions(false);
-
-  setSelectIsExpensive(false);
-  PredictableSelectIsExpensive = false;
-
-  // There are no integer divide instructions, and these expand to a pretty
-  // large sequence of instructions.
-  setIntDivIsCheap(false);
-  setPow2SDivIsCheap(false);
-  setFsqrtIsCheap(true);
-
-  // FIXME: Need to really handle these.
-  MaxStoresPerMemcpy  = 4096;
-  MaxStoresPerMemmove = 4096;
-  MaxStoresPerMemset  = 4096;
-}
-
-//===----------------------------------------------------------------------===//
-// Target Information
-//===----------------------------------------------------------------------===//
-
-MVT AMDGPUTargetLowering::getVectorIdxTy() const {
-  return MVT::i32;
-}
-
-bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
-  return true;
-}
-
-// The backend supports 32 and 64 bit floating point immediates.
-// FIXME: Why are we reporting vectors of FP immediates as legal?
-bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
-  EVT ScalarVT = VT.getScalarType();
-  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
-}
-
-// We don't want to shrink f64 / f32 constants.
-bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
-  EVT ScalarVT = VT.getScalarType();
-  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
-}
-
-bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
-                                                 ISD::LoadExtType,
-                                                 EVT NewVT) const {
-
-  unsigned NewSize = NewVT.getStoreSizeInBits();
-
-  // If we are reducing to a 32-bit load, this is always better.
-  if (NewSize == 32)
-    return true;
-
-  EVT OldVT = N->getValueType(0);
-  unsigned OldSize = OldVT.getStoreSizeInBits();
-
-  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
-  // extloads, so doing one requires using a buffer_load. In cases where we
-  // still couldn't use a scalar load, using the wider load shouldn't really
-  // hurt anything.
-
-  // If the old size already had to be an extload, there's no harm in continuing
-  // to reduce the width.
-  return (OldSize < 32);
-}
-
-bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
-                                                   EVT CastTy) const {
-  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
-    return true;
-
-  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
-  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();
-
-  return ((LScalarSize <= CastScalarSize) ||
-          (CastScalarSize >= 32) ||
-          (LScalarSize < 32));
-}
-
-// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
-// profitable with the expansion for 64-bit since it's generally good to
-// speculate things.
-// FIXME: These should really have the size as a parameter.
-bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
-  return true;
-}
-
-bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
-  return true;
-}
-
-//===---------------------------------------------------------------------===//
-// Target Properties
-//===---------------------------------------------------------------------===//
-
-bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
-  assert(VT.isFloatingPoint());
-  return VT == MVT::f32 || VT == MVT::f64;
-}
-
-bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
-  assert(VT.isFloatingPoint());
-  return VT == MVT::f32 || VT == MVT::f64;
-}
-
-bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT,
-                                                         unsigned NumElem,
-                                                         unsigned AS) const {
-  return true;
-}
-
-bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
-  // Truncate is just accessing a subregister.
-  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
-}
-
-bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
-  // Truncate is just accessing a subregister.
-  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
-         (Dest->getPrimitiveSizeInBits() % 32 == 0);
-}
-
-bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
-  const DataLayout *DL = getDataLayout();
-  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
-  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());
-
-  return SrcSize == 32 && DestSize == 64;
-}
-
-bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
-  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
-  // practical purposes, the extra mov 0 to load a 64-bit is free.  As used,
-  // this will enable reducing 64-bit operations the 32-bit, which is always
-  // good.
-  return Src == MVT::i32 && Dest == MVT::i64;
-}
-
-bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
-  return isZExtFree(Val.getValueType(), VT2);
-}
-
-bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
-  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
-  // limited number of native 64-bit operations. Shrinking an operation to fit
-  // in a single 32-bit register should always be helpful. As currently used,
-  // this is much less general than the name suggests, and is only used in
-  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
-  // not profitable, and may actually be harmful.
-  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
-}
-
-//===---------------------------------------------------------------------===//
-// TargetLowering Callbacks
-//===---------------------------------------------------------------------===//
-
-void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
-                             const SmallVectorImpl<ISD::InputArg> &Ins) const {
-
-  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
-}
-
-SDValue AMDGPUTargetLowering::LowerReturn(
-                                     SDValue Chain,
-                                     CallingConv::ID CallConv,
-                                     bool isVarArg,
-                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
-                                     const SmallVectorImpl<SDValue> &OutVals,
-                                     SDLoc DL, SelectionDAG &DAG) const {
-  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
-}
-
-//===---------------------------------------------------------------------===//
-// Target specific lowering
-//===---------------------------------------------------------------------===//
-
-SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
-                                        SmallVectorImpl<SDValue> &InVals) const {
-  SDValue Callee = CLI.Callee;
-  SelectionDAG &DAG = CLI.DAG;
-
-  const Function &Fn = *DAG.getMachineFunction().getFunction();
-
-  StringRef FuncName("<unknown>");
-
-  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
-    FuncName = G->getSymbol();
-  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
-    FuncName = G->getGlobal()->getName();
-
-  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
-  DAG.getContext()->diagnose(NoCalls);
-  return SDValue();
-}
-
-SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
-                                             SelectionDAG &DAG) const {
-  switch (Op.getOpcode()) {
-  default:
-    Op.getNode()->dump();
-    llvm_unreachable("Custom lowering code for this"
-                     "instruction is not implemented yet!");
-    break;
-  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
-  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
-  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
-  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
-  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
-  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
-  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
-  case ISD::FREM: return LowerFREM(Op, DAG);
-  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
-  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
-  case ISD::FRINT: return LowerFRINT(Op, DAG);
-  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
-  case ISD::FROUND: return LowerFROUND(Op, DAG);
-  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
-  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
-  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
-  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
-  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
-  }
-  return Op;
-}
-
-void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
-                                              SmallVectorImpl<SDValue> &Results,
-                                              SelectionDAG &DAG) const {
-  switch (N->getOpcode()) {
-  case ISD::SIGN_EXTEND_INREG:
-    // Different parts of legalization seem to interpret which type of
-    // sign_extend_inreg is the one to check for custom lowering. The extended
-    // from type is what really matters, but some places check for custom
-    // lowering of the result type. This results in trying to use
-    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
-    // nothing here and let the illegal result integer be handled normally.
-    return;
-  case ISD::LOAD: {
-    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
-    if (!Node)
-      return;
-
-    Results.push_back(SDValue(Node, 0));
-    Results.push_back(SDValue(Node, 1));
-    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
-    // function
-    DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), SDValue(Node, 1));
-    return;
-  }
-  case ISD::STORE: {
-    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
-    if (Lowered.getNode())
-      Results.push_back(Lowered);
-    return;
-  }
-  default:
-    return;
-  }
-}
-
-// FIXME: This implements accesses to initialized globals in the constant
-// address space by copying them to private and accessing that. It does not
-// properly handle illegal types or vectors. The private vector loads are not
-// scalarized, and the illegal scalars hit an assertion. This technique will not
-// work well with large initializers, and this should eventually be
-// removed. Initialized globals should be placed into a data section that the
-// runtime will load into a buffer before the kernel is executed. Uses of the
-// global need to be replaced with a pointer loaded from an implicit kernel
-// argument into this buffer holding the copy of the data, which will remove the
-// need for any of this.
-SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
-                                                       const GlobalValue *GV,
-                                                       const SDValue &InitPtr,
-                                                       SDValue Chain,
-                                                       SelectionDAG &DAG) const {
-  const DataLayout *TD = getDataLayout();
-  SDLoc DL(InitPtr);
-  Type *InitTy = Init->getType();
-
-  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
-    EVT VT = EVT::getEVT(InitTy);
-    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
-    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
-                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
-                        TD->getPrefTypeAlignment(InitTy));
-  }
-
-  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
-    EVT VT = EVT::getEVT(CFP->getType());
-    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
-    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
-                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
-                 TD->getPrefTypeAlignment(CFP->getType()));
-  }
-
-  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
-    const StructLayout *SL = TD->getStructLayout(ST);
-
-    EVT PtrVT = InitPtr.getValueType();
-    SmallVector<SDValue, 8> Chains;
-
-    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
-      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT);
-      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
-
-      Constant *Elt = Init->getAggregateElement(I);
-      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
-    }
-
-    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
-  }
-
-  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
-    EVT PtrVT = InitPtr.getValueType();
-
-    unsigned NumElements;
-    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
-      NumElements = AT->getNumElements();
-    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
-      NumElements = VT->getNumElements();
-    else
-      llvm_unreachable("Unexpected type");
-
-    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
-    SmallVector<SDValue, 8> Chains;
-    for (unsigned i = 0; i < NumElements; ++i) {
-      SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
-      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
-
-      Constant *Elt = Init->getAggregateElement(i);
-      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
-    }
-
-    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
-  }
-
-  if (isa<UndefValue>(Init)) {
-    EVT VT = EVT::getEVT(InitTy);
-    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
-    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
-                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
-                        TD->getPrefTypeAlignment(InitTy));
-  }
-
-  Init->dump();
-  llvm_unreachable("Unhandled constant initializer");
-}
-
-static bool hasDefinedInitializer(const GlobalValue *GV) {
-  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
-  if (!GVar || !GVar->hasInitializer())
-    return false;
-
-  if (isa<UndefValue>(GVar->getInitializer()))
-    return false;
-
-  return true;
-}
-
-SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
-                                                 SDValue Op,
-                                                 SelectionDAG &DAG) const {
-
-  const DataLayout *TD = getDataLayout();
-  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
-  const GlobalValue *GV = G->getGlobal();
-
-  switch (G->getAddressSpace()) {
-  case AMDGPUAS::LOCAL_ADDRESS: {
-    // XXX: What does the value of G->getOffset() mean?
-    assert(G->getOffset() == 0 &&
-         "Do not know what to do with an non-zero offset");
-
-    // TODO: We could emit code to handle the initialization somewhere.
-    if (hasDefinedInitializer(GV))
-      break;
-
-    unsigned Offset;
-    if (MFI->LocalMemoryObjects.count(GV) == 0) {
-      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
-      Offset = MFI->LDSSize;
-      MFI->LocalMemoryObjects[GV] = Offset;
-      // XXX: Account for alignment?
-      MFI->LDSSize += Size;
-    } else {
-      Offset = MFI->LocalMemoryObjects[GV];
-    }
-
-    return DAG.getConstant(Offset, SDLoc(Op),
-                           getPointerTy(AMDGPUAS::LOCAL_ADDRESS));
-  }
-  case AMDGPUAS::CONSTANT_ADDRESS: {
-    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
-    Type *EltType = GV->getType()->getElementType();
-    unsigned Size = TD->getTypeAllocSize(EltType);
-    unsigned Alignment = TD->getPrefTypeAlignment(EltType);
-
-    MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
-    MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);
-
-    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
-    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);
-
-    const GlobalVariable *Var = cast<GlobalVariable>(GV);
-    if (!Var->hasInitializer()) {
-      // This has no use, but bugpoint will hit it.
-      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
-    }
-
-    const Constant *Init = Var->getInitializer();
-    SmallVector<SDNode*, 8> WorkList;
-
-    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
-                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
-      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
-        continue;
-      WorkList.push_back(*I);
-    }
-    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
-    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
-                                           E = WorkList.end(); I != E; ++I) {
-      SmallVector<SDValue, 8> Ops;
-      Ops.push_back(Chain);
-      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
-        Ops.push_back((*I)->getOperand(i));
-      }
-      DAG.UpdateNodeOperands(*I, Ops);
-    }
-    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
-  }
-  }
-
-  const Function &Fn = *DAG.getMachineFunction().getFunction();
-  DiagnosticInfoUnsupported BadInit(Fn,
-                                    "initializer for address space");
-  DAG.getContext()->diagnose(BadInit);
-  return SDValue();
-}
-
-SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
-                                                  SelectionDAG &DAG) const {
-  SmallVector<SDValue, 8> Args;
-
-  for (const SDUse &U : Op->ops())
-    DAG.ExtractVectorElements(U.get(), Args);
-
-  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
-}
-
-SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
-                                                     SelectionDAG &DAG) const {
-
-  SmallVector<SDValue, 8> Args;
-  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-  EVT VT = Op.getValueType();
-  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
-                            VT.getVectorNumElements());
-
-  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
-}
-
-SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
-                                              SelectionDAG &DAG) const {
-
-  MachineFunction &MF = DAG.getMachineFunction();
-  const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering();
-
-  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
-
-  unsigned FrameIndex = FIN->getIndex();
-  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
-  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
-                         Op.getValueType());
-}
-
-SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
-    SelectionDAG &DAG) const {
-  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  switch (IntrinsicID) {
-    default: return Op;
-    case AMDGPUIntrinsic::AMDGPU_abs:
-    case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
-      return LowerIntrinsicIABS(Op, DAG);
-    case AMDGPUIntrinsic::AMDGPU_lrp:
-      return LowerIntrinsicLRP(Op, DAG);
-
-    case AMDGPUIntrinsic::AMDGPU_clamp:
-    case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
-      return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
-    case Intrinsic::AMDGPU_div_scale: {
-      // 3rd parameter required to be a constant.
-      const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
-      if (!Param)
-        return DAG.getUNDEF(VT);
-
-      // Translate to the operands expected by the machine instruction. The
-      // first parameter must be the same as the first instruction.
-      SDValue Numerator = Op.getOperand(1);
-      SDValue Denominator = Op.getOperand(2);
-
-      // Note this order is opposite of the machine instruction's operations,
-      // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
-      // intrinsic has the numerator as the first operand to match a normal
-      // division operation.
-
-      SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
-
-      return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
-                         Denominator, Numerator);
-    }
-
-    case Intrinsic::AMDGPU_div_fmas:
-      return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
-                         Op.getOperand(4));
-
-    case Intrinsic::AMDGPU_div_fixup:
-      return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
-    case Intrinsic::AMDGPU_trig_preop:
-      return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2));
-
-    case Intrinsic::AMDGPU_rcp:
-      return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
-
-    case Intrinsic::AMDGPU_rsq:
-      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
-      return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
-
-    case Intrinsic::AMDGPU_rsq_clamped:
-      if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
-        Type *Type = VT.getTypeForEVT(*DAG.getContext());
-        APFloat Max = APFloat::getLargest(Type->getFltSemantics());
-        APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);
-
-        SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
-        SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
-                                  DAG.getConstantFP(Max, DL, VT));
-        return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
-                           DAG.getConstantFP(Min, DL, VT));
-      } else {
-        return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
-      }
-
-    case Intrinsic::AMDGPU_ldexp:
-      return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
-                                                   Op.getOperand(2));
-
-    case AMDGPUIntrinsic::AMDGPU_imax:
-      return DAG.getNode(ISD::SMAX, DL, VT, Op.getOperand(1),
-                                            Op.getOperand(2));
-    case AMDGPUIntrinsic::AMDGPU_umax:
-      return DAG.getNode(ISD::UMAX, DL, VT, Op.getOperand(1),
-                                            Op.getOperand(2));
-    case AMDGPUIntrinsic::AMDGPU_imin:
-      return DAG.getNode(ISD::SMIN, DL, VT, Op.getOperand(1),
-                                            Op.getOperand(2));
-    case AMDGPUIntrinsic::AMDGPU_umin:
-      return DAG.getNode(ISD::UMIN, DL, VT, Op.getOperand(1),
-                                            Op.getOperand(2));
-
-    case AMDGPUIntrinsic::AMDGPU_umul24:
-      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2));
-
-    case AMDGPUIntrinsic::AMDGPU_imul24:
-      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2));
-
-    case AMDGPUIntrinsic::AMDGPU_umad24:
-      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
-    case AMDGPUIntrinsic::AMDGPU_imad24:
-      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
-                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
-
-    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
-      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
-      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
-      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
-      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
-      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
-                         Op.getOperand(1),
-                         Op.getOperand(2),
-                         Op.getOperand(3));
-
-    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
-      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
-                         Op.getOperand(1),
-                         Op.getOperand(2),
-                         Op.getOperand(3));
-
-    case AMDGPUIntrinsic::AMDGPU_bfi:
-      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
-                         Op.getOperand(1),
-                         Op.getOperand(2),
-                         Op.getOperand(3));
-
-    case AMDGPUIntrinsic::AMDGPU_bfm:
-      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
-                         Op.getOperand(1),
-                         Op.getOperand(2));
-
-    case AMDGPUIntrinsic::AMDGPU_brev:
-      return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));
-
-  case Intrinsic::AMDGPU_class:
-    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
-                       Op.getOperand(1), Op.getOperand(2));
-
-    case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
-      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
-      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
-    case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
-      return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
-  }
-}
-
-///IABS(a) = SMAX(sub(0, a), a)
-SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
-                                                 SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
-                            Op.getOperand(1));
-
-  return DAG.getNode(ISD::SMAX, DL, VT, Neg, Op.getOperand(1));
-}
-
-/// Linear Interpolation
-/// LRP(a, b, c) = muladd(a,  b, (1 - a) * c)
-SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
-                                                SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
-                                DAG.getConstantFP(1.0f, DL, MVT::f32),
-                                Op.getOperand(1));
-  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
-                                                    Op.getOperand(3));
-  return DAG.getNode(ISD::FADD, DL, VT,
-      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
-      OneSubAC);
-}
-
-/// \brief Generate Min/Max node
-SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
-                                                   EVT VT,
-                                                   SDValue LHS,
-                                                   SDValue RHS,
-                                                   SDValue True,
-                                                   SDValue False,
-                                                   SDValue CC,
-                                                   DAGCombinerInfo &DCI) const {
-  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
-    return SDValue();
-
-  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
-    return SDValue();
-
-  SelectionDAG &DAG = DCI.DAG;
-  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
-  switch (CCOpcode) {
-  case ISD::SETOEQ:
-  case ISD::SETONE:
-  case ISD::SETUNE:
-  case ISD::SETNE:
-  case ISD::SETUEQ:
-  case ISD::SETEQ:
-  case ISD::SETFALSE:
-  case ISD::SETFALSE2:
-  case ISD::SETTRUE:
-  case ISD::SETTRUE2:
-  case ISD::SETUO:
-  case ISD::SETO:
-    break;
-  case ISD::SETULE:
-  case ISD::SETULT: {
-    if (LHS == True)
-      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
-    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
-  }
-  case ISD::SETOLE:
-  case ISD::SETOLT:
-  case ISD::SETLE:
-  case ISD::SETLT: {
-    // Ordered. Assume ordered for undefined.
-
-    // Only do this after legalization to avoid interfering with other combines
-    // which might occur.
-    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
-        !DCI.isCalledByLegalizer())
-      return SDValue();
-
-    // We need to permute the operands to get the correct NaN behavior. The
-    // selected operand is the second one based on the failing compare with NaN,
-    // so permute it based on the compare type the hardware uses.
-    if (LHS == True)
-      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
-    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
-  }
-  case ISD::SETUGE:
-  case ISD::SETUGT: {
-    if (LHS == True)
-      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
-    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
-  }
-  case ISD::SETGT:
-  case ISD::SETGE:
-  case ISD::SETOGE:
-  case ISD::SETOGT: {
-    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
-        !DCI.isCalledByLegalizer())
-      return SDValue();
-
-    if (LHS == True)
-      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
-    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
-  }
-  case ISD::SETCC_INVALID:
-    llvm_unreachable("Invalid setcc condcode!");
-  }
-  return SDValue();
-}
-
-// FIXME: Remove this when combines added to DAGCombiner.
-SDValue AMDGPUTargetLowering::CombineIMinMax(SDLoc DL,
-                                             EVT VT,
-                                             SDValue LHS,
-                                             SDValue RHS,
-                                             SDValue True,
-                                             SDValue False,
-                                             SDValue CC,
-                                             SelectionDAG &DAG) const {
-  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
-    return SDValue();
-
-  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
-  switch (CCOpcode) {
-  case ISD::SETULE:
-  case ISD::SETULT: {
-    unsigned Opc = (LHS == True) ? ISD::UMIN : ISD::UMAX;
-    return DAG.getNode(Opc, DL, VT, LHS, RHS);
-  }
-  case ISD::SETLE:
-  case ISD::SETLT: {
-    unsigned Opc = (LHS == True) ? ISD::SMIN : ISD::SMAX;
-    return DAG.getNode(Opc, DL, VT, LHS, RHS);
-  }
-  case ISD::SETGT:
-  case ISD::SETGE: {
-    unsigned Opc = (LHS == True) ? ISD::SMAX : ISD::SMIN;
-    return DAG.getNode(Opc, DL, VT, LHS, RHS);
-  }
-  case ISD::SETUGE:
-  case ISD::SETUGT: {
-    unsigned Opc = (LHS == True) ? ISD::UMAX : ISD::UMIN;
-    return DAG.getNode(Opc, DL, VT, LHS, RHS);
-  }
-  default:
-    return SDValue();
-  }
-}
-
-SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
-                                                  SelectionDAG &DAG) const {
-  LoadSDNode *Load = cast<LoadSDNode>(Op);
-  EVT MemVT = Load->getMemoryVT();
-  EVT MemEltVT = MemVT.getVectorElementType();
-
-  EVT LoadVT = Op.getValueType();
-  EVT EltVT = LoadVT.getVectorElementType();
-  EVT PtrVT = Load->getBasePtr().getValueType();
-
-  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
-  SmallVector<SDValue, 8> Loads;
-  SmallVector<SDValue, 8> Chains;
-
-  SDLoc SL(Op);
-  unsigned MemEltSize = MemEltVT.getStoreSize();
-  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());
-
-  for (unsigned i = 0; i < NumElts; ++i) {
-    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
-                              DAG.getConstant(i * MemEltSize, SL, PtrVT));
-
-    SDValue NewLoad
-      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
-                       Load->getChain(), Ptr,
-                       SrcValue.getWithOffset(i * MemEltSize),
-                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
-                       Load->isInvariant(), Load->getAlignment());
-    Loads.push_back(NewLoad.getValue(0));
-    Chains.push_back(NewLoad.getValue(1));
-  }
-
-  SDValue Ops[] = {
-    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
-    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
-  };
-
-  return DAG.getMergeValues(Ops, SL);
-}
-
-SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
-                                              SelectionDAG &DAG) const {
-  EVT VT = Op.getValueType();
-
-  // If this is a 2 element vector, we really want to scalarize and not create
-  // weird 1 element vectors.
-  if (VT.getVectorNumElements() == 2)
-    return ScalarizeVectorLoad(Op, DAG);
-
-  LoadSDNode *Load = cast<LoadSDNode>(Op);
-  SDValue BasePtr = Load->getBasePtr();
-  EVT PtrVT = BasePtr.getValueType();
-  EVT MemVT = Load->getMemoryVT();
-  SDLoc SL(Op);
-  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());
-
-  EVT LoVT, HiVT;
-  EVT LoMemVT, HiMemVT;
-  SDValue Lo, Hi;
-
-  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
-  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
-  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);
-  SDValue LoLoad
-    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
-                     Load->getChain(), BasePtr,
-                     SrcValue,
-                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
-                     Load->isInvariant(), Load->getAlignment());
-
-  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
-                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
-                                              PtrVT));
-
-  SDValue HiLoad
-    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
-                     Load->getChain(), HiPtr,
-                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
-                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
-                     Load->isInvariant(), Load->getAlignment());
-
-  SDValue Ops[] = {
-    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
-    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
-                LoLoad.getValue(1), HiLoad.getValue(1))
-  };
-
-  return DAG.getMergeValues(Ops, SL);
-}
-
-SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
-                                               SelectionDAG &DAG) const {
-  StoreSDNode *Store = cast<StoreSDNode>(Op);
-  EVT MemVT = Store->getMemoryVT();
-  unsigned MemBits = MemVT.getSizeInBits();
-
-  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
-  // truncating store into an i32 store.
-  // XXX: We could also handle optimize other vector bitwidths.
-  if (!MemVT.isVector() || MemBits > 32) {
-    return SDValue();
-  }
-
-  SDLoc DL(Op);
-  SDValue Value = Store->getValue();
-  EVT VT = Value.getValueType();
-  EVT ElemVT = VT.getVectorElementType();
-  SDValue Ptr = Store->getBasePtr();
-  EVT MemEltVT = MemVT.getVectorElementType();
-  unsigned MemEltBits = MemEltVT.getSizeInBits();
-  unsigned MemNumElements = MemVT.getVectorNumElements();
-  unsigned PackedSize = MemVT.getStoreSizeInBits();
-  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);
-
-  assert(Value.getValueType().getScalarSizeInBits() >= 32);
-
-  SDValue PackedValue;
-  for (unsigned i = 0; i < MemNumElements; ++i) {
-    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
-                              DAG.getConstant(i, DL, MVT::i32));
-    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
-    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg
-
-    SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
-    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);
-
-    if (i == 0) {
-      PackedValue = Elt;
-    } else {
-      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
-    }
-  }
-
-  if (PackedSize < 32) {
-    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
-    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
-                             Store->getMemOperand()->getPointerInfo(),
-                             PackedVT,
-                             Store->isNonTemporal(), Store->isVolatile(),
-                             Store->getAlignment());
-  }
-
-  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
-                      Store->getMemOperand()->getPointerInfo(),
-                      Store->isVolatile(),  Store->isNonTemporal(),
-                      Store->getAlignment());
-}
-
-SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
-                                                   SelectionDAG &DAG) const {
-  StoreSDNode *Store = cast<StoreSDNode>(Op);
-  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
-  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
-  EVT PtrVT = Store->getBasePtr().getValueType();
-  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
-  SDLoc SL(Op);
-
-  SmallVector<SDValue, 8> Chains;
-
-  unsigned EltSize = MemEltVT.getStoreSize();
-  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
-
-  for (unsigned i = 0, e = NumElts; i != e; ++i) {
-    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
-                              Store->getValue(),
-                              DAG.getConstant(i, SL, MVT::i32));
-
-    SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), SL, PtrVT);
-    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
-    SDValue NewStore =
-      DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
-                        SrcValue.getWithOffset(i * EltSize),
-                        MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
-                        Store->getAlignment());
-    Chains.push_back(NewStore);
-  }
-
-  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
-}
-
-SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
-                                               SelectionDAG &DAG) const {
-  StoreSDNode *Store = cast<StoreSDNode>(Op);
-  SDValue Val = Store->getValue();
-  EVT VT = Val.getValueType();
-
-  // If this is a 2 element vector, we really want to scalarize and not create
-  // weird 1 element vectors.
-  if (VT.getVectorNumElements() == 2)
-    return ScalarizeVectorStore(Op, DAG);
-
-  EVT MemVT = Store->getMemoryVT();
-  SDValue Chain = Store->getChain();
-  SDValue BasePtr = Store->getBasePtr();
-  SDLoc SL(Op);
-
-  EVT LoVT, HiVT;
-  EVT LoMemVT, HiMemVT;
-  SDValue Lo, Hi;
-
-  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
-  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
-  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);
-
-  EVT PtrVT = BasePtr.getValueType();
-  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
-                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
-                                              PtrVT));
-
-  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
-  SDValue LoStore
-    = DAG.getTruncStore(Chain, SL, Lo,
-                        BasePtr,
-                        SrcValue,
-                        LoMemVT,
-                        Store->isNonTemporal(),
-                        Store->isVolatile(),
-                        Store->getAlignment());
-  SDValue HiStore
-    = DAG.getTruncStore(Chain, SL, Hi,
-                        HiPtr,
-                        SrcValue.getWithOffset(LoMemVT.getStoreSize()),
-                        HiMemVT,
-                        Store->isNonTemporal(),
-                        Store->isVolatile(),
-                        Store->getAlignment());
-
-  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
-}
-
-
-SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  LoadSDNode *Load = cast<LoadSDNode>(Op);
-  ISD::LoadExtType ExtType = Load->getExtensionType();
-  EVT VT = Op.getValueType();
-  EVT MemVT = Load->getMemoryVT();
-
-  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
-    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
-    // FIXME: Copied from PPC
-    // First, load into 32 bits, then truncate to 1 bit.
-
-    SDValue Chain = Load->getChain();
-    SDValue BasePtr = Load->getBasePtr();
-    MachineMemOperand *MMO = Load->getMemOperand();
-
-    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
-                                   BasePtr, MVT::i8, MMO);
-
-    SDValue Ops[] = {
-      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
-      NewLD.getValue(1)
-    };
-
-    return DAG.getMergeValues(Ops, DL);
-  }
-
-  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
-      Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
-      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
-    return SDValue();
-
-  // <SI && AS=PRIVATE && EXTLOAD && size < 32bit,
-  // register (2-)byte extract.
-
-  // Get Register holding the target.
-  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
-                            DAG.getConstant(2, DL, MVT::i32));
-  // Load the Register.
-  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
-                            Load->getChain(), Ptr,
-                            DAG.getTargetConstant(0, DL, MVT::i32),
-                            Op.getOperand(2));
-
-  // Get offset within the register.
-  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
-                                Load->getBasePtr(),
-                                DAG.getConstant(0x3, DL, MVT::i32));
-
-  // Bit offset of target byte (byteIdx * 8).
-  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
-                                 DAG.getConstant(3, DL, MVT::i32));
-
-  // Shift to the right.
-  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
-
-  // Eliminate the upper bits by setting them to ...
-  EVT MemEltVT = MemVT.getScalarType();
-
-  // ... ones.
-  if (ExtType == ISD::SEXTLOAD) {
-    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
-
-    SDValue Ops[] = {
-      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
-      Load->getChain()
-    };
-
-    return DAG.getMergeValues(Ops, DL);
-  }
-
-  // ... or zeros.
-  SDValue Ops[] = {
-    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
-    Load->getChain()
-  };
-
-  return DAG.getMergeValues(Ops, DL);
-}
-
-SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
-  if (Result.getNode()) {
-    return Result;
-  }
-
-  StoreSDNode *Store = cast<StoreSDNode>(Op);
-  SDValue Chain = Store->getChain();
-  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
-       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
-      Store->getValue().getValueType().isVector()) {
-    return ScalarizeVectorStore(Op, DAG);
-  }
-
-  EVT MemVT = Store->getMemoryVT();
-  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
-      MemVT.bitsLT(MVT::i32)) {
-    unsigned Mask = 0;
-    if (Store->getMemoryVT() == MVT::i8) {
-      Mask = 0xff;
-    } else if (Store->getMemoryVT() == MVT::i16) {
-      Mask = 0xffff;
-    }
-    SDValue BasePtr = Store->getBasePtr();
-    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
-                              DAG.getConstant(2, DL, MVT::i32));
-    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
-                              Chain, Ptr,
-                              DAG.getTargetConstant(0, DL, MVT::i32));
-
-    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
-                                  DAG.getConstant(0x3, DL, MVT::i32));
-
-    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
-                                   DAG.getConstant(3, DL, MVT::i32));
-
-    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
-                                    Store->getValue());
-
-    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);
-
-    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
-                                       MaskedValue, ShiftAmt);
-
-    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
-                                  DAG.getConstant(Mask, DL, MVT::i32),
-                                  ShiftAmt);
-    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
-                          DAG.getConstant(0xffffffff, DL, MVT::i32));
-    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);
-
-    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
-    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
-                       Chain, Value, Ptr,
-                       DAG.getTargetConstant(0, DL, MVT::i32));
-  }
-  return SDValue();
-}
-
-// This is a shortcut for integer division because we have fast i32<->f32
-// conversions, and fast f32 reciprocal instructions. The fractional part of a
-// float is enough to accurately represent up to a 24-bit integer.
-SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-  SDValue LHS = Op.getOperand(0);
-  SDValue RHS = Op.getOperand(1);
-  MVT IntVT = MVT::i32;
-  MVT FltVT = MVT::f32;
-
-  ISD::NodeType ToFp  = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
-  ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
-
-  if (VT.isVector()) {
-    unsigned NElts = VT.getVectorNumElements();
-    IntVT = MVT::getVectorVT(MVT::i32, NElts);
-    FltVT = MVT::getVectorVT(MVT::f32, NElts);
-  }
-
-  unsigned BitSize = VT.getScalarType().getSizeInBits();
-
-  SDValue jq = DAG.getConstant(1, DL, IntVT);
-
-  if (sign) {
-    // char|short jq = ia ^ ib;
-    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
-
-    // jq = jq >> (bitsize - 2)
-    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
-                     DAG.getConstant(BitSize - 2, DL, VT));
-
-    // jq = jq | 0x1
-    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
-
-    // jq = (int)jq
-    jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
-  }
-
-  // int ia = (int)LHS;
-  SDValue ia = sign ?
-    DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);
-
-  // int ib, (int)RHS;
-  SDValue ib = sign ?
-    DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);
-
-  // float fa = (float)ia;
-  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
-
-  // float fb = (float)ib;
-  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
-
-  // float fq = native_divide(fa, fb);
-  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
-                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
-
-  // fq = trunc(fq);
-  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
-
-  // float fqneg = -fq;
-  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
-
-  // float fr = mad(fqneg, fb, fa);
-  SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
-                           DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);
-
-  // int iq = (int)fq;
-  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
-
-  // fr = fabs(fr);
-  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
-
-  // fb = fabs(fb);
-  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), VT);
-
-  // int cv = fr >= fb;
-  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
-
-  // jq = (cv ? jq : 0);
-  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
-
-  // dst = trunc/extend to legal type
-  iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);
-
-  // dst = iq + jq;
-  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
-
-  // Rem needs compensation, it's easier to recompute it
-  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
-  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
-
-  SDValue Res[2] = {
-    Div,
-    Rem
-  };
-  return DAG.getMergeValues(Res, DL);
-}
-
-void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
-                                      SelectionDAG &DAG,
-                                      SmallVectorImpl<SDValue> &Results) const {
-  assert(Op.getValueType() == MVT::i64);
-
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-  EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
-
-  SDValue one = DAG.getConstant(1, DL, HalfVT);
-  SDValue zero = DAG.getConstant(0, DL, HalfVT);
-
-  //HiLo split
-  SDValue LHS = Op.getOperand(0);
-  SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
-  SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);
-
-  SDValue RHS = Op.getOperand(1);
-  SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
-  SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);
-
-  if (VT == MVT::i64 &&
-    DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
-    DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
-
-    SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
-                              LHS_Lo, RHS_Lo);
-
-    SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(0), zero);
-    SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(1), zero);
-    Results.push_back(DIV);
-    Results.push_back(REM);
-    return;
-  }
-
-  // Get Speculative values
-  SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
-  SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
-
-  SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
-  SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, zero);
-
-  SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
-  SDValue DIV_Lo = zero;
-
-  const unsigned halfBitWidth = HalfVT.getSizeInBits();
-
-  for (unsigned i = 0; i < halfBitWidth; ++i) {
-    const unsigned bitPos = halfBitWidth - i - 1;
-    SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
-    // Get value of high bit
-    SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
-    HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
-    HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
-
-    // Shift
-    REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
-    // Add LHS high bit
-    REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
-
-    SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT);
-    SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
-
-    DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
-
-    // Update REM
-    SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
-    REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
-  }
-
-  SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
-  Results.push_back(DIV);
-  Results.push_back(REM);
-}
-
-SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
-                                           SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  if (VT == MVT::i64) {
-    SmallVector<SDValue, 2> Results;
-    LowerUDIVREM64(Op, DAG, Results);
-    return DAG.getMergeValues(Results, DL);
-  }
-
-  SDValue Num = Op.getOperand(0);
-  SDValue Den = Op.getOperand(1);
-
-  if (VT == MVT::i32) {
-    if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
-        DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
-      // TODO: We technically could do this for i64, but shouldn't that just be
-      // handled by something generally reducing 64-bit division on 32-bit
-      // values to 32-bit?
-      return LowerDIVREM24(Op, DAG, false);
-    }
-  }
-
-  // RCP =  URECIP(Den) = 2^32 / Den + e
-  // e is rounding error.
-  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
-
-  // RCP_LO = mul(RCP, Den) */
-  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
-
-  // RCP_HI = mulhu (RCP, Den) */
-  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
-
-  // NEG_RCP_LO = -RCP_LO
-  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
-                                                     RCP_LO);
-
-  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
-  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
-                                           NEG_RCP_LO, RCP_LO,
-                                           ISD::SETEQ);
-  // Calculate the rounding error from the URECIP instruction
-  // E = mulhu(ABS_RCP_LO, RCP)
-  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
-
-  // RCP_A_E = RCP + E
-  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
-
-  // RCP_S_E = RCP - E
-  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
-
-  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
-  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
-                                     RCP_A_E, RCP_S_E,
-                                     ISD::SETEQ);
-  // Quotient = mulhu(Tmp0, Num)
-  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
-
-  // Num_S_Remainder = Quotient * Den
-  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
-
-  // Remainder = Num - Num_S_Remainder
-  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
-
-  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
-  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
-                                                 DAG.getConstant(-1, DL, VT),
-                                                 DAG.getConstant(0, DL, VT),
-                                                 ISD::SETUGE);
-  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
-  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
-                                                  Num_S_Remainder,
-                                                  DAG.getConstant(-1, DL, VT),
-                                                  DAG.getConstant(0, DL, VT),
-                                                  ISD::SETUGE);
-  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
-  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
-                                               Remainder_GE_Zero);
-
-  // Calculate Division result:
-
-  // Quotient_A_One = Quotient + 1
-  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
-                                       DAG.getConstant(1, DL, VT));
-
-  // Quotient_S_One = Quotient - 1
-  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
-                                       DAG.getConstant(1, DL, VT));
-
-  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
-  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
-                                     Quotient, Quotient_A_One, ISD::SETEQ);
-
-  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
-  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
-                            Quotient_S_One, Div, ISD::SETEQ);
-
-  // Calculate Rem result:
-
-  // Remainder_S_Den = Remainder - Den
-  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
-
-  // Remainder_A_Den = Remainder + Den
-  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
-
-  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
-  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
-                                    Remainder, Remainder_S_Den, ISD::SETEQ);
-
-  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
-  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
-                            Remainder_A_Den, Rem, ISD::SETEQ);
-  SDValue Ops[2] = {
-    Div,
-    Rem
-  };
-  return DAG.getMergeValues(Ops, DL);
-}
-
-SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
-                                           SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  SDValue LHS = Op.getOperand(0);
-  SDValue RHS = Op.getOperand(1);
-
-  SDValue Zero = DAG.getConstant(0, DL, VT);
-  SDValue NegOne = DAG.getConstant(-1, DL, VT);
-
-  if (VT == MVT::i32 &&
-      DAG.ComputeNumSignBits(LHS) > 8 &&
-      DAG.ComputeNumSignBits(RHS) > 8) {
-    return LowerDIVREM24(Op, DAG, true);
-  }
-  if (VT == MVT::i64 &&
-      DAG.ComputeNumSignBits(LHS) > 32 &&
-      DAG.ComputeNumSignBits(RHS) > 32) {
-    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
-
-    //HiLo split
-    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
-    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
-    SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
-                                 LHS_Lo, RHS_Lo);
-    SDValue Res[2] = {
-      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
-      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
-    };
-    return DAG.getMergeValues(Res, DL);
-  }
-
-  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
-  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
-  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
-  SDValue RSign = LHSign; // Remainder sign is the same as LHS
-
-  LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
-  RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
-
-  LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
-  RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
-
-  SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
-  SDValue Rem = Div.getValue(1);
-
-  Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
-  Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
-
-  Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
-  Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
-
-  SDValue Res[2] = {
-    Div,
-    Rem
-  };
-  return DAG.getMergeValues(Res, DL);
-}
-
-// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
-SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  EVT VT = Op.getValueType();
-  SDValue X = Op.getOperand(0);
-  SDValue Y = Op.getOperand(1);
-
-  SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
-  SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
-  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
-
-  return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
-}
-
-SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue Src = Op.getOperand(0);
-
-  // result = trunc(src)
-  // if (src > 0.0 && src != result)
-  //   result += 1.0
-
-  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
-
-  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
-  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
-
-  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
-  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
-  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
-
-  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
-  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
-}
-
-static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
-  const unsigned FractBits = 52;
-  const unsigned ExpBits = 11;
-
-  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
-                                Hi,
-                                DAG.getConstant(FractBits - 32, SL, MVT::i32),
-                                DAG.getConstant(ExpBits, SL, MVT::i32));
-  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
-                            DAG.getConstant(1023, SL, MVT::i32));
-
-  return Exp;
-}
-
-SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue Src = Op.getOperand(0);
-
-  assert(Op.getValueType() == MVT::f64);
-
-  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
-  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
-
-  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
-
-  // Extract the upper half, since this is where we will find the sign and
-  // exponent.
-  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
-
-  SDValue Exp = extractF64Exponent(Hi, SL, DAG);
-
-  const unsigned FractBits = 52;
-
-  // Extract the sign bit.
-  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
-  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
-
-  // Extend back to to 64-bits.
-  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
-                                  Zero, SignBit);
-  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
-
-  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
-  const SDValue FractMask
-    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
-
-  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
-  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
-  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
-
-  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
-
-  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
-  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
-
-  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
-  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
-
-  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
-}
-
-SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue Src = Op.getOperand(0);
-
-  assert(Op.getValueType() == MVT::f64);
-
-  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
-  SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
-  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
-
-  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
-  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
-
-  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
-
-  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
-  SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
-  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
-
-  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
-}
-
-SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
-  // FNEARBYINT and FRINT are the same, except in their handling of FP
-  // exceptions. Those aren't really meaningful for us, and OpenCL only has
-  // rint, so just treat them as equivalent.
-  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
-}
-
-// XXX - May require not supporting f32 denormals?
-SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue X = Op.getOperand(0);
-
-  SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);
-
-  SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);
-
-  SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);
-
-  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
-  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
-  const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);
-
-  SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
-
-  SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
-
-  SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);
-
-  return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
-}
-
-SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue X = Op.getOperand(0);
-
-  SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
-
-  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
-  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
-  const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
-  const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);
-
-
-  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
-
-  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
-
-  SDValue Exp = extractF64Exponent(Hi, SL, DAG);
-
-  const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
-                                       MVT::i64);
-
-  SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
-  SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
-                          DAG.getConstant(INT64_C(0x0008000000000000), SL,
-                                          MVT::i64),
-                          Exp);
-
-  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
-  SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
-                              DAG.getConstant(0, SL, MVT::i64), Tmp0,
-                              ISD::SETNE);
-
-  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
-                             D, DAG.getConstant(0, SL, MVT::i64));
-  SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
-
-  K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
-  K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
-
-  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
-  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
-  SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
-
-  SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
-                            ExpEqNegOne,
-                            DAG.getConstantFP(1.0, SL, MVT::f64),
-                            DAG.getConstantFP(0.0, SL, MVT::f64));
-
-  SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
-
-  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
-  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
-
-  return K;
-}
-
-SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
-  EVT VT = Op.getValueType();
-
-  if (VT == MVT::f32)
-    return LowerFROUND32(Op, DAG);
-
-  if (VT == MVT::f64)
-    return LowerFROUND64(Op, DAG);
-
-  llvm_unreachable("unhandled type");
-}
-
-SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc SL(Op);
-  SDValue Src = Op.getOperand(0);
-
-  // result = trunc(src);
-  // if (src < 0.0 && src != result)
-  //   result += -1.0.
-
-  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
-
-  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
-  const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
-
-  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
-
-  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
-  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
-  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
-
-  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
-  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
-}
-
-SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
-                                               bool Signed) const {
-  SDLoc SL(Op);
-  SDValue Src = Op.getOperand(0);
-
-  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
-
-  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
-                           DAG.getConstant(0, SL, MVT::i32));
-  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
-                           DAG.getConstant(1, SL, MVT::i32));
-
-  SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
-                              SL, MVT::f64, Hi);
-
-  SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
-
-  SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
-                              DAG.getConstant(32, SL, MVT::i32));
-
-  return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
-}
-
-SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
-                                               SelectionDAG &DAG) const {
-  SDValue S0 = Op.getOperand(0);
-  if (S0.getValueType() != MVT::i64)
-    return SDValue();
-
-  EVT DestVT = Op.getValueType();
-  if (DestVT == MVT::f64)
-    return LowerINT_TO_FP64(Op, DAG, false);
-
-  assert(DestVT == MVT::f32);
-
-  SDLoc DL(Op);
-
-  // f32 uint_to_fp i64
-  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
-                           DAG.getConstant(0, DL, MVT::i32));
-  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
-  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
-                           DAG.getConstant(1, DL, MVT::i32));
-  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
-  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
-                        DAG.getConstantFP(4294967296.0f, DL, MVT::f32)); // 2^32
-  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
-}
-
-SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
-                                              SelectionDAG &DAG) const {
-  SDValue Src = Op.getOperand(0);
-  if (Src.getValueType() == MVT::i64 && Op.getValueType() == MVT::f64)
-    return LowerINT_TO_FP64(Op, DAG, true);
-
-  return SDValue();
-}
-
-SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
-                                               bool Signed) const {
-  SDLoc SL(Op);
-
-  SDValue Src = Op.getOperand(0);
-
-  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
-
-  SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
-                                 MVT::f64);
-  SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
-                                 MVT::f64);
-
-  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
-
-  SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
-
-
-  SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
-
-  SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
-                           MVT::i32, FloorMul);
-  SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
-
-  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi);
-
-  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
-}
-
-SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
-                                              SelectionDAG &DAG) const {
-  SDValue Src = Op.getOperand(0);
-
-  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
-    return LowerFP64_TO_INT(Op, DAG, true);
-
-  return SDValue();
-}
-
-SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
-                                              SelectionDAG &DAG) const {
-  SDValue Src = Op.getOperand(0);
-
-  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
-    return LowerFP64_TO_INT(Op, DAG, false);
-
-  return SDValue();
-}
-
-SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
-                                                     SelectionDAG &DAG) const {
-  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-  MVT VT = Op.getSimpleValueType();
-  MVT ScalarVT = VT.getScalarType();
-
-  if (!VT.isVector())
-    return SDValue();
-
-  SDValue Src = Op.getOperand(0);
-  SDLoc DL(Op);
-
-  // TODO: Don't scalarize on Evergreen?
-  unsigned NElts = VT.getVectorNumElements();
-  SmallVector<SDValue, 8> Args;
-  DAG.ExtractVectorElements(Src, Args, 0, NElts);
-
-  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
-  for (unsigned I = 0; I < NElts; ++I)
-    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
-
-  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
-}
-
-//===----------------------------------------------------------------------===//
-// Custom DAG optimizations
-//===----------------------------------------------------------------------===//
-
-static bool isU24(SDValue Op, SelectionDAG &DAG) {
-  APInt KnownZero, KnownOne;
-  EVT VT = Op.getValueType();
-  DAG.computeKnownBits(Op, KnownZero, KnownOne);
-
-  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
-}
-
-static bool isI24(SDValue Op, SelectionDAG &DAG) {
-  EVT VT = Op.getValueType();
-
-  // In order for this to be a signed 24-bit value, bit 23, must
-  // be a sign bit.
-  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
-                                     // as unsigned 24-bit values.
-         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
-}
-
-static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
-
-  SelectionDAG &DAG = DCI.DAG;
-  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-  EVT VT = Op.getValueType();
-
-  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
-  APInt KnownZero, KnownOne;
-  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
-  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
-    DCI.CommitTargetLoweringOpt(TLO);
-}
-
-template <typename IntTy>
-static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
-                               uint32_t Offset, uint32_t Width, SDLoc DL) {
-  if (Width + Offset < 32) {
-    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
-    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
-    return DAG.getConstant(Result, DL, MVT::i32);
-  }
-
-  return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
-}
-
-static bool usesAllNormalStores(SDNode *LoadVal) {
-  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
-    if (!ISD::isNormalStore(*I))
-      return false;
-  }
-
-  return true;
-}
-
-// If we have a copy of an illegal type, replace it with a load / store of an
-// equivalently sized legal type. This avoids intermediate bit pack / unpack
-// instructions emitted when handling extloads and truncstores. Ideally we could
-// recognize the pack / unpack pattern to eliminate it.
-SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
-                                                  DAGCombinerInfo &DCI) const {
-  if (!DCI.isBeforeLegalize())
-    return SDValue();
-
-  StoreSDNode *SN = cast<StoreSDNode>(N);
-  SDValue Value = SN->getValue();
-  EVT VT = Value.getValueType();
-
-  if (isTypeLegal(VT) || SN->isVolatile() ||
-      !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
-    return SDValue();
-
-  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
-  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
-    return SDValue();
-
-  EVT MemVT = LoadVal->getMemoryVT();
-
-  SDLoc SL(N);
-  SelectionDAG &DAG = DCI.DAG;
-  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
-
-  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
-                                LoadVT, SL,
-                                LoadVal->getChain(),
-                                LoadVal->getBasePtr(),
-                                LoadVal->getOffset(),
-                                LoadVT,
-                                LoadVal->getMemOperand());
-
-  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
-  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);
-
-  return DAG.getStore(SN->getChain(), SL, NewLoad,
-                      SN->getBasePtr(), SN->getMemOperand());
-}
-
-SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
-                                                DAGCombinerInfo &DCI) const {
-  EVT VT = N->getValueType(0);
-
-  if (VT.isVector() || VT.getSizeInBits() > 32)
-    return SDValue();
-
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc DL(N);
-
-  SDValue N0 = N->getOperand(0);
-  SDValue N1 = N->getOperand(1);
-  SDValue Mul;
-
-  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
-    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
-    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
-    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
-  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
-    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
-    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
-    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
-  } else {
-    return SDValue();
-  }
-
-  // We need to use sext even for MUL_U24, because MUL_U24 is used
-  // for signed multiply of 8 and 16-bit types.
-  return DAG.getSExtOrTrunc(Mul, DL, VT);
-}
-
-SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
-                                                DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-  SDLoc DL(N);
-
-  switch(N->getOpcode()) {
-    default: break;
-    case ISD::MUL:
-      return performMulCombine(N, DCI);
-    case AMDGPUISD::MUL_I24:
-    case AMDGPUISD::MUL_U24: {
-      SDValue N0 = N->getOperand(0);
-      SDValue N1 = N->getOperand(1);
-      simplifyI24(N0, DCI);
-      simplifyI24(N1, DCI);
-      return SDValue();
-    }
-  case ISD::SELECT: {
-    SDValue Cond = N->getOperand(0);
-    if (Cond.getOpcode() == ISD::SETCC && Cond.hasOneUse()) {
-      EVT VT = N->getValueType(0);
-      SDValue LHS = Cond.getOperand(0);
-      SDValue RHS = Cond.getOperand(1);
-      SDValue CC = Cond.getOperand(2);
-
-      SDValue True = N->getOperand(1);
-      SDValue False = N->getOperand(2);
-
-      if (VT == MVT::f32)
-        return CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
-
-      // TODO: Implement min / max Evergreen instructions.
-      if (VT == MVT::i32 &&
-          Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-        return CombineIMinMax(DL, VT, LHS, RHS, True, False, CC, DAG);
-      }
-    }
-
-    break;
-  }
-  case AMDGPUISD::BFE_I32:
-  case AMDGPUISD::BFE_U32: {
-    assert(!N->getValueType(0).isVector() &&
-           "Vector handling of BFE not implemented");
-    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
-    if (!Width)
-      break;
-
-    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
-    if (WidthVal == 0)
-      return DAG.getConstant(0, DL, MVT::i32);
-
-    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
-    if (!Offset)
-      break;
-
-    SDValue BitsFrom = N->getOperand(0);
-    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
-
-    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
-
-    if (OffsetVal == 0) {
-      // This is already sign / zero extended, so try to fold away extra BFEs.
-      unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
-
-      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
-      if (OpSignBits >= SignBits)
-        return BitsFrom;
-
-      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
-      if (Signed) {
-        // This is a sign_extend_inreg. Replace it to take advantage of existing
-        // DAG Combines. If not eliminated, we will match back to BFE during
-        // selection.
-
-        // TODO: The sext_inreg of extended types ends, although we can could
-        // handle them in a single BFE.
-        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
-                           DAG.getValueType(SmallVT));
-      }
-
-      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
-    }
-
-    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
-      if (Signed) {
-        return constantFoldBFE<int32_t>(DAG,
-                                        CVal->getSExtValue(),
-                                        OffsetVal,
-                                        WidthVal,
-                                        DL);
-      }
-
-      return constantFoldBFE<uint32_t>(DAG,
-                                       CVal->getZExtValue(),
-                                       OffsetVal,
-                                       WidthVal,
-                                       DL);
-    }
-
-    if ((OffsetVal + WidthVal) >= 32) {
-      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
-      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
-                         BitsFrom, ShiftVal);
-    }
-
-    if (BitsFrom.hasOneUse()) {
-      APInt Demanded = APInt::getBitsSet(32,
-                                         OffsetVal,
-                                         OffsetVal + WidthVal);
-
-      APInt KnownZero, KnownOne;
-      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
-                                            !DCI.isBeforeLegalizeOps());
-      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-      if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
-          TLI.SimplifyDemandedBits(BitsFrom, Demanded,
-                                   KnownZero, KnownOne, TLO)) {
-        DCI.CommitTargetLoweringOpt(TLO);
-      }
-    }
-
-    break;
-  }
-
-  case ISD::STORE:
-    return performStoreCombine(N, DCI);
-  }
-  return SDValue();
-}
-
-//===----------------------------------------------------------------------===//
-// Helper functions
-//===----------------------------------------------------------------------===//
-
-void AMDGPUTargetLowering::getOriginalFunctionArgs(
-                               SelectionDAG &DAG,
-                               const Function *F,
-                               const SmallVectorImpl<ISD::InputArg> &Ins,
-                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {
-
-  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
-    if (Ins[i].ArgVT == Ins[i].VT) {
-      OrigIns.push_back(Ins[i]);
-      continue;
-    }
-
-    EVT VT;
-    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
-      // Vector has been split into scalars.
-      VT = Ins[i].ArgVT.getVectorElementType();
-    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
-               Ins[i].ArgVT.getVectorElementType() !=
-               Ins[i].VT.getVectorElementType()) {
-      // Vector elements have been promoted
-      VT = Ins[i].ArgVT;
-    } else {
-      // Vector has been spilt into smaller vectors.
-      VT = Ins[i].VT;
-    }
-
-    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
-                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
-    OrigIns.push_back(Arg);
-  }
-}
-
-bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
-  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
-    return CFP->isExactlyValue(1.0);
-  }
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
-    return C->isAllOnesValue();
-  }
-  return false;
-}
-
-bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
-  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
-    return CFP->getValueAPF().isZero();
-  }
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
-    return C->isNullValue();
-  }
-  return false;
-}
-
-SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
-                                                  const TargetRegisterClass *RC,
-                                                   unsigned Reg, EVT VT) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  unsigned VirtualRegister;
-  if (!MRI.isLiveIn(Reg)) {
-    VirtualRegister = MRI.createVirtualRegister(RC);
-    MRI.addLiveIn(Reg, VirtualRegister);
-  } else {
-    VirtualRegister = MRI.getLiveInVirtReg(Reg);
-  }
-  return DAG.getRegister(VirtualRegister, VT);
-}
-
-#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
-
-const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
-  switch ((AMDGPUISD::NodeType)Opcode) {
-  case AMDGPUISD::FIRST_NUMBER: break;
-  // AMDIL DAG nodes
-  NODE_NAME_CASE(CALL);
-  NODE_NAME_CASE(UMUL);
-  NODE_NAME_CASE(RET_FLAG);
-  NODE_NAME_CASE(BRANCH_COND);
-
-  // AMDGPU DAG nodes
-  NODE_NAME_CASE(DWORDADDR)
-  NODE_NAME_CASE(FRACT)
-  NODE_NAME_CASE(CLAMP)
-  NODE_NAME_CASE(COS_HW)
-  NODE_NAME_CASE(SIN_HW)
-  NODE_NAME_CASE(FMAX_LEGACY)
-  NODE_NAME_CASE(FMIN_LEGACY)
-  NODE_NAME_CASE(FMAX3)
-  NODE_NAME_CASE(SMAX3)
-  NODE_NAME_CASE(UMAX3)
-  NODE_NAME_CASE(FMIN3)
-  NODE_NAME_CASE(SMIN3)
-  NODE_NAME_CASE(UMIN3)
-  NODE_NAME_CASE(URECIP)
-  NODE_NAME_CASE(DIV_SCALE)
-  NODE_NAME_CASE(DIV_FMAS)
-  NODE_NAME_CASE(DIV_FIXUP)
-  NODE_NAME_CASE(TRIG_PREOP)
-  NODE_NAME_CASE(RCP)
-  NODE_NAME_CASE(RSQ)
-  NODE_NAME_CASE(RSQ_LEGACY)
-  NODE_NAME_CASE(RSQ_CLAMPED)
-  NODE_NAME_CASE(LDEXP)
-  NODE_NAME_CASE(FP_CLASS)
-  NODE_NAME_CASE(DOT4)
-  NODE_NAME_CASE(CARRY)
-  NODE_NAME_CASE(BORROW)
-  NODE_NAME_CASE(BFE_U32)
-  NODE_NAME_CASE(BFE_I32)
-  NODE_NAME_CASE(BFI)
-  NODE_NAME_CASE(BFM)
-  NODE_NAME_CASE(BREV)
-  NODE_NAME_CASE(MUL_U24)
-  NODE_NAME_CASE(MUL_I24)
-  NODE_NAME_CASE(MAD_U24)
-  NODE_NAME_CASE(MAD_I24)
-  NODE_NAME_CASE(TEXTURE_FETCH)
-  NODE_NAME_CASE(EXPORT)
-  NODE_NAME_CASE(CONST_ADDRESS)
-  NODE_NAME_CASE(REGISTER_LOAD)
-  NODE_NAME_CASE(REGISTER_STORE)
-  NODE_NAME_CASE(LOAD_CONSTANT)
-  NODE_NAME_CASE(LOAD_INPUT)
-  NODE_NAME_CASE(SAMPLE)
-  NODE_NAME_CASE(SAMPLEB)
-  NODE_NAME_CASE(SAMPLED)
-  NODE_NAME_CASE(SAMPLEL)
-  NODE_NAME_CASE(CVT_F32_UBYTE0)
-  NODE_NAME_CASE(CVT_F32_UBYTE1)
-  NODE_NAME_CASE(CVT_F32_UBYTE2)
-  NODE_NAME_CASE(CVT_F32_UBYTE3)
-  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
-  NODE_NAME_CASE(CONST_DATA_PTR)
-  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
-  NODE_NAME_CASE(SENDMSG)
-  NODE_NAME_CASE(INTERP_MOV)
-  NODE_NAME_CASE(INTERP_P1)
-  NODE_NAME_CASE(INTERP_P2)
-  NODE_NAME_CASE(STORE_MSKOR)
-  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
-  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
-  }
-  return nullptr;
-}
-
-SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
-                                               DAGCombinerInfo &DCI,
-                                               unsigned &RefinementSteps,
-                                               bool &UseOneConstNR) const {
-  SelectionDAG &DAG = DCI.DAG;
-  EVT VT = Operand.getValueType();
-
-  if (VT == MVT::f32) {
-    RefinementSteps = 0;
-    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
-  }
-
-  // TODO: There is also f64 rsq instruction, but the documentation is less
-  // clear on its precision.
-
-  return SDValue();
-}
-
-SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
-                                               DAGCombinerInfo &DCI,
-                                               unsigned &RefinementSteps) const {
-  SelectionDAG &DAG = DCI.DAG;
-  EVT VT = Operand.getValueType();
-
-  if (VT == MVT::f32) {
-    // Reciprocal, < 1 ulp error.
-    //
-    // This reciprocal approximation converges to < 0.5 ulp error with one
-    // newton rhapson performed with two fused multiple adds (FMAs).
-
-    RefinementSteps = 0;
-    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
-  }
-
-  // TODO: There is also f64 rcp instruction, but the documentation is less
-  // clear on its precision.
-
-  return SDValue();
-}
-
-static void computeKnownBitsForMinMax(const SDValue Op0,
-                                      const SDValue Op1,
-                                      APInt &KnownZero,
-                                      APInt &KnownOne,
-                                      const SelectionDAG &DAG,
-                                      unsigned Depth) {
-  APInt Op0Zero, Op0One;
-  APInt Op1Zero, Op1One;
-  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
-  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);
-
-  KnownZero = Op0Zero & Op1Zero;
-  KnownOne = Op0One & Op1One;
-}
-
-void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
-  const SDValue Op,
-  APInt &KnownZero,
-  APInt &KnownOne,
-  const SelectionDAG &DAG,
-  unsigned Depth) const {
-
-  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
-
-  APInt KnownZero2;
-  APInt KnownOne2;
-  unsigned Opc = Op.getOpcode();
-
-  switch (Opc) {
-  default:
-    break;
-  case ISD::INTRINSIC_WO_CHAIN: {
-    // FIXME: The intrinsic should just use the node.
-    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
-    case AMDGPUIntrinsic::AMDGPU_imax:
-    case AMDGPUIntrinsic::AMDGPU_umax:
-    case AMDGPUIntrinsic::AMDGPU_imin:
-    case AMDGPUIntrinsic::AMDGPU_umin:
-      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
-                                KnownZero, KnownOne, DAG, Depth);
-      break;
-    default:
-      break;
-    }
-
-    break;
-  }
-  case AMDGPUISD::CARRY:
-  case AMDGPUISD::BORROW: {
-    KnownZero = APInt::getHighBitsSet(32, 31);
-    break;
-  }
-
-  case AMDGPUISD::BFE_I32:
-  case AMDGPUISD::BFE_U32: {
-    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
-    if (!CWidth)
-      return;
-
-    unsigned BitWidth = 32;
-    uint32_t Width = CWidth->getZExtValue() & 0x1f;
-
-    if (Opc == AMDGPUISD::BFE_U32)
-      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
-
-    break;
-  }
-  }
-}
-
-unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
-  SDValue Op,
-  const SelectionDAG &DAG,
-  unsigned Depth) const {
-  switch (Op.getOpcode()) {
-  case AMDGPUISD::BFE_I32: {
-    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
-    if (!Width)
-      return 1;
-
-    unsigned SignBits = 32 - Width->getZExtValue() + 1;
-    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
-    if (!Offset || !Offset->isNullValue())
-      return SignBits;
-
-    // TODO: Could probably figure something out with non-0 offsets.
-    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
-    return std::max(SignBits, Op0SignBits);
-  }
-
-  case AMDGPUISD::BFE_U32: {
-    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
-    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
-  }
-
-  case AMDGPUISD::CARRY:
-  case AMDGPUISD::BORROW:
-    return 31;
-
-  default:
-    return 1;
-  }
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUISelLowering.h (removed)
@@ -1,307 +0,0 @@
-//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface definition of the TargetLowering class that is common
-/// to all AMD GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUISELLOWERING_H
-#define LLVM_LIB_TARGET_R600_AMDGPUISELLOWERING_H
-
-#include "llvm/Target/TargetLowering.h"
-
-namespace llvm {
-
-class AMDGPUMachineFunction;
-class AMDGPUSubtarget;
-class MachineRegisterInfo;
-
-class AMDGPUTargetLowering : public TargetLowering {
-protected:
-  const AMDGPUSubtarget *Subtarget;
-
-private:
-  SDValue LowerConstantInitializer(const Constant* Init, const GlobalValue *GV,
-                                   const SDValue &InitPtr,
-                                   SDValue Chain,
-                                   SelectionDAG &DAG) const;
-  SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
-  /// \brief Lower vector stores by merging the vector elements into an integer
-  /// of the same bitwidth.
-  SDValue MergeVectorStore(const SDValue &Op, SelectionDAG &DAG) const;
-  /// \brief Split a vector store into multiple scalar stores.
-  /// \returns The resulting chain.
-
-  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;
-
-  SDValue LowerFROUND32(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
-
-  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
-  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
-
-  SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
-  SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
-
-  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
-
-  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
-protected:
-  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);
-  static EVT getEquivalentLoadRegType(LLVMContext &Context, EVT VT);
-
-  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
-                                     SelectionDAG &DAG) const;
-
-  /// \brief Split a vector load into a scalar load of each component.
-  SDValue ScalarizeVectorLoad(SDValue Op, SelectionDAG &DAG) const;
-
-  /// \brief Split a vector load into 2 loads of half the vector.
-  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;
-
-  /// \brief Split a vector store into a scalar store of each component.
-  SDValue ScalarizeVectorStore(SDValue Op, SelectionDAG &DAG) const;
-
-  /// \brief Split a vector store into 2 stores of half the vector.
-  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;
-
-  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
-  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
-                                    SmallVectorImpl<SDValue> &Results) const;
-  bool isHWTrueValue(SDValue Op) const;
-  bool isHWFalseValue(SDValue Op) const;
-
-  /// The SelectionDAGBuilder will automatically promote function arguments
-  /// with illegal types.  However, this does not work for the AMDGPU targets
-  /// since the function arguments are stored in memory as these illegal types.
-  /// In order to handle this properly we need to get the origianl types sizes
-  /// from the LLVM IR Function and fixup the ISD:InputArg values before
-  /// passing them to AnalyzeFormalArguments()
-  void getOriginalFunctionArgs(SelectionDAG &DAG,
-                               const Function *F,
-                               const SmallVectorImpl<ISD::InputArg> &Ins,
-                               SmallVectorImpl<ISD::InputArg> &OrigIns) const;
-  void AnalyzeFormalArguments(CCState &State,
-                              const SmallVectorImpl<ISD::InputArg> &Ins) const;
-
-public:
-  AMDGPUTargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
-
-  bool isFAbsFree(EVT VT) const override;
-  bool isFNegFree(EVT VT) const override;
-  bool isTruncateFree(EVT Src, EVT Dest) const override;
-  bool isTruncateFree(Type *Src, Type *Dest) const override;
-
-  bool isZExtFree(Type *Src, Type *Dest) const override;
-  bool isZExtFree(EVT Src, EVT Dest) const override;
-  bool isZExtFree(SDValue Val, EVT VT2) const override;
-
-  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
-
-  MVT getVectorIdxTy() const override;
-  bool isSelectSupported(SelectSupportKind) const override;
-
-  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
-  bool ShouldShrinkFPConstant(EVT VT) const override;
-  bool shouldReduceLoadWidth(SDNode *Load,
-                             ISD::LoadExtType ExtType,
-                             EVT ExtVT) const override;
-
-  bool isLoadBitCastBeneficial(EVT, EVT) const override;
-
-  bool storeOfVectorConstantIsCheap(EVT MemVT,
-                                    unsigned NumElem,
-                                    unsigned AS) const override;
-  bool isCheapToSpeculateCttz() const override;
-  bool isCheapToSpeculateCtlz() const override;
-
-  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
-                      bool isVarArg,
-                      const SmallVectorImpl<ISD::OutputArg> &Outs,
-                      const SmallVectorImpl<SDValue> &OutVals,
-                      SDLoc DL, SelectionDAG &DAG) const override;
-  SDValue LowerCall(CallLoweringInfo &CLI,
-                    SmallVectorImpl<SDValue> &InVals) const override;
-
-  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
-  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
-  void ReplaceNodeResults(SDNode * N,
-                          SmallVectorImpl<SDValue> &Results,
-                          SelectionDAG &DAG) const override;
-
-  SDValue LowerIntrinsicIABS(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerIntrinsicLRP(SDValue Op, SelectionDAG &DAG) const;
-  SDValue CombineFMinMaxLegacy(SDLoc DL,
-                               EVT VT,
-                               SDValue LHS,
-                               SDValue RHS,
-                               SDValue True,
-                               SDValue False,
-                               SDValue CC,
-                               DAGCombinerInfo &DCI) const;
-  SDValue CombineIMinMax(SDLoc DL,
-                         EVT VT,
-                         SDValue LHS,
-                         SDValue RHS,
-                         SDValue True,
-                         SDValue False,
-                         SDValue CC,
-                         SelectionDAG &DAG) const;
-
-  const char* getTargetNodeName(unsigned Opcode) const override;
-
-  SDValue getRsqrtEstimate(SDValue Operand,
-                           DAGCombinerInfo &DCI,
-                           unsigned &RefinementSteps,
-                           bool &UseOneConstNR) const override;
-  SDValue getRecipEstimate(SDValue Operand,
-                           DAGCombinerInfo &DCI,
-                           unsigned &RefinementSteps) const override;
-
-  virtual SDNode *PostISelFolding(MachineSDNode *N,
-                                  SelectionDAG &DAG) const {
-    return N;
-  }
-
-  /// \brief Determine which of the bits specified in \p Mask are known to be
-  /// either zero or one and return them in the \p KnownZero and \p KnownOne
-  /// bitsets.
-  void computeKnownBitsForTargetNode(const SDValue Op,
-                                     APInt &KnownZero,
-                                     APInt &KnownOne,
-                                     const SelectionDAG &DAG,
-                                     unsigned Depth = 0) const override;
-
-  unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const SelectionDAG &DAG,
-                                           unsigned Depth = 0) const override;
-
-  /// \brief Helper function that adds Reg to the LiveIn list of the DAG's
-  /// MachineFunction.
-  ///
-  /// \returns a RegisterSDNode representing Reg.
-  virtual SDValue CreateLiveInRegister(SelectionDAG &DAG,
-                                       const TargetRegisterClass *RC,
-                                       unsigned Reg, EVT VT) const;
-};
-
-namespace AMDGPUISD {
-
-enum NodeType : unsigned {
-  // AMDIL ISD Opcodes
-  FIRST_NUMBER = ISD::BUILTIN_OP_END,
-  CALL,        // Function call based on a single integer
-  UMUL,        // 32bit unsigned multiplication
-  RET_FLAG,
-  BRANCH_COND,
-  // End AMDIL ISD Opcodes
-  DWORDADDR,
-  FRACT,
-  CLAMP,
-
-  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
-  // Denormals handled on some parts.
-  COS_HW,
-  SIN_HW,
-  FMAX_LEGACY,
-  FMIN_LEGACY,
-  FMAX3,
-  SMAX3,
-  UMAX3,
-  FMIN3,
-  SMIN3,
-  UMIN3,
-  URECIP,
-  DIV_SCALE,
-  DIV_FMAS,
-  DIV_FIXUP,
-  TRIG_PREOP, // 1 ULP max error for f64
-
-  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
-  //            For f64, max error 2^29 ULP, handles denormals.
-  RCP,
-  RSQ,
-  RSQ_LEGACY,
-  RSQ_CLAMPED,
-  LDEXP,
-  FP_CLASS,
-  DOT4,
-  CARRY,
-  BORROW,
-  BFE_U32, // Extract range of bits with zero extension to 32-bits.
-  BFE_I32, // Extract range of bits with sign extension to 32-bits.
-  BFI, // (src0 & src1) | (~src0 & src2)
-  BFM, // Insert a range of bits into a 32-bit word.
-  BREV, // Reverse bits.
-  MUL_U24,
-  MUL_I24,
-  MAD_U24,
-  MAD_I24,
-  TEXTURE_FETCH,
-  EXPORT,
-  CONST_ADDRESS,
-  REGISTER_LOAD,
-  REGISTER_STORE,
-  LOAD_INPUT,
-  SAMPLE,
-  SAMPLEB,
-  SAMPLED,
-  SAMPLEL,
-
-  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
-  CVT_F32_UBYTE0,
-  CVT_F32_UBYTE1,
-  CVT_F32_UBYTE2,
-  CVT_F32_UBYTE3,
-  /// This node is for VLIW targets and it is used to represent a vector
-  /// that is stored in consecutive registers with the same channel.
-  /// For example:
-  ///   |X  |Y|Z|W|
-  /// T0|v.x| | | |
-  /// T1|v.y| | | |
-  /// T2|v.z| | | |
-  /// T3|v.w| | | |
-  BUILD_VERTICAL_VECTOR,
-  /// Pointer to the start of the shader's constant data.
-  CONST_DATA_PTR,
-  SENDMSG,
-  INTERP_MOV,
-  INTERP_P1,
-  INTERP_P2,
-  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
-  STORE_MSKOR,
-  LOAD_CONSTANT,
-  TBUFFER_STORE_FORMAT,
-  LAST_AMDGPU_ISD_NUMBER
-};
-
-
-} // End namespace AMDGPUISD
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp (removed)
@@ -1,369 +0,0 @@
-//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Implementation of the TargetInstrInfo class that is common to all
-/// AMD GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPURegisterInfo.h"
-#include "AMDGPUTargetMachine.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-
-using namespace llvm;
-
-#define GET_INSTRINFO_CTOR_DTOR
-#define GET_INSTRINFO_NAMED_OPS
-#define GET_INSTRMAP_INFO
-#include "AMDGPUGenInstrInfo.inc"
-
-// Pin the vtable to this file.
-void AMDGPUInstrInfo::anchor() {}
-
-AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
-    : AMDGPUGenInstrInfo(-1, -1), ST(st) {}
-
-const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
-  return RI;
-}
-
-bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
-                                           unsigned &SrcReg, unsigned &DstReg,
-                                           unsigned &SubIdx) const {
-// TODO: Implement this function
-  return false;
-}
-
-unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
-                                             int &FrameIndex) const {
-// TODO: Implement this function
-  return 0;
-}
-
-unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
-                                                   int &FrameIndex) const {
-// TODO: Implement this function
-  return 0;
-}
-
-bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
-                                          const MachineMemOperand *&MMO,
-                                          int &FrameIndex) const {
-// TODO: Implement this function
-  return false;
-}
-unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
-                                              int &FrameIndex) const {
-// TODO: Implement this function
-  return 0;
-}
-unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
-                                                    int &FrameIndex) const {
-// TODO: Implement this function
-  return 0;
-}
-bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
-                                           const MachineMemOperand *&MMO,
-                                           int &FrameIndex) const {
-// TODO: Implement this function
-  return false;
-}
-
-MachineInstr *
-AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
-                                      MachineBasicBlock::iterator &MBBI,
-                                      LiveVariables *LV) const {
-// TODO: Implement this function
-  return nullptr;
-}
-
-void
-AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MI,
-                                    unsigned SrcReg, bool isKill,
-                                    int FrameIndex,
-                                    const TargetRegisterClass *RC,
-                                    const TargetRegisterInfo *TRI) const {
-  llvm_unreachable("Not Implemented");
-}
-
-void
-AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                     MachineBasicBlock::iterator MI,
-                                     unsigned DestReg, int FrameIndex,
-                                     const TargetRegisterClass *RC,
-                                     const TargetRegisterInfo *TRI) const {
-  llvm_unreachable("Not Implemented");
-}
-
-bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const {
-  MachineBasicBlock *MBB = MI->getParent();
-  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                               AMDGPU::OpName::addr);
-   // addr is a custom operand with multiple MI operands, and only the
-   // first MI operand is given a name.
-  int RegOpIdx = OffsetOpIdx + 1;
-  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                             AMDGPU::OpName::chan);
-  if (isRegisterLoad(*MI)) {
-    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                              AMDGPU::OpName::dst);
-    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
-    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
-    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
-    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
-    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
-      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
-                    getIndirectAddrRegClass()->getRegister(Address));
-    } else {
-      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
-                        Address, OffsetReg);
-    }
-  } else if (isRegisterStore(*MI)) {
-    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
-                                              AMDGPU::OpName::val);
-    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
-    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
-    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
-    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
-    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
-      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
-                    MI->getOperand(ValOpIdx).getReg());
-    } else {
-      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
-                         calculateIndirectAddress(RegIndex, Channel),
-                         OffsetReg);
-    }
-  } else {
-    return false;
-  }
-
-  MBB->erase(MI);
-  return true;
-}
-
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
-    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
-// TODO: Implement this function
-  return nullptr;
-}
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
-    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
-  // TODO: Implement this function
-  return nullptr;
-}
-bool AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
-                                           ArrayRef<unsigned> Ops) const {
-  // TODO: Implement this function
-  return false;
-}
-bool
-AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
-                                 unsigned Reg, bool UnfoldLoad,
-                                 bool UnfoldStore,
-                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool
-AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
-                                    SmallVectorImpl<SDNode*> &NewNodes) const {
-  // TODO: Implement this function
-  return false;
-}
-
-unsigned
-AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
-                                           bool UnfoldLoad, bool UnfoldStore,
-                                           unsigned *LoadRegIndex) const {
-  // TODO: Implement this function
-  return 0;
-}
-
-bool AMDGPUInstrInfo::enableClusterLoads() const {
-  return true;
-}
-
-// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
-// the first 16 loads will be interleaved with the stores, and the next 16 will
-// be clustered as expected. It should really split into 2 16 store batches.
-//
-// Loads are clustered until this returns false, rather than trying to schedule
-// groups of stores. This also means we have to deal with saying different
-// address space loads should be clustered, and ones which might cause bank
-// conflicts.
-//
-// This might be deprecated so it might not be worth that much effort to fix.
-bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
-                                              int64_t Offset0, int64_t Offset1,
-                                              unsigned NumLoads) const {
-  assert(Offset1 > Offset0 &&
-         "Second offset should be larger than first offset!");
-  // If we have less than 16 loads in a row, and the offsets are within 64
-  // bytes, then schedule together.
-
-  // A cacheline is 64 bytes (for global memory).
-  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
-}
-
-bool
-AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
-  const {
-  // TODO: Implement this function
-  return true;
-}
-void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
-                                MachineBasicBlock::iterator MI) const {
-  // TODO: Implement this function
-}
-
-bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool AMDGPUInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
-                                        ArrayRef<MachineOperand> Pred2) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
-                                      std::vector<MachineOperand> &Pred) const {
-  // TODO: Implement this function
-  return false;
-}
-
-bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
-  // TODO: Implement this function
-  return MI->getDesc().isPredicable();
-}
-
-bool
-AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
-  // TODO: Implement this function
-  return true;
-}
-
-bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
-  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
-}
-
-bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
-  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
-}
-
-int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
-  const MachineRegisterInfo &MRI = MF.getRegInfo();
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
-  int Offset = -1;
-
-  if (MFI->getNumObjects() == 0) {
-    return -1;
-  }
-
-  if (MRI.livein_empty()) {
-    return 0;
-  }
-
-  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
-  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
-                                            LE = MRI.livein_end();
-                                            LI != LE; ++LI) {
-    unsigned Reg = LI->first;
-    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
-        !IndirectRC->contains(Reg))
-      continue;
-
-    unsigned RegIndex;
-    unsigned RegEnd;
-    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
-                                                          ++RegIndex) {
-      if (IndirectRC->getRegister(RegIndex) == Reg)
-        break;
-    }
-    Offset = std::max(Offset, (int)RegIndex);
-  }
-
-  return Offset + 1;
-}
-
-int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
-  int Offset = 0;
-  const MachineFrameInfo *MFI = MF.getFrameInfo();
-
-  // Variable sized objects are not supported
-  assert(!MFI->hasVarSizedObjects());
-
-  if (MFI->getNumObjects() == 0) {
-    return -1;
-  }
-
-  Offset = MF.getSubtarget().getFrameLowering()->getFrameIndexOffset(MF, -1);
-
-  return getIndirectIndexBegin(MF) + Offset;
-}
-
-int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
-  switch (Channels) {
-  default: return Opcode;
-  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
-  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
-  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
-  }
-}
-
-// Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
-// header files, so we need to wrap it in a function that takes unsigned
-// instead.
-namespace llvm {
-namespace AMDGPU {
-static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
-  return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
-}
-}
-}
-
-// This must be kept in sync with the SISubtarget class in SIInstrInfo.td
-enum SISubtarget {
-  SI = 0,
-  VI = 1
-};
-
-static enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
-  switch (Gen) {
-  default:
-    return SI;
-  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
-    return VI;
-  }
-}
-
-int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
-  int MCOp = AMDGPU::getMCOpcode(
-      Opcode, AMDGPUSubtargetToSISubtarget(ST.getGeneration()));
-
-  // -1 means that Opcode is already a native instruction.
-  if (MCOp == -1)
-    return Opcode;
-
-  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
-  // no encoding in the given subtarget generation.
-  if (MCOp == (uint16_t)-1)
-    return -1;
-
-  return MCOp;
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.h (removed)
@@ -1,206 +0,0 @@
-//===-- AMDGPUInstrInfo.h - AMDGPU Instruction Information ------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Contains the definition of a TargetInstrInfo class that is common
-/// to all AMD GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUINSTRINFO_H
-
-#include "AMDGPURegisterInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include <map>
-
-#define GET_INSTRINFO_HEADER
-#define GET_INSTRINFO_ENUM
-#define GET_INSTRINFO_OPERAND_ENUM
-#include "AMDGPUGenInstrInfo.inc"
-
-#define OPCODE_IS_ZERO_INT AMDGPU::PRED_SETE_INT
-#define OPCODE_IS_NOT_ZERO_INT AMDGPU::PRED_SETNE_INT
-#define OPCODE_IS_ZERO AMDGPU::PRED_SETE
-#define OPCODE_IS_NOT_ZERO AMDGPU::PRED_SETNE
-
-namespace llvm {
-
-class AMDGPUSubtarget;
-class MachineFunction;
-class MachineInstr;
-class MachineInstrBuilder;
-
-class AMDGPUInstrInfo : public AMDGPUGenInstrInfo {
-private:
-  const AMDGPURegisterInfo RI;
-  virtual void anchor();
-protected:
-  const AMDGPUSubtarget &ST;
-public:
-  explicit AMDGPUInstrInfo(const AMDGPUSubtarget &st);
-
-  virtual const AMDGPURegisterInfo &getRegisterInfo() const = 0;
-
-  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
-                             unsigned &DstReg, unsigned &SubIdx) const override;
-
-  unsigned isLoadFromStackSlot(const MachineInstr *MI,
-                               int &FrameIndex) const override;
-  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
-                                     int &FrameIndex) const override;
-  bool hasLoadFromStackSlot(const MachineInstr *MI,
-                            const MachineMemOperand *&MMO,
-                            int &FrameIndex) const override;
-  unsigned isStoreFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
-  unsigned isStoreFromStackSlotPostFE(const MachineInstr *MI,
-                                      int &FrameIndex) const;
-  bool hasStoreFromStackSlot(const MachineInstr *MI,
-                             const MachineMemOperand *&MMO,
-                             int &FrameIndex) const;
-
-  MachineInstr *
-  convertToThreeAddress(MachineFunction::iterator &MFI,
-                        MachineBasicBlock::iterator &MBBI,
-                        LiveVariables *LV) const override;
-
-
-  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
-
-  void storeRegToStackSlot(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator MI,
-                           unsigned SrcReg, bool isKill, int FrameIndex,
-                           const TargetRegisterClass *RC,
-                           const TargetRegisterInfo *TRI) const override;
-  void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                            MachineBasicBlock::iterator MI,
-                            unsigned DestReg, int FrameIndex,
-                            const TargetRegisterClass *RC,
-                            const TargetRegisterInfo *TRI) const override;
-
-protected:
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                      ArrayRef<unsigned> Ops,
-                                      MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
-  MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
-                                      ArrayRef<unsigned> Ops,
-                                      MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
-
-public:
-  /// \returns the smallest register index that will be accessed by an indirect
-  /// read or write or -1 if indirect addressing is not used by this program.
-  int getIndirectIndexBegin(const MachineFunction &MF) const;
-
-  /// \returns the largest register index that will be accessed by an indirect
-  /// read or write or -1 if indirect addressing is not used by this program.
-  int getIndirectIndexEnd(const MachineFunction &MF) const;
-
-  bool canFoldMemoryOperand(const MachineInstr *MI,
-                            ArrayRef<unsigned> Ops) const override;
-  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
-                        unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
-                        SmallVectorImpl<MachineInstr *> &NewMIs) const override;
-  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
-                           SmallVectorImpl<SDNode *> &NewNodes) const override;
-  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
-                               bool UnfoldLoad, bool UnfoldStore,
-                               unsigned *LoadRegIndex = nullptr) const override;
-
-  bool enableClusterLoads() const override;
-
-  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
-                               int64_t Offset1, int64_t Offset2,
-                               unsigned NumLoads) const override;
-
-  bool
-  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
-  void insertNoop(MachineBasicBlock &MBB,
-                  MachineBasicBlock::iterator MI) const override;
-  bool isPredicated(const MachineInstr *MI) const override;
-  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
-                         ArrayRef<MachineOperand> Pred2) const override;
-  bool DefinesPredicate(MachineInstr *MI,
-                        std::vector<MachineOperand> &Pred) const override;
-  bool isPredicable(MachineInstr *MI) const override;
-  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
-
-  // Helper functions that check the opcode for status information
-  bool isRegisterStore(const MachineInstr &MI) const;
-  bool isRegisterLoad(const MachineInstr &MI) const;
-
-  /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
-  /// Return -1 if the target-specific opcode for the pseudo instruction does
-  /// not exist. If Opcode is not a pseudo instruction, this is identity.
-  int pseudoToMCOpcode(int Opcode) const;
-
-  /// \brief Return the descriptor of the target-specific machine instruction
-  /// that corresponds to the specified pseudo or native opcode.
-  const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
-    return get(pseudoToMCOpcode(Opcode));
-  }
-
-//===---------------------------------------------------------------------===//
-// Pure virtual functions to be implemented by sub-classes.
-//===---------------------------------------------------------------------===//
-
-  virtual bool isMov(unsigned opcode) const = 0;
-
-  /// \brief Calculate the "Indirect Address" for the given \p RegIndex and
-  ///        \p Channel
-  ///
-  /// We model indirect addressing using a virtual address space that can be
-/// accessed with loads and stores.  The "Indirect Address" is the memory
-  /// address in this virtual address space that maps to the given \p RegIndex
-  /// and \p Channel.
-  virtual unsigned calculateIndirectAddress(unsigned RegIndex,
-                                            unsigned Channel) const = 0;
-
-  /// \returns The register class to be used for loading and storing values
-  /// from an "Indirect Address" .
-  virtual const TargetRegisterClass *getIndirectAddrRegClass() const = 0;
-
-  /// \brief Build instruction(s) for an indirect register write.
-  ///
-  /// \returns The instruction that performs the indirect register write
-  virtual MachineInstrBuilder buildIndirectWrite(MachineBasicBlock *MBB,
-                                    MachineBasicBlock::iterator I,
-                                    unsigned ValueReg, unsigned Address,
-                                    unsigned OffsetReg) const = 0;
-
-  /// \brief Build instruction(s) for an indirect register read.
-  ///
-  /// \returns The instruction that performs the indirect register read
-  virtual MachineInstrBuilder buildIndirectRead(MachineBasicBlock *MBB,
-                                    MachineBasicBlock::iterator I,
-                                    unsigned ValueReg, unsigned Address,
-                                    unsigned OffsetReg) const = 0;
-
-  /// \brief Build a MOV instruction.
-  virtual MachineInstr *buildMovInstr(MachineBasicBlock *MBB,
-                                      MachineBasicBlock::iterator I,
-                                      unsigned DstReg, unsigned SrcReg) const = 0;
-
-  /// \brief Given a MIMG \p Opcode that writes all 4 channels, return the
-  /// equivalent opcode that writes \p Channels Channels.
-  int getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const;
-
-};
-
-namespace AMDGPU {
-  int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex);
-}  // End namespace AMDGPU
-
-} // End llvm namespace
-
-#define AMDGPU_FLAG_REGISTER_LOAD  (UINT64_C(1) << 63)
-#define AMDGPU_FLAG_REGISTER_STORE (UINT64_C(1) << 62)
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.td (removed)
@@ -1,245 +0,0 @@
-//===-- AMDGPUInstrInfo.td - AMDGPU DAG nodes --------------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains DAG node definitions for the AMDGPU target.
-//
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-// AMDGPU DAG Profiles
-//===----------------------------------------------------------------------===//
-
-def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [
-  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>, SDTCisInt<3>
-]>;
-
-def AMDGPUTrigPreOp : SDTypeProfile<1, 2,
-  [SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]
->;
-
-def AMDGPULdExpOp : SDTypeProfile<1, 2,
-  [SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]
->;
-
-def AMDGPUFPClassOp : SDTypeProfile<1, 2,
-  [SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>]
->;
-
-def AMDGPUDivScaleOp : SDTypeProfile<2, 3,
-  [SDTCisFP<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisSameAs<0, 4>]
->;
-
-// float, float, float, vcc
-def AMDGPUFmasOp : SDTypeProfile<1, 4,
-  [SDTCisFP<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisInt<4>]
->;
-
-//===----------------------------------------------------------------------===//
-// AMDGPU DAG Nodes
-//
-
-// This argument to this node is a dword address.
-def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>;
-
-def AMDGPUcos : SDNode<"AMDGPUISD::COS_HW", SDTFPUnaryOp>;
-def AMDGPUsin : SDNode<"AMDGPUISD::SIN_HW", SDTFPUnaryOp>;
-
-// out = a - floor(a)
-def AMDGPUfract : SDNode<"AMDGPUISD::FRACT", SDTFPUnaryOp>;
-
-// out = 1.0 / a
-def AMDGPUrcp : SDNode<"AMDGPUISD::RCP", SDTFPUnaryOp>;
-
-// out = 1.0 / sqrt(a)
-def AMDGPUrsq : SDNode<"AMDGPUISD::RSQ", SDTFPUnaryOp>;
-
-// out = 1.0 / sqrt(a)
-def AMDGPUrsq_legacy : SDNode<"AMDGPUISD::RSQ_LEGACY", SDTFPUnaryOp>;
-
-// out = 1.0 / sqrt(a) result clamped to +/- max_float.
-def AMDGPUrsq_clamped : SDNode<"AMDGPUISD::RSQ_CLAMPED", SDTFPUnaryOp>;
-
-def AMDGPUldexp : SDNode<"AMDGPUISD::LDEXP", AMDGPULdExpOp>;
-
-def AMDGPUfp_class : SDNode<"AMDGPUISD::FP_CLASS", AMDGPUFPClassOp>;
-
-// out = max(a, b) a and b are floats, where a nan comparison fails.
-// This is not commutative because this gives the second operand:
-//   x < nan ? x : nan -> nan
-//   nan < x ? nan : x -> x
-def AMDGPUfmax_legacy : SDNode<"AMDGPUISD::FMAX_LEGACY", SDTFPBinOp,
-  []
->;
-
-def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPTernaryOp, []>;
-
-// out = max(a, b) a and b are signed ints
-def AMDGPUsmax : SDNode<"AMDGPUISD::SMAX", SDTIntBinOp,
-  [SDNPCommutative, SDNPAssociative]
->;
-
-// out = max(a, b) a and b are unsigned ints
-def AMDGPUumax : SDNode<"AMDGPUISD::UMAX", SDTIntBinOp,
-  [SDNPCommutative, SDNPAssociative]
->;
-
-// out = min(a, b) a and b are floats, where a nan comparison fails.
-def AMDGPUfmin_legacy : SDNode<"AMDGPUISD::FMIN_LEGACY", SDTFPBinOp,
-  []
->;
-
-// FIXME: TableGen doesn't like commutative instructions with more
-// than 2 operands.
-// out = max(a, b, c) a, b and c are floats
-def AMDGPUfmax3 : SDNode<"AMDGPUISD::FMAX3", SDTFPTernaryOp,
-  [/*SDNPCommutative, SDNPAssociative*/]
->;
-
-// out = max(a, b, c) a, b, and c are signed ints
-def AMDGPUsmax3 : SDNode<"AMDGPUISD::SMAX3", AMDGPUDTIntTernaryOp,
-  [/*SDNPCommutative, SDNPAssociative*/]
->;
-
-// out = max(a, b, c) a, b and c are unsigned ints
-def AMDGPUumax3 : SDNode<"AMDGPUISD::UMAX3", AMDGPUDTIntTernaryOp,
-  [/*SDNPCommutative, SDNPAssociative*/]
->;
-
-// out = min(a, b, c) a, b and c are floats
-def AMDGPUfmin3 : SDNode<"AMDGPUISD::FMIN3", SDTFPTernaryOp,
-  [/*SDNPCommutative, SDNPAssociative*/]
->;
-
-// out = min(a, b, c) a, b and c are signed ints
-def AMDGPUsmin3 : SDNode<"AMDGPUISD::SMIN3", AMDGPUDTIntTernaryOp,
-  [/*SDNPCommutative, SDNPAssociative*/]
->;
-
-// out = min(a, b, c) a, b and c are unsigned ints
-def AMDGPUumin3 : SDNode<"AMDGPUISD::UMIN3", AMDGPUDTIntTernaryOp,
-  [/*SDNPCommutative, SDNPAssociative*/]
->;
-
-// out = (src0 + src1 > 0xFFFFFFFF) ? 1 : 0
-def AMDGPUcarry : SDNode<"AMDGPUISD::CARRY", SDTIntBinOp, []>;
-
-// out = (src1 > src0) ? 1 : 0
-def AMDGPUborrow : SDNode<"AMDGPUISD::BORROW", SDTIntBinOp, []>;
-
-
-def AMDGPUcvt_f32_ubyte0 : SDNode<"AMDGPUISD::CVT_F32_UBYTE0",
-  SDTIntToFPOp, []>;
-def AMDGPUcvt_f32_ubyte1 : SDNode<"AMDGPUISD::CVT_F32_UBYTE1",
-  SDTIntToFPOp, []>;
-def AMDGPUcvt_f32_ubyte2 : SDNode<"AMDGPUISD::CVT_F32_UBYTE2",
-  SDTIntToFPOp, []>;
-def AMDGPUcvt_f32_ubyte3 : SDNode<"AMDGPUISD::CVT_F32_UBYTE3",
-  SDTIntToFPOp, []>;
-
-
-// urecip - This operation is a helper for integer division, it returns the
-// result of 1 / a as a fractional unsigned integer.
-// out = (2^32 / a) + e
-// e is rounding error
-def AMDGPUurecip : SDNode<"AMDGPUISD::URECIP", SDTIntUnaryOp>;
-
-// Special case divide preop and flags.
-def AMDGPUdiv_scale : SDNode<"AMDGPUISD::DIV_SCALE", AMDGPUDivScaleOp>;
-
-//  Special case divide FMA with scale and flags (src0 = Quotient,
-//  src1 = Denominator, src2 = Numerator).
-def AMDGPUdiv_fmas : SDNode<"AMDGPUISD::DIV_FMAS", AMDGPUFmasOp>;
-
-// Single or double precision division fixup.
-// Special case divide fixup and flags(src0 = Quotient, src1 =
-// Denominator, src2 = Numerator).
-def AMDGPUdiv_fixup : SDNode<"AMDGPUISD::DIV_FIXUP", SDTFPTernaryOp>;
-
-// Look Up 2.0 / pi src0 with segment select src1[4:0]
-def AMDGPUtrig_preop : SDNode<"AMDGPUISD::TRIG_PREOP", AMDGPUTrigPreOp>;
-
-def AMDGPUregister_load : SDNode<"AMDGPUISD::REGISTER_LOAD",
-                          SDTypeProfile<1, 2, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
-                          [SDNPHasChain, SDNPMayLoad]>;
-
-def AMDGPUregister_store : SDNode<"AMDGPUISD::REGISTER_STORE",
-                           SDTypeProfile<0, 3, [SDTCisPtrTy<1>, SDTCisInt<2>]>,
-                           [SDNPHasChain, SDNPMayStore]>;
-
-// MSKOR instructions are atomic memory instructions used mainly for storing
-// 8-bit and 16-bit values.  The definition is:
-//
-// MSKOR(dst, mask, src) MEM[dst] = ((MEM[dst] & ~mask) | src)
-//
-// src0: vec4(src, 0, 0, mask)
-// src1: dst - rat offset (aka pointer) in dwords
-def AMDGPUstore_mskor : SDNode<"AMDGPUISD::STORE_MSKOR",
-                        SDTypeProfile<0, 2, []>,
-                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
-
-def AMDGPUround : SDNode<"ISD::FROUND",
-                         SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>>;
-
-def AMDGPUbfe_u32 : SDNode<"AMDGPUISD::BFE_U32", AMDGPUDTIntTernaryOp>;
-def AMDGPUbfe_i32 : SDNode<"AMDGPUISD::BFE_I32", AMDGPUDTIntTernaryOp>;
-def AMDGPUbfi : SDNode<"AMDGPUISD::BFI", AMDGPUDTIntTernaryOp>;
-def AMDGPUbfm : SDNode<"AMDGPUISD::BFM", SDTIntBinOp>;
-
-def AMDGPUbrev : SDNode<"AMDGPUISD::BREV", SDTIntUnaryOp>;
-
-// Signed and unsigned 24-bit multiply.  The highest 8-bits are ignored when
-// performing the multiply.  The result is a 32-bit value.
-def AMDGPUmul_u24 : SDNode<"AMDGPUISD::MUL_U24", SDTIntBinOp,
-  [SDNPCommutative]
->;
-def AMDGPUmul_i24 : SDNode<"AMDGPUISD::MUL_I24", SDTIntBinOp,
-  [SDNPCommutative]
->;
-
-def AMDGPUmad_u24 : SDNode<"AMDGPUISD::MAD_U24", AMDGPUDTIntTernaryOp,
-  []
->;
-def AMDGPUmad_i24 : SDNode<"AMDGPUISD::MAD_I24", AMDGPUDTIntTernaryOp,
-  []
->;
-
-def AMDGPUsendmsg : SDNode<"AMDGPUISD::SENDMSG",
-                    SDTypeProfile<0, 1, [SDTCisInt<0>]>,
-                    [SDNPHasChain, SDNPInGlue]>;
-
-def AMDGPUinterp_mov : SDNode<"AMDGPUISD::INTERP_MOV",
-                        SDTypeProfile<1, 3, [SDTCisFP<0>]>,
-                        [SDNPInGlue]>;
-
-def AMDGPUinterp_p1 : SDNode<"AMDGPUISD::INTERP_P1",
-                      SDTypeProfile<1, 3, [SDTCisFP<0>]>,
-                      [SDNPInGlue, SDNPOutGlue]>;
-
-def AMDGPUinterp_p2 : SDNode<"AMDGPUISD::INTERP_P2",
-                      SDTypeProfile<1, 4, [SDTCisFP<0>]>,
-                      [SDNPInGlue]>;
-
-//===----------------------------------------------------------------------===//
-// Flow Control Profile Types
-//===----------------------------------------------------------------------===//
-// Branch instruction where second and third are basic blocks
-def SDTIL_BRCond : SDTypeProfile<0, 2, [
-    SDTCisVT<0, OtherVT>
-    ]>;
-
-//===----------------------------------------------------------------------===//
-// Flow Control DAG Nodes
-//===----------------------------------------------------------------------===//
-def IL_brcond      : SDNode<"AMDGPUISD::BRANCH_COND", SDTIL_BRCond, [SDNPHasChain]>;
-
-//===----------------------------------------------------------------------===//
-// Call/Return DAG Nodes
-//===----------------------------------------------------------------------===//
-def IL_retflag       : SDNode<"AMDGPUISD::RET_FLAG", SDTNone,
-    [SDNPHasChain, SDNPOptInGlue]>;

Removed: llvm/trunk/lib/Target/R600/AMDGPUInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUInstructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUInstructions.td (removed)
@@ -1,682 +0,0 @@
-//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file contains instruction defs that are common to all hw codegen
-// targets.
-//
-//===----------------------------------------------------------------------===//
-
-class AMDGPUInst <dag outs, dag ins, string asm, list<dag> pattern> : Instruction {
-  field bit isRegisterLoad = 0;
-  field bit isRegisterStore = 0;
-
-  let Namespace = "AMDGPU";
-  let OutOperandList = outs;
-  let InOperandList = ins;
-  let AsmString = asm;
-  let Pattern = pattern;
-  let Itinerary = NullALU;
-
-  let TSFlags{63} = isRegisterLoad;
-  let TSFlags{62} = isRegisterStore;
-}
-
-class AMDGPUShaderInst <dag outs, dag ins, string asm, list<dag> pattern>
-    : AMDGPUInst<outs, ins, asm, pattern> {
-
-  field bits<32> Inst = 0xffffffff;
-
-}
-
-def FP32Denormals : Predicate<"Subtarget.hasFP32Denormals()">;
-def FP64Denormals : Predicate<"Subtarget.hasFP64Denormals()">;
-def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
-
-def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;
-def ADDRIndirect : ComplexPattern<iPTR, 2, "SelectADDRIndirect", [], []>;
-
-let OperandType = "OPERAND_IMMEDIATE" in {
-
-def u32imm : Operand<i32> {
-  let PrintMethod = "printU32ImmOperand";
-}
-
-def u16imm : Operand<i16> {
-  let PrintMethod = "printU16ImmOperand";
-}
-
-def u8imm : Operand<i8> {
-  let PrintMethod = "printU8ImmOperand";
-}
-
-} // End OperandType = "OPERAND_IMMEDIATE"
-
-//===--------------------------------------------------------------------===//
-// Custom Operands
-//===--------------------------------------------------------------------===//
-def brtarget   : Operand<OtherVT>;
-
-//===----------------------------------------------------------------------===//
-// PatLeafs for floating-point comparisons
-//===----------------------------------------------------------------------===//
-
-def COND_OEQ : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
->;
-
-def COND_ONE : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
->;
-
-def COND_OGT : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
->;
-
-def COND_OGE : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
->;
-
-def COND_OLT : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
->;
-
-def COND_OLE : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
->;
-
-
-def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
-def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;
-
-//===----------------------------------------------------------------------===//
-// PatLeafs for unsigned / unordered comparisons
-//===----------------------------------------------------------------------===//
-
-def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
-def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
-def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
-def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
-def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
-def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;
-
-// XXX - For some reason R600 version is preferring to use unordered
-// for setne?
-def COND_UNE_NE : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
->;
-
-//===----------------------------------------------------------------------===//
-// PatLeafs for signed comparisons
-//===----------------------------------------------------------------------===//
-
-def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
-def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
-def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
-def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;
-
-//===----------------------------------------------------------------------===//
-// PatLeafs for integer equality
-//===----------------------------------------------------------------------===//
-
-def COND_EQ : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
->;
-
-def COND_NE : PatLeaf <
-  (cond),
-  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
->;
-
-def COND_NULL : PatLeaf <
-  (cond),
-  [{(void)N; return false;}]
->;
-
-//===----------------------------------------------------------------------===//
-// Load/Store Pattern Fragments
-//===----------------------------------------------------------------------===//
-
-class PrivateMemOp <dag ops, dag frag> : PatFrag <ops, frag, [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
-}]>;
-
-class PrivateLoad <SDPatternOperator op> : PrivateMemOp <
-  (ops node:$ptr), (op node:$ptr)
->;
-
-class PrivateStore <SDPatternOperator op> : PrivateMemOp <
-  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
->;
-
-def load_private : PrivateLoad <load>;
-
-def truncstorei8_private : PrivateStore <truncstorei8>;
-def truncstorei16_private : PrivateStore <truncstorei16>;
-def store_private : PrivateStore <store>;
-
-def global_store : PatFrag<(ops node:$val, node:$ptr),
-    (store node:$val, node:$ptr), [{
-        return isGlobalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-// Global address space loads
-def global_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-// Constant address space loads
-def constant_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
-                                              (ld_node node:$ptr), [{
-  LoadSDNode *L = cast<LoadSDNode>(N);
-  return L->getExtensionType() == ISD::ZEXTLOAD ||
-         L->getExtensionType() == ISD::EXTLOAD;
-}]>;
-
-def az_extload : AZExtLoadBase <unindexedload>;
-
-def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
-  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
-}]>;
-
-def az_extloadi8_global : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
-    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi8_global : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
-    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi8_flat : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
-    return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi8_flat : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
-    return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi8_constant : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
-    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def sextloadi8_constant : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
-    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr), [{
-    return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi8_local : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr), [{
-    return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def extloadi8_private : PrivateLoad <az_extloadi8>;
-def sextloadi8_private : PrivateLoad <sextloadi8>;
-
-def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
-  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
-}]>;
-
-def az_extloadi16_global : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
-    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi16_global : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
-    return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi16_flat : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
-    return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi16_flat : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
-    return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi16_constant : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
-    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def sextloadi16_constant : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
-    return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr), [{
-    return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def sextloadi16_local : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr), [{
-    return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def extloadi16_private : PrivateLoad <az_extloadi16>;
-def sextloadi16_private : PrivateLoad <sextloadi16>;
-
-def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
-  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
-}]>;
-
-def az_extloadi32_global : PatFrag<(ops node:$ptr),
-                                   (az_extloadi32 node:$ptr), [{
-  return isGlobalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi32_flat : PatFrag<(ops node:$ptr),
-                                   (az_extloadi32 node:$ptr), [{
-  return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def az_extloadi32_constant : PatFrag<(ops node:$ptr),
-                                     (az_extloadi32 node:$ptr), [{
-  return isConstantLoad(dyn_cast<LoadSDNode>(N), -1);
-}]>;
-
-def truncstorei8_global : PatFrag<(ops node:$val, node:$ptr),
-                                  (truncstorei8 node:$val, node:$ptr), [{
-  return isGlobalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei16_global : PatFrag<(ops node:$val, node:$ptr),
-                                  (truncstorei16 node:$val, node:$ptr), [{
-  return isGlobalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei8_flat : PatFrag<(ops node:$val, node:$ptr),
-                                  (truncstorei8 node:$val, node:$ptr), [{
-  return isFlatStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei16_flat : PatFrag<(ops node:$val, node:$ptr),
-                                  (truncstorei16 node:$val, node:$ptr), [{
-  return isFlatStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def local_store : PatFrag<(ops node:$val, node:$ptr),
-                             (store node:$val, node:$ptr), [{
-  return isLocalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei8_local : PatFrag<(ops node:$val, node:$ptr),
-                                  (truncstorei8 node:$val, node:$ptr), [{
-  return isLocalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def truncstorei16_local : PatFrag<(ops node:$val, node:$ptr),
-                                  (truncstorei16 node:$val, node:$ptr), [{
-  return isLocalStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def local_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-    return isLocalLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
-    return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
-}]>;
-
-def local_load_aligned8bytes : Aligned8Bytes <
-  (ops node:$ptr), (local_load node:$ptr)
->;
-
-def local_store_aligned8bytes : Aligned8Bytes <
-  (ops node:$val, node:$ptr), (local_store node:$val, node:$ptr)
->;
-
-class local_binary_atomic_op<SDNode atomic_op> :
-  PatFrag<(ops node:$ptr, node:$value),
-    (atomic_op node:$ptr, node:$value), [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-}]>;
-
-
-def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
-def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
-def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
-def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
-def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
-def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
-def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
-def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
-def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
-def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
-def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;
-
-def mskor_global : PatFrag<(ops node:$val, node:$ptr),
-                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
-}]>;
-
-multiclass AtomicCmpSwapLocal <SDNode cmp_swap_node> {
-
-  def _32_local : PatFrag <
-    (ops node:$ptr, node:$cmp, node:$swap),
-    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
-      AtomicSDNode *AN = cast<AtomicSDNode>(N);
-      return AN->getMemoryVT() == MVT::i32 &&
-             AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-  }]>;
-
-  def _64_local : PatFrag<
-    (ops node:$ptr, node:$cmp, node:$swap),
-    (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
-      AtomicSDNode *AN = cast<AtomicSDNode>(N);
-      return AN->getMemoryVT() == MVT::i64 &&
-             AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-  }]>;
-}
-
-defm atomic_cmp_swap : AtomicCmpSwapLocal <atomic_cmp_swap>;
-
-def flat_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-    return isFlatLoad(dyn_cast<LoadSDNode>(N));
-}]>;
-
-def flat_store : PatFrag<(ops node:$val, node:$ptr),
-                         (store node:$val, node:$ptr), [{
-  return isFlatStore(dyn_cast<StoreSDNode>(N));
-}]>;
-
-def mskor_flat : PatFrag<(ops node:$val, node:$ptr),
-                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
-}]>;
-
-class global_binary_atomic_op<SDNode atomic_op> : PatFrag<
-  (ops node:$ptr, node:$value),
-  (atomic_op node:$ptr, node:$value),
-  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]
->;
-
-def atomic_swap_global : global_binary_atomic_op<atomic_swap>;
-def atomic_add_global : global_binary_atomic_op<atomic_load_add>;
-def atomic_and_global : global_binary_atomic_op<atomic_load_and>;
-def atomic_max_global : global_binary_atomic_op<atomic_load_max>;
-def atomic_min_global : global_binary_atomic_op<atomic_load_min>;
-def atomic_or_global : global_binary_atomic_op<atomic_load_or>;
-def atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
-def atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
-def atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
-def atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;
-
-//===----------------------------------------------------------------------===//
-// Misc Pattern Fragments
-//===----------------------------------------------------------------------===//
-
-class Constants {
-int TWO_PI = 0x40c90fdb;
-int PI = 0x40490fdb;
-int TWO_PI_INV = 0x3e22f983;
-int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
-int FP32_NEG_ONE = 0xbf800000;
-int FP32_ONE = 0x3f800000;
-}
-def CONST : Constants;
-
-def FP_ZERO : PatLeaf <
-  (fpimm),
-  [{return N->getValueAPF().isZero();}]
->;
-
-def FP_ONE : PatLeaf <
-  (fpimm),
-  [{return N->isExactlyValue(1.0);}]
->;
-
-def FP_HALF : PatLeaf <
-  (fpimm),
-  [{return N->isExactlyValue(0.5);}]
->;
-
-let isCodeGenOnly = 1, isPseudo = 1 in {
-
-let usesCustomInserter = 1  in {
-
-class CLAMP <RegisterClass rc> : AMDGPUShaderInst <
-  (outs rc:$dst),
-  (ins rc:$src0),
-  "CLAMP $dst, $src0",
-  [(set f32:$dst, (AMDGPUclamp f32:$src0, (f32 FP_ZERO), (f32 FP_ONE)))]
->;
-
-class FABS <RegisterClass rc> : AMDGPUShaderInst <
-  (outs rc:$dst),
-  (ins rc:$src0),
-  "FABS $dst, $src0",
-  [(set f32:$dst, (fabs f32:$src0))]
->;
-
-class FNEG <RegisterClass rc> : AMDGPUShaderInst <
-  (outs rc:$dst),
-  (ins rc:$src0),
-  "FNEG $dst, $src0",
-  [(set f32:$dst, (fneg f32:$src0))]
->;
-
-} // usesCustomInserter = 1
-
-multiclass RegisterLoadStore <RegisterClass dstClass, Operand addrClass,
-                    ComplexPattern addrPat> {
-let UseNamedOperandTable = 1 in {
-
-  def RegisterLoad : AMDGPUShaderInst <
-    (outs dstClass:$dst),
-    (ins addrClass:$addr, i32imm:$chan),
-    "RegisterLoad $dst, $addr",
-    [(set i32:$dst, (AMDGPUregister_load addrPat:$addr, (i32 timm:$chan)))]
-  > {
-    let isRegisterLoad = 1;
-  }
-
-  def RegisterStore : AMDGPUShaderInst <
-    (outs),
-    (ins dstClass:$val, addrClass:$addr, i32imm:$chan),
-    "RegisterStore $val, $addr",
-    [(AMDGPUregister_store i32:$val, addrPat:$addr, (i32 timm:$chan))]
-  > {
-    let isRegisterStore = 1;
-  }
-}
-}
-
-} // End isCodeGenOnly = 1, isPseudo = 1
-
-/* Generic helper patterns for intrinsics */
-/* -------------------------------------- */
-
-class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
-  : Pat <
-  (fpow f32:$src0, f32:$src1),
-  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
->;
-
-/* Other helper patterns */
-/* --------------------- */
-
-/* Extract element pattern */
-class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
-                       SubRegIndex sub_reg>
-  : Pat<
-  (sub_type (vector_extract vec_type:$src, sub_idx)),
-  (EXTRACT_SUBREG $src, sub_reg)
->;
-
-/* Insert element pattern */
-class Insert_Element <ValueType elem_type, ValueType vec_type,
-                      int sub_idx, SubRegIndex sub_reg>
-  : Pat <
-  (vector_insert vec_type:$vec, elem_type:$elem, sub_idx),
-  (INSERT_SUBREG $vec, $elem, sub_reg)
->;
-
-// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
-// can handle COPY instructions.
-// bitconvert pattern
-class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : Pat <
-  (dt (bitconvert (st rc:$src0))),
-  (dt rc:$src0)
->;
-
-// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
-// can handle COPY instructions.
-class DwordAddrPat<ValueType vt, RegisterClass rc> : Pat <
-  (vt (AMDGPUdwordaddr (vt rc:$addr))),
-  (vt rc:$addr)
->;
-
-// BFI_INT patterns
-
-multiclass BFIPatterns <Instruction BFI_INT,
-                        Instruction LoadImm32,
-                        RegisterClass RC64> {
-  // Definition from ISA doc:
-  // (y & x) | (z & ~x)
-  def : Pat <
-    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
-    (BFI_INT $x, $y, $z)
-  >;
-
-  // SHA-256 Ch function
-  // z ^ (x & (y ^ z))
-  def : Pat <
-    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
-    (BFI_INT $x, $y, $z)
-  >;
-
-  def : Pat <
-    (fcopysign f32:$src0, f32:$src1),
-    (BFI_INT (LoadImm32 0x7fffffff), $src0, $src1)
-  >;
-
-  def : Pat <
-    (f64 (fcopysign f64:$src0, f64:$src1)),
-    (REG_SEQUENCE RC64,
-      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
-      (BFI_INT (LoadImm32 0x7fffffff),
-               (i32 (EXTRACT_SUBREG $src0, sub1)),
-               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
-  >;
-}
-
-// SHA-256 Ma patterns
-
-// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
-class SHA256MaPattern <Instruction BFI_INT, Instruction XOR> : Pat <
-  (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
-  (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
->;
-
-// Bitfield extract patterns
-
-def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
-  return isMask_32(N->getZExtValue());
-}]>;
-
-def IMMPopCount : SDNodeXForm<imm, [{
-  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
-                                   MVT::i32);
-}]>;
-
-class BFEPattern <Instruction BFE, Instruction MOV> : Pat <
-  (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
-  (BFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
->;
-
-// rotr pattern
-class ROTRPattern <Instruction BIT_ALIGN> : Pat <
-  (rotr i32:$src0, i32:$src1),
-  (BIT_ALIGN $src0, $src0, $src1)
->;
-
-// 24-bit arithmetic patterns
-def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;
-
-// Special conversion patterns
-
-def cvt_rpi_i32_f32 : PatFrag <
-  (ops node:$src),
-  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
-  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
->;
-
-def cvt_flr_i32_f32 : PatFrag <
-  (ops node:$src),
-  (fp_to_sint (ffloor $src)),
-  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
->;
-
-/*
-class UMUL24Pattern <Instruction UMUL24> : Pat <
-  (mul U24:$x, U24:$y),
-  (UMUL24 $x, $y)
->;
-*/
-
-class IMad24Pat<Instruction Inst> : Pat <
-  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
-  (Inst $src0, $src1, $src2)
->;
-
-class UMad24Pat<Instruction Inst> : Pat <
-  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
-  (Inst $src0, $src1, $src2)
->;
-
-multiclass Expand24IBitOps<Instruction MulInst, Instruction AddInst> {
-  def _expand_imad24 : Pat <
-    (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2),
-    (AddInst (MulInst $src0, $src1), $src2)
-  >;
-
-  def _expand_imul24 : Pat <
-    (AMDGPUmul_i24 i32:$src0, i32:$src1),
-    (MulInst $src0, $src1)
-  >;
-}
-
-multiclass Expand24UBitOps<Instruction MulInst, Instruction AddInst> {
-  def _expand_umad24 : Pat <
-    (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2),
-    (AddInst (MulInst $src0, $src1), $src2)
-  >;
-
-  def _expand_umul24 : Pat <
-    (AMDGPUmul_u24 i32:$src0, i32:$src1),
-    (MulInst $src0, $src1)
-  >;
-}
-
-class RcpPat<Instruction RcpInst, ValueType vt> : Pat <
-  (fdiv FP_ONE, vt:$src),
-  (RcpInst $src)
->;
-
-class RsqPat<Instruction RsqInst, ValueType vt> : Pat <
-  (AMDGPUrcp (fsqrt vt:$src)),
-  (RsqInst $src)
->;
-
-include "R600Instructions.td"
-include "R700Instructions.td"
-include "EvergreenInstructions.td"
-include "CaymanInstructions.td"
-
-include "SIInstrInfo.td"
-

Removed: llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.cpp (removed)
@@ -1,77 +0,0 @@
-//===- AMDGPUIntrinsicInfo.cpp - AMDGPU Intrinsic Information ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief AMDGPU Implementation of the IntrinsicInfo class.
-//
-//===-----------------------------------------------------------------------===//
-
-#include "AMDGPUIntrinsicInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "llvm/IR/DerivedTypes.h"
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/Module.h"
-
-using namespace llvm;
-
-#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN
-
-AMDGPUIntrinsicInfo::AMDGPUIntrinsicInfo()
-    : TargetIntrinsicInfo() {}
-
-std::string AMDGPUIntrinsicInfo::getName(unsigned IntrID, Type **Tys,
-                                         unsigned numTys) const {
-  static const char *const names[] = {
-#define GET_INTRINSIC_NAME_TABLE
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_INTRINSIC_NAME_TABLE
-  };
-
-  if (IntrID < Intrinsic::num_intrinsics) {
-    return nullptr;
-  }
-  assert(IntrID < AMDGPUIntrinsic::num_AMDGPU_intrinsics &&
-         "Invalid intrinsic ID");
-
-  std::string Result(names[IntrID - Intrinsic::num_intrinsics]);
-  return Result;
-}
-
-unsigned AMDGPUIntrinsicInfo::lookupName(const char *Name,
-                                         unsigned Len) const {
-  if (!StringRef(Name, Len).startswith("llvm."))
-    return 0; // All intrinsics start with 'llvm.'
-
-#define GET_FUNCTION_RECOGNIZER
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_FUNCTION_RECOGNIZER
-  AMDGPUIntrinsic::ID IntrinsicID =
-      (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic;
-  IntrinsicID = getIntrinsicForGCCBuiltin("AMDGPU", Name);
-
-  if (IntrinsicID != (AMDGPUIntrinsic::ID)Intrinsic::not_intrinsic) {
-    return IntrinsicID;
-  }
-  return 0;
-}
-
-bool AMDGPUIntrinsicInfo::isOverloaded(unsigned id) const {
-// Overload Table
-#define GET_INTRINSIC_OVERLOAD_TABLE
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_INTRINSIC_OVERLOAD_TABLE
-}
-
-Function *AMDGPUIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
-                                              Type **Tys,
-                                              unsigned numTys) const {
-  llvm_unreachable("Not implemented");
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUIntrinsicInfo.h (removed)
@@ -1,48 +0,0 @@
-//===- AMDGPUIntrinsicInfo.h - AMDGPU Intrinsic Information ------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Interface for the AMDGPU Implementation of the Intrinsic Info class.
-//
-//===-----------------------------------------------------------------------===//
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUINTRINSICINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUINTRINSICINFO_H
-
-#include "llvm/IR/Intrinsics.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
-
-namespace llvm {
-class TargetMachine;
-
-namespace AMDGPUIntrinsic {
-enum ID {
-  last_non_AMDGPU_intrinsic = Intrinsic::num_intrinsics - 1,
-#define GET_INTRINSIC_ENUM_VALUES
-#include "AMDGPUGenIntrinsics.inc"
-#undef GET_INTRINSIC_ENUM_VALUES
-      , num_AMDGPU_intrinsics
-};
-
-} // end namespace AMDGPUIntrinsic
-
-class AMDGPUIntrinsicInfo : public TargetIntrinsicInfo {
-public:
-  AMDGPUIntrinsicInfo();
-  std::string getName(unsigned IntrId, Type **Tys = nullptr,
-                      unsigned numTys = 0) const override;
-  unsigned lookupName(const char *Name, unsigned Len) const override;
-  bool isOverloaded(unsigned IID) const override;
-  Function *getDeclaration(Module *M, unsigned ID,
-                           Type **Tys = nullptr,
-                           unsigned numTys = 0) const override;
-};
-
-} // end namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUIntrinsics.td (removed)
@@ -1,90 +0,0 @@
-//===-- AMDGPUIntrinsics.td - Common intrinsics  -*- tablegen -*-----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines intrinsics that are used by all hw codegen targets.
-//
-//===----------------------------------------------------------------------===//
-
-let TargetPrefix = "AMDGPU", isTarget = 1 in {
-
-  def int_AMDGPU_store_output : Intrinsic<[], [llvm_float_ty, llvm_i32_ty], []>;
-  def int_AMDGPU_swizzle : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_abs : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_arl : Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_cndlt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_div : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_fract : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-  def int_AMDGPU_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
-
-  // This is named backwards (instead of rsq_legacy) so we don't have
-  // to define it with the public builtins intrinsics. This is a
-  // workaround for how intrinsic names are parsed. If the name is
-  // llvm.AMDGPU.rsq.legacy, the parser assumes that you meant
-  // llvm.AMDGPU.rsq.{f32 | f64} and incorrectly mangled the name.
-  def int_AMDGPU_legacy_rsq : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
-
-  def int_AMDGPU_dp4 : Intrinsic<[llvm_float_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
-  def int_AMDGPU_kill : Intrinsic<[], [llvm_float_ty], []>;
-  def int_AMDGPU_kilp : Intrinsic<[], [], []>;
-  def int_AMDGPU_lrp : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_mul : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_pow : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_seq : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_sgt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_sge : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_sle : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_sne : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_mullit : Intrinsic<[llvm_v4f32_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_tex : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_txb : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_txf : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_txq : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_txd : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_txl : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_trunc : Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_ddx : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_ddy : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_imax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_imin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_umax : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_umin : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_umul24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_imul24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_imad24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_umad24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_cvt_f32_ubyte0 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_cvt_f32_ubyte1 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_cvt_f32_ubyte2 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_cvt_f32_ubyte3 : Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_cube : Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
-  def int_AMDGPU_bfi : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_bfe_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_bfe_u32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_bfm : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_brev : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_flbit_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
-  def int_AMDGPU_barrier_local  : Intrinsic<[], [], []>;
-  def int_AMDGPU_barrier_global  : Intrinsic<[], [], []>;
-}
-
-// Legacy names for compatibility.
-let TargetPrefix = "AMDIL", isTarget = 1 in {
-  def int_AMDIL_abs : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-  def int_AMDIL_fraction : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-  def int_AMDIL_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
-  def int_AMDIL_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-  def int_AMDIL_round_nearest : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
-}
-
-let TargetPrefix = "TGSI", isTarget = 1 in {
-
-  def int_TGSI_lit_z : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],[IntrNoMem]>;
-}
-
-include "SIIntrinsics.td"

Removed: llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.cpp (removed)
@@ -1,154 +0,0 @@
-//===- AMDGPUMCInstLower.cpp - Lower AMDGPU MachineInstr to an MCInst -----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Code to lower AMDGPU MachineInstrs to their corresponding MCInst.
-//
-//===----------------------------------------------------------------------===//
-//
-
-#include "AMDGPUMCInstLower.h"
-#include "AMDGPUAsmPrinter.h"
-#include "AMDGPUTargetMachine.h"
-#include "InstPrinter/AMDGPUInstPrinter.h"
-#include "R600InstrInfo.h"
-#include "SIInstrInfo.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCObjectStreamer.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
-#include <algorithm>
-
-using namespace llvm;
-
-AMDGPUMCInstLower::AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &st):
-  Ctx(ctx), ST(st)
-{ }
-
-void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
-
-  int MCOpcode = ST.getInstrInfo()->pseudoToMCOpcode(MI->getOpcode());
-
-  if (MCOpcode == -1) {
-    LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
-    C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
-                "a target-specific version: " + Twine(MI->getOpcode()));
-  }
-
-  OutMI.setOpcode(MCOpcode);
-
-  for (const MachineOperand &MO : MI->explicit_operands()) {
-    MCOperand MCOp;
-    switch (MO.getType()) {
-    default:
-      llvm_unreachable("unknown operand type");
-    case MachineOperand::MO_Immediate:
-      MCOp = MCOperand::createImm(MO.getImm());
-      break;
-    case MachineOperand::MO_Register:
-      MCOp = MCOperand::createReg(MO.getReg());
-      break;
-    case MachineOperand::MO_MachineBasicBlock:
-      MCOp = MCOperand::createExpr(MCSymbolRefExpr::create(
-                                   MO.getMBB()->getSymbol(), Ctx));
-      break;
-    case MachineOperand::MO_GlobalAddress: {
-      const GlobalValue *GV = MO.getGlobal();
-      MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(GV->getName()));
-      MCOp = MCOperand::createExpr(MCSymbolRefExpr::create(Sym, Ctx));
-      break;
-    }
-    case MachineOperand::MO_TargetIndex: {
-      assert(MO.getIndex() == AMDGPU::TI_CONSTDATA_START);
-      MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(END_OF_TEXT_LABEL_NAME));
-      const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
-      MCOp = MCOperand::createExpr(Expr);
-      break;
-    }
-    case MachineOperand::MO_ExternalSymbol: {
-      MCSymbol *Sym = Ctx.getOrCreateSymbol(StringRef(MO.getSymbolName()));
-      const MCSymbolRefExpr *Expr = MCSymbolRefExpr::create(Sym, Ctx);
-      MCOp = MCOperand::createExpr(Expr);
-      break;
-    }
-    }
-    OutMI.addOperand(MCOp);
-  }
-}
-
-void AMDGPUAsmPrinter::EmitInstruction(const MachineInstr *MI) {
-  const AMDGPUSubtarget &STI = MF->getSubtarget<AMDGPUSubtarget>();
-  AMDGPUMCInstLower MCInstLowering(OutContext, STI);
-
-#ifdef _DEBUG
-  StringRef Err;
-  if (!STI.getInstrInfo()->verifyInstruction(MI, Err)) {
-    errs() << "Warning: Illegal instruction detected: " << Err << "\n";
-    MI->dump();
-  }
-#endif
-  if (MI->isBundle()) {
-    const MachineBasicBlock *MBB = MI->getParent();
-    MachineBasicBlock::const_instr_iterator I = MI;
-    ++I;
-    while (I != MBB->end() && I->isInsideBundle()) {
-      EmitInstruction(I);
-      ++I;
-    }
-  } else {
-    MCInst TmpInst;
-    MCInstLowering.lower(MI, TmpInst);
-    EmitToStreamer(*OutStreamer, TmpInst);
-
-    if (STI.dumpCode()) {
-      // Disassemble instruction/operands to text.
-      DisasmLines.resize(DisasmLines.size() + 1);
-      std::string &DisasmLine = DisasmLines.back();
-      raw_string_ostream DisasmStream(DisasmLine);
-
-      AMDGPUInstPrinter InstPrinter(*TM.getMCAsmInfo(),
-                                    *MF->getSubtarget().getInstrInfo(),
-                                    *MF->getSubtarget().getRegisterInfo());
-      InstPrinter.printInst(&TmpInst, DisasmStream, StringRef(),
-                            MF->getSubtarget());
-
-      // Disassemble instruction/operands to hex representation.
-      SmallVector<MCFixup, 4> Fixups;
-      SmallVector<char, 16> CodeBytes;
-      raw_svector_ostream CodeStream(CodeBytes);
-
-      auto &ObjStreamer = static_cast<MCObjectStreamer&>(*OutStreamer);
-      MCCodeEmitter &InstEmitter = ObjStreamer.getAssembler().getEmitter();
-      InstEmitter.encodeInstruction(TmpInst, CodeStream, Fixups,
-                                    MF->getSubtarget<MCSubtargetInfo>());
-      CodeStream.flush();
-
-      HexLines.resize(HexLines.size() + 1);
-      std::string &HexLine = HexLines.back();
-      raw_string_ostream HexStream(HexLine);
-
-      for (size_t i = 0; i < CodeBytes.size(); i += 4) {
-        unsigned int CodeDWord = *(unsigned int *)&CodeBytes[i];
-        HexStream << format("%s%08X", (i > 0 ? " " : ""), CodeDWord);
-      }
-
-      DisasmStream.flush();
-      DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLine.size());
-    }
-  }
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUMCInstLower.h (removed)
@@ -1,35 +0,0 @@
-//===- AMDGPUMCInstLower.h MachineInstr Lowering Interface ------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUMCINSTLOWER_H
-#define LLVM_LIB_TARGET_R600_AMDGPUMCINSTLOWER_H
-
-namespace llvm {
-
-class AMDGPUSubtarget;
-class MachineInstr;
-class MCContext;
-class MCInst;
-
-class AMDGPUMCInstLower {
-  MCContext &Ctx;
-  const AMDGPUSubtarget &ST;
-
-public:
-  AMDGPUMCInstLower(MCContext &ctx, const AMDGPUSubtarget &ST);
-
-  /// \brief Lower a MachineInstr to an MCInst
-  void lower(const MachineInstr *MI, MCInst &OutMI) const;
-
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.cpp (removed)
@@ -1,25 +0,0 @@
-#include "AMDGPUMachineFunction.h"
-#include "AMDGPU.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/Function.h"
-using namespace llvm;
-
-static const char *const ShaderTypeAttribute = "ShaderType";
-
-// Pin the vtable to this file.
-void AMDGPUMachineFunction::anchor() {}
-
-AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
-  MachineFunctionInfo(),
-  ShaderType(ShaderType::COMPUTE),
-  LDSSize(0),
-  ScratchSize(0),
-  IsKernel(true) {
-  Attribute A = MF.getFunction()->getFnAttribute(ShaderTypeAttribute);
-
-  if (A.isStringAttribute()) {
-    StringRef Str = A.getValueAsString();
-    if (Str.getAsInteger(0, ShaderType))
-      llvm_unreachable("Can't parse shader type!");
-  }
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUMachineFunction.h (removed)
@@ -1,45 +0,0 @@
-//===-- R600MachineFunctionInfo.h - R600 Machine Function Info ----*- C++ -*-=//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUMACHINEFUNCTION_H
-#define LLVM_LIB_TARGET_R600_AMDGPUMACHINEFUNCTION_H
-
-#include "llvm/CodeGen/MachineFunction.h"
-#include <map>
-
-namespace llvm {
-
-class AMDGPUMachineFunction : public MachineFunctionInfo {
-  virtual void anchor();
-  unsigned ShaderType;
-
-public:
-  AMDGPUMachineFunction(const MachineFunction &MF);
-  /// A map to keep track of local memory objects and their offsets within
-  /// the local memory space.
-  std::map<const GlobalValue *, unsigned> LocalMemoryObjects;
-  /// Number of bytes in the LDS that are being used.
-  unsigned LDSSize;
-
-  /// Start of implicit kernel args
-  unsigned ABIArgOffset;
-
-  unsigned getShaderType() const {
-    return ShaderType;
-  }
-
-  unsigned ScratchSize;
-  bool IsKernel;
-};
-
-}
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUPromoteAlloca.cpp (removed)
@@ -1,407 +0,0 @@
-//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass eliminates allocas by either converting them into vectors or
-// by migrating them to local address space.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/InstVisitor.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-#define DEBUG_TYPE "amdgpu-promote-alloca"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUPromoteAlloca : public FunctionPass,
-                       public InstVisitor<AMDGPUPromoteAlloca> {
-
-  static char ID;
-  Module *Mod;
-  const AMDGPUSubtarget &ST;
-  int LocalMemAvailable;
-
-public:
-  AMDGPUPromoteAlloca(const AMDGPUSubtarget &st) : FunctionPass(ID), ST(st),
-                                                   LocalMemAvailable(0) { }
-  bool doInitialization(Module &M) override;
-  bool runOnFunction(Function &F) override;
-  const char *getPassName() const override { return "AMDGPU Promote Alloca"; }
-  void visitAlloca(AllocaInst &I);
-};
-
-} // End anonymous namespace
-
-char AMDGPUPromoteAlloca::ID = 0;
-
-bool AMDGPUPromoteAlloca::doInitialization(Module &M) {
-  Mod = &M;
-  return false;
-}
-
-bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
-
-  const FunctionType *FTy = F.getFunctionType();
-
-  LocalMemAvailable = ST.getLocalMemorySize();
-
-
-  // If the function has any arguments in the local address space, then it's
-  // possible these arguments require the entire local memory space, so
-  // we cannot use local memory in the pass.
-  for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
-    const Type *ParamTy = FTy->getParamType(i);
-    if (ParamTy->isPointerTy() &&
-        ParamTy->getPointerAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
-      LocalMemAvailable = 0;
-      DEBUG(dbgs() << "Function has local memory argument.  Promoting to "
-                      "local memory disabled.\n");
-      break;
-    }
-  }
-
-  if (LocalMemAvailable > 0) {
-    // Check how much local memory is being used by global objects
-    for (Module::global_iterator I = Mod->global_begin(),
-                                 E = Mod->global_end(); I != E; ++I) {
-      GlobalVariable *GV = I;
-      PointerType *GVTy = GV->getType();
-      if (GVTy->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
-        continue;
-      for (Value::use_iterator U = GV->use_begin(),
-                               UE = GV->use_end(); U != UE; ++U) {
-        Instruction *Use = dyn_cast<Instruction>(*U);
-        if (!Use)
-          continue;
-        if (Use->getParent()->getParent() == &F)
-          LocalMemAvailable -=
-              Mod->getDataLayout().getTypeAllocSize(GVTy->getElementType());
-      }
-    }
-  }
-
-  LocalMemAvailable = std::max(0, LocalMemAvailable);
-  DEBUG(dbgs() << LocalMemAvailable << "bytes free in local memory.\n");
-
-  visit(F);
-
-  return false;
-}
-
-static VectorType *arrayTypeToVecType(const Type *ArrayTy) {
-  return VectorType::get(ArrayTy->getArrayElementType(),
-                         ArrayTy->getArrayNumElements());
-}
-
-static Value *
-calculateVectorIndex(Value *Ptr,
-                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
-  if (isa<AllocaInst>(Ptr))
-    return Constant::getNullValue(Type::getInt32Ty(Ptr->getContext()));
-
-  GetElementPtrInst *GEP = cast<GetElementPtrInst>(Ptr);
-
-  auto I = GEPIdx.find(GEP);
-  return I == GEPIdx.end() ? nullptr : I->second;
-}
-
-static Value* GEPToVectorIndex(GetElementPtrInst *GEP) {
-  // FIXME we only support simple cases
-  if (GEP->getNumOperands() != 3)
-    return NULL;
-
-  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
-  if (!I0 || !I0->isZero())
-    return NULL;
-
-  return GEP->getOperand(2);
-}
-
-// Not an instruction handled below to turn into a vector.
-//
-// TODO: Check isTriviallyVectorizable for calls and handle other
-// instructions.
-static bool canVectorizeInst(Instruction *Inst) {
-  switch (Inst->getOpcode()) {
-  case Instruction::Load:
-  case Instruction::Store:
-  case Instruction::BitCast:
-  case Instruction::AddrSpaceCast:
-    return true;
-  default:
-    return false;
-  }
-}
-
-static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
-  Type *AllocaTy = Alloca->getAllocatedType();
-
-  DEBUG(dbgs() << "Alloca Candidate for vectorization \n");
-
-  // FIXME: There is no reason why we can't support larger arrays, we
-  // are just being conservative for now.
-  if (!AllocaTy->isArrayTy() ||
-      AllocaTy->getArrayElementType()->isVectorTy() ||
-      AllocaTy->getArrayNumElements() > 4) {
-
-    DEBUG(dbgs() << "  Cannot convert type to vector");
-    return false;
-  }
-
-  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
-  std::vector<Value*> WorkList;
-  for (User *AllocaUser : Alloca->users()) {
-    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
-    if (!GEP) {
-      if (!canVectorizeInst(cast<Instruction>(AllocaUser)))
-        return false;
-
-      WorkList.push_back(AllocaUser);
-      continue;
-    }
-
-    Value *Index = GEPToVectorIndex(GEP);
-
-    // If we can't compute a vector index from this GEP, then we can't
-    // promote this alloca to vector.
-    if (!Index) {
-      DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP << '\n');
-      return false;
-    }
-
-    GEPVectorIdx[GEP] = Index;
-    for (User *GEPUser : AllocaUser->users()) {
-      if (!canVectorizeInst(cast<Instruction>(GEPUser)))
-        return false;
-
-      WorkList.push_back(GEPUser);
-    }
-  }
-
-  VectorType *VectorTy = arrayTypeToVecType(AllocaTy);
-
-  DEBUG(dbgs() << "  Converting alloca to vector "
-        << *AllocaTy << " -> " << *VectorTy << '\n');
-
-  for (std::vector<Value*>::iterator I = WorkList.begin(),
-                                     E = WorkList.end(); I != E; ++I) {
-    Instruction *Inst = cast<Instruction>(*I);
-    IRBuilder<> Builder(Inst);
-    switch (Inst->getOpcode()) {
-    case Instruction::Load: {
-      Value *Ptr = Inst->getOperand(0);
-      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
-      Value *BitCast = Builder.CreateBitCast(Alloca, VectorTy->getPointerTo(0));
-      Value *VecValue = Builder.CreateLoad(BitCast);
-      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
-      Inst->replaceAllUsesWith(ExtractElement);
-      Inst->eraseFromParent();
-      break;
-    }
-    case Instruction::Store: {
-      Value *Ptr = Inst->getOperand(1);
-      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
-      Value *BitCast = Builder.CreateBitCast(Alloca, VectorTy->getPointerTo(0));
-      Value *VecValue = Builder.CreateLoad(BitCast);
-      Value *NewVecValue = Builder.CreateInsertElement(VecValue,
-                                                       Inst->getOperand(0),
-                                                       Index);
-      Builder.CreateStore(NewVecValue, BitCast);
-      Inst->eraseFromParent();
-      break;
-    }
-    case Instruction::BitCast:
-    case Instruction::AddrSpaceCast:
-      break;
-
-    default:
-      Inst->dump();
-      llvm_unreachable("Inconsistency in instructions promotable to vector");
-    }
-  }
-  return true;
-}
-
-static bool collectUsesWithPtrTypes(Value *Val, std::vector<Value*> &WorkList) {
-  bool Success = true;
-  for (User *User : Val->users()) {
-    if(std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
-      continue;
-    if (isa<CallInst>(User)) {
-      WorkList.push_back(User);
-      continue;
-    }
-
-    // FIXME: Correctly handle ptrtoint instructions.
-    Instruction *UseInst = dyn_cast<Instruction>(User);
-    if (UseInst && UseInst->getOpcode() == Instruction::PtrToInt)
-      return false;
-
-    if (!User->getType()->isPointerTy())
-      continue;
-
-    WorkList.push_back(User);
-
-    Success &= collectUsesWithPtrTypes(User, WorkList);
-  }
-  return Success;
-}
-
-void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
-  IRBuilder<> Builder(&I);
-
-  // First try to replace the alloca with a vector
-  Type *AllocaTy = I.getAllocatedType();
-
-  DEBUG(dbgs() << "Trying to promote " << I << '\n');
-
-  if (tryPromoteAllocaToVector(&I))
-    return;
-
-  DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");
-
-  // FIXME: This is the maximum work group size.  We should try to get
-  // value from the reqd_work_group_size function attribute if it is
-  // available.
-  unsigned WorkGroupSize = 256;
-  int AllocaSize =
-      WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);
-
-  if (AllocaSize > LocalMemAvailable) {
-    DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
-    return;
-  }
-
-  std::vector<Value*> WorkList;
-
-  if (!collectUsesWithPtrTypes(&I, WorkList)) {
-    DEBUG(dbgs() << " Do not know how to convert all uses\n");
-    return;
-  }
-
-  DEBUG(dbgs() << "Promoting alloca to local memory\n");
-  LocalMemAvailable -= AllocaSize;
-
-  Type *GVTy = ArrayType::get(I.getAllocatedType(), 256);
-  GlobalVariable *GV = new GlobalVariable(
-      *Mod, GVTy, false, GlobalValue::ExternalLinkage, 0, I.getName(), 0,
-      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
-
-  FunctionType *FTy = FunctionType::get(
-      Type::getInt32Ty(Mod->getContext()), false);
-  AttributeSet AttrSet;
-  AttrSet.addAttribute(Mod->getContext(), 0, Attribute::ReadNone);
-
-  Value *ReadLocalSizeY = Mod->getOrInsertFunction(
-      "llvm.r600.read.local.size.y", FTy, AttrSet);
-  Value *ReadLocalSizeZ = Mod->getOrInsertFunction(
-      "llvm.r600.read.local.size.z", FTy, AttrSet);
-  Value *ReadTIDIGX = Mod->getOrInsertFunction(
-      "llvm.r600.read.tidig.x", FTy, AttrSet);
-  Value *ReadTIDIGY = Mod->getOrInsertFunction(
-      "llvm.r600.read.tidig.y", FTy, AttrSet);
-  Value *ReadTIDIGZ = Mod->getOrInsertFunction(
-      "llvm.r600.read.tidig.z", FTy, AttrSet);
-
-  Value *TCntY = Builder.CreateCall(ReadLocalSizeY, {});
-  Value *TCntZ = Builder.CreateCall(ReadLocalSizeZ, {});
-  Value *TIdX = Builder.CreateCall(ReadTIDIGX, {});
-  Value *TIdY = Builder.CreateCall(ReadTIDIGY, {});
-  Value *TIdZ = Builder.CreateCall(ReadTIDIGZ, {});
-
-  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ);
-  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
-  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ);
-  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
-  TID = Builder.CreateAdd(TID, TIdZ);
-
-  std::vector<Value*> Indices;
-  Indices.push_back(Constant::getNullValue(Type::getInt32Ty(Mod->getContext())));
-  Indices.push_back(TID);
-
-  Value *Offset = Builder.CreateGEP(GVTy, GV, Indices);
-  I.mutateType(Offset->getType());
-  I.replaceAllUsesWith(Offset);
-  I.eraseFromParent();
-
-  for (std::vector<Value*>::iterator i = WorkList.begin(),
-                                     e = WorkList.end(); i != e; ++i) {
-    Value *V = *i;
-    CallInst *Call = dyn_cast<CallInst>(V);
-    if (!Call) {
-      Type *EltTy = V->getType()->getPointerElementType();
-      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);
-
-      // The operand's value should be corrected on its own.
-      if (isa<AddrSpaceCastInst>(V))
-        continue;
-
-      // FIXME: It doesn't really make sense to try to do this for all
-      // instructions.
-      V->mutateType(NewTy);
-      continue;
-    }
-
-    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
-    if (!Intr) {
-      std::vector<Type*> ArgTypes;
-      for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
-                                ArgIdx != ArgEnd; ++ArgIdx) {
-        ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
-      }
-      Function *F = Call->getCalledFunction();
-      FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
-                                                F->isVarArg());
-      Constant *C = Mod->getOrInsertFunction((F->getName() + ".local").str(),
-                                             NewType, F->getAttributes());
-      Function *NewF = cast<Function>(C);
-      Call->setCalledFunction(NewF);
-      continue;
-    }
-
-    Builder.SetInsertPoint(Intr);
-    switch (Intr->getIntrinsicID()) {
-    case Intrinsic::lifetime_start:
-    case Intrinsic::lifetime_end:
-      // These intrinsics are for address space 0 only
-      Intr->eraseFromParent();
-      continue;
-    case Intrinsic::memcpy: {
-      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
-      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
-                           MemCpy->getLength(), MemCpy->getAlignment(),
-                           MemCpy->isVolatile());
-      Intr->eraseFromParent();
-      continue;
-    }
-    case Intrinsic::memset: {
-      MemSetInst *MemSet = cast<MemSetInst>(Intr);
-      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
-                           MemSet->getLength(), MemSet->getAlignment(),
-                           MemSet->isVolatile());
-      Intr->eraseFromParent();
-      continue;
-    }
-    default:
-      Intr->dump();
-      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
-    }
-  }
-}
-
-FunctionPass *llvm::createAMDGPUPromoteAlloca(const AMDGPUSubtarget &ST) {
-  return new AMDGPUPromoteAlloca(ST);
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.cpp (removed)
@@ -1,63 +0,0 @@
-//===-- AMDGPURegisterInfo.cpp - AMDGPU Register Information -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Parent TargetRegisterInfo class common to all hw codegen targets.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPURegisterInfo.h"
-#include "AMDGPUTargetMachine.h"
-
-using namespace llvm;
-
-AMDGPURegisterInfo::AMDGPURegisterInfo() : AMDGPUGenRegisterInfo(0) {}
-
-//===----------------------------------------------------------------------===//
-// Function handling callbacks - Functions are a seldom used feature of GPUS, so
-// they are not supported at this time.
-//===----------------------------------------------------------------------===//
-
-const MCPhysReg AMDGPURegisterInfo::CalleeSavedReg = AMDGPU::NoRegister;
-
-const MCPhysReg*
-AMDGPURegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
-  return &CalleeSavedReg;
-}
-
-void AMDGPURegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
-                                             int SPAdj,
-                                             unsigned FIOperandNum,
-                                             RegScavenger *RS) const {
-  llvm_unreachable("Subroutines not supported yet");
-}
-
-unsigned AMDGPURegisterInfo::getFrameRegister(const MachineFunction &MF) const {
-  return AMDGPU::NoRegister;
-}
-
-unsigned AMDGPURegisterInfo::getSubRegFromChannel(unsigned Channel) const {
-  static const unsigned SubRegs[] = {
-    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,
-    AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, AMDGPU::sub8, AMDGPU::sub9,
-    AMDGPU::sub10, AMDGPU::sub11, AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14,
-    AMDGPU::sub15
-  };
-
-  assert(Channel < array_lengthof(SubRegs));
-  return SubRegs[Channel];
-}
-
-unsigned AMDGPURegisterInfo::getIndirectSubReg(unsigned IndirectIndex) const {
-
-  return getSubRegFromChannel(IndirectIndex);
-}
-
-#define GET_REGINFO_TARGET_DESC
-#include "AMDGPUGenRegisterInfo.inc"

Removed: llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.h (removed)
@@ -1,64 +0,0 @@
-//===-- AMDGPURegisterInfo.h - AMDGPURegisterInfo Interface -*- C++ -*-----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief TargetRegisterInfo interface that is implemented by all hw codegen
-/// targets.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUREGISTERINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUREGISTERINFO_H
-
-#include "llvm/ADT/BitVector.h"
-#include "llvm/Target/TargetRegisterInfo.h"
-
-#define GET_REGINFO_HEADER
-#define GET_REGINFO_ENUM
-#include "AMDGPUGenRegisterInfo.inc"
-
-namespace llvm {
-
-class AMDGPUSubtarget;
-class TargetInstrInfo;
-
-struct AMDGPURegisterInfo : public AMDGPUGenRegisterInfo {
-  static const MCPhysReg CalleeSavedReg;
-
-  AMDGPURegisterInfo();
-
-  BitVector getReservedRegs(const MachineFunction &MF) const override {
-    assert(!"Unimplemented");  return BitVector();
-  }
-
-  virtual const TargetRegisterClass* getCFGStructurizerRegClass(MVT VT) const {
-    assert(!"Unimplemented"); return nullptr;
-  }
-
-  virtual unsigned getHWRegIndex(unsigned Reg) const {
-    assert(!"Unimplemented"); return 0;
-  }
-
-  /// \returns the sub reg enum value for the given \p Channel
-  /// (e.g. getSubRegFromChannel(0) -> AMDGPU::sub0)
-  unsigned getSubRegFromChannel(unsigned Channel) const;
-
-  const MCPhysReg* getCalleeSavedRegs(const MachineFunction *MF) const override;
-  void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
-                           unsigned FIOperandNum,
-                           RegScavenger *RS) const override;
-  unsigned getFrameRegister(const MachineFunction &MF) const override;
-
-  unsigned getIndirectSubReg(unsigned IndirectIndex) const;
-
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td (original)
+++ llvm/trunk/lib/Target/R600/AMDGPURegisterInfo.td (removed)
@@ -1,26 +0,0 @@
-//===-- AMDGPURegisterInfo.td - AMDGPU register info -------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Tablegen register definitions common to all hw codegen targets.
-//
-//===----------------------------------------------------------------------===//
-
-let Namespace = "AMDGPU" in {
-
-foreach Index = 0-15 in {
-  // Indices are used in a variety of ways here, so don't set a size/offset.
-  def sub#Index : SubRegIndex<-1, -1>;
-}
-
-def INDIRECT_BASE_ADDR : Register <"INDIRECT_BASE_ADDR">;
-
-}
-
-include "R600RegisterInfo.td"
-include "SIRegisterInfo.td"

Removed: llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUSubtarget.cpp (removed)
@@ -1,133 +0,0 @@
-//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Implements the AMDGPU specific subclass of TargetSubtarget.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUSubtarget.h"
-#include "R600ISelLowering.h"
-#include "R600InstrInfo.h"
-#include "R600MachineScheduler.h"
-#include "SIISelLowering.h"
-#include "SIInstrInfo.h"
-#include "SIMachineFunctionInfo.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/CodeGen/MachineScheduler.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "amdgpu-subtarget"
-
-#define GET_SUBTARGETINFO_ENUM
-#define GET_SUBTARGETINFO_TARGET_DESC
-#define GET_SUBTARGETINFO_CTOR
-#include "AMDGPUGenSubtargetInfo.inc"
-
-AMDGPUSubtarget &
-AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
-                                                 StringRef GPU, StringRef FS) {
-  // Determine default and user-specified characteristics
-  // On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
-  // enabled, but some instructions do not respect them and they run at the
-  // double precision rate, so don't enable by default.
-  //
-  // We want to be able to turn these off, but making this a subtarget feature
-  // for SI has the unhelpful behavior that it unsets everything else if you
-  // disable it.
-
-  SmallString<256> FullFS("+promote-alloca,+fp64-denormals,");
-  FullFS += FS;
-
-  if (GPU == "" && TT.getArch() == Triple::amdgcn)
-    GPU = "SI";
-
-  ParseSubtargetFeatures(GPU, FullFS);
-
-  // FIXME: I don't think think Evergreen has any useful support for
-  // denormals, but should be checked. Should we issue a warning somewhere
-  // if someone tries to enable these?
-  if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-    FP32Denormals = false;
-    FP64Denormals = false;
-  }
-  return *this;
-}
-
-AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
-                                 TargetMachine &TM)
-    : AMDGPUGenSubtargetInfo(TT, GPU, FS), DevName(GPU), Is64bit(false),
-      DumpCode(false), R600ALUInst(false), HasVertexCache(false),
-      TexVTXClauseSize(0), Gen(AMDGPUSubtarget::R600), FP64(false),
-      FP64Denormals(false), FP32Denormals(false), FastFMAF32(false),
-      CaymanISA(false), FlatAddressSpace(false), EnableIRStructurizer(true),
-      EnablePromoteAlloca(false), EnableIfCvt(true), EnableLoadStoreOpt(false),
-      WavefrontSize(0), CFALUBug(false), LocalMemorySize(0),
-      EnableVGPRSpilling(false), SGPRInitBug(false), IsGCN(false),
-      GCN1Encoding(false), GCN3Encoding(false), CIInsts(false), LDSBankCount(0),
-      FrameLowering(TargetFrameLowering::StackGrowsUp,
-                    64 * 16, // Maximum stack alignment (long16)
-                    0),
-      InstrItins(getInstrItineraryForCPU(GPU)), TargetTriple(TT) {
-
-  initializeSubtargetDependencies(TT, GPU, FS);
-
-  if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-    InstrInfo.reset(new R600InstrInfo(*this));
-    TLInfo.reset(new R600TargetLowering(TM, *this));
-  } else {
-    InstrInfo.reset(new SIInstrInfo(*this));
-    TLInfo.reset(new SITargetLowering(TM, *this));
-  }
-}
-
-unsigned AMDGPUSubtarget::getStackEntrySize() const {
-  assert(getGeneration() <= NORTHERN_ISLANDS);
-  switch(getWavefrontSize()) {
-  case 16:
-    return 8;
-  case 32:
-    return hasCaymanISA() ? 4 : 8;
-  case 64:
-    return 4;
-  default:
-    llvm_unreachable("Illegal wavefront size.");
-  }
-}
-
-unsigned AMDGPUSubtarget::getAmdKernelCodeChipID() const {
-  switch(getGeneration()) {
-  default: llvm_unreachable("ChipID unknown");
-  case SEA_ISLANDS: return 12;
-  }
-}
-
-bool AMDGPUSubtarget::isVGPRSpillingEnabled(
-                                       const SIMachineFunctionInfo *MFI) const {
-  return MFI->getShaderType() == ShaderType::COMPUTE || EnableVGPRSpilling;
-}
-
-void AMDGPUSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
-                                          MachineInstr *begin,
-                                          MachineInstr *end,
-                                          unsigned NumRegionInstrs) const {
-  if (getGeneration() >= SOUTHERN_ISLANDS) {
-
-    // Track register pressure so the scheduler can try to decrease
-    // pressure once register usage is above the threshold defined by
-    // SIRegisterInfo::getRegPressureSetLimit()
-    Policy.ShouldTrackPressure = true;
-
-    // Enabling both top down and bottom up scheduling seems to give us less
-    // register spills than just using one of these approaches on its own.
-    Policy.OnlyTopDown = false;
-    Policy.OnlyBottomUp = false;
-  }
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUSubtarget.h (removed)
@@ -1,282 +0,0 @@
-//=====-- AMDGPUSubtarget.h - Define Subtarget for the AMDIL ---*- C++ -*-====//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//==-----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief AMDGPU specific subclass of TargetSubtarget.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUSUBTARGET_H
-#define LLVM_LIB_TARGET_R600_AMDGPUSUBTARGET_H
-#include "AMDGPU.h"
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUIntrinsicInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "R600ISelLowering.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
-
-#define GET_SUBTARGETINFO_HEADER
-#include "AMDGPUGenSubtargetInfo.inc"
-
-namespace llvm {
-
-class SIMachineFunctionInfo;
-
-class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
-
-public:
-  enum Generation {
-    R600 = 0,
-    R700,
-    EVERGREEN,
-    NORTHERN_ISLANDS,
-    SOUTHERN_ISLANDS,
-    SEA_ISLANDS,
-    VOLCANIC_ISLANDS,
-  };
-
-  enum {
-    FIXED_SGPR_COUNT_FOR_INIT_BUG = 80
-  };
-
-private:
-  std::string DevName;
-  bool Is64bit;
-  bool DumpCode;
-  bool R600ALUInst;
-  bool HasVertexCache;
-  short TexVTXClauseSize;
-  Generation Gen;
-  bool FP64;
-  bool FP64Denormals;
-  bool FP32Denormals;
-  bool FastFMAF32;
-  bool CaymanISA;
-  bool FlatAddressSpace;
-  bool EnableIRStructurizer;
-  bool EnablePromoteAlloca;
-  bool EnableIfCvt;
-  bool EnableLoadStoreOpt;
-  unsigned WavefrontSize;
-  bool CFALUBug;
-  int LocalMemorySize;
-  bool EnableVGPRSpilling;
-  bool SGPRInitBug;
-  bool IsGCN;
-  bool GCN1Encoding;
-  bool GCN3Encoding;
-  bool CIInsts;
-  bool FeatureDisable;
-  int LDSBankCount;
-
-  AMDGPUFrameLowering FrameLowering;
-  std::unique_ptr<AMDGPUTargetLowering> TLInfo;
-  std::unique_ptr<AMDGPUInstrInfo> InstrInfo;
-  InstrItineraryData InstrItins;
-  Triple TargetTriple;
-
-public:
-  AMDGPUSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
-                  TargetMachine &TM);
-  AMDGPUSubtarget &initializeSubtargetDependencies(const Triple &TT,
-                                                   StringRef GPU, StringRef FS);
-
-  const AMDGPUFrameLowering *getFrameLowering() const override {
-    return &FrameLowering;
-  }
-  const AMDGPUInstrInfo *getInstrInfo() const override {
-    return InstrInfo.get();
-  }
-  const AMDGPURegisterInfo *getRegisterInfo() const override {
-    return &InstrInfo->getRegisterInfo();
-  }
-  AMDGPUTargetLowering *getTargetLowering() const override {
-    return TLInfo.get();
-  }
-  const InstrItineraryData *getInstrItineraryData() const override {
-    return &InstrItins;
-  }
-
-  void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
-
-  bool is64bit() const {
-    return Is64bit;
-  }
-
-  bool hasVertexCache() const {
-    return HasVertexCache;
-  }
-
-  short getTexVTXClauseSize() const {
-    return TexVTXClauseSize;
-  }
-
-  Generation getGeneration() const {
-    return Gen;
-  }
-
-  bool hasHWFP64() const {
-    return FP64;
-  }
-
-  bool hasCaymanISA() const {
-    return CaymanISA;
-  }
-
-  bool hasFP32Denormals() const {
-    return FP32Denormals;
-  }
-
-  bool hasFP64Denormals() const {
-    return FP64Denormals;
-  }
-
-  bool hasFastFMAF32() const {
-    return FastFMAF32;
-  }
-
-  bool hasFlatAddressSpace() const {
-    return FlatAddressSpace;
-  }
-
-  bool hasBFE() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool hasBFI() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool hasBFM() const {
-    return hasBFE();
-  }
-
-  bool hasBCNT(unsigned Size) const {
-    if (Size == 32)
-      return (getGeneration() >= EVERGREEN);
-
-    if (Size == 64)
-      return (getGeneration() >= SOUTHERN_ISLANDS);
-
-    return false;
-  }
-
-  bool hasMulU24() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool hasMulI24() const {
-    return (getGeneration() >= SOUTHERN_ISLANDS ||
-            hasCaymanISA());
-  }
-
-  bool hasFFBL() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool hasFFBH() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool hasCARRY() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool hasBORROW() const {
-    return (getGeneration() >= EVERGREEN);
-  }
-
-  bool IsIRStructurizerEnabled() const {
-    return EnableIRStructurizer;
-  }
-
-  bool isPromoteAllocaEnabled() const {
-    return EnablePromoteAlloca;
-  }
-
-  bool isIfCvtEnabled() const {
-    return EnableIfCvt;
-  }
-
-  bool loadStoreOptEnabled() const {
-    return EnableLoadStoreOpt;
-  }
-
-  unsigned getWavefrontSize() const {
-    return WavefrontSize;
-  }
-
-  unsigned getStackEntrySize() const;
-
-  bool hasCFAluBug() const {
-    assert(getGeneration() <= NORTHERN_ISLANDS);
-    return CFALUBug;
-  }
-
-  int getLocalMemorySize() const {
-    return LocalMemorySize;
-  }
-
-  bool hasSGPRInitBug() const {
-    return SGPRInitBug;
-  }
-
-  int getLDSBankCount() const {
-    return LDSBankCount;
-  }
-
-  unsigned getAmdKernelCodeChipID() const;
-
-  bool enableMachineScheduler() const override {
-    return true;
-  }
-
-  void overrideSchedPolicy(MachineSchedPolicy &Policy,
-                           MachineInstr *begin, MachineInstr *end,
-                           unsigned NumRegionInstrs) const override;
-
-  // Helper functions to simplify if statements
-  bool isTargetELF() const {
-    return false;
-  }
-
-  StringRef getDeviceName() const {
-    return DevName;
-  }
-
-  bool dumpCode() const {
-    return DumpCode;
-  }
-  bool r600ALUEncoding() const {
-    return R600ALUInst;
-  }
-  bool isAmdHsaOS() const {
-    return TargetTriple.getOS() == Triple::AMDHSA;
-  }
-  bool isVGPRSpillingEnabled(const SIMachineFunctionInfo *MFI) const;
-
-  unsigned getMaxWavesPerCU() const {
-    if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
-      return 10;
-
-    // FIXME: Not sure what this is for other subtagets.
-    llvm_unreachable("do not know max waves per CU for this subtarget.");
-  }
-
-  bool enableSubRegLiveness() const override {
-    return true;
-  }
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.cpp (removed)
@@ -1,292 +0,0 @@
-//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief The AMDGPU target machine contains all of the hardware specific
-/// information needed to emit code for R600 and SI GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUTargetMachine.h"
-#include "AMDGPU.h"
-#include "AMDGPUTargetTransformInfo.h"
-#include "R600ISelLowering.h"
-#include "R600InstrInfo.h"
-#include "R600MachineScheduler.h"
-#include "SIISelLowering.h"
-#include "SIInstrInfo.h"
-#include "llvm/Analysis/Passes.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/IR/Verifier.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/IR/LegacyPassManager.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_os_ostream.h"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Transforms/Scalar.h"
-#include <llvm/CodeGen/Passes.h>
-
-using namespace llvm;
-
-extern "C" void LLVMInitializeR600Target() {
-  // Register the target
-  RegisterTargetMachine<R600TargetMachine> X(TheAMDGPUTarget);
-  RegisterTargetMachine<GCNTargetMachine> Y(TheGCNTarget);
-}
-
-static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
-  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
-}
-
-static MachineSchedRegistry
-SchedCustomRegistry("r600", "Run R600's custom scheduler",
-                    createR600MachineScheduler);
-
-static std::string computeDataLayout(const Triple &TT) {
-  std::string Ret = "e-p:32:32";
-
-  if (TT.getArch() == Triple::amdgcn) {
-    // 32-bit private, local, and region pointers. 64-bit global and constant.
-    Ret += "-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64";
-  }
-
-  Ret += "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256"
-         "-v512:512-v1024:1024-v2048:2048-n32:64";
-
-  return Ret;
-}
-
-AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
-                                         StringRef CPU, StringRef FS,
-                                         TargetOptions Options, Reloc::Model RM,
-                                         CodeModel::Model CM,
-                                         CodeGenOpt::Level OptLevel)
-    : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
-                        OptLevel),
-      TLOF(new TargetLoweringObjectFileELF()), Subtarget(TT, CPU, FS, *this),
-      IntrinsicInfo() {
-  setRequiresStructuredCFG(true);
-  initAsmInfo();
-}
-
-AMDGPUTargetMachine::~AMDGPUTargetMachine() {
-  delete TLOF;
-}
-
-//===----------------------------------------------------------------------===//
-// R600 Target Machine (R600 -> Cayman)
-//===----------------------------------------------------------------------===//
-
-R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
-                                     StringRef FS, StringRef CPU,
-                                     TargetOptions Options, Reloc::Model RM,
-                                     CodeModel::Model CM, CodeGenOpt::Level OL)
-    : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
-
-//===----------------------------------------------------------------------===//
-// GCN Target Machine (SI+)
-//===----------------------------------------------------------------------===//
-
-GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
-                                   StringRef FS, StringRef CPU,
-                                   TargetOptions Options, Reloc::Model RM,
-                                   CodeModel::Model CM, CodeGenOpt::Level OL)
-    : AMDGPUTargetMachine(T, TT, FS, CPU, Options, RM, CM, OL) {}
-
-//===----------------------------------------------------------------------===//
-// AMDGPU Pass Setup
-//===----------------------------------------------------------------------===//
-
-namespace {
-class AMDGPUPassConfig : public TargetPassConfig {
-public:
-  AMDGPUPassConfig(TargetMachine *TM, PassManagerBase &PM)
-    : TargetPassConfig(TM, PM) {}
-
-  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
-    return getTM<AMDGPUTargetMachine>();
-  }
-
-  ScheduleDAGInstrs *
-  createMachineScheduler(MachineSchedContext *C) const override {
-    const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
-    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
-      return createR600MachineScheduler(C);
-    return nullptr;
-  }
-
-  void addIRPasses() override;
-  void addCodeGenPrepare() override;
-  virtual bool addPreISel() override;
-  virtual bool addInstSelector() override;
-};
-
-class R600PassConfig : public AMDGPUPassConfig {
-public:
-  R600PassConfig(TargetMachine *TM, PassManagerBase &PM)
-    : AMDGPUPassConfig(TM, PM) { }
-
-  bool addPreISel() override;
-  void addPreRegAlloc() override;
-  void addPreSched2() override;
-  void addPreEmitPass() override;
-};
-
-class GCNPassConfig : public AMDGPUPassConfig {
-public:
-  GCNPassConfig(TargetMachine *TM, PassManagerBase &PM)
-    : AMDGPUPassConfig(TM, PM) { }
-  bool addPreISel() override;
-  bool addInstSelector() override;
-  void addPreRegAlloc() override;
-  void addPostRegAlloc() override;
-  void addPreSched2() override;
-  void addPreEmitPass() override;
-};
-
-} // End of anonymous namespace
-
-TargetIRAnalysis AMDGPUTargetMachine::getTargetIRAnalysis() {
-  return TargetIRAnalysis(
-      [this](Function &F) { return TargetTransformInfo(AMDGPUTTIImpl(this)); });
-}
-
-void AMDGPUPassConfig::addIRPasses() {
-  // Function calls are not supported, so make sure we inline everything.
-  addPass(createAMDGPUAlwaysInlinePass());
-  addPass(createAlwaysInlinerPass());
-  // We need to add the barrier noop pass, otherwise adding the function
-  // inlining pass will cause all of the PassConfigs passes to be run
-  // one function at a time, which means if we have a module with two
-  // functions, then we will generate code for the first function
-  // without ever running any passes on the second.
-  addPass(createBarrierNoopPass());
-  TargetPassConfig::addIRPasses();
-}
-
-void AMDGPUPassConfig::addCodeGenPrepare() {
-  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
-  if (ST.isPromoteAllocaEnabled()) {
-    addPass(createAMDGPUPromoteAlloca(ST));
-    addPass(createSROAPass());
-  }
-  TargetPassConfig::addCodeGenPrepare();
-}
-
-bool
-AMDGPUPassConfig::addPreISel() {
-  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
-  addPass(createFlattenCFGPass());
-  if (ST.IsIRStructurizerEnabled())
-    addPass(createStructurizeCFGPass());
-  return false;
-}
-
-bool AMDGPUPassConfig::addInstSelector() {
-  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
-  return false;
-}
-
-//===----------------------------------------------------------------------===//
-// R600 Pass Setup
-//===----------------------------------------------------------------------===//
-
-bool R600PassConfig::addPreISel() {
-  AMDGPUPassConfig::addPreISel();
-  addPass(createR600TextureIntrinsicsReplacer());
-  return false;
-}
-
-void R600PassConfig::addPreRegAlloc() {
-  addPass(createR600VectorRegMerger(*TM));
-}
-
-void R600PassConfig::addPreSched2() {
-  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
-  addPass(createR600EmitClauseMarkers(), false);
-  if (ST.isIfCvtEnabled())
-    addPass(&IfConverterID, false);
-  addPass(createR600ClauseMergePass(*TM), false);
-}
-
-void R600PassConfig::addPreEmitPass() {
-  addPass(createAMDGPUCFGStructurizerPass(), false);
-  addPass(createR600ExpandSpecialInstrsPass(*TM), false);
-  addPass(&FinalizeMachineBundlesID, false);
-  addPass(createR600Packetizer(*TM), false);
-  addPass(createR600ControlFlowFinalizer(*TM), false);
-}
-
-TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
-  return new R600PassConfig(this, PM);
-}
-
-//===----------------------------------------------------------------------===//
-// GCN Pass Setup
-//===----------------------------------------------------------------------===//
-
-bool GCNPassConfig::addPreISel() {
-  AMDGPUPassConfig::addPreISel();
-  addPass(createSinkingPass());
-  addPass(createSITypeRewriter());
-  addPass(createSIAnnotateControlFlowPass());
-  return false;
-}
-
-bool GCNPassConfig::addInstSelector() {
-  AMDGPUPassConfig::addInstSelector();
-  addPass(createSILowerI1CopiesPass());
-  addPass(createSIFixSGPRCopiesPass(*TM));
-  addPass(createSIFoldOperandsPass());
-  return false;
-}
-
-void GCNPassConfig::addPreRegAlloc() {
-  const AMDGPUSubtarget &ST = *getAMDGPUTargetMachine().getSubtargetImpl();
-
-  // This needs to be run directly before register allocation because
-  // earlier passes might recompute live intervals.
-  // TODO: handle CodeGenOpt::None; fast RA ignores spill weights set by the pass
-  if (getOptLevel() > CodeGenOpt::None) {
-    initializeSIFixControlFlowLiveIntervalsPass(*PassRegistry::getPassRegistry());
-    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
-  }
-
-  if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
-    // Don't do this with no optimizations since it throws away debug info by
-    // merging nonadjacent loads.
-
-    // This should be run after scheduling, but before register allocation. It
-    // also need extra copies to the address operand to be eliminated.
-    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
-    insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
-  }
-  addPass(createSIShrinkInstructionsPass(), false);
-  addPass(createSIFixSGPRLiveRangesPass(), false);
-}
-
-void GCNPassConfig::addPostRegAlloc() {
-  addPass(createSIPrepareScratchRegs(), false);
-  addPass(createSIShrinkInstructionsPass(), false);
-}
-
-void GCNPassConfig::addPreSched2() {
-  addPass(createSIInsertWaits(*TM), false);
-}
-
-void GCNPassConfig::addPreEmitPass() {
-  addPass(createSILowerControlFlowPass(*TM), false);
-}
-
-TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
-  return new GCNPassConfig(this, PM);
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUTargetMachine.h (removed)
@@ -1,89 +0,0 @@
-//===-- AMDGPUTargetMachine.h - AMDGPU TargetMachine Interface --*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief The AMDGPU TargetMachine interface definition for hw codegen targets.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUTARGETMACHINE_H
-#define LLVM_LIB_TARGET_R600_AMDGPUTARGETMACHINE_H
-
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUIntrinsicInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "R600ISelLowering.h"
-#include "llvm/IR/DataLayout.h"
-
-namespace llvm {
-
-//===----------------------------------------------------------------------===//
-// AMDGPU Target Machine (R600+)
-//===----------------------------------------------------------------------===//
-
-class AMDGPUTargetMachine : public LLVMTargetMachine {
-private:
-
-protected:
-  TargetLoweringObjectFile *TLOF;
-  AMDGPUSubtarget Subtarget;
-  AMDGPUIntrinsicInfo IntrinsicInfo;
-
-public:
-  AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef FS,
-                      StringRef CPU, TargetOptions Options, Reloc::Model RM,
-                      CodeModel::Model CM, CodeGenOpt::Level OL);
-  ~AMDGPUTargetMachine();
-
-  const AMDGPUSubtarget *getSubtargetImpl() const { return &Subtarget; }
-  const AMDGPUSubtarget *getSubtargetImpl(const Function &) const override {
-    return &Subtarget;
-  }
-  const AMDGPUIntrinsicInfo *getIntrinsicInfo() const override {
-    return &IntrinsicInfo;
-  }
-  TargetIRAnalysis getTargetIRAnalysis() override;
-
-  TargetLoweringObjectFile *getObjFileLowering() const override {
-    return TLOF;
-  }
-};
-
-//===----------------------------------------------------------------------===//
-// R600 Target Machine (R600 -> Cayman)
-//===----------------------------------------------------------------------===//
-
-class R600TargetMachine : public AMDGPUTargetMachine {
-
-public:
-  R600TargetMachine(const Target &T, const Triple &TT, StringRef FS,
-                    StringRef CPU, TargetOptions Options, Reloc::Model RM,
-                    CodeModel::Model CM, CodeGenOpt::Level OL);
-
-  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
-};
-
-//===----------------------------------------------------------------------===//
-// GCN Target Machine (SI+)
-//===----------------------------------------------------------------------===//
-
-class GCNTargetMachine : public AMDGPUTargetMachine {
-
-public:
-  GCNTargetMachine(const Target &T, const Triple &TT, StringRef FS,
-                   StringRef CPU, TargetOptions Options, Reloc::Model RM,
-                   CodeModel::Model CM, CodeGenOpt::Level OL);
-
-  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.cpp (removed)
@@ -1,82 +0,0 @@
-//===-- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass ---------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// \file
-// This file implements a TargetTransformInfo analysis pass specific to the
-// AMDGPU target machine. It uses the target's detailed information to provide
-// more precise answers to certain TTI queries, while letting the target
-// independent and default TTI implementations handle the rest.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUTargetTransformInfo.h"
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/BasicTTIImpl.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Target/CostTable.h"
-#include "llvm/Target/TargetLowering.h"
-using namespace llvm;
-
-#define DEBUG_TYPE "AMDGPUtti"
-
-void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L,
-                                            TTI::UnrollingPreferences &UP) {
-  UP.Threshold = 300; // Twice the default.
-  UP.MaxCount = UINT_MAX;
-  UP.Partial = true;
-
-  // TODO: Do we want runtime unrolling?
-
-  for (const BasicBlock *BB : L->getBlocks()) {
-    const DataLayout &DL = BB->getModule()->getDataLayout();
-    for (const Instruction &I : *BB) {
-      const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
-      if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
-        continue;
-
-      const Value *Ptr = GEP->getPointerOperand();
-      const AllocaInst *Alloca =
-          dyn_cast<AllocaInst>(GetUnderlyingObject(Ptr, DL));
-      if (Alloca) {
-        // We want to do whatever we can to limit the number of alloca
-        // instructions that make it through to the code generator.  allocas
-        // require us to use indirect addressing, which is slow and prone to
-        // compiler bugs.  If this loop does an address calculation on an
-        // alloca ptr, then we want to use a higher than normal loop unroll
-        // threshold. This will give SROA a better chance to eliminate these
-        // allocas.
-        //
-        // Don't use the maximum allowed value here as it will make some
-        // programs way too big.
-        UP.Threshold = 800;
-      }
-    }
-  }
-}
-
-unsigned AMDGPUTTIImpl::getNumberOfRegisters(bool Vec) {
-  if (Vec)
-    return 0;
-
-  // Number of VGPRs on SI.
-  if (ST->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS)
-    return 256;
-
-  return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
-}
-
-unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool) { return 32; }
-
-unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
-  // Semi-arbitrary large amount.
-  return 64;
-}

Removed: llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUTargetTransformInfo.h (removed)
@@ -1,78 +0,0 @@
-//===-- AMDGPUTargetTransformInfo.h - AMDGPU specific TTI -------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file
-/// This file a TargetTransformInfo::Concept conforming object specific to the
-/// AMDGPU target machine. It uses the target's detailed information to
-/// provide more precise answers to certain TTI queries, while letting the
-/// target independent and default TTI implementations handle the rest.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_AMDGPUTARGETTRANSFORMINFO_H
-#define LLVM_LIB_TARGET_R600_AMDGPUTARGETTRANSFORMINFO_H
-
-#include "AMDGPU.h"
-#include "AMDGPUTargetMachine.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/CodeGen/BasicTTIImpl.h"
-#include "llvm/Target/TargetLowering.h"
-
-namespace llvm {
-
-class AMDGPUTTIImpl : public BasicTTIImplBase<AMDGPUTTIImpl> {
-  typedef BasicTTIImplBase<AMDGPUTTIImpl> BaseT;
-  typedef TargetTransformInfo TTI;
-  friend BaseT;
-
-  const AMDGPUSubtarget *ST;
-  const AMDGPUTargetLowering *TLI;
-
-  const AMDGPUSubtarget *getST() const { return ST; }
-  const AMDGPUTargetLowering *getTLI() const { return TLI; }
-
-public:
-  explicit AMDGPUTTIImpl(const AMDGPUTargetMachine *TM)
-      : BaseT(TM), ST(TM->getSubtargetImpl()), TLI(ST->getTargetLowering()) {}
-
-  // Provide value semantics. MSVC requires that we spell all of these out.
-  AMDGPUTTIImpl(const AMDGPUTTIImpl &Arg)
-      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
-  AMDGPUTTIImpl(AMDGPUTTIImpl &&Arg)
-      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
-        TLI(std::move(Arg.TLI)) {}
-  AMDGPUTTIImpl &operator=(const AMDGPUTTIImpl &RHS) {
-    BaseT::operator=(static_cast<const BaseT &>(RHS));
-    ST = RHS.ST;
-    TLI = RHS.TLI;
-    return *this;
-  }
-  AMDGPUTTIImpl &operator=(AMDGPUTTIImpl &&RHS) {
-    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
-    ST = std::move(RHS.ST);
-    TLI = std::move(RHS.TLI);
-    return *this;
-  }
-
-  bool hasBranchDivergence() { return true; }
-
-  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
-
-  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
-    assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
-    return ST->hasBCNT(TyWidth) ? TTI::PSK_FastHardware : TTI::PSK_Software;
-  }
-
-  unsigned getNumberOfRegisters(bool Vector);
-  unsigned getRegisterBitWidth(bool Vector);
-  unsigned getMaxInterleaveFactor(unsigned VF);
-};
-
-} // end namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDILCFGStructurizer.cpp (removed)
@@ -1,1912 +0,0 @@
-//===-- AMDILCFGStructurizer.cpp - CFG Structurizer -----------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//==-----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUInstrInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "R600InstrInfo.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SCCIterator.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineDominators.h"
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionAnalysis.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachinePostDominators.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
-#include <deque>
-
-using namespace llvm;
-
-#define DEBUG_TYPE "structcfg"
-
-#define DEFAULT_VEC_SLOTS 8
-
-// TODO: move-begin.
-
-//===----------------------------------------------------------------------===//
-//
-// Statistics for CFGStructurizer.
-//
-//===----------------------------------------------------------------------===//
-
-STATISTIC(numSerialPatternMatch,    "CFGStructurizer number of serial pattern "
-    "matched");
-STATISTIC(numIfPatternMatch,        "CFGStructurizer number of if pattern "
-    "matched");
-STATISTIC(numLoopcontPatternMatch,  "CFGStructurizer number of loop-continue "
-    "pattern matched");
-STATISTIC(numClonedBlock,           "CFGStructurizer cloned blocks");
-STATISTIC(numClonedInstr,           "CFGStructurizer cloned instructions");
-
-namespace llvm {
-  void initializeAMDGPUCFGStructurizerPass(PassRegistry&);
-}
-
-//===----------------------------------------------------------------------===//
-//
-// Miscellaneous utility for CFGStructurizer.
-//
-//===----------------------------------------------------------------------===//
-namespace {
-#define SHOWNEWINSTR(i) \
-  DEBUG(dbgs() << "New instr: " << *i << "\n");
-
-#define SHOWNEWBLK(b, msg) \
-DEBUG( \
-  dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
-  dbgs() << "\n"; \
-);
-
-#define SHOWBLK_DETAIL(b, msg) \
-DEBUG( \
-  if (b) { \
-  dbgs() << msg << "BB" << b->getNumber() << "size " << b->size(); \
-  b->print(dbgs()); \
-  dbgs() << "\n"; \
-  } \
-);
-
-#define INVALIDSCCNUM -1
-
-template<class NodeT>
-void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
-  size_t sz = Src.size();
-  for (size_t i = 0; i < sz/2; ++i) {
-    NodeT *t = Src[i];
-    Src[i] = Src[sz - i - 1];
-    Src[sz - i - 1] = t;
-  }
-}
-
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-//
-// supporting data structure for CFGStructurizer
-//
-//===----------------------------------------------------------------------===//
-
-
-namespace {
-
-class BlockInformation {
-public:
-  bool IsRetired;
-  int  SccNum;
-  BlockInformation() : IsRetired(false), SccNum(INVALIDSCCNUM) {}
-};
-
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-//
-// CFGStructurizer
-//
-//===----------------------------------------------------------------------===//
-
-namespace {
-class AMDGPUCFGStructurizer : public MachineFunctionPass {
-public:
-  typedef SmallVector<MachineBasicBlock *, 32> MBBVector;
-  typedef std::map<MachineBasicBlock *, BlockInformation *> MBBInfoMap;
-  typedef std::map<MachineLoop *, MachineBasicBlock *> LoopLandInfoMap;
-
-  enum PathToKind {
-    Not_SinglePath = 0,
-    SinglePath_InPath = 1,
-    SinglePath_NotInPath = 2
-  };
-
-  static char ID;
-
-  AMDGPUCFGStructurizer() :
-      MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) {
-    initializeAMDGPUCFGStructurizerPass(*PassRegistry::getPassRegistry());
-  }
-
-   const char *getPassName() const override {
-    return "AMDGPU Control Flow Graph structurizer Pass";
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addPreserved<MachineFunctionAnalysis>();
-    AU.addRequired<MachineFunctionAnalysis>();
-    AU.addRequired<MachineDominatorTree>();
-    AU.addRequired<MachinePostDominatorTree>();
-    AU.addRequired<MachineLoopInfo>();
-  }
-
-  /// Perform the CFG structurization
-  bool run();
-
-  /// Perform the CFG preparation
-  /// This step will remove every unconditional/dead jump instruction and make
-  /// sure all loops have an exit block
-  bool prepare();
-
-  bool runOnMachineFunction(MachineFunction &MF) override {
-    TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
-    TRI = &TII->getRegisterInfo();
-    DEBUG(MF.dump(););
-    OrderedBlks.clear();
-    Visited.clear();
-    FuncRep = &MF;
-    MLI = &getAnalysis<MachineLoopInfo>();
-    DEBUG(dbgs() << "LoopInfo:\n"; PrintLoopinfo(*MLI););
-    MDT = &getAnalysis<MachineDominatorTree>();
-    DEBUG(MDT->print(dbgs(), (const llvm::Module*)nullptr););
-    PDT = &getAnalysis<MachinePostDominatorTree>();
-    DEBUG(PDT->print(dbgs()););
-    prepare();
-    run();
-    DEBUG(MF.dump(););
-    return true;
-  }
-
-protected:
-  MachineDominatorTree *MDT;
-  MachinePostDominatorTree *PDT;
-  MachineLoopInfo *MLI;
-  const R600InstrInfo *TII;
-  const AMDGPURegisterInfo *TRI;
-
-  // PRINT FUNCTIONS
-  /// Print the ordered Blocks.
-  void printOrderedBlocks() const {
-    size_t i = 0;
-    for (MBBVector::const_iterator iterBlk = OrderedBlks.begin(),
-        iterBlkEnd = OrderedBlks.end(); iterBlk != iterBlkEnd; ++iterBlk, ++i) {
-      dbgs() << "BB" << (*iterBlk)->getNumber();
-      dbgs() << "(" << getSCCNum(*iterBlk) << "," << (*iterBlk)->size() << ")";
-      if (i != 0 && i % 10 == 0) {
-        dbgs() << "\n";
-      } else {
-        dbgs() << " ";
-      }
-    }
-  }
-  static void PrintLoopinfo(const MachineLoopInfo &LoopInfo) {
-    for (MachineLoop::iterator iter = LoopInfo.begin(),
-         iterEnd = LoopInfo.end(); iter != iterEnd; ++iter) {
-      (*iter)->print(dbgs(), 0);
-    }
-  }
-
-  // UTILITY FUNCTIONS
-  int getSCCNum(MachineBasicBlock *MBB) const;
-  MachineBasicBlock *getLoopLandInfo(MachineLoop *LoopRep) const;
-  bool hasBackEdge(MachineBasicBlock *MBB) const;
-  static unsigned getLoopDepth(MachineLoop *LoopRep);
-  bool isRetiredBlock(MachineBasicBlock *MBB) const;
-  bool isActiveLoophead(MachineBasicBlock *MBB) const;
-  PathToKind singlePathTo(MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
-      bool AllowSideEntry = true) const;
-  int countActiveBlock(MBBVector::const_iterator It,
-      MBBVector::const_iterator E) const;
-  bool needMigrateBlock(MachineBasicBlock *MBB) const;
-
-  // Utility Functions
-  void reversePredicateSetter(MachineBasicBlock::iterator I);
-  /// Compute the reversed DFS post order of Blocks
-  void orderBlocks(MachineFunction *MF);
-
-  // Function originally from CFGStructTraits
-  void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
-      DebugLoc DL = DebugLoc());
-  MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
-    DebugLoc DL = DebugLoc());
-  MachineInstr *insertInstrBefore(MachineBasicBlock::iterator I, int NewOpcode);
-  void insertCondBranchBefore(MachineBasicBlock::iterator I, int NewOpcode,
-      DebugLoc DL);
-  void insertCondBranchBefore(MachineBasicBlock *MBB,
-      MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
-      DebugLoc DL);
-  void insertCondBranchEnd(MachineBasicBlock *MBB, int NewOpcode, int RegNum);
-  static int getBranchNzeroOpcode(int OldOpcode);
-  static int getBranchZeroOpcode(int OldOpcode);
-  static int getContinueNzeroOpcode(int OldOpcode);
-  static int getContinueZeroOpcode(int OldOpcode);
-  static MachineBasicBlock *getTrueBranch(MachineInstr *MI);
-  static void setTrueBranch(MachineInstr *MI, MachineBasicBlock *MBB);
-  static MachineBasicBlock *getFalseBranch(MachineBasicBlock *MBB,
-      MachineInstr *MI);
-  static bool isCondBranch(MachineInstr *MI);
-  static bool isUncondBranch(MachineInstr *MI);
-  static DebugLoc getLastDebugLocInBB(MachineBasicBlock *MBB);
-  static MachineInstr *getNormalBlockBranchInstr(MachineBasicBlock *MBB);
-  /// The correct naming for this is getPossibleLoopendBlockBranchInstr.
-  ///
-  /// BB with backward-edge could have move instructions after the branch
-  /// instruction.  Such move instruction "belong to" the loop backward-edge.
-  MachineInstr *getLoopendBlockBranchInstr(MachineBasicBlock *MBB);
-  static MachineInstr *getReturnInstr(MachineBasicBlock *MBB);
-  static MachineInstr *getContinueInstr(MachineBasicBlock *MBB);
-  static bool isReturnBlock(MachineBasicBlock *MBB);
-  static void cloneSuccessorList(MachineBasicBlock *DstMBB,
-      MachineBasicBlock *SrcMBB) ;
-  static MachineBasicBlock *clone(MachineBasicBlock *MBB);
-  /// MachineBasicBlock::ReplaceUsesOfBlockWith doesn't serve the purpose
-  /// because the AMDGPU instruction is not recognized as a terminator; fix
-  /// this and retire this routine.
-  void replaceInstrUseOfBlockWith(MachineBasicBlock *SrcMBB,
-      MachineBasicBlock *OldMBB, MachineBasicBlock *NewBlk);
-  static void wrapup(MachineBasicBlock *MBB);
-
-
-  int patternMatch(MachineBasicBlock *MBB);
-  int patternMatchGroup(MachineBasicBlock *MBB);
-  int serialPatternMatch(MachineBasicBlock *MBB);
-  int ifPatternMatch(MachineBasicBlock *MBB);
-  int loopendPatternMatch();
-  int mergeLoop(MachineLoop *LoopRep);
-  int loopcontPatternMatch(MachineLoop *LoopRep, MachineBasicBlock *LoopHeader);
-
-  void handleLoopcontBlock(MachineBasicBlock *ContingMBB,
-      MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
-      MachineLoop *ContLoop);
-  /// return true iff src1Blk->succ_size() == 0 && src1Blk and src2Blk are in
-  /// the same loop with LoopLandInfo without explicitly keeping track of
-  /// loopContBlks and loopBreakBlks, this is a method to get the information.
-  bool isSameloopDetachedContbreak(MachineBasicBlock *Src1MBB,
-      MachineBasicBlock *Src2MBB);
-  int handleJumpintoIf(MachineBasicBlock *HeadMBB,
-      MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
-  int handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
-      MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB);
-  int improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
-      MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
-      MachineBasicBlock **LandMBBPtr);
-  void showImproveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
-      MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
-      MachineBasicBlock *LandMBB, bool Detail = false);
-  int cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
-      MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB);
-  void mergeSerialBlock(MachineBasicBlock *DstMBB,
-      MachineBasicBlock *SrcMBB);
-
-  void mergeIfthenelseBlock(MachineInstr *BranchMI,
-      MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
-      MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB);
-  void mergeLooplandBlock(MachineBasicBlock *DstMBB,
-      MachineBasicBlock *LandMBB);
-  void mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
-      MachineBasicBlock *LandMBB);
-  void settleLoopcontBlock(MachineBasicBlock *ContingMBB,
-      MachineBasicBlock *ContMBB);
-  /// normalizeInfiniteLoopExit change
-  ///   B1:
-  ///        uncond_br LoopHeader
-  ///
-  /// to
-  ///   B1:
-  ///        cond_br 1 LoopHeader dummyExit
-  /// and return the newly added dummy exit block
-  MachineBasicBlock *normalizeInfiniteLoopExit(MachineLoop *LoopRep);
-  void removeUnconditionalBranch(MachineBasicBlock *MBB);
-  /// Remove duplicate branches instructions in a block.
-  /// For instance
-  /// B0:
-  ///    cond_br X B1 B2
-  ///    cond_br X B1 B2
-  /// is transformed to
-  /// B0:
-  ///    cond_br X B1 B2
-  void removeRedundantConditionalBranch(MachineBasicBlock *MBB);
-  void addDummyExitBlock(SmallVectorImpl<MachineBasicBlock *> &RetMBB);
-  void removeSuccessor(MachineBasicBlock *MBB);
-  MachineBasicBlock *cloneBlockForPredecessor(MachineBasicBlock *MBB,
-      MachineBasicBlock *PredMBB);
-  void migrateInstruction(MachineBasicBlock *SrcMBB,
-      MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I);
-  void recordSccnum(MachineBasicBlock *MBB, int SCCNum);
-  void retireBlock(MachineBasicBlock *MBB);
-  void setLoopLandBlock(MachineLoop *LoopRep, MachineBasicBlock *MBB = nullptr);
-
-  MachineBasicBlock *findNearestCommonPostDom(std::set<MachineBasicBlock *>&);
-  /// This is work around solution for findNearestCommonDominator not available
-  /// to post dom a proper fix should go to Dominators.h.
-  MachineBasicBlock *findNearestCommonPostDom(MachineBasicBlock *MBB1,
-      MachineBasicBlock *MBB2);
-
-private:
-  MBBInfoMap BlockInfoMap;
-  LoopLandInfoMap LLInfoMap;
-  std::map<MachineLoop *, bool> Visited;
-  MachineFunction *FuncRep;
-  SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> OrderedBlks;
-};
-
-int AMDGPUCFGStructurizer::getSCCNum(MachineBasicBlock *MBB) const {
-  MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
-  if (It == BlockInfoMap.end())
-    return INVALIDSCCNUM;
-  return (*It).second->SccNum;
-}
-
-MachineBasicBlock *AMDGPUCFGStructurizer::getLoopLandInfo(MachineLoop *LoopRep)
-    const {
-  LoopLandInfoMap::const_iterator It = LLInfoMap.find(LoopRep);
-  if (It == LLInfoMap.end())
-    return nullptr;
-  return (*It).second;
-}
-
-bool AMDGPUCFGStructurizer::hasBackEdge(MachineBasicBlock *MBB) const {
-  MachineLoop *LoopRep = MLI->getLoopFor(MBB);
-  if (!LoopRep)
-    return false;
-  MachineBasicBlock *LoopHeader = LoopRep->getHeader();
-  return MBB->isSuccessor(LoopHeader);
-}
-
-unsigned AMDGPUCFGStructurizer::getLoopDepth(MachineLoop *LoopRep) {
-  return LoopRep ? LoopRep->getLoopDepth() : 0;
-}
-
-bool AMDGPUCFGStructurizer::isRetiredBlock(MachineBasicBlock *MBB) const {
-  MBBInfoMap::const_iterator It = BlockInfoMap.find(MBB);
-  if (It == BlockInfoMap.end())
-    return false;
-  return (*It).second->IsRetired;
-}
-
-bool AMDGPUCFGStructurizer::isActiveLoophead(MachineBasicBlock *MBB) const {
-  MachineLoop *LoopRep = MLI->getLoopFor(MBB);
-  while (LoopRep && LoopRep->getHeader() == MBB) {
-    MachineBasicBlock *LoopLand = getLoopLandInfo(LoopRep);
-    if(!LoopLand)
-      return true;
-    if (!isRetiredBlock(LoopLand))
-      return true;
-    LoopRep = LoopRep->getParentLoop();
-  }
-  return false;
-}
-AMDGPUCFGStructurizer::PathToKind AMDGPUCFGStructurizer::singlePathTo(
-    MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB,
-    bool AllowSideEntry) const {
-  assert(DstMBB);
-  if (SrcMBB == DstMBB)
-    return SinglePath_InPath;
-  while (SrcMBB && SrcMBB->succ_size() == 1) {
-    SrcMBB = *SrcMBB->succ_begin();
-    if (SrcMBB == DstMBB)
-      return SinglePath_InPath;
-    if (!AllowSideEntry && SrcMBB->pred_size() > 1)
-      return Not_SinglePath;
-  }
-  if (SrcMBB && SrcMBB->succ_size()==0)
-    return SinglePath_NotInPath;
-  return Not_SinglePath;
-}
-
-int AMDGPUCFGStructurizer::countActiveBlock(MBBVector::const_iterator It,
-    MBBVector::const_iterator E) const {
-  int Count = 0;
-  while (It != E) {
-    if (!isRetiredBlock(*It))
-      ++Count;
-    ++It;
-  }
-  return Count;
-}
-
-bool AMDGPUCFGStructurizer::needMigrateBlock(MachineBasicBlock *MBB) const {
-  unsigned BlockSizeThreshold = 30;
-  unsigned CloneInstrThreshold = 100;
-  bool MultiplePreds = MBB && (MBB->pred_size() > 1);
-
-  if(!MultiplePreds)
-    return false;
-  unsigned BlkSize = MBB->size();
-  return ((BlkSize > BlockSizeThreshold) &&
-      (BlkSize * (MBB->pred_size() - 1) > CloneInstrThreshold));
-}
-
-void AMDGPUCFGStructurizer::reversePredicateSetter(
-    MachineBasicBlock::iterator I) {
-  while (I--) {
-    if (I->getOpcode() == AMDGPU::PRED_X) {
-      switch (static_cast<MachineInstr *>(I)->getOperand(2).getImm()) {
-      case OPCODE_IS_ZERO_INT:
-        static_cast<MachineInstr *>(I)->getOperand(2)
-            .setImm(OPCODE_IS_NOT_ZERO_INT);
-        return;
-      case OPCODE_IS_NOT_ZERO_INT:
-        static_cast<MachineInstr *>(I)->getOperand(2)
-            .setImm(OPCODE_IS_ZERO_INT);
-        return;
-      case OPCODE_IS_ZERO:
-        static_cast<MachineInstr *>(I)->getOperand(2)
-            .setImm(OPCODE_IS_NOT_ZERO);
-        return;
-      case OPCODE_IS_NOT_ZERO:
-        static_cast<MachineInstr *>(I)->getOperand(2)
-            .setImm(OPCODE_IS_ZERO);
-        return;
-      default:
-        llvm_unreachable("PRED_X Opcode invalid!");
-      }
-    }
-  }
-}
-
-void AMDGPUCFGStructurizer::insertInstrEnd(MachineBasicBlock *MBB,
-    int NewOpcode, DebugLoc DL) {
- MachineInstr *MI = MBB->getParent()
-    ->CreateMachineInstr(TII->get(NewOpcode), DL);
-  MBB->push_back(MI);
-  //assume the instruction doesn't take any reg operand ...
-  SHOWNEWINSTR(MI);
-}
-
-MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(MachineBasicBlock *MBB,
-    int NewOpcode, DebugLoc DL) {
-  MachineInstr *MI =
-      MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DL);
-  if (MBB->begin() != MBB->end())
-    MBB->insert(MBB->begin(), MI);
-  else
-    MBB->push_back(MI);
-  SHOWNEWINSTR(MI);
-  return MI;
-}
-
-MachineInstr *AMDGPUCFGStructurizer::insertInstrBefore(
-    MachineBasicBlock::iterator I, int NewOpcode) {
-  MachineInstr *OldMI = &(*I);
-  MachineBasicBlock *MBB = OldMI->getParent();
-  MachineInstr *NewMBB =
-      MBB->getParent()->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
-  MBB->insert(I, NewMBB);
-  //assume the instruction doesn't take any reg operand ...
-  SHOWNEWINSTR(NewMBB);
-  return NewMBB;
-}
-
-void AMDGPUCFGStructurizer::insertCondBranchBefore(
-    MachineBasicBlock::iterator I, int NewOpcode, DebugLoc DL) {
-  MachineInstr *OldMI = &(*I);
-  MachineBasicBlock *MBB = OldMI->getParent();
-  MachineFunction *MF = MBB->getParent();
-  MachineInstr *NewMI = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
-  MBB->insert(I, NewMI);
-  MachineInstrBuilder MIB(*MF, NewMI);
-  MIB.addReg(OldMI->getOperand(1).getReg(), false);
-  SHOWNEWINSTR(NewMI);
-  //erase later oldInstr->eraseFromParent();
-}
-
-void AMDGPUCFGStructurizer::insertCondBranchBefore(MachineBasicBlock *blk,
-    MachineBasicBlock::iterator I, int NewOpcode, int RegNum,
-    DebugLoc DL) {
-  MachineFunction *MF = blk->getParent();
-  MachineInstr *NewInstr = MF->CreateMachineInstr(TII->get(NewOpcode), DL);
-  //insert before
-  blk->insert(I, NewInstr);
-  MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
-  SHOWNEWINSTR(NewInstr);
-}
-
-void AMDGPUCFGStructurizer::insertCondBranchEnd(MachineBasicBlock *MBB,
-    int NewOpcode, int RegNum) {
-  MachineFunction *MF = MBB->getParent();
-  MachineInstr *NewInstr =
-    MF->CreateMachineInstr(TII->get(NewOpcode), DebugLoc());
-  MBB->push_back(NewInstr);
-  MachineInstrBuilder(*MF, NewInstr).addReg(RegNum, false);
-  SHOWNEWINSTR(NewInstr);
-}
-
-int AMDGPUCFGStructurizer::getBranchNzeroOpcode(int OldOpcode) {
-  switch(OldOpcode) {
-  case AMDGPU::JUMP_COND:
-  case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
-  case AMDGPU::BRANCH_COND_i32:
-  case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALNZ_f32;
-  default: llvm_unreachable("internal error");
-  }
-  return -1;
-}
-
-int AMDGPUCFGStructurizer::getBranchZeroOpcode(int OldOpcode) {
-  switch(OldOpcode) {
-  case AMDGPU::JUMP_COND:
-  case AMDGPU::JUMP: return AMDGPU::IF_PREDICATE_SET;
-  case AMDGPU::BRANCH_COND_i32:
-  case AMDGPU::BRANCH_COND_f32: return AMDGPU::IF_LOGICALZ_f32;
-  default: llvm_unreachable("internal error");
-  }
-  return -1;
-}
-
-int AMDGPUCFGStructurizer::getContinueNzeroOpcode(int OldOpcode) {
-  switch(OldOpcode) {
-  case AMDGPU::JUMP_COND:
-  case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALNZ_i32;
-  default: llvm_unreachable("internal error");
-  };
-  return -1;
-}
-
-int AMDGPUCFGStructurizer::getContinueZeroOpcode(int OldOpcode) {
-  switch(OldOpcode) {
-  case AMDGPU::JUMP_COND:
-  case AMDGPU::JUMP: return AMDGPU::CONTINUE_LOGICALZ_i32;
-  default: llvm_unreachable("internal error");
-  }
-  return -1;
-}
-
-MachineBasicBlock *AMDGPUCFGStructurizer::getTrueBranch(MachineInstr *MI) {
-  return MI->getOperand(0).getMBB();
-}
-
-void AMDGPUCFGStructurizer::setTrueBranch(MachineInstr *MI,
-    MachineBasicBlock *MBB) {
-  MI->getOperand(0).setMBB(MBB);
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::getFalseBranch(MachineBasicBlock *MBB,
-    MachineInstr *MI) {
-  assert(MBB->succ_size() == 2);
-  MachineBasicBlock *TrueBranch = getTrueBranch(MI);
-  MachineBasicBlock::succ_iterator It = MBB->succ_begin();
-  MachineBasicBlock::succ_iterator Next = It;
-  ++Next;
-  return (*It == TrueBranch) ? *Next : *It;
-}
-
-bool AMDGPUCFGStructurizer::isCondBranch(MachineInstr *MI) {
-  switch (MI->getOpcode()) {
-    case AMDGPU::JUMP_COND:
-    case AMDGPU::BRANCH_COND_i32:
-    case AMDGPU::BRANCH_COND_f32: return true;
-  default:
-    return false;
-  }
-  return false;
-}
-
-bool AMDGPUCFGStructurizer::isUncondBranch(MachineInstr *MI) {
-  switch (MI->getOpcode()) {
-  case AMDGPU::JUMP:
-  case AMDGPU::BRANCH:
-    return true;
-  default:
-    return false;
-  }
-  return false;
-}
-
-DebugLoc AMDGPUCFGStructurizer::getLastDebugLocInBB(MachineBasicBlock *MBB) {
-  //get DebugLoc from the first MachineBasicBlock instruction with debug info
-  DebugLoc DL;
-  for (MachineBasicBlock::iterator It = MBB->begin(); It != MBB->end();
-      ++It) {
-    MachineInstr *instr = &(*It);
-    if (instr->getDebugLoc())
-      DL = instr->getDebugLoc();
-  }
-  return DL;
-}
-
-MachineInstr *AMDGPUCFGStructurizer::getNormalBlockBranchInstr(
-    MachineBasicBlock *MBB) {
-  MachineBasicBlock::reverse_iterator It = MBB->rbegin();
-  MachineInstr *MI = &*It;
-  if (MI && (isCondBranch(MI) || isUncondBranch(MI)))
-    return MI;
-  return nullptr;
-}
-
-MachineInstr *AMDGPUCFGStructurizer::getLoopendBlockBranchInstr(
-    MachineBasicBlock *MBB) {
-  for (MachineBasicBlock::reverse_iterator It = MBB->rbegin(), E = MBB->rend();
-      It != E; ++It) {
-    // FIXME: Simplify
-    MachineInstr *MI = &*It;
-    if (MI) {
-      if (isCondBranch(MI) || isUncondBranch(MI))
-        return MI;
-      else if (!TII->isMov(MI->getOpcode()))
-        break;
-    }
-  }
-  return nullptr;
-}
-
-MachineInstr *AMDGPUCFGStructurizer::getReturnInstr(MachineBasicBlock *MBB) {
-  MachineBasicBlock::reverse_iterator It = MBB->rbegin();
-  if (It != MBB->rend()) {
-    MachineInstr *instr = &(*It);
-    if (instr->getOpcode() == AMDGPU::RETURN)
-      return instr;
-  }
-  return nullptr;
-}
-
-MachineInstr *AMDGPUCFGStructurizer::getContinueInstr(MachineBasicBlock *MBB) {
-  MachineBasicBlock::reverse_iterator It = MBB->rbegin();
-  if (It != MBB->rend()) {
-    MachineInstr *MI = &(*It);
-    if (MI->getOpcode() == AMDGPU::CONTINUE)
-      return MI;
-  }
-  return nullptr;
-}
-
-bool AMDGPUCFGStructurizer::isReturnBlock(MachineBasicBlock *MBB) {
-  MachineInstr *MI = getReturnInstr(MBB);
-  bool IsReturn = (MBB->succ_size() == 0);
-  if (MI)
-    assert(IsReturn);
-  else if (IsReturn)
-    DEBUG(
-      dbgs() << "BB" << MBB->getNumber()
-             <<" is return block without RETURN instr\n";);
-  return  IsReturn;
-}
-
-void AMDGPUCFGStructurizer::cloneSuccessorList(MachineBasicBlock *DstMBB,
-    MachineBasicBlock *SrcMBB) {
-  for (MachineBasicBlock::succ_iterator It = SrcMBB->succ_begin(),
-       iterEnd = SrcMBB->succ_end(); It != iterEnd; ++It)
-    DstMBB->addSuccessor(*It);  // *iter's predecessor is also taken care of
-}
-
-MachineBasicBlock *AMDGPUCFGStructurizer::clone(MachineBasicBlock *MBB) {
-  MachineFunction *Func = MBB->getParent();
-  MachineBasicBlock *NewMBB = Func->CreateMachineBasicBlock();
-  Func->push_back(NewMBB);  //insert to function
-  for (MachineBasicBlock::iterator It = MBB->begin(), E = MBB->end();
-      It != E; ++It) {
-    MachineInstr *MI = Func->CloneMachineInstr(It);
-    NewMBB->push_back(MI);
-  }
-  return NewMBB;
-}
-
-void AMDGPUCFGStructurizer::replaceInstrUseOfBlockWith(
-    MachineBasicBlock *SrcMBB, MachineBasicBlock *OldMBB,
-    MachineBasicBlock *NewBlk) {
-  MachineInstr *BranchMI = getLoopendBlockBranchInstr(SrcMBB);
-  if (BranchMI && isCondBranch(BranchMI) &&
-      getTrueBranch(BranchMI) == OldMBB)
-    setTrueBranch(BranchMI, NewBlk);
-}
-
-void AMDGPUCFGStructurizer::wrapup(MachineBasicBlock *MBB) {
-  assert((!MBB->getParent()->getJumpTableInfo()
-          || MBB->getParent()->getJumpTableInfo()->isEmpty())
-         && "found a jump table");
-
-   //collect continue right before endloop
-   SmallVector<MachineInstr *, DEFAULT_VEC_SLOTS> ContInstr;
-   MachineBasicBlock::iterator Pre = MBB->begin();
-   MachineBasicBlock::iterator E = MBB->end();
-   MachineBasicBlock::iterator It = Pre;
-   while (It != E) {
-     if (Pre->getOpcode() == AMDGPU::CONTINUE
-         && It->getOpcode() == AMDGPU::ENDLOOP)
-       ContInstr.push_back(Pre);
-     Pre = It;
-     ++It;
-   }
-
-   //delete continue right before endloop
-   for (unsigned i = 0; i < ContInstr.size(); ++i)
-      ContInstr[i]->eraseFromParent();
-
-   // TODO to fix up jump table so later phase won't be confused.  if
-   // (jumpTableInfo->isEmpty() == false) { need to clean the jump table, but
-   // there isn't such an interface yet.  alternatively, replace all the other
-   // blocks in the jump table with the entryBlk //}
-
-}
-
-
-bool AMDGPUCFGStructurizer::prepare() {
-  bool Changed = false;
-
-  //FIXME: if not reducible flow graph, make it so ???
-
-  DEBUG(dbgs() << "AMDGPUCFGStructurizer::prepare\n";);
-
-  orderBlocks(FuncRep);
-
-  SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> RetBlks;
-
-  // Add an ExitBlk to loop that don't have one
-  for (MachineLoopInfo::iterator It = MLI->begin(),
-       E = MLI->end(); It != E; ++It) {
-    MachineLoop *LoopRep = (*It);
-    MBBVector ExitingMBBs;
-    LoopRep->getExitingBlocks(ExitingMBBs);
-
-    if (ExitingMBBs.size() == 0) {
-      MachineBasicBlock* DummyExitBlk = normalizeInfiniteLoopExit(LoopRep);
-      if (DummyExitBlk)
-        RetBlks.push_back(DummyExitBlk);
-    }
-  }
-
-  // Remove unconditional branch instr.
-  // Add dummy exit block iff there are multiple returns.
-  for (SmallVectorImpl<MachineBasicBlock *>::const_iterator
-       It = OrderedBlks.begin(), E = OrderedBlks.end(); It != E; ++It) {
-    MachineBasicBlock *MBB = *It;
-    removeUnconditionalBranch(MBB);
-    removeRedundantConditionalBranch(MBB);
-    if (isReturnBlock(MBB)) {
-      RetBlks.push_back(MBB);
-    }
-    assert(MBB->succ_size() <= 2);
-  }
-
-  if (RetBlks.size() >= 2) {
-    addDummyExitBlock(RetBlks);
-    Changed = true;
-  }
-
-  return Changed;
-}
-
-bool AMDGPUCFGStructurizer::run() {
-
-  //Assume reducible CFG...
-  DEBUG(dbgs() << "AMDGPUCFGStructurizer::run\n");
-
-#ifdef STRESSTEST
-  //Use the worse block ordering to test the algorithm.
-  ReverseVector(orderedBlks);
-#endif
-
-  DEBUG(dbgs() << "Ordered blocks:\n"; printOrderedBlocks(););
-  int NumIter = 0;
-  bool Finish = false;
-  MachineBasicBlock *MBB;
-  bool MakeProgress = false;
-  int NumRemainedBlk = countActiveBlock(OrderedBlks.begin(),
-                                        OrderedBlks.end());
-
-  do {
-    ++NumIter;
-    DEBUG(
-      dbgs() << "numIter = " << NumIter
-             << ", numRemaintedBlk = " << NumRemainedBlk << "\n";
-    );
-
-    SmallVectorImpl<MachineBasicBlock *>::const_iterator It =
-        OrderedBlks.begin();
-    SmallVectorImpl<MachineBasicBlock *>::const_iterator E =
-        OrderedBlks.end();
-
-    SmallVectorImpl<MachineBasicBlock *>::const_iterator SccBeginIter =
-        It;
-    MachineBasicBlock *SccBeginMBB = nullptr;
-    int SccNumBlk = 0;  // The number of active blocks, init to a
-                        // maximum possible number.
-    int SccNumIter;     // Number of iteration in this SCC.
-
-    while (It != E) {
-      MBB = *It;
-
-      if (!SccBeginMBB) {
-        SccBeginIter = It;
-        SccBeginMBB = MBB;
-        SccNumIter = 0;
-        SccNumBlk = NumRemainedBlk; // Init to maximum possible number.
-        DEBUG(
-              dbgs() << "start processing SCC" << getSCCNum(SccBeginMBB);
-              dbgs() << "\n";
-        );
-      }
-
-      if (!isRetiredBlock(MBB))
-        patternMatch(MBB);
-
-      ++It;
-
-      bool ContNextScc = true;
-      if (It == E
-          || getSCCNum(SccBeginMBB) != getSCCNum(*It)) {
-        // Just finish one scc.
-        ++SccNumIter;
-        int sccRemainedNumBlk = countActiveBlock(SccBeginIter, It);
-        if (sccRemainedNumBlk != 1 && sccRemainedNumBlk >= SccNumBlk) {
-          DEBUG(
-            dbgs() << "Can't reduce SCC " << getSCCNum(MBB)
-                   << ", sccNumIter = " << SccNumIter;
-            dbgs() << "doesn't make any progress\n";
-          );
-          ContNextScc = true;
-        } else if (sccRemainedNumBlk != 1 && sccRemainedNumBlk < SccNumBlk) {
-          SccNumBlk = sccRemainedNumBlk;
-          It = SccBeginIter;
-          ContNextScc = false;
-          DEBUG(
-            dbgs() << "repeat processing SCC" << getSCCNum(MBB)
-                   << "sccNumIter = " << SccNumIter << '\n';
-          );
-        } else {
-          // Finish the current scc.
-          ContNextScc = true;
-        }
-      } else {
-        // Continue on next component in the current scc.
-        ContNextScc = false;
-      }
-
-      if (ContNextScc)
-        SccBeginMBB = nullptr;
-    } //while, "one iteration" over the function.
-
-    MachineBasicBlock *EntryMBB =
-        GraphTraits<MachineFunction *>::nodes_begin(FuncRep);
-    if (EntryMBB->succ_size() == 0) {
-      Finish = true;
-      DEBUG(
-        dbgs() << "Reduce to one block\n";
-      );
-    } else {
-      int NewnumRemainedBlk
-        = countActiveBlock(OrderedBlks.begin(), OrderedBlks.end());
-      // consider cloned blocks ??
-      if (NewnumRemainedBlk == 1 || NewnumRemainedBlk < NumRemainedBlk) {
-        MakeProgress = true;
-        NumRemainedBlk = NewnumRemainedBlk;
-      } else {
-        MakeProgress = false;
-        DEBUG(
-          dbgs() << "No progress\n";
-        );
-      }
-    }
-  } while (!Finish && MakeProgress);
-
-  // Misc wrap up to maintain the consistency of the Function representation.
-  wrapup(GraphTraits<MachineFunction *>::nodes_begin(FuncRep));
-
-  // Detach retired Block, release memory.
-  for (MBBInfoMap::iterator It = BlockInfoMap.begin(), E = BlockInfoMap.end();
-      It != E; ++It) {
-    if ((*It).second && (*It).second->IsRetired) {
-      assert(((*It).first)->getNumber() != -1);
-      DEBUG(
-        dbgs() << "Erase BB" << ((*It).first)->getNumber() << "\n";
-      );
-      (*It).first->eraseFromParent();  //Remove from the parent Function.
-    }
-    delete (*It).second;
-  }
-  BlockInfoMap.clear();
-  LLInfoMap.clear();
-
-  if (!Finish) {
-    DEBUG(FuncRep->viewCFG());
-    llvm_unreachable("IRREDUCIBLE_CFG");
-  }
-
-  return true;
-}
-
-
-
-void AMDGPUCFGStructurizer::orderBlocks(MachineFunction *MF) {
-  int SccNum = 0;
-  MachineBasicBlock *MBB;
-  for (scc_iterator<MachineFunction *> It = scc_begin(MF); !It.isAtEnd();
-       ++It, ++SccNum) {
-    const std::vector<MachineBasicBlock *> &SccNext = *It;
-    for (std::vector<MachineBasicBlock *>::const_iterator
-         blockIter = SccNext.begin(), blockEnd = SccNext.end();
-         blockIter != blockEnd; ++blockIter) {
-      MBB = *blockIter;
-      OrderedBlks.push_back(MBB);
-      recordSccnum(MBB, SccNum);
-    }
-  }
-
-  //walk through all the block in func to check for unreachable
-  typedef GraphTraits<MachineFunction *> GTM;
-  MachineFunction::iterator It = GTM::nodes_begin(MF), E = GTM::nodes_end(MF);
-  for (; It != E; ++It) {
-    MachineBasicBlock *MBB = &(*It);
-    SccNum = getSCCNum(MBB);
-    if (SccNum == INVALIDSCCNUM)
-      dbgs() << "unreachable block BB" << MBB->getNumber() << "\n";
-  }
-}
-
-int AMDGPUCFGStructurizer::patternMatch(MachineBasicBlock *MBB) {
-  int NumMatch = 0;
-  int CurMatch;
-
-  DEBUG(
-        dbgs() << "Begin patternMatch BB" << MBB->getNumber() << "\n";
-  );
-
-  while ((CurMatch = patternMatchGroup(MBB)) > 0)
-    NumMatch += CurMatch;
-
-  DEBUG(
-        dbgs() << "End patternMatch BB" << MBB->getNumber()
-      << ", numMatch = " << NumMatch << "\n";
-  );
-
-  return NumMatch;
-}
-
-int AMDGPUCFGStructurizer::patternMatchGroup(MachineBasicBlock *MBB) {
-  int NumMatch = 0;
-  NumMatch += loopendPatternMatch();
-  NumMatch += serialPatternMatch(MBB);
-  NumMatch += ifPatternMatch(MBB);
-  return NumMatch;
-}
-
-
-int AMDGPUCFGStructurizer::serialPatternMatch(MachineBasicBlock *MBB) {
-  if (MBB->succ_size() != 1)
-    return 0;
-
-  MachineBasicBlock *childBlk = *MBB->succ_begin();
-  if (childBlk->pred_size() != 1 || isActiveLoophead(childBlk))
-    return 0;
-
-  mergeSerialBlock(MBB, childBlk);
-  ++numSerialPatternMatch;
-  return 1;
-}
-
-int AMDGPUCFGStructurizer::ifPatternMatch(MachineBasicBlock *MBB) {
-  //two edges
-  if (MBB->succ_size() != 2)
-    return 0;
-  if (hasBackEdge(MBB))
-    return 0;
-  MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
-  if (!BranchMI)
-    return 0;
-
-  assert(isCondBranch(BranchMI));
-  int NumMatch = 0;
-
-  MachineBasicBlock *TrueMBB = getTrueBranch(BranchMI);
-  NumMatch += serialPatternMatch(TrueMBB);
-  NumMatch += ifPatternMatch(TrueMBB);
-  MachineBasicBlock *FalseMBB = getFalseBranch(MBB, BranchMI);
-  NumMatch += serialPatternMatch(FalseMBB);
-  NumMatch += ifPatternMatch(FalseMBB);
-  MachineBasicBlock *LandBlk;
-  int Cloned = 0;
-
-  assert (!TrueMBB->succ_empty() || !FalseMBB->succ_empty());
-  // TODO: Simplify
-  if (TrueMBB->succ_size() == 1 && FalseMBB->succ_size() == 1
-    && *TrueMBB->succ_begin() == *FalseMBB->succ_begin()) {
-    // Diamond pattern
-    LandBlk = *TrueMBB->succ_begin();
-  } else if (TrueMBB->succ_size() == 1 && *TrueMBB->succ_begin() == FalseMBB) {
-    // Triangle pattern, false is empty
-    LandBlk = FalseMBB;
-    FalseMBB = nullptr;
-  } else if (FalseMBB->succ_size() == 1
-             && *FalseMBB->succ_begin() == TrueMBB) {
-    // Triangle pattern, true is empty
-    // We reverse the predicate to make a triangle, empty false pattern;
-    std::swap(TrueMBB, FalseMBB);
-    reversePredicateSetter(MBB->end());
-    LandBlk = FalseMBB;
-    FalseMBB = nullptr;
-  } else if (FalseMBB->succ_size() == 1
-             && isSameloopDetachedContbreak(TrueMBB, FalseMBB)) {
-    LandBlk = *FalseMBB->succ_begin();
-  } else if (TrueMBB->succ_size() == 1
-    && isSameloopDetachedContbreak(FalseMBB, TrueMBB)) {
-    LandBlk = *TrueMBB->succ_begin();
-  } else {
-    return NumMatch + handleJumpintoIf(MBB, TrueMBB, FalseMBB);
-  }
-
-  // improveSimpleJumpinfoIf can handle the case where landBlk == NULL but the
-  // new BB created for landBlk==NULL may introduce new challenge to the
-  // reduction process.
-  if (LandBlk &&
-      ((TrueMBB && TrueMBB->pred_size() > 1)
-      || (FalseMBB && FalseMBB->pred_size() > 1))) {
-     Cloned += improveSimpleJumpintoIf(MBB, TrueMBB, FalseMBB, &LandBlk);
-  }
-
-  if (TrueMBB && TrueMBB->pred_size() > 1) {
-    TrueMBB = cloneBlockForPredecessor(TrueMBB, MBB);
-    ++Cloned;
-  }
-
-  if (FalseMBB && FalseMBB->pred_size() > 1) {
-    FalseMBB = cloneBlockForPredecessor(FalseMBB, MBB);
-    ++Cloned;
-  }
-
-  mergeIfthenelseBlock(BranchMI, MBB, TrueMBB, FalseMBB, LandBlk);
-
-  ++numIfPatternMatch;
-
-  numClonedBlock += Cloned;
-
-  return 1 + Cloned + NumMatch;
-}
-
-int AMDGPUCFGStructurizer::loopendPatternMatch() {
-  std::deque<MachineLoop *> NestedLoops;
-  for (auto &It: *MLI)
-    for (MachineLoop *ML : depth_first(It))
-      NestedLoops.push_front(ML);
-
-  if (NestedLoops.size() == 0)
-    return 0;
-
-  // Process nested loop outside->inside (we did push_front),
-  // so "continue" to a outside loop won't be mistaken as "break"
-  // of the current loop.
-  int Num = 0;
-  for (MachineLoop *ExaminedLoop : NestedLoops) {
-    if (ExaminedLoop->getNumBlocks() == 0 || Visited[ExaminedLoop])
-      continue;
-    DEBUG(dbgs() << "Processing:\n"; ExaminedLoop->dump(););
-    int NumBreak = mergeLoop(ExaminedLoop);
-    if (NumBreak == -1)
-      break;
-    Num += NumBreak;
-  }
-  return Num;
-}
-
-int AMDGPUCFGStructurizer::mergeLoop(MachineLoop *LoopRep) {
-  MachineBasicBlock *LoopHeader = LoopRep->getHeader();
-  MBBVector ExitingMBBs;
-  LoopRep->getExitingBlocks(ExitingMBBs);
-  assert(!ExitingMBBs.empty() && "Infinite Loop not supported");
-  DEBUG(dbgs() << "Loop has " << ExitingMBBs.size() << " exiting blocks\n";);
-  // We assume a single ExitBlk
-  MBBVector ExitBlks;
-  LoopRep->getExitBlocks(ExitBlks);
-  SmallPtrSet<MachineBasicBlock *, 2> ExitBlkSet;
-  for (unsigned i = 0, e = ExitBlks.size(); i < e; ++i)
-    ExitBlkSet.insert(ExitBlks[i]);
-  assert(ExitBlkSet.size() == 1);
-  MachineBasicBlock *ExitBlk = *ExitBlks.begin();
-  assert(ExitBlk && "Loop has several exit block");
-  MBBVector LatchBlks;
-  typedef GraphTraits<Inverse<MachineBasicBlock*> > InvMBBTraits;
-  InvMBBTraits::ChildIteratorType PI = InvMBBTraits::child_begin(LoopHeader),
-      PE = InvMBBTraits::child_end(LoopHeader);
-  for (; PI != PE; PI++) {
-    if (LoopRep->contains(*PI))
-      LatchBlks.push_back(*PI);
-  }
-
-  for (unsigned i = 0, e = ExitingMBBs.size(); i < e; ++i)
-    mergeLoopbreakBlock(ExitingMBBs[i], ExitBlk);
-  for (unsigned i = 0, e = LatchBlks.size(); i < e; ++i)
-    settleLoopcontBlock(LatchBlks[i], LoopHeader);
-  int Match = 0;
-  do {
-    Match = 0;
-    Match += serialPatternMatch(LoopHeader);
-    Match += ifPatternMatch(LoopHeader);
-  } while (Match > 0);
-  mergeLooplandBlock(LoopHeader, ExitBlk);
-  MachineLoop *ParentLoop = LoopRep->getParentLoop();
-  if (ParentLoop)
-    MLI->changeLoopFor(LoopHeader, ParentLoop);
-  else
-    MLI->removeBlock(LoopHeader);
-  Visited[LoopRep] = true;
-  return 1;
-}
-
-int AMDGPUCFGStructurizer::loopcontPatternMatch(MachineLoop *LoopRep,
-    MachineBasicBlock *LoopHeader) {
-  int NumCont = 0;
-  SmallVector<MachineBasicBlock *, DEFAULT_VEC_SLOTS> ContMBB;
-  typedef GraphTraits<Inverse<MachineBasicBlock *> > GTIM;
-  GTIM::ChildIteratorType It = GTIM::child_begin(LoopHeader),
-      E = GTIM::child_end(LoopHeader);
-  for (; It != E; ++It) {
-    MachineBasicBlock *MBB = *It;
-    if (LoopRep->contains(MBB)) {
-      handleLoopcontBlock(MBB, MLI->getLoopFor(MBB),
-                          LoopHeader, LoopRep);
-      ContMBB.push_back(MBB);
-      ++NumCont;
-    }
-  }
-
-  for (SmallVectorImpl<MachineBasicBlock *>::iterator It = ContMBB.begin(),
-      E = ContMBB.end(); It != E; ++It) {
-    (*It)->removeSuccessor(LoopHeader);
-  }
-
-  numLoopcontPatternMatch += NumCont;
-
-  return NumCont;
-}
-
-
-bool AMDGPUCFGStructurizer::isSameloopDetachedContbreak(
-    MachineBasicBlock *Src1MBB, MachineBasicBlock *Src2MBB) {
-  if (Src1MBB->succ_size() == 0) {
-    MachineLoop *LoopRep = MLI->getLoopFor(Src1MBB);
-    if (LoopRep&& LoopRep == MLI->getLoopFor(Src2MBB)) {
-      MachineBasicBlock *&TheEntry = LLInfoMap[LoopRep];
-      if (TheEntry) {
-        DEBUG(
-          dbgs() << "isLoopContBreakBlock yes src1 = BB"
-                 << Src1MBB->getNumber()
-                 << " src2 = BB" << Src2MBB->getNumber() << "\n";
-        );
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
-int AMDGPUCFGStructurizer::handleJumpintoIf(MachineBasicBlock *HeadMBB,
-    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
-  int Num = handleJumpintoIfImp(HeadMBB, TrueMBB, FalseMBB);
-  if (Num == 0) {
-    DEBUG(
-      dbgs() << "handleJumpintoIf swap trueBlk and FalseBlk" << "\n";
-    );
-    Num = handleJumpintoIfImp(HeadMBB, FalseMBB, TrueMBB);
-  }
-  return Num;
-}
-
-int AMDGPUCFGStructurizer::handleJumpintoIfImp(MachineBasicBlock *HeadMBB,
-    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB) {
-  int Num = 0;
-  MachineBasicBlock *DownBlk;
-
-  //trueBlk could be the common post dominator
-  DownBlk = TrueMBB;
-
-  DEBUG(
-    dbgs() << "handleJumpintoIfImp head = BB" << HeadMBB->getNumber()
-           << " true = BB" << TrueMBB->getNumber()
-           << ", numSucc=" << TrueMBB->succ_size()
-           << " false = BB" << FalseMBB->getNumber() << "\n";
-  );
-
-  while (DownBlk) {
-    DEBUG(
-      dbgs() << "check down = BB" << DownBlk->getNumber();
-    );
-
-    if (singlePathTo(FalseMBB, DownBlk) == SinglePath_InPath) {
-      DEBUG(
-        dbgs() << " working\n";
-      );
-
-      Num += cloneOnSideEntryTo(HeadMBB, TrueMBB, DownBlk);
-      Num += cloneOnSideEntryTo(HeadMBB, FalseMBB, DownBlk);
-
-      numClonedBlock += Num;
-      Num += serialPatternMatch(*HeadMBB->succ_begin());
-      Num += serialPatternMatch(*std::next(HeadMBB->succ_begin()));
-      Num += ifPatternMatch(HeadMBB);
-      assert(Num > 0);
-
-      break;
-    }
-    DEBUG(
-      dbgs() << " not working\n";
-    );
-    DownBlk = (DownBlk->succ_size() == 1) ? (*DownBlk->succ_begin()) : nullptr;
-  } // walk down the postDomTree
-
-  return Num;
-}
-
-void AMDGPUCFGStructurizer::showImproveSimpleJumpintoIf(
-    MachineBasicBlock *HeadMBB, MachineBasicBlock *TrueMBB,
-    MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB, bool Detail) {
-  dbgs() << "head = BB" << HeadMBB->getNumber()
-         << " size = " << HeadMBB->size();
-  if (Detail) {
-    dbgs() << "\n";
-    HeadMBB->print(dbgs());
-    dbgs() << "\n";
-  }
-
-  if (TrueMBB) {
-    dbgs() << ", true = BB" << TrueMBB->getNumber() << " size = "
-           << TrueMBB->size() << " numPred = " << TrueMBB->pred_size();
-    if (Detail) {
-      dbgs() << "\n";
-      TrueMBB->print(dbgs());
-      dbgs() << "\n";
-    }
-  }
-  if (FalseMBB) {
-    dbgs() << ", false = BB" << FalseMBB->getNumber() << " size = "
-           << FalseMBB->size() << " numPred = " << FalseMBB->pred_size();
-    if (Detail) {
-      dbgs() << "\n";
-      FalseMBB->print(dbgs());
-      dbgs() << "\n";
-    }
-  }
-  if (LandMBB) {
-    dbgs() << ", land = BB" << LandMBB->getNumber() << " size = "
-           << LandMBB->size() << " numPred = " << LandMBB->pred_size();
-    if (Detail) {
-      dbgs() << "\n";
-      LandMBB->print(dbgs());
-      dbgs() << "\n";
-    }
-  }
-
-    dbgs() << "\n";
-}
-
-int AMDGPUCFGStructurizer::improveSimpleJumpintoIf(MachineBasicBlock *HeadMBB,
-    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
-    MachineBasicBlock **LandMBBPtr) {
-  bool MigrateTrue = false;
-  bool MigrateFalse = false;
-
-  MachineBasicBlock *LandBlk = *LandMBBPtr;
-
-  assert((!TrueMBB || TrueMBB->succ_size() <= 1)
-         && (!FalseMBB || FalseMBB->succ_size() <= 1));
-
-  if (TrueMBB == FalseMBB)
-    return 0;
-
-  MigrateTrue = needMigrateBlock(TrueMBB);
-  MigrateFalse = needMigrateBlock(FalseMBB);
-
-  if (!MigrateTrue && !MigrateFalse)
-    return 0;
-
-  // If we need to migrate either trueBlk and falseBlk, migrate the rest that
-  // have more than one predecessors.  without doing this, its predecessor
-  // rather than headBlk will have undefined value in initReg.
-  if (!MigrateTrue && TrueMBB && TrueMBB->pred_size() > 1)
-    MigrateTrue = true;
-  if (!MigrateFalse && FalseMBB && FalseMBB->pred_size() > 1)
-    MigrateFalse = true;
-
-  DEBUG(
-    dbgs() << "before improveSimpleJumpintoIf: ";
-    showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
-  );
-
-  // org: headBlk => if () {trueBlk} else {falseBlk} => landBlk
-  //
-  // new: headBlk => if () {initReg = 1; org trueBlk branch} else
-  //      {initReg = 0; org falseBlk branch }
-  //      => landBlk => if (initReg) {org trueBlk} else {org falseBlk}
-  //      => org landBlk
-  //      if landBlk->pred_size() > 2, put the about if-else inside
-  //      if (initReg !=2) {...}
-  //
-  // add initReg = initVal to headBlk
-
-  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
-  if (!MigrateTrue || !MigrateFalse) {
-    // XXX: We have an opportunity here to optimize the "branch into if" case
-    // here.  Branch into if looks like this:
-    //                        entry
-    //                       /     |
-    //           diamond_head       branch_from
-    //             /      \           |
-    // diamond_false        diamond_true
-    //             \      /
-    //               done
-    //
-    // The diamond_head block begins the "if" and the diamond_true block
-    // is the block being "branched into".
-    //
-    // If MigrateTrue is true, then TrueBB is the block being "branched into"
-    // and if MigrateFalse is true, then FalseBB is the block being
-    // "branched into"
-    // 
-    // Here is the pseudo code for how I think the optimization should work:
-    // 1. Insert MOV GPR0, 0 before the branch instruction in diamond_head.
-    // 2. Insert MOV GPR0, 1 before the branch instruction in branch_from.
-    // 3. Move the branch instruction from diamond_head into its own basic
-    //    block (new_block).
-    // 4. Add an unconditional branch from diamond_head to new_block
-    // 5. Replace the branch instruction in branch_from with an unconditional
-    //    branch to new_block.  If branch_from has multiple predecessors, then
-    //    we need to replace the True/False block in the branch
-    //    instruction instead of replacing it.
-    // 6. Change the condition of the branch instruction in new_block from
-    //    COND to (COND || GPR0)
-    //
-    // In order insert these MOV instruction, we will need to use the
-    // RegisterScavenger.  Usually liveness stops being tracked during
-    // the late machine optimization passes, however if we implement
-    // bool TargetRegisterInfo::requiresRegisterScavenging(
-    //                                                const MachineFunction &MF)
-    // and have it return true, liveness will be tracked correctly 
-    // by generic optimization passes.  We will also need to make sure that
-    // all of our target-specific passes that run after regalloc and before
-    // the CFGStructurizer track liveness and we will need to modify this pass
-    // to correctly track liveness.
-    //
-    // After the above changes, the new CFG should look like this:
-    //                        entry
-    //                       /     |
-    //           diamond_head       branch_from
-    //                       \     /
-    //                      new_block
-    //                      /      |
-    //         diamond_false        diamond_true
-    //                      \      /
-    //                        done
-    //
-    // Without this optimization, we are forced to duplicate the diamond_true
-    // block and we will end up with a CFG like this:
-    //
-    //                        entry
-    //                       /     |
-    //           diamond_head       branch_from
-    //             /      \                   |
-    // diamond_false        diamond_true      diamond_true (duplicate)
-    //             \      /                   |
-    //               done --------------------|
-    //
-    // Duplicating diamond_true can be very costly especially if it has a
-    // lot of instructions.
-    return 0;
-  }
-
-  int NumNewBlk = 0;
-
-  bool LandBlkHasOtherPred = (LandBlk->pred_size() > 2);
-
-  //insert AMDGPU::ENDIF to avoid special case "input landBlk == NULL"
-  MachineBasicBlock::iterator I = insertInstrBefore(LandBlk, AMDGPU::ENDIF);
-
-  if (LandBlkHasOtherPred) {
-    llvm_unreachable("Extra register needed to handle CFG");
-    unsigned CmpResReg =
-      HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
-    llvm_unreachable("Extra compare instruction needed to handle CFG");
-    insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET,
-        CmpResReg, DebugLoc());
-  }
-
-  // XXX: We are running this after RA, so creating virtual registers will
-  // cause an assertion failure in the PostRA scheduling pass.
-  unsigned InitReg =
-    HeadMBB->getParent()->getRegInfo().createVirtualRegister(I32RC);
-  insertCondBranchBefore(LandBlk, I, AMDGPU::IF_PREDICATE_SET, InitReg,
-      DebugLoc());
-
-  if (MigrateTrue) {
-    migrateInstruction(TrueMBB, LandBlk, I);
-    // need to uncondionally insert the assignment to ensure a path from its
-    // predecessor rather than headBlk has valid value in initReg if
-    // (initVal != 1).
-    llvm_unreachable("Extra register needed to handle CFG");
-  }
-  insertInstrBefore(I, AMDGPU::ELSE);
-
-  if (MigrateFalse) {
-    migrateInstruction(FalseMBB, LandBlk, I);
-    // need to uncondionally insert the assignment to ensure a path from its
-    // predecessor rather than headBlk has valid value in initReg if
-    // (initVal != 0)
-    llvm_unreachable("Extra register needed to handle CFG");
-  }
-
-  if (LandBlkHasOtherPred) {
-    // add endif
-    insertInstrBefore(I, AMDGPU::ENDIF);
-
-    // put initReg = 2 to other predecessors of landBlk
-    for (MachineBasicBlock::pred_iterator PI = LandBlk->pred_begin(),
-         PE = LandBlk->pred_end(); PI != PE; ++PI) {
-      MachineBasicBlock *MBB = *PI;
-      if (MBB != TrueMBB && MBB != FalseMBB)
-        llvm_unreachable("Extra register needed to handle CFG");
-    }
-  }
-  DEBUG(
-    dbgs() << "result from improveSimpleJumpintoIf: ";
-    showImproveSimpleJumpintoIf(HeadMBB, TrueMBB, FalseMBB, LandBlk, 0);
-  );
-
-  // update landBlk
-  *LandMBBPtr = LandBlk;
-
-  return NumNewBlk;
-}
-
-void AMDGPUCFGStructurizer::handleLoopcontBlock(MachineBasicBlock *ContingMBB,
-    MachineLoop *ContingLoop, MachineBasicBlock *ContMBB,
-    MachineLoop *ContLoop) {
-  DEBUG(dbgs() << "loopcontPattern cont = BB" << ContingMBB->getNumber()
-               << " header = BB" << ContMBB->getNumber() << "\n";
-        dbgs() << "Trying to continue loop-depth = "
-               << getLoopDepth(ContLoop)
-               << " from loop-depth = " << getLoopDepth(ContingLoop) << "\n";);
-  settleLoopcontBlock(ContingMBB, ContMBB);
-}
-
-void AMDGPUCFGStructurizer::mergeSerialBlock(MachineBasicBlock *DstMBB,
-    MachineBasicBlock *SrcMBB) {
-  DEBUG(
-    dbgs() << "serialPattern BB" << DstMBB->getNumber()
-           << " <= BB" << SrcMBB->getNumber() << "\n";
-  );
-  DstMBB->splice(DstMBB->end(), SrcMBB, SrcMBB->begin(), SrcMBB->end());
-
-  DstMBB->removeSuccessor(SrcMBB);
-  cloneSuccessorList(DstMBB, SrcMBB);
-
-  removeSuccessor(SrcMBB);
-  MLI->removeBlock(SrcMBB);
-  retireBlock(SrcMBB);
-}
-
-void AMDGPUCFGStructurizer::mergeIfthenelseBlock(MachineInstr *BranchMI,
-    MachineBasicBlock *MBB, MachineBasicBlock *TrueMBB,
-    MachineBasicBlock *FalseMBB, MachineBasicBlock *LandMBB) {
-  assert (TrueMBB);
-  DEBUG(
-    dbgs() << "ifPattern BB" << MBB->getNumber();
-    dbgs() << "{  ";
-    if (TrueMBB) {
-      dbgs() << "BB" << TrueMBB->getNumber();
-    }
-    dbgs() << "  } else ";
-    dbgs() << "{  ";
-    if (FalseMBB) {
-      dbgs() << "BB" << FalseMBB->getNumber();
-    }
-    dbgs() << "  }\n ";
-    dbgs() << "landBlock: ";
-    if (!LandMBB) {
-      dbgs() << "NULL";
-    } else {
-      dbgs() << "BB" << LandMBB->getNumber();
-    }
-    dbgs() << "\n";
-  );
-
-  int OldOpcode = BranchMI->getOpcode();
-  DebugLoc BranchDL = BranchMI->getDebugLoc();
-
-//    transform to
-//    if cond
-//       trueBlk
-//    else
-//       falseBlk
-//    endif
-//    landBlk
-
-  MachineBasicBlock::iterator I = BranchMI;
-  insertCondBranchBefore(I, getBranchNzeroOpcode(OldOpcode),
-      BranchDL);
-
-  if (TrueMBB) {
-    MBB->splice(I, TrueMBB, TrueMBB->begin(), TrueMBB->end());
-    MBB->removeSuccessor(TrueMBB);
-    if (LandMBB && TrueMBB->succ_size()!=0)
-      TrueMBB->removeSuccessor(LandMBB);
-    retireBlock(TrueMBB);
-    MLI->removeBlock(TrueMBB);
-  }
-
-  if (FalseMBB) {
-    insertInstrBefore(I, AMDGPU::ELSE);
-    MBB->splice(I, FalseMBB, FalseMBB->begin(),
-                   FalseMBB->end());
-    MBB->removeSuccessor(FalseMBB);
-    if (LandMBB && FalseMBB->succ_size() != 0)
-      FalseMBB->removeSuccessor(LandMBB);
-    retireBlock(FalseMBB);
-    MLI->removeBlock(FalseMBB);
-  }
-  insertInstrBefore(I, AMDGPU::ENDIF);
-
-  BranchMI->eraseFromParent();
-
-  if (LandMBB && TrueMBB && FalseMBB)
-    MBB->addSuccessor(LandMBB);
-
-}
-
-void AMDGPUCFGStructurizer::mergeLooplandBlock(MachineBasicBlock *DstBlk,
-    MachineBasicBlock *LandMBB) {
-  DEBUG(dbgs() << "loopPattern header = BB" << DstBlk->getNumber()
-               << " land = BB" << LandMBB->getNumber() << "\n";);
-
-  insertInstrBefore(DstBlk, AMDGPU::WHILELOOP, DebugLoc());
-  insertInstrEnd(DstBlk, AMDGPU::ENDLOOP, DebugLoc());
-  DstBlk->addSuccessor(LandMBB);
-  DstBlk->removeSuccessor(DstBlk);
-}
-
-
-void AMDGPUCFGStructurizer::mergeLoopbreakBlock(MachineBasicBlock *ExitingMBB,
-    MachineBasicBlock *LandMBB) {
-  DEBUG(dbgs() << "loopbreakPattern exiting = BB" << ExitingMBB->getNumber()
-               << " land = BB" << LandMBB->getNumber() << "\n";);
-  MachineInstr *BranchMI = getLoopendBlockBranchInstr(ExitingMBB);
-  assert(BranchMI && isCondBranch(BranchMI));
-  DebugLoc DL = BranchMI->getDebugLoc();
-  MachineBasicBlock *TrueBranch = getTrueBranch(BranchMI);
-  MachineBasicBlock::iterator I = BranchMI;
-  if (TrueBranch != LandMBB)
-    reversePredicateSetter(I);
-  insertCondBranchBefore(ExitingMBB, I, AMDGPU::IF_PREDICATE_SET, AMDGPU::PREDICATE_BIT, DL);
-  insertInstrBefore(I, AMDGPU::BREAK);
-  insertInstrBefore(I, AMDGPU::ENDIF);
-  //now branchInst can be erase safely
-  BranchMI->eraseFromParent();
-  //now take care of successors, retire blocks
-  ExitingMBB->removeSuccessor(LandMBB);
-}
-
-void AMDGPUCFGStructurizer::settleLoopcontBlock(MachineBasicBlock *ContingMBB,
-    MachineBasicBlock *ContMBB) {
-  DEBUG(dbgs() << "settleLoopcontBlock conting = BB"
-               << ContingMBB->getNumber()
-               << ", cont = BB" << ContMBB->getNumber() << "\n";);
-
-  MachineInstr *MI = getLoopendBlockBranchInstr(ContingMBB);
-  if (MI) {
-    assert(isCondBranch(MI));
-    MachineBasicBlock::iterator I = MI;
-    MachineBasicBlock *TrueBranch = getTrueBranch(MI);
-    int OldOpcode = MI->getOpcode();
-    DebugLoc DL = MI->getDebugLoc();
-
-    bool UseContinueLogical = ((&*ContingMBB->rbegin()) == MI);
-
-    if (!UseContinueLogical) {
-      int BranchOpcode =
-          TrueBranch == ContMBB ? getBranchNzeroOpcode(OldOpcode) :
-          getBranchZeroOpcode(OldOpcode);
-      insertCondBranchBefore(I, BranchOpcode, DL);
-      // insertEnd to ensure phi-moves, if exist, go before the continue-instr.
-      insertInstrEnd(ContingMBB, AMDGPU::CONTINUE, DL);
-      insertInstrEnd(ContingMBB, AMDGPU::ENDIF, DL);
-    } else {
-      int BranchOpcode =
-          TrueBranch == ContMBB ? getContinueNzeroOpcode(OldOpcode) :
-          getContinueZeroOpcode(OldOpcode);
-      insertCondBranchBefore(I, BranchOpcode, DL);
-    }
-
-    MI->eraseFromParent();
-  } else {
-    // if we've arrived here then we've already erased the branch instruction
-    // travel back up the basic block to see the last reference of our debug
-    // location we've just inserted that reference here so it should be
-    // representative insertEnd to ensure phi-moves, if exist, go before the
-    // continue-instr.
-    insertInstrEnd(ContingMBB, AMDGPU::CONTINUE,
-        getLastDebugLocInBB(ContingMBB));
-  }
-}
-
-int AMDGPUCFGStructurizer::cloneOnSideEntryTo(MachineBasicBlock *PreMBB,
-    MachineBasicBlock *SrcMBB, MachineBasicBlock *DstMBB) {
-  int Cloned = 0;
-  assert(PreMBB->isSuccessor(SrcMBB));
-  while (SrcMBB && SrcMBB != DstMBB) {
-    assert(SrcMBB->succ_size() == 1);
-    if (SrcMBB->pred_size() > 1) {
-      SrcMBB = cloneBlockForPredecessor(SrcMBB, PreMBB);
-      ++Cloned;
-    }
-
-    PreMBB = SrcMBB;
-    SrcMBB = *SrcMBB->succ_begin();
-  }
-
-  return Cloned;
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::cloneBlockForPredecessor(MachineBasicBlock *MBB,
-    MachineBasicBlock *PredMBB) {
-  assert(PredMBB->isSuccessor(MBB) &&
-         "succBlk is not a prececessor of curBlk");
-
-  MachineBasicBlock *CloneMBB = clone(MBB);  //clone instructions
-  replaceInstrUseOfBlockWith(PredMBB, MBB, CloneMBB);
-  //srcBlk, oldBlk, newBlk
-
-  PredMBB->removeSuccessor(MBB);
-  PredMBB->addSuccessor(CloneMBB);
-
-  // add all successor to cloneBlk
-  cloneSuccessorList(CloneMBB, MBB);
-
-  numClonedInstr += MBB->size();
-
-  DEBUG(
-    dbgs() << "Cloned block: " << "BB"
-           << MBB->getNumber() << "size " << MBB->size() << "\n";
-  );
-
-  SHOWNEWBLK(CloneMBB, "result of Cloned block: ");
-
-  return CloneMBB;
-}
-
-void AMDGPUCFGStructurizer::migrateInstruction(MachineBasicBlock *SrcMBB,
-    MachineBasicBlock *DstMBB, MachineBasicBlock::iterator I) {
-  MachineBasicBlock::iterator SpliceEnd;
-  //look for the input branchinstr, not the AMDGPU branchinstr
-  MachineInstr *BranchMI = getNormalBlockBranchInstr(SrcMBB);
-  if (!BranchMI) {
-    DEBUG(
-      dbgs() << "migrateInstruction don't see branch instr\n" ;
-    );
-    SpliceEnd = SrcMBB->end();
-  } else {
-    DEBUG(
-      dbgs() << "migrateInstruction see branch instr\n" ;
-      BranchMI->dump();
-    );
-    SpliceEnd = BranchMI;
-  }
-  DEBUG(
-    dbgs() << "migrateInstruction before splice dstSize = " << DstMBB->size()
-      << "srcSize = " << SrcMBB->size() << "\n";
-  );
-
-  //splice insert before insertPos
-  DstMBB->splice(I, SrcMBB, SrcMBB->begin(), SpliceEnd);
-
-  DEBUG(
-    dbgs() << "migrateInstruction after splice dstSize = " << DstMBB->size()
-      << "srcSize = " << SrcMBB->size() << "\n";
-  );
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::normalizeInfiniteLoopExit(MachineLoop* LoopRep) {
-  MachineBasicBlock *LoopHeader = LoopRep->getHeader();
-  MachineBasicBlock *LoopLatch = LoopRep->getLoopLatch();
-  const TargetRegisterClass * I32RC = TRI->getCFGStructurizerRegClass(MVT::i32);
-
-  if (!LoopHeader || !LoopLatch)
-    return nullptr;
-  MachineInstr *BranchMI = getLoopendBlockBranchInstr(LoopLatch);
-  // Is LoopRep an infinite loop ?
-  if (!BranchMI || !isUncondBranch(BranchMI))
-    return nullptr;
-
-  MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
-  FuncRep->push_back(DummyExitBlk);  //insert to function
-  SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
-  DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
-  MachineBasicBlock::iterator I = BranchMI;
-  unsigned ImmReg = FuncRep->getRegInfo().createVirtualRegister(I32RC);
-  llvm_unreachable("Extra register needed to handle CFG");
-  MachineInstr *NewMI = insertInstrBefore(I, AMDGPU::BRANCH_COND_i32);
-  MachineInstrBuilder MIB(*FuncRep, NewMI);
-  MIB.addMBB(LoopHeader);
-  MIB.addReg(ImmReg, false);
-  SHOWNEWINSTR(NewMI);
-  BranchMI->eraseFromParent();
-  LoopLatch->addSuccessor(DummyExitBlk);
-
-  return DummyExitBlk;
-}
-
-void AMDGPUCFGStructurizer::removeUnconditionalBranch(MachineBasicBlock *MBB) {
-  MachineInstr *BranchMI;
-
-  // I saw two unconditional branch in one basic block in example
-  // test_fc_do_while_or.c need to fix the upstream on this to remove the loop.
-  while ((BranchMI = getLoopendBlockBranchInstr(MBB))
-          && isUncondBranch(BranchMI)) {
-    DEBUG(dbgs() << "Removing uncond branch instr"; BranchMI->dump(););
-    BranchMI->eraseFromParent();
-  }
-}
-
-void AMDGPUCFGStructurizer::removeRedundantConditionalBranch(
-    MachineBasicBlock *MBB) {
-  if (MBB->succ_size() != 2)
-    return;
-  MachineBasicBlock *MBB1 = *MBB->succ_begin();
-  MachineBasicBlock *MBB2 = *std::next(MBB->succ_begin());
-  if (MBB1 != MBB2)
-    return;
-
-  MachineInstr *BranchMI = getNormalBlockBranchInstr(MBB);
-  assert(BranchMI && isCondBranch(BranchMI));
-  DEBUG(dbgs() << "Removing unneeded cond branch instr"; BranchMI->dump(););
-  BranchMI->eraseFromParent();
-  SHOWNEWBLK(MBB1, "Removing redundant successor");
-  MBB->removeSuccessor(MBB1);
-}
-
-void AMDGPUCFGStructurizer::addDummyExitBlock(
-    SmallVectorImpl<MachineBasicBlock*> &RetMBB) {
-  MachineBasicBlock *DummyExitBlk = FuncRep->CreateMachineBasicBlock();
-  FuncRep->push_back(DummyExitBlk);  //insert to function
-  insertInstrEnd(DummyExitBlk, AMDGPU::RETURN);
-
-  for (SmallVectorImpl<MachineBasicBlock *>::iterator It = RetMBB.begin(),
-       E = RetMBB.end(); It != E; ++It) {
-    MachineBasicBlock *MBB = *It;
-    MachineInstr *MI = getReturnInstr(MBB);
-    if (MI)
-      MI->eraseFromParent();
-    MBB->addSuccessor(DummyExitBlk);
-    DEBUG(
-      dbgs() << "Add dummyExitBlock to BB" << MBB->getNumber()
-             << " successors\n";
-    );
-  }
-  SHOWNEWBLK(DummyExitBlk, "DummyExitBlock: ");
-}
-
-void AMDGPUCFGStructurizer::removeSuccessor(MachineBasicBlock *MBB) {
-  while (MBB->succ_size())
-    MBB->removeSuccessor(*MBB->succ_begin());
-}
-
-void AMDGPUCFGStructurizer::recordSccnum(MachineBasicBlock *MBB,
-    int SccNum) {
-  BlockInformation *&srcBlkInfo = BlockInfoMap[MBB];
-  if (!srcBlkInfo)
-    srcBlkInfo = new BlockInformation();
-  srcBlkInfo->SccNum = SccNum;
-}
-
-void AMDGPUCFGStructurizer::retireBlock(MachineBasicBlock *MBB) {
-  DEBUG(
-        dbgs() << "Retiring BB" << MBB->getNumber() << "\n";
-  );
-
-  BlockInformation *&SrcBlkInfo = BlockInfoMap[MBB];
-
-  if (!SrcBlkInfo)
-    SrcBlkInfo = new BlockInformation();
-
-  SrcBlkInfo->IsRetired = true;
-  assert(MBB->succ_size() == 0 && MBB->pred_size() == 0
-         && "can't retire block yet");
-}
-
-void AMDGPUCFGStructurizer::setLoopLandBlock(MachineLoop *loopRep,
-    MachineBasicBlock *MBB) {
-  MachineBasicBlock *&TheEntry = LLInfoMap[loopRep];
-  if (!MBB) {
-    MBB = FuncRep->CreateMachineBasicBlock();
-    FuncRep->push_back(MBB);  //insert to function
-    SHOWNEWBLK(MBB, "DummyLandingBlock for loop without break: ");
-  }
-  TheEntry = MBB;
-  DEBUG(
-    dbgs() << "setLoopLandBlock loop-header = BB"
-           << loopRep->getHeader()->getNumber()
-           << "  landing-block = BB" << MBB->getNumber() << "\n";
-  );
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::findNearestCommonPostDom(MachineBasicBlock *MBB1,
-    MachineBasicBlock *MBB2) {
-
-  if (PDT->dominates(MBB1, MBB2))
-    return MBB1;
-  if (PDT->dominates(MBB2, MBB1))
-    return MBB2;
-
-  MachineDomTreeNode *Node1 = PDT->getNode(MBB1);
-  MachineDomTreeNode *Node2 = PDT->getNode(MBB2);
-
-  // Handle newly cloned node.
-  if (!Node1 && MBB1->succ_size() == 1)
-    return findNearestCommonPostDom(*MBB1->succ_begin(), MBB2);
-  if (!Node2 && MBB2->succ_size() == 1)
-    return findNearestCommonPostDom(MBB1, *MBB2->succ_begin());
-
-  if (!Node1 || !Node2)
-    return nullptr;
-
-  Node1 = Node1->getIDom();
-  while (Node1) {
-    if (PDT->dominates(Node1, Node2))
-      return Node1->getBlock();
-    Node1 = Node1->getIDom();
-  }
-
-  return nullptr;
-}
-
-MachineBasicBlock *
-AMDGPUCFGStructurizer::findNearestCommonPostDom(
-    std::set<MachineBasicBlock *> &MBBs) {
-  MachineBasicBlock *CommonDom;
-  std::set<MachineBasicBlock *>::const_iterator It = MBBs.begin();
-  std::set<MachineBasicBlock *>::const_iterator E = MBBs.end();
-  for (CommonDom = *It; It != E && CommonDom; ++It) {
-    MachineBasicBlock *MBB = *It;
-    if (MBB != CommonDom)
-      CommonDom = findNearestCommonPostDom(MBB, CommonDom);
-  }
-
-  DEBUG(
-    dbgs() << "Common post dominator for exit blocks is ";
-    if (CommonDom)
-          dbgs() << "BB" << CommonDom->getNumber() << "\n";
-    else
-      dbgs() << "NULL\n";
-  );
-
-  return CommonDom;
-}
-
-char AMDGPUCFGStructurizer::ID = 0;
-
-} // end anonymous namespace
-
-
-INITIALIZE_PASS_BEGIN(AMDGPUCFGStructurizer, "amdgpustructurizer",
-                      "AMDGPU CFG Structurizer", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
-INITIALIZE_PASS_END(AMDGPUCFGStructurizer, "amdgpustructurizer",
-                      "AMDGPU CFG Structurizer", false, false)
-
-FunctionPass *llvm::createAMDGPUCFGStructurizerPass() {
-  return new AMDGPUCFGStructurizer();
-}

Removed: llvm/trunk/lib/Target/R600/AMDKernelCodeT.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDKernelCodeT.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDKernelCodeT.h (original)
+++ llvm/trunk/lib/Target/R600/AMDKernelCodeT.h (removed)
@@ -1,704 +0,0 @@
-//===-- AMDGPUKernelCodeT.h - Print AMDGPU assembly code ---------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-/// \file AMDKernelCodeT.h
-//===----------------------------------------------------------------------===//
-
-#ifndef AMDKERNELCODET_H
-#define AMDKERNELCODET_H
-
-#include <cstddef>
-#include <cstdint>
-
-//---------------------------------------------------------------------------//
-// AMD Kernel Code, and its dependencies                                     //
-//---------------------------------------------------------------------------//
-
-typedef uint8_t hsa_powertwo8_t;
-typedef uint32_t hsa_ext_code_kind_t;
-typedef uint8_t hsa_ext_brig_profile8_t;
-typedef uint8_t hsa_ext_brig_machine_model8_t;
-typedef uint64_t hsa_ext_control_directive_present64_t;
-typedef uint16_t hsa_ext_exception_kind16_t;
-typedef uint32_t hsa_ext_code_kind32_t;
-
-typedef struct hsa_dim3_s {
-  uint32_t x;
-  uint32_t y;
-  uint32_t z;
-} hsa_dim3_t;
-
-/// The version of the amd_*_code_t struct. Minor versions must be
-/// backward compatible.
-typedef uint32_t amd_code_version32_t;
-enum amd_code_version_t {
-  AMD_CODE_VERSION_MAJOR = 0,
-  AMD_CODE_VERSION_MINOR = 1
-};
-
-/// The values used to define the number of bytes to use for the
-/// swizzle element size.
-enum amd_element_byte_size_t {
-  AMD_ELEMENT_2_BYTES = 0,
-  AMD_ELEMENT_4_BYTES = 1,
-  AMD_ELEMENT_8_BYTES = 2,
-  AMD_ELEMENT_16_BYTES = 3
-};
-
-/// Shader program settings for CS. Contains COMPUTE_PGM_RSRC1 and
-/// COMPUTE_PGM_RSRC2 registers.
-typedef uint64_t amd_compute_pgm_resource_register64_t;
-
-/// Every amd_*_code_t has the following properties, which are composed of
-/// a number of bit fields. Every bit field has a mask (AMD_CODE_PROPERTY_*),
-/// bit width (AMD_CODE_PROPERTY_*_WIDTH, and bit shift amount
-/// (AMD_CODE_PROPERTY_*_SHIFT) for convenient access. Unused bits must be 0.
-///
-/// (Note that bit fields cannot be used as their layout is
-/// implementation defined in the C standard and so cannot be used to
-/// specify an ABI)
-typedef uint32_t amd_code_property32_t;
-enum amd_code_property_mask_t {
-
-  /// Enable the setup of the SGPR user data registers
-  /// (AMD_CODE_PROPERTY_ENABLE_SGPR_*), see documentation of amd_kernel_code_t
-  /// for initial register state.
-  ///
-  /// The total number of SGPRuser data registers requested must not
-  /// exceed 16. Any requests beyond 16 will be ignored.
-  ///
-  /// Used to set COMPUTE_PGM_RSRC2.USER_SGPR (set to total count of
-  /// SGPR user data registers enabled up to 16).
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT = 0,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT = 2,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT = 3,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT = 4,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT = 5,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT = 6,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT = 7,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT = 8,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT,
-
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT = 9,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z = ((1 << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT,
-
-  /// Control wave ID base counter for GDS ordered-append. Used to set
-  /// COMPUTE_DISPATCH_INITIATOR.ORDERED_APPEND_ENBL. (Not sure if
-  /// ORDERED_APPEND_MODE also needs to be settable)
-  AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT = 10,
-  AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH = 1,
-  AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS = ((1 << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_WIDTH) - 1) << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT,
-
-  /// The interleave (swizzle) element size in bytes required by the
-  /// code for private memory. This must be 2, 4, 8 or 16. This value
-  /// is provided to the finalizer when it is invoked and is recorded
-  /// here. The hardware will interleave the memory requests of each
-  /// lane of a wavefront by this element size to ensure each
-  /// work-item gets a distinct memory memory location. Therefore, the
-  /// finalizer ensures that all load and store operations done to
-  /// private memory do not exceed this size. For example, if the
-  /// element size is 4 (32-bits or dword) and a 64-bit value must be
-  /// loaded, the finalizer will generate two 32-bit loads. This
-  /// ensures that the interleaving will get the the work-item
-  /// specific dword for both halves of the 64-bit value. If it just
-  /// did a 64-bit load then it would get one dword which belonged to
-  /// its own work-item, but the second dword would belong to the
-  /// adjacent lane work-item since the interleaving is in dwords.
-  ///
-  /// The value used must match the value that the runtime configures
-  /// the GPU flat scratch (SH_STATIC_MEM_CONFIG.ELEMENT_SIZE). This
-  /// is generally DWORD.
-  ///
-  /// Use values from the amd_element_byte_size_t enum.
-  AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT = 11,
-  AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH = 2,
-  AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE = ((1 << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_WIDTH) - 1) << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT,
-
-  /// Are global memory addresses 64 bits. Must match
-  /// amd_kernel_code_t.hsail_machine_model ==
-  /// HSA_MACHINE_LARGE. Must also match
-  /// SH_MEM_CONFIG.PTR32 (GFX6 (SI)/GFX7 (CI)),
-  /// SH_MEM_CONFIG.ADDRESS_MODE (GFX8 (VI)+).
-  AMD_CODE_PROPERTY_IS_PTR64_SHIFT = 13,
-  AMD_CODE_PROPERTY_IS_PTR64_WIDTH = 1,
-  AMD_CODE_PROPERTY_IS_PTR64 = ((1 << AMD_CODE_PROPERTY_IS_PTR64_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_PTR64_SHIFT,
-
-  /// Indicate if the generated ISA is using a dynamically sized call
-  /// stack. This can happen if calls are implemented using a call
-  /// stack and recursion, alloca or calls to indirect functions are
-  /// present. In these cases the Finalizer cannot compute the total
-  /// private segment size at compile time. In this case the
-  /// workitem_private_segment_byte_size only specifies the statically
-  /// know private segment size, and additional space must be added
-  /// for the call stack.
-  AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT = 14,
-  AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH = 1,
-  AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK = ((1 << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT,
-
-  /// Indicate if code generated has support for debugging.
-  AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT = 15,
-  AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH = 1,
-  AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED = ((1 << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_WIDTH) - 1) << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT
-};
-
-/// @brief The hsa_ext_control_directives_t specifies the values for the HSAIL
-/// control directives. These control how the finalizer generates code. This
-/// struct is used both as an argument to hsaFinalizeKernel to specify values for
-/// the control directives, and is used in HsaKernelCode to record the values of
-/// the control directives that the finalize used when generating the code which
-/// either came from the finalizer argument or explicit HSAIL control
-/// directives. See the definition of the control directives in HSA Programmer's
-/// Reference Manual which also defines how the values specified as finalizer
-/// arguments have to agree with the control directives in the HSAIL code.
-typedef struct hsa_ext_control_directives_s {
-  /// This is a bit set indicating which control directives have been
-  /// specified. If the value is 0 then there are no control directives specified
-  /// and the rest of the fields can be ignored. The bits are accessed using the
-  /// hsa_ext_control_directives_present_mask_t. Any control directive that is not
-  /// enabled in this bit set must have the value of all 0s.
-  hsa_ext_control_directive_present64_t enabled_control_directives;
-
-  /// If enableBreakExceptions is not enabled then must be 0, otherwise must be
-  /// non-0 and specifies the set of HSAIL exceptions that must have the BREAK
-  /// policy enabled. If this set is not empty then the generated code may have
-  /// lower performance than if the set is empty. If the kernel being finalized
-  /// has any enablebreakexceptions control directives, then the values specified
-  /// by this argument are unioned with the values in these control
-  /// directives. If any of the functions the kernel calls have an
-  /// enablebreakexceptions control directive, then they must be equal or a
-  /// subset of, this union.
-  hsa_ext_exception_kind16_t enable_break_exceptions;
-
-  /// If enableDetectExceptions is not enabled then must be 0, otherwise must be
-  /// non-0 and specifies the set of HSAIL exceptions that must have the DETECT
-  /// policy enabled. If this set is not empty then the generated code may have
-  /// lower performance than if the set is empty. However, an implementation
-  /// should endeavour to make the performance impact small. If the kernel being
-  /// finalized has any enabledetectexceptions control directives, then the
-  /// values specified by this argument are unioned with the values in these
-  /// control directives. If any of the functions the kernel calls have an
-  /// enabledetectexceptions control directive, then they must be equal or a
-  /// subset of, this union.
-  hsa_ext_exception_kind16_t enable_detect_exceptions;
-
-  /// If maxDynamicGroupSize is not enabled then must be 0, and any amount of
-  /// dynamic group segment can be allocated for a dispatch, otherwise the value
-  /// specifies the maximum number of bytes of dynamic group segment that can be
-  /// allocated for a dispatch. If the kernel being finalized has any
-  /// maxdynamicsize control directives, then the values must be the same, and
-  /// must be the same as this argument if it is enabled. This value can be used
-  /// by the finalizer to determine the maximum number of bytes of group memory
-  /// used by each work-group by adding this value to the group memory required
-  /// for all group segment variables used by the kernel and all functions it
-  /// calls, and group memory used to implement other HSAIL features such as
-  /// fbarriers and the detect exception operations. This can allow the finalizer
-  /// to determine the expected number of work-groups that can be executed by a
-  /// compute unit and allow more resources to be allocated to the work-items if
-  /// it is known that fewer work-groups can be executed due to group memory
-  /// limitations.
-  uint32_t max_dynamic_group_size;
-
-  /// If maxFlatGridSize is not enabled then must be 0, otherwise must be greater
-  /// than 0. See HSA Programmer's Reference Manual description of
-  /// maxflatgridsize control directive.
-  uint32_t max_flat_grid_size;
-
-  /// If maxFlatWorkgroupSize is not enabled then must be 0, otherwise must be
-  /// greater than 0. See HSA Programmer's Reference Manual description of
-  /// maxflatworkgroupsize control directive.
-  uint32_t max_flat_workgroup_size;
-
-  /// If requestedWorkgroupsPerCu is not enabled then must be 0, and the
-  /// finalizer is free to generate ISA that may result in any number of
-  /// work-groups executing on a single compute unit. Otherwise, the finalizer
-  /// should attempt to generate ISA that will allow the specified number of
-  /// work-groups to execute on a single compute unit. This is only a hint and
-  /// can be ignored by the finalizer. If the kernel being finalized, or any of
-  /// the functions it calls, has a requested control directive, then the values
-  /// must be the same. This can be used to determine the number of resources
-  /// that should be allocated to a single work-group and work-item. For example,
-  /// a low value may allow more resources to be allocated, resulting in higher
-  /// per work-item performance, as it is known there will never be more than the
-  /// specified number of work-groups actually executing on the compute
-  /// unit. Conversely, a high value may allocate fewer resources, resulting in
-  /// lower per work-item performance, which is offset by the fact it allows more
-  /// work-groups to actually execute on the compute unit.
-  uint32_t requested_workgroups_per_cu;
-
-  /// If not enabled then all elements for Dim3 must be 0, otherwise every
-  /// element must be greater than 0. See HSA Programmer's Reference Manual
-  /// description of requiredgridsize control directive.
-  hsa_dim3_t required_grid_size;
-
-  /// If requiredWorkgroupSize is not enabled then all elements for Dim3 must be
-  /// 0, and the produced code can be dispatched with any legal work-group range
-  /// consistent with the dispatch dimensions. Otherwise, the code produced must
-  /// always be dispatched with the specified work-group range. No element of the
-  /// specified range must be 0. It must be consistent with required_dimensions
-  /// and max_flat_workgroup_size. If the kernel being finalized, or any of the
-  /// functions it calls, has a requiredworkgroupsize control directive, then the
-  /// values must be the same. Specifying a value can allow the finalizer to
-  /// optimize work-group id operations, and if the number of work-items in the
-  /// work-group is less than the WAVESIZE then barrier operations can be
-  /// optimized to just a memory fence.
-  hsa_dim3_t required_workgroup_size;
-
-  /// If requiredDim is not enabled then must be 0 and the produced kernel code
-  /// can be dispatched with 1, 2 or 3 dimensions. If enabled then the value is
-  /// 1..3 and the code produced must only be dispatched with a dimension that
-  /// matches. Other values are illegal. If the kernel being finalized, or any of
-  /// the functions it calls, has a requireddimsize control directive, then the
-  /// values must be the same. This can be used to optimize the code generated to
-  /// compute the absolute and flat work-group and work-item id, and the dim
-  /// HSAIL operations.
-  uint8_t required_dim;
-
-  /// Reserved. Must be 0.
-  uint8_t reserved[75];
-} hsa_ext_control_directives_t;
-
-/// AMD Kernel Code Object (amd_kernel_code_t). GPU CP uses the AMD Kernel
-/// Code Object to set up the hardware to execute the kernel dispatch.
-///
-/// Initial Kernel Register State.
-///
-/// Initial kernel register state will be set up by CP/SPI prior to the start
-/// of execution of every wavefront. This is limited by the constraints of the
-/// current hardware.
-///
-/// The order of the SGPR registers is defined, but the Finalizer can specify
-/// which ones are actually setup in the amd_kernel_code_t object using the
-/// enable_sgpr_* bit fields. The register numbers used for enabled registers
-/// are dense starting at SGPR0: the first enabled register is SGPR0, the next
-/// enabled register is SGPR1 etc.; disabled registers do not have an SGPR
-/// number.
-///
-/// The initial SGPRs comprise up to 16 User SRGPs that are set up by CP and
-/// apply to all waves of the grid. It is possible to specify more than 16 User
-/// SGPRs using the enable_sgpr_* bit fields, in which case only the first 16
-/// are actually initialized. These are then immediately followed by the System
-/// SGPRs that are set up by ADC/SPI and can have different values for each wave
-/// of the grid dispatch.
-///
-/// SGPR register initial state is defined as follows:
-///
-/// Private Segment Buffer (enable_sgpr_private_segment_buffer):
-///   Number of User SGPR registers: 4. V# that can be used, together with
-///   Scratch Wave Offset as an offset, to access the Private/Spill/Arg
-///   segments using a segment address. It must be set as follows:
-///     - Base address: of the scratch memory area used by the dispatch. It
-///       does not include the scratch wave offset. It will be the per process
-///       SH_HIDDEN_PRIVATE_BASE_VMID plus any offset from this dispatch (for
-///       example there may be a per pipe offset, or per AQL Queue offset).
-///     - Stride + data_format: Element Size * Index Stride (???)
-///     - Cache swizzle: ???
-///     - Swizzle enable: SH_STATIC_MEM_CONFIG.SWIZZLE_ENABLE (must be 1 for
-///       scratch)
-///     - Num records: Flat Scratch Work Item Size / Element Size (???)
-///     - Dst_sel_*: ???
-///     - Num_format: ???
-///     - Element_size: SH_STATIC_MEM_CONFIG.ELEMENT_SIZE (will be DWORD, must
-///       agree with amd_kernel_code_t.privateElementSize)
-///     - Index_stride: SH_STATIC_MEM_CONFIG.INDEX_STRIDE (will be 64 as must
-///       be number of wavefront lanes for scratch, must agree with
-///       amd_kernel_code_t.wavefrontSize)
-///     - Add tid enable: 1
-///     - ATC: from SH_MEM_CONFIG.PRIVATE_ATC,
-///     - Hash_enable: ???
-///     - Heap: ???
-///     - Mtype: from SH_STATIC_MEM_CONFIG.PRIVATE_MTYPE
-///     - Type: 0 (a buffer) (???)
-///
-/// Dispatch Ptr (enable_sgpr_dispatch_ptr):
-///   Number of User SGPR registers: 2. 64 bit address of AQL dispatch packet
-///   for kernel actually executing.
-///
-/// Queue Ptr (enable_sgpr_queue_ptr):
-///   Number of User SGPR registers: 2. 64 bit address of AmdQueue object for
-///   AQL queue on which the dispatch packet was queued.
-///
-/// Kernarg Segment Ptr (enable_sgpr_kernarg_segment_ptr):
-///   Number of User SGPR registers: 2. 64 bit address of Kernarg segment. This
-///   is directly copied from the kernargPtr in the dispatch packet. Having CP
-///   load it once avoids loading it at the beginning of every wavefront.
-///
-/// Dispatch Id (enable_sgpr_dispatch_id):
-///   Number of User SGPR registers: 2. 64 bit Dispatch ID of the dispatch
-///   packet being executed.
-///
-/// Flat Scratch Init (enable_sgpr_flat_scratch_init):
-///   Number of User SGPR registers: 2. This is 2 SGPRs.
-///
-///   For CI/VI:
-///     The first SGPR is a 32 bit byte offset from SH_MEM_HIDDEN_PRIVATE_BASE
-///     to base of memory for scratch for this dispatch. This is the same offset
-///     used in computing the Scratch Segment Buffer base address. The value of
-///     Scratch Wave Offset must be added by the kernel code and moved to
-///     SGPRn-4 for use as the FLAT SCRATCH BASE in flat memory instructions.
-///
-///     The second SGPR is 32 bit byte size of a single work-itemÂ’s scratch
-///     memory usage. This is directly loaded from the dispatch packet Private
-///     Segment Byte Size and rounded up to a multiple of DWORD.
-///
-///     \todo [Does CP need to round this to >4 byte alignment?]
-///
-///     The kernel code must move to SGPRn-3 for use as the FLAT SCRATCH SIZE in
-///     flat memory instructions. Having CP load it once avoids loading it at
-///     the beginning of every wavefront.
-///
-///   For PI:
-///     This is the 64 bit base address of the scratch backing memory for
-///     allocated by CP for this dispatch.
-///
-/// Private Segment Size (enable_sgpr_private_segment_size):
-///   Number of User SGPR registers: 1. The 32 bit byte size of a single
-///   work-itemÂ’s scratch memory allocation. This is the value from the dispatch
-///   packet. Private Segment Byte Size rounded up by CP to a multiple of DWORD.
-///
-///   \todo [Does CP need to round this to >4 byte alignment?]
-///
-///   Having CP load it once avoids loading it at the beginning of every
-///   wavefront.
-///
-///   \todo [This will not be used for CI/VI since it is the same value as
-///   the second SGPR of Flat Scratch Init. However, it is need for PI which
-///   changes meaning of Flat Scratchg Init..]
-///
-/// Grid Work-Group Count X (enable_sgpr_grid_workgroup_count_x):
-///   Number of User SGPR registers: 1. 32 bit count of the number of
-///   work-groups in the X dimension for the grid being executed. Computed from
-///   the fields in the HsaDispatchPacket as
-///   ((gridSize.x+workgroupSize.x-1)/workgroupSize.x).
-///
-/// Grid Work-Group Count Y (enable_sgpr_grid_workgroup_count_y):
-///   Number of User SGPR registers: 1. 32 bit count of the number of
-///   work-groups in the Y dimension for the grid being executed. Computed from
-///   the fields in the HsaDispatchPacket as
-///   ((gridSize.y+workgroupSize.y-1)/workgroupSize.y).
-///
-///   Only initialized if <16 previous SGPRs initialized.
-///
-/// Grid Work-Group Count Z (enable_sgpr_grid_workgroup_count_z):
-///   Number of User SGPR registers: 1. 32 bit count of the number of
-///   work-groups in the Z dimension for the grid being executed. Computed
-///   from the fields in the HsaDispatchPacket as
-///   ((gridSize.z+workgroupSize.z-1)/workgroupSize.z).
-///
-///   Only initialized if <16 previous SGPRs initialized.
-///
-/// Work-Group Id X (enable_sgpr_workgroup_id_x):
-///   Number of System SGPR registers: 1. 32 bit work group id in X dimension
-///   of grid for wavefront. Always present.
-///
-/// Work-Group Id Y (enable_sgpr_workgroup_id_y):
-///   Number of System SGPR registers: 1. 32 bit work group id in Y dimension
-///   of grid for wavefront.
-///
-/// Work-Group Id Z (enable_sgpr_workgroup_id_z):
-///   Number of System SGPR registers: 1. 32 bit work group id in Z dimension
-///   of grid for wavefront. If present then Work-group Id Y will also be
-///   present
-///
-/// Work-Group Info (enable_sgpr_workgroup_info):
-///   Number of System SGPR registers: 1. {first_wave, 14Â’b0000,
-///   ordered_append_term[10:0], threadgroup_size_in_waves[5:0]}
-///
-/// Private Segment Wave Byte Offset
-/// (enable_sgpr_private_segment_wave_byte_offset):
-///   Number of System SGPR registers: 1. 32 bit byte offset from base of
-///   dispatch scratch base. Must be used as an offset with Private/Spill/Arg
-///   segment address when using Scratch Segment Buffer. It must be added to
-///   Flat Scratch Offset if setting up FLAT SCRATCH for flat addressing.
-///
-///
-/// The order of the VGPR registers is defined, but the Finalizer can specify
-/// which ones are actually setup in the amd_kernel_code_t object using the
-/// enableVgpr*  bit fields. The register numbers used for enabled registers
-/// are dense starting at VGPR0: the first enabled register is VGPR0, the next
-/// enabled register is VGPR1 etc.; disabled registers do not have an VGPR
-/// number.
-///
-/// VGPR register initial state is defined as follows:
-///
-/// Work-Item Id X (always initialized):
-///   Number of registers: 1. 32 bit work item id in X dimension of work-group
-///   for wavefront lane.
-///
-/// Work-Item Id X (enable_vgpr_workitem_id > 0):
-///   Number of registers: 1. 32 bit work item id in Y dimension of work-group
-///   for wavefront lane.
-///
-/// Work-Item Id X (enable_vgpr_workitem_id > 0):
-///   Number of registers: 1. 32 bit work item id in Z dimension of work-group
-///   for wavefront lane.
-///
-///
-/// The setting of registers is being done by existing GPU hardware as follows:
-///   1) SGPRs before the Work-Group Ids are set by CP using the 16 User Data
-///      registers.
-///   2) Work-group Id registers X, Y, Z are set by SPI which supports any
-///      combination including none.
-///   3) Scratch Wave Offset is also set by SPI which is why its value cannot
-///      be added into the value Flat Scratch Offset which would avoid the
-///      Finalizer generated prolog having to do the add.
-///   4) The VGPRs are set by SPI which only supports specifying either (X),
-///      (X, Y) or (X, Y, Z).
-///
-/// Flat Scratch Dispatch Offset and Flat Scratch Size are adjacent SGRRs so
-/// they can be moved as a 64 bit value to the hardware required SGPRn-3 and
-/// SGPRn-4 respectively using the Finalizer ?FLAT_SCRATCH? Register.
-///
-/// The global segment can be accessed either using flat operations or buffer
-/// operations. If buffer operations are used then the Global Buffer used to
-/// access HSAIL Global/Readonly/Kernarg (which are combine) segments using a
-/// segment address is not passed into the kernel code by CP since its base
-/// address is always 0. Instead the Finalizer generates prolog code to
-/// initialize 4 SGPRs with a V# that has the following properties, and then
-/// uses that in the buffer instructions:
-///   - base address of 0
-///   - no swizzle
-///   - ATC=1
-///   - MTYPE set to support memory coherence specified in
-///     amd_kernel_code_t.globalMemoryCoherence
-///
-/// When the Global Buffer is used to access the Kernarg segment, must add the
-/// dispatch packet kernArgPtr to a kernarg segment address before using this V#.
-/// Alternatively scalar loads can be used if the kernarg offset is uniform, as
-/// the kernarg segment is constant for the duration of the kernel execution.
-///
-typedef struct amd_kernel_code_s {
-  /// The AMD major version of the Code Object. Must be the value
-  /// AMD_CODE_VERSION_MAJOR.
-  amd_code_version32_t amd_code_version_major;
-
-  /// The AMD minor version of the Code Object. Minor versions must be
-  /// backward compatible. Must be the value
-  /// AMD_CODE_VERSION_MINOR.
-  amd_code_version32_t amd_code_version_minor;
-
-  /// The byte size of this struct. Must be set to
-  /// sizeof(amd_kernel_code_t). Used for backward
-  /// compatibility.
-  uint32_t struct_byte_size;
-
-  /// The target chip instruction set for which code has been
-  /// generated. Values are from the E_SC_INSTRUCTION_SET enumeration
-  /// in sc/Interface/SCCommon.h.
-  uint32_t target_chip;
-
-  /// Byte offset (possibly negative) from start of amd_kernel_code_t
-  /// object to kernel's entry point instruction. The actual code for
-  /// the kernel is required to be 256 byte aligned to match hardware
-  /// requirements (SQ cache line is 16). The code must be position
-  /// independent code (PIC) for AMD devices to give runtime the
-  /// option of copying code to discrete GPU memory or APU L2
-  /// cache. The Finalizer should endeavour to allocate all kernel
-  /// machine code in contiguous memory pages so that a device
-  /// pre-fetcher will tend to only pre-fetch Kernel Code objects,
-  /// improving cache performance.
-  int64_t kernel_code_entry_byte_offset;
-
-  /// Range of bytes to consider prefetching expressed as an offset
-  /// and size. The offset is from the start (possibly negative) of
-  /// amd_kernel_code_t object. Set both to 0 if no prefetch
-  /// information is available.
-  ///
-  /// \todo ttye 11/15/2013 Is the prefetch definition we want? Did
-  /// not make the size a uint64_t as prefetching more than 4GiB seems
-  /// excessive.
-  int64_t kernel_code_prefetch_byte_offset;
-  uint64_t kernel_code_prefetch_byte_size;
-
-  /// Number of bytes of scratch backing memory required for full
-  /// occupancy of target chip. This takes into account the number of
-  /// bytes of scratch per work-item, the wavefront size, the maximum
-  /// number of wavefronts per CU, and the number of CUs. This is an
-  /// upper limit on scratch. If the grid being dispatched is small it
-  /// may only need less than this. If the kernel uses no scratch, or
-  /// the Finalizer has not computed this value, it must be 0.
-  uint64_t max_scratch_backing_memory_byte_size;
-
-  /// Shader program settings for CS. Contains COMPUTE_PGM_RSRC1 and
-  /// COMPUTE_PGM_RSRC2 registers.
-  amd_compute_pgm_resource_register64_t compute_pgm_resource_registers;
-
-  /// Code properties. See amd_code_property_mask_t for a full list of
-  /// properties.
-  amd_code_property32_t code_properties;
-
-  /// The amount of memory required for the combined private, spill
-  /// and arg segments for a work-item in bytes. If
-  /// is_dynamic_callstack is 1 then additional space must be added to
-  /// this value for the call stack.
-  uint32_t workitem_private_segment_byte_size;
-
-  /// The amount of group segment memory required by a work-group in
-  /// bytes. This does not include any dynamically allocated group
-  /// segment memory that may be added when the kernel is
-  /// dispatched.
-  uint32_t workgroup_group_segment_byte_size;
-
-  /// Number of byte of GDS required by kernel dispatch. Must be 0 if
-  /// not using GDS.
-  uint32_t gds_segment_byte_size;
-
-  /// The size in bytes of the kernarg segment that holds the values
-  /// of the arguments to the kernel. This could be used by CP to
-  /// prefetch the kernarg segment pointed to by the dispatch packet.
-  uint64_t kernarg_segment_byte_size;
-
-  /// Number of fbarrier's used in the kernel and all functions it
-  /// calls. If the implementation uses group memory to allocate the
-  /// fbarriers then that amount must already be included in the
-  /// workgroup_group_segment_byte_size total.
-  uint32_t workgroup_fbarrier_count;
-
-  /// Number of scalar registers used by a wavefront. This includes
-  /// the special SGPRs for VCC, Flat Scratch Base, Flat Scratch Size
-  /// and XNACK (for GFX8 (VI)). It does not include the 16 SGPR added if a
-  /// trap handler is enabled. Used to set COMPUTE_PGM_RSRC1.SGPRS.
-  uint16_t wavefront_sgpr_count;
-
-  /// Number of vector registers used by each work-item. Used to set
-  /// COMPUTE_PGM_RSRC1.VGPRS.
-  uint16_t workitem_vgpr_count;
-
-  /// If reserved_vgpr_count is 0 then must be 0. Otherwise, this is the
-  /// first fixed VGPR number reserved.
-  uint16_t reserved_vgpr_first;
-
-  /// The number of consecutive VGPRs reserved by the client. If
-  /// is_debug_supported then this count includes VGPRs reserved
-  /// for debugger use.
-  uint16_t reserved_vgpr_count;
-
-  /// If reserved_sgpr_count is 0 then must be 0. Otherwise, this is the
-  /// first fixed SGPR number reserved.
-  uint16_t reserved_sgpr_first;
-
-  /// The number of consecutive SGPRs reserved by the client. If
-  /// is_debug_supported then this count includes SGPRs reserved
-  /// for debugger use.
-  uint16_t reserved_sgpr_count;
-
-  /// If is_debug_supported is 0 then must be 0. Otherwise, this is the
-  /// fixed SGPR number used to hold the wave scratch offset for the
-  /// entire kernel execution, or uint16_t(-1) if the register is not
-  /// used or not known.
-  uint16_t debug_wavefront_private_segment_offset_sgpr;
-
-  /// If is_debug_supported is 0 then must be 0. Otherwise, this is the
-  /// fixed SGPR number of the first of 4 SGPRs used to hold the
-  /// scratch V# used for the entire kernel execution, or uint16_t(-1)
-  /// if the registers are not used or not known.
-  uint16_t debug_private_segment_buffer_sgpr;
-
-  /// The maximum byte alignment of variables used by the kernel in
-  /// the specified memory segment. Expressed as a power of two. Must
-  /// be at least HSA_POWERTWO_16.
-  hsa_powertwo8_t kernarg_segment_alignment;
-  hsa_powertwo8_t group_segment_alignment;
-  hsa_powertwo8_t private_segment_alignment;
-
-  uint8_t reserved3;
-
-  /// Type of code object.
-  hsa_ext_code_kind32_t code_type;
-
-  /// Reserved for code properties if any are defined in the future.
-  /// There are currently no code properties so this field must be 0.
-  uint32_t reserved4;
-
-  /// Wavefront size expressed as a power of two. Must be a power of 2
-  /// in range 1..64 inclusive. Used to support runtime query that
-  /// obtains wavefront size, which may be used by application to
-  /// allocated dynamic group memory and set the dispatch work-group
-  /// size.
-  hsa_powertwo8_t wavefront_size;
-
-  /// The optimization level specified when the kernel was
-  /// finalized.
-  uint8_t optimization_level;
-
-  /// The HSAIL profile defines which features are used. This
-  /// information is from the HSAIL version directive. If this
-  /// amd_kernel_code_t is not generated from an HSAIL compilation
-  /// unit then must be 0.
-  hsa_ext_brig_profile8_t hsail_profile;
-
-  /// The HSAIL machine model gives the address sizes used by the
-  /// code. This information is from the HSAIL version directive. If
-  /// not generated from an HSAIL compilation unit then must still
-  /// indicate for what machine mode the code is generated.
-  hsa_ext_brig_machine_model8_t hsail_machine_model;
-
-  /// The HSAIL major version. This information is from the HSAIL
-  /// version directive. If this amd_kernel_code_t is not
-  /// generated from an HSAIL compilation unit then must be 0.
-  uint32_t hsail_version_major;
-
-  /// The HSAIL minor version. This information is from the HSAIL
-  /// version directive. If this amd_kernel_code_t is not
-  /// generated from an HSAIL compilation unit then must be 0.
-  uint32_t hsail_version_minor;
-
-  /// Reserved for HSAIL target options if any are defined in the
-  /// future. There are currently no target options so this field
-  /// must be 0.
-  uint16_t reserved5;
-
-  /// Reserved. Must be 0.
-  uint16_t reserved6;
-
-  /// The values should be the actually values used by the finalizer
-  /// in generating the code. This may be the union of values
-  /// specified as finalizer arguments and explicit HSAIL control
-  /// directives. If the finalizer chooses to ignore a control
-  /// directive, and not generate constrained code, then the control
-  /// directive should not be marked as enabled even though it was
-  /// present in the HSAIL or finalizer argument. The values are
-  /// intended to reflect the constraints that the code actually
-  /// requires to correctly execute, not the values that were
-  /// actually specified at finalize time.
-  hsa_ext_control_directives_t control_directive;
-
-  /// The code can immediately follow the amd_kernel_code_t, or can
-  /// come after subsequent amd_kernel_code_t structs when there are
-  /// multiple kernels in the compilation unit.
-
-} amd_kernel_code_t;
-
-#endif // AMDKERNELCODET_H

Removed: llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp (original)
+++ llvm/trunk/lib/Target/R600/AsmParser/AMDGPUAsmParser.cpp (removed)
@@ -1,1380 +0,0 @@
-//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "SIDefines.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Twine.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCParser/MCAsmLexer.h"
-#include "llvm/MC/MCParser/MCAsmParser.h"
-#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MCTargetAsmParser.h"
-#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/Support/Debug.h"
-
-using namespace llvm;
-
-namespace {
-
-struct OptionalOperand;
-
-class AMDGPUOperand : public MCParsedAsmOperand {
-  enum KindTy {
-    Token,
-    Immediate,
-    Register,
-    Expression
-  } Kind;
-
-  SMLoc StartLoc, EndLoc;
-
-public:
-  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
-
-  MCContext *Ctx;
-
-  enum ImmTy {
-    ImmTyNone,
-    ImmTyDSOffset0,
-    ImmTyDSOffset1,
-    ImmTyGDS,
-    ImmTyOffset,
-    ImmTyGLC,
-    ImmTySLC,
-    ImmTyTFE,
-    ImmTyClamp,
-    ImmTyOMod
-  };
-
-  struct TokOp {
-    const char *Data;
-    unsigned Length;
-  };
-
-  struct ImmOp {
-    bool IsFPImm;
-    ImmTy Type;
-    int64_t Val;
-  };
-
-  struct RegOp {
-    unsigned RegNo;
-    int Modifiers;
-    const MCRegisterInfo *TRI;
-    bool IsForcedVOP3;
-  };
-
-  union {
-    TokOp Tok;
-    ImmOp Imm;
-    RegOp Reg;
-    const MCExpr *Expr;
-  };
-
-  void addImmOperands(MCInst &Inst, unsigned N) const {
-    Inst.addOperand(MCOperand::createImm(getImm()));
-  }
-
-  StringRef getToken() const {
-    return StringRef(Tok.Data, Tok.Length);
-  }
-
-  void addRegOperands(MCInst &Inst, unsigned N) const {
-    Inst.addOperand(MCOperand::createReg(getReg()));
-  }
-
-  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
-    if (isReg())
-      addRegOperands(Inst, N);
-    else
-      addImmOperands(Inst, N);
-  }
-
-  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
-    Inst.addOperand(MCOperand::createImm(
-        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
-    addRegOperands(Inst, N);
-  }
-
-  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
-    if (isImm())
-      addImmOperands(Inst, N);
-    else {
-      assert(isExpr());
-      Inst.addOperand(MCOperand::createExpr(Expr));
-    }
-  }
-
-  bool defaultTokenHasSuffix() const {
-    StringRef Token(Tok.Data, Tok.Length);
-
-    return Token.endswith("_e32") || Token.endswith("_e64");
-  }
-
-  bool isToken() const override {
-    return Kind == Token;
-  }
-
-  bool isImm() const override {
-    return Kind == Immediate;
-  }
-
-  bool isInlineImm() const {
-    float F = BitsToFloat(Imm.Val);
-    // TODO: Add 0.5pi for VI
-    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
-           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
-           F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
-  }
-
-  bool isDSOffset0() const {
-    assert(isImm());
-    return Imm.Type == ImmTyDSOffset0;
-  }
-
-  bool isDSOffset1() const {
-    assert(isImm());
-    return Imm.Type == ImmTyDSOffset1;
-  }
-
-  int64_t getImm() const {
-    return Imm.Val;
-  }
-
-  enum ImmTy getImmTy() const {
-    assert(isImm());
-    return Imm.Type;
-  }
-
-  bool isRegKind() const {
-    return Kind == Register;
-  }
-
-  bool isReg() const override {
-    return Kind == Register && Reg.Modifiers == -1;
-  }
-
-  bool isRegWithInputMods() const {
-    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
-  }
-
-  void setModifiers(unsigned Mods) {
-    assert(isReg());
-    Reg.Modifiers = Mods;
-  }
-
-  bool hasModifiers() const {
-    assert(isRegKind());
-    return Reg.Modifiers != -1;
-  }
-
-  unsigned getReg() const override {
-    return Reg.RegNo;
-  }
-
-  bool isRegOrImm() const {
-    return isReg() || isImm();
-  }
-
-  bool isRegClass(unsigned RCID) const {
-    return Reg.TRI->getRegClass(RCID).contains(getReg());
-  }
-
-  bool isSCSrc32() const {
-    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
-  }
-
-  bool isSSrc32() const {
-    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
-  }
-
-  bool isSSrc64() const {
-    return isImm() || isInlineImm() ||
-           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
-  }
-
-  bool isVCSrc32() const {
-    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
-  }
-
-  bool isVCSrc64() const {
-    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
-  }
-
-  bool isVSrc32() const {
-    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
-  }
-
-  bool isVSrc64() const {
-    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
-  }
-
-  bool isMem() const override {
-    return false;
-  }
-
-  bool isExpr() const {
-    return Kind == Expression;
-  }
-
-  bool isSoppBrTarget() const {
-    return isExpr() || isImm();
-  }
-
-  SMLoc getStartLoc() const override {
-    return StartLoc;
-  }
-
-  SMLoc getEndLoc() const override {
-    return EndLoc;
-  }
-
-  void print(raw_ostream &OS) const override { }
-
-  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
-                                                  enum ImmTy Type = ImmTyNone,
-                                                  bool IsFPImm = false) {
-    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
-    Op->Imm.Val = Val;
-    Op->Imm.IsFPImm = IsFPImm;
-    Op->Imm.Type = Type;
-    Op->StartLoc = Loc;
-    Op->EndLoc = Loc;
-    return Op;
-  }
-
-  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
-                                           bool HasExplicitEncodingSize = true) {
-    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
-    Res->Tok.Data = Str.data();
-    Res->Tok.Length = Str.size();
-    Res->StartLoc = Loc;
-    Res->EndLoc = Loc;
-    return Res;
-  }
-
-  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
-                                                  SMLoc E,
-                                                  const MCRegisterInfo *TRI,
-                                                  bool ForceVOP3) {
-    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
-    Op->Reg.RegNo = RegNo;
-    Op->Reg.TRI = TRI;
-    Op->Reg.Modifiers = -1;
-    Op->Reg.IsForcedVOP3 = ForceVOP3;
-    Op->StartLoc = S;
-    Op->EndLoc = E;
-    return Op;
-  }
-
-  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
-    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
-    Op->Expr = Expr;
-    Op->StartLoc = S;
-    Op->EndLoc = S;
-    return Op;
-  }
-
-  bool isDSOffset() const;
-  bool isDSOffset01() const;
-  bool isSWaitCnt() const;
-  bool isMubufOffset() const;
-};
-
-class AMDGPUAsmParser : public MCTargetAsmParser {
-  MCSubtargetInfo &STI;
-  const MCInstrInfo &MII;
-  MCAsmParser &Parser;
-
-  unsigned ForcedEncodingSize;
-  /// @name Auto-generated Match Functions
-  /// {
-
-#define GET_ASSEMBLER_HEADER
-#include "AMDGPUGenAsmMatcher.inc"
-
-  /// }
-
-public:
-  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
-               const MCInstrInfo &MII,
-               const MCTargetOptions &Options)
-      : MCTargetAsmParser(), STI(STI), MII(MII), Parser(_Parser),
-        ForcedEncodingSize(0){
-
-    if (STI.getFeatureBits().none()) {
-      // Set default features.
-      STI.ToggleFeature("SOUTHERN_ISLANDS");
-    }
-
-    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
-  }
-
-  unsigned getForcedEncodingSize() const {
-    return ForcedEncodingSize;
-  }
-
-  void setForcedEncodingSize(unsigned Size) {
-    ForcedEncodingSize = Size;
-  }
-
-  bool isForcedVOP3() const {
-    return ForcedEncodingSize == 64;
-  }
-
-  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
-  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
-  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
-                               OperandVector &Operands, MCStreamer &Out,
-                               uint64_t &ErrorInfo,
-                               bool MatchingInlineAsm) override;
-  bool ParseDirective(AsmToken DirectiveID) override;
-  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
-  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
-                        SMLoc NameLoc, OperandVector &Operands) override;
-
-  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
-                                          int64_t Default = 0);
-  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
-                                          OperandVector &Operands,
-                                          enum AMDGPUOperand::ImmTy ImmTy =
-                                                      AMDGPUOperand::ImmTyNone);
-  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
-                                     enum AMDGPUOperand::ImmTy ImmTy =
-                                                      AMDGPUOperand::ImmTyNone);
-  OperandMatchResultTy parseOptionalOps(
-                                   const ArrayRef<OptionalOperand> &OptionalOps,
-                                   OperandVector &Operands);
-
-
-  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
-  void cvtDS(MCInst &Inst, const OperandVector &Operands);
-  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
-  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
-  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);
-
-  bool parseCnt(int64_t &IntVal);
-  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
-  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
-
-  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
-  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
-  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
-
-  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
-  OperandMatchResultTy parseOffset(OperandVector &Operands);
-  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
-  OperandMatchResultTy parseGLC(OperandVector &Operands);
-  OperandMatchResultTy parseSLC(OperandVector &Operands);
-  OperandMatchResultTy parseTFE(OperandVector &Operands);
-
-  OperandMatchResultTy parseDMask(OperandVector &Operands);
-  OperandMatchResultTy parseUNorm(OperandVector &Operands);
-  OperandMatchResultTy parseR128(OperandVector &Operands);
-
-  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
-  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
-};
-
-struct OptionalOperand {
-  const char *Name;
-  AMDGPUOperand::ImmTy Type;
-  bool IsBit;
-  int64_t Default;
-  bool (*ConvertResult)(int64_t&);
-};
-
-}
-
-static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
-  if (IsVgpr) {
-    switch (RegWidth) {
-      default: llvm_unreachable("Unknown register width");
-      case 1: return AMDGPU::VGPR_32RegClassID;
-      case 2: return AMDGPU::VReg_64RegClassID;
-      case 3: return AMDGPU::VReg_96RegClassID;
-      case 4: return AMDGPU::VReg_128RegClassID;
-      case 8: return AMDGPU::VReg_256RegClassID;
-      case 16: return AMDGPU::VReg_512RegClassID;
-    }
-  }
-
-  switch (RegWidth) {
-    default: llvm_unreachable("Unknown register width");
-    case 1: return AMDGPU::SGPR_32RegClassID;
-    case 2: return AMDGPU::SGPR_64RegClassID;
-    case 4: return AMDGPU::SReg_128RegClassID;
-    case 8: return AMDGPU::SReg_256RegClassID;
-    case 16: return AMDGPU::SReg_512RegClassID;
-  }
-}
-
-static unsigned getRegForName(const StringRef &RegName) {
-
-  return StringSwitch<unsigned>(RegName)
-    .Case("exec", AMDGPU::EXEC)
-    .Case("vcc", AMDGPU::VCC)
-    .Case("flat_scr", AMDGPU::FLAT_SCR)
-    .Case("m0", AMDGPU::M0)
-    .Case("scc", AMDGPU::SCC)
-    .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
-    .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
-    .Case("vcc_lo", AMDGPU::VCC_LO)
-    .Case("vcc_hi", AMDGPU::VCC_HI)
-    .Case("exec_lo", AMDGPU::EXEC_LO)
-    .Case("exec_hi", AMDGPU::EXEC_HI)
-    .Default(0);
-}
-
-bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
-  const AsmToken Tok = Parser.getTok();
-  StartLoc = Tok.getLoc();
-  EndLoc = Tok.getEndLoc();
-  const StringRef &RegName = Tok.getString();
-  RegNo = getRegForName(RegName);
-
-  if (RegNo) {
-    Parser.Lex();
-    return false;
-  }
-
-  // Match vgprs and sgprs
-  if (RegName[0] != 's' && RegName[0] != 'v')
-    return true;
-
-  bool IsVgpr = RegName[0] == 'v';
-  unsigned RegWidth;
-  unsigned RegIndexInClass;
-  if (RegName.size() > 1) {
-    // We have a 32-bit register
-    RegWidth = 1;
-    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
-      return true;
-    Parser.Lex();
-  } else {
-    // We have a register greater than 32-bits.
-
-    int64_t RegLo, RegHi;
-    Parser.Lex();
-    if (getLexer().isNot(AsmToken::LBrac))
-      return true;
-
-    Parser.Lex();
-    if (getParser().parseAbsoluteExpression(RegLo))
-      return true;
-
-    if (getLexer().isNot(AsmToken::Colon))
-      return true;
-
-    Parser.Lex();
-    if (getParser().parseAbsoluteExpression(RegHi))
-      return true;
-
-    if (getLexer().isNot(AsmToken::RBrac))
-      return true;
-
-    Parser.Lex();
-    RegWidth = (RegHi - RegLo) + 1;
-    if (IsVgpr) {
-      // VGPR registers aren't aligned.
-      RegIndexInClass = RegLo;
-    } else {
-      // SGPR registers are aligned.  Max alignment is 4 dwords.
-      RegIndexInClass = RegLo / std::min(RegWidth, 4u);
-    }
-  }
-
-  const MCRegisterInfo *TRC = getContext().getRegisterInfo();
-  unsigned RC = getRegClass(IsVgpr, RegWidth);
-  if (RegIndexInClass > TRC->getRegClass(RC).getNumRegs())
-    return true;
-  RegNo = TRC->getRegClass(RC).getRegister(RegIndexInClass);
-  return false;
-}
-
-unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
-
-  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
-
-  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
-      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
-    return Match_InvalidOperand;
-
-  return Match_Success;
-}
-
-
-bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
-                                              OperandVector &Operands,
-                                              MCStreamer &Out,
-                                              uint64_t &ErrorInfo,
-                                              bool MatchingInlineAsm) {
-  MCInst Inst;
-
-  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
-    default: break;
-    case Match_Success:
-      Inst.setLoc(IDLoc);
-      Out.EmitInstruction(Inst, STI);
-      return false;
-    case Match_MissingFeature:
-      return Error(IDLoc, "instruction not supported on this GPU");
-
-    case Match_MnemonicFail:
-      return Error(IDLoc, "unrecognized instruction mnemonic");
-
-    case Match_InvalidOperand: {
-      SMLoc ErrorLoc = IDLoc;
-      if (ErrorInfo != ~0ULL) {
-        if (ErrorInfo >= Operands.size()) {
-          if (isForcedVOP3()) {
-            // If 64-bit encoding has been forced we can end up with no
-            // clamp or omod operands if none of the registers have modifiers,
-            // so we need to add these to the operand list.
-            AMDGPUOperand &LastOp =
-                ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
-            if (LastOp.isRegKind() ||
-               (LastOp.isImm() &&
-                LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
-              SMLoc S = Parser.getTok().getLoc();
-              Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                                 AMDGPUOperand::ImmTyClamp));
-              Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                                 AMDGPUOperand::ImmTyOMod));
-              bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
-                                                 Out, ErrorInfo,
-                                                 MatchingInlineAsm);
-              if (!Res)
-                return Res;
-            }
-
-          }
-          return Error(IDLoc, "too few operands for instruction");
-        }
-
-        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
-        if (ErrorLoc == SMLoc())
-          ErrorLoc = IDLoc;
-      }
-      return Error(ErrorLoc, "invalid operand for instruction");
-    }
-  }
-  llvm_unreachable("Implement any new match types added!");
-}
-
-bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
-  return true;
-}
-
-static bool operandsHaveModifiers(const OperandVector &Operands) {
-
-  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
-    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
-    if (Op.isRegKind() && Op.hasModifiers())
-      return true;
-    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
-                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
-      return true;
-  }
-  return false;
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
-
-  // Try to parse with a custom parser
-  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
-
-  // If we successfully parsed the operand or if there as an error parsing,
-  // we are done.
-  //
-  // If we are parsing after we reach EndOfStatement then this means we
-  // are appending default values to the Operands list.  This is only done
-  // by custom parser, so we shouldn't continue on to the generic parsing.
-  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
-      getLexer().is(AsmToken::EndOfStatement))
-    return ResTy;
-
-  bool Negate = false, Abs = false;
-  if (getLexer().getKind()== AsmToken::Minus) {
-    Parser.Lex();
-    Negate = true;
-  }
-
-  if (getLexer().getKind() == AsmToken::Pipe) {
-    Parser.Lex();
-    Abs = true;
-  }
-
-  switch(getLexer().getKind()) {
-    case AsmToken::Integer: {
-      SMLoc S = Parser.getTok().getLoc();
-      int64_t IntVal;
-      if (getParser().parseAbsoluteExpression(IntVal))
-        return MatchOperand_ParseFail;
-      APInt IntVal32(32, IntVal);
-      if (IntVal32.getSExtValue() != IntVal) {
-        Error(S, "invalid immediate: only 32-bit values are legal");
-        return MatchOperand_ParseFail;
-      }
-
-      IntVal = IntVal32.getSExtValue();
-      if (Negate)
-        IntVal *= -1;
-      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
-      return MatchOperand_Success;
-    }
-    case AsmToken::Real: {
-      // FIXME: We should emit an error if a double precisions floating-point
-      // value is used.  I'm not sure the best way to detect this.
-      SMLoc S = Parser.getTok().getLoc();
-      int64_t IntVal;
-      if (getParser().parseAbsoluteExpression(IntVal))
-        return MatchOperand_ParseFail;
-
-      APFloat F((float)BitsToDouble(IntVal));
-      if (Negate)
-        F.changeSign();
-      Operands.push_back(
-          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
-      return MatchOperand_Success;
-    }
-    case AsmToken::Identifier: {
-      SMLoc S, E;
-      unsigned RegNo;
-      if (!ParseRegister(RegNo, S, E)) {
-
-        bool HasModifiers = operandsHaveModifiers(Operands);
-        unsigned Modifiers = 0;
-
-        if (Negate)
-          Modifiers |= 0x1;
-
-        if (Abs) {
-          if (getLexer().getKind() != AsmToken::Pipe)
-            return MatchOperand_ParseFail;
-          Parser.Lex();
-          Modifiers |= 0x2;
-        }
-
-        if (Modifiers && !HasModifiers) {
-          // We are adding a modifier to src1 or src2 and previous sources
-          // don't have modifiers, so we need to go back and empty modifers
-          // for each previous source.
-          for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
-               --PrevRegIdx) {
-
-            AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
-            RegOp.setModifiers(0);
-          }
-        }
-
-
-        Operands.push_back(AMDGPUOperand::CreateReg(
-            RegNo, S, E, getContext().getRegisterInfo(),
-            isForcedVOP3()));
-
-        if (HasModifiers || Modifiers) {
-          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
-          RegOp.setModifiers(Modifiers);
-
-        }
-     }  else {
-      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
-                                                    S));
-      Parser.Lex();
-     }
-     return MatchOperand_Success;
-    }
-    default:
-      return MatchOperand_NoMatch;
-  }
-}
-
-bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
-                                       StringRef Name,
-                                       SMLoc NameLoc, OperandVector &Operands) {
-
-  // Clear any forced encodings from the previous instruction.
-  setForcedEncodingSize(0);
-
-  if (Name.endswith("_e64"))
-    setForcedEncodingSize(64);
-  else if (Name.endswith("_e32"))
-    setForcedEncodingSize(32);
-
-  // Add the instruction mnemonic
-  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
-
-  while (!getLexer().is(AsmToken::EndOfStatement)) {
-    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
-
-    // Eat the comma or space if there is one.
-    if (getLexer().is(AsmToken::Comma))
-      Parser.Lex();
-
-    switch (Res) {
-      case MatchOperand_Success: break;
-      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
-                                                "failed parsing operand.");
-      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
-                                              "not a valid operand.");
-    }
-  }
-
-  // Once we reach end of statement, continue parsing so we can add default
-  // values for optional arguments.
-  AMDGPUAsmParser::OperandMatchResultTy Res;
-  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
-    if (Res != MatchOperand_Success)
-      return Error(getLexer().getLoc(), "failed parsing operand.");
-  }
-  return false;
-}
-
-//===----------------------------------------------------------------------===//
-// Utility functions
-//===----------------------------------------------------------------------===//
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
-                                    int64_t Default) {
-
-  // We are at the end of the statement, and this is a default argument, so
-  // use a default value.
-  if (getLexer().is(AsmToken::EndOfStatement)) {
-    Int = Default;
-    return MatchOperand_Success;
-  }
-
-  switch(getLexer().getKind()) {
-    default: return MatchOperand_NoMatch;
-    case AsmToken::Identifier: {
-      StringRef OffsetName = Parser.getTok().getString();
-      if (!OffsetName.equals(Prefix))
-        return MatchOperand_NoMatch;
-
-      Parser.Lex();
-      if (getLexer().isNot(AsmToken::Colon))
-        return MatchOperand_ParseFail;
-
-      Parser.Lex();
-      if (getLexer().isNot(AsmToken::Integer))
-        return MatchOperand_ParseFail;
-
-      if (getParser().parseAbsoluteExpression(Int))
-        return MatchOperand_ParseFail;
-      break;
-    }
-  }
-  return MatchOperand_Success;
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
-                                    enum AMDGPUOperand::ImmTy ImmTy) {
-
-  SMLoc S = Parser.getTok().getLoc();
-  int64_t Offset = 0;
-
-  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
-  if (Res != MatchOperand_Success)
-    return Res;
-
-  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
-  return MatchOperand_Success;
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
-                               enum AMDGPUOperand::ImmTy ImmTy) {
-  int64_t Bit = 0;
-  SMLoc S = Parser.getTok().getLoc();
-
-  // We are at the end of the statement, and this is a default argument, so
-  // use a default value.
-  if (getLexer().isNot(AsmToken::EndOfStatement)) {
-    switch(getLexer().getKind()) {
-      case AsmToken::Identifier: {
-        StringRef Tok = Parser.getTok().getString();
-        if (Tok == Name) {
-          Bit = 1;
-          Parser.Lex();
-        } else if (Tok.startswith("no") && Tok.endswith(Name)) {
-          Bit = 0;
-          Parser.Lex();
-        } else {
-          return MatchOperand_NoMatch;
-        }
-        break;
-      }
-      default:
-        return MatchOperand_NoMatch;
-    }
-  }
-
-  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
-  return MatchOperand_Success;
-}
-
-static bool operandsHasOptionalOp(const OperandVector &Operands,
-                                  const OptionalOperand &OOp) {
-  for (unsigned i = 0; i < Operands.size(); i++) {
-    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
-    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
-        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
-      return true;
-
-  }
-  return false;
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
-                                   OperandVector &Operands) {
-  SMLoc S = Parser.getTok().getLoc();
-  for (const OptionalOperand &Op : OptionalOps) {
-    if (operandsHasOptionalOp(Operands, Op))
-      continue;
-    AMDGPUAsmParser::OperandMatchResultTy Res;
-    int64_t Value;
-    if (Op.IsBit) {
-      Res = parseNamedBit(Op.Name, Operands, Op.Type);
-      if (Res == MatchOperand_NoMatch)
-        continue;
-      return Res;
-    }
-
-    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
-
-    if (Res == MatchOperand_NoMatch)
-      continue;
-
-    if (Res != MatchOperand_Success)
-      return Res;
-
-    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
-      return MatchOperand_ParseFail;
-    }
-
-    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
-    return MatchOperand_Success;
-  }
-  return MatchOperand_NoMatch;
-}
-
-//===----------------------------------------------------------------------===//
-// ds
-//===----------------------------------------------------------------------===//
-
-static const OptionalOperand DSOptionalOps [] = {
-  {"offset",  AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
-  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
-};
-
-static const OptionalOperand DSOptionalOpsOff01 [] = {
-  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
-  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
-  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
-};
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
-  return parseOptionalOps(DSOptionalOps, Operands);
-}
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
-  return parseOptionalOps(DSOptionalOpsOff01, Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
-  SMLoc S = Parser.getTok().getLoc();
-  AMDGPUAsmParser::OperandMatchResultTy Res =
-    parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
-  if (Res == MatchOperand_NoMatch) {
-    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                       AMDGPUOperand::ImmTyOffset));
-    Res = MatchOperand_Success;
-  }
-  return Res;
-}
-
-bool AMDGPUOperand::isDSOffset() const {
-  return isImm() && isUInt<16>(getImm());
-}
-
-bool AMDGPUOperand::isDSOffset01() const {
-  return isImm() && isUInt<8>(getImm());
-}
-
-void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
-                                    const OperandVector &Operands) {
-
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
-    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
-    // Add the register arguments
-    if (Op.isReg()) {
-      Op.addRegOperands(Inst, 1);
-      continue;
-    }
-
-    // Handle optional arguments
-    OptionalIdx[Op.getImmTy()] = i;
-  }
-
-  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
-  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
-  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
-
-  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
-  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
-  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
-  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
-}
-
-void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
-
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-  bool GDSOnly = false;
-
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
-    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
-    // Add the register arguments
-    if (Op.isReg()) {
-      Op.addRegOperands(Inst, 1);
-      continue;
-    }
-
-    if (Op.isToken() && Op.getToken() == "gds") {
-      GDSOnly = true;
-      continue;
-    }
-
-    // Handle optional arguments
-    OptionalIdx[Op.getImmTy()] = i;
-  }
-
-  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
-  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
-
-  if (!GDSOnly) {
-    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
-    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
-  }
-  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
-}
-
-
-//===----------------------------------------------------------------------===//
-// s_waitcnt
-//===----------------------------------------------------------------------===//
-
-bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
-  StringRef CntName = Parser.getTok().getString();
-  int64_t CntVal;
-
-  Parser.Lex();
-  if (getLexer().isNot(AsmToken::LParen))
-    return true;
-
-  Parser.Lex();
-  if (getLexer().isNot(AsmToken::Integer))
-    return true;
-
-  if (getParser().parseAbsoluteExpression(CntVal))
-    return true;
-
-  if (getLexer().isNot(AsmToken::RParen))
-    return true;
-
-  Parser.Lex();
-  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
-    Parser.Lex();
-
-  int CntShift;
-  int CntMask;
-
-  if (CntName == "vmcnt") {
-    CntMask = 0xf;
-    CntShift = 0;
-  } else if (CntName == "expcnt") {
-    CntMask = 0x7;
-    CntShift = 4;
-  } else if (CntName == "lgkmcnt") {
-    CntMask = 0x7;
-    CntShift = 8;
-  } else {
-    return true;
-  }
-
-  IntVal &= ~(CntMask << CntShift);
-  IntVal |= (CntVal << CntShift);
-  return false;
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
-  // Disable all counters by default.
-  // vmcnt   [3:0]
-  // expcnt  [6:4]
-  // lgkmcnt [10:8]
-  int64_t CntVal = 0x77f;
-  SMLoc S = Parser.getTok().getLoc();
-
-  switch(getLexer().getKind()) {
-    default: return MatchOperand_ParseFail;
-    case AsmToken::Integer:
-      // The operand can be an integer value.
-      if (getParser().parseAbsoluteExpression(CntVal))
-        return MatchOperand_ParseFail;
-      break;
-
-    case AsmToken::Identifier:
-      do {
-        if (parseCnt(CntVal))
-          return MatchOperand_ParseFail;
-      } while(getLexer().isNot(AsmToken::EndOfStatement));
-      break;
-  }
-  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
-  return MatchOperand_Success;
-}
-
-bool AMDGPUOperand::isSWaitCnt() const {
-  return isImm();
-}
-
-//===----------------------------------------------------------------------===//
-// sopp branch targets
-//===----------------------------------------------------------------------===//
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
-  SMLoc S = Parser.getTok().getLoc();
-
-  switch (getLexer().getKind()) {
-    default: return MatchOperand_ParseFail;
-    case AsmToken::Integer: {
-      int64_t Imm;
-      if (getParser().parseAbsoluteExpression(Imm))
-        return MatchOperand_ParseFail;
-      Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
-      return MatchOperand_Success;
-    }
-
-    case AsmToken::Identifier:
-      Operands.push_back(AMDGPUOperand::CreateExpr(
-          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
-                                  Parser.getTok().getString()), getContext()), S));
-      Parser.Lex();
-      return MatchOperand_Success;
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// flat
-//===----------------------------------------------------------------------===//
-
-static const OptionalOperand FlatOptionalOps [] = {
-  {"glc",    AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
-  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
-  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
-};
-
-static const OptionalOperand FlatAtomicOptionalOps [] = {
-  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
-  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
-};
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
-  return parseOptionalOps(FlatOptionalOps, Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
-  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
-}
-
-void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
-                               const OperandVector &Operands) {
-  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
-    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
-    // Add the register arguments
-    if (Op.isReg()) {
-      Op.addRegOperands(Inst, 1);
-      continue;
-    }
-
-    // Handle 'glc' token which is sometimes hard-coded into the
-    // asm string.  There are no MCInst operands for these.
-    if (Op.isToken())
-      continue;
-
-    // Handle optional arguments
-    OptionalIdx[Op.getImmTy()] = i;
-
-  }
-
-  // flat atomic instructions don't have a glc argument.
-  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
-    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
-    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
-  }
-
-  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
-  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
-
-  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
-}
-
-//===----------------------------------------------------------------------===//
-// mubuf
-//===----------------------------------------------------------------------===//
-
-static const OptionalOperand MubufOptionalOps [] = {
-  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
-  {"glc",    AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
-  {"slc",    AMDGPUOperand::ImmTySLC, true, 0, nullptr},
-  {"tfe",    AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
-};
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
-  return parseOptionalOps(MubufOptionalOps, Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
-  return parseIntWithPrefix("offset", Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
-  return parseNamedBit("glc", Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
-  return parseNamedBit("slc", Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
-  return parseNamedBit("tfe", Operands);
-}
-
-bool AMDGPUOperand::isMubufOffset() const {
-  return isImm() && isUInt<12>(getImm());
-}
-
-void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
-                               const OperandVector &Operands) {
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
-    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
-    // Add the register arguments
-    if (Op.isReg()) {
-      Op.addRegOperands(Inst, 1);
-      continue;
-    }
-
-    // Handle the case where soffset is an immediate
-    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
-      Op.addImmOperands(Inst, 1);
-      continue;
-    }
-
-    // Handle tokens like 'offen' which are sometimes hard-coded into the
-    // asm string.  There are no MCInst operands for these.
-    if (Op.isToken()) {
-      continue;
-    }
-    assert(Op.isImm());
-
-    // Handle optional arguments
-    OptionalIdx[Op.getImmTy()] = i;
-  }
-
-  assert(OptionalIdx.size() == 4);
-
-  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
-  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
-  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
-  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
-
-  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
-}
-
-//===----------------------------------------------------------------------===//
-// mimg
-//===----------------------------------------------------------------------===//
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
-  return parseIntWithPrefix("dmask", Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
-  return parseNamedBit("unorm", Operands);
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseR128(OperandVector &Operands) {
-  return parseNamedBit("r128", Operands);
-}
-
-//===----------------------------------------------------------------------===//
-// vop3
-//===----------------------------------------------------------------------===//
-
-static bool ConvertOmodMul(int64_t &Mul) {
-  if (Mul != 1 && Mul != 2 && Mul != 4)
-    return false;
-
-  Mul >>= 1;
-  return true;
-}
-
-static bool ConvertOmodDiv(int64_t &Div) {
-  if (Div == 1) {
-    Div = 0;
-    return true;
-  }
-
-  if (Div == 2) {
-    Div = 3;
-    return true;
-  }
-
-  return false;
-}
-
-static const OptionalOperand VOP3OptionalOps [] = {
-  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
-  {"mul",   AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
-  {"div",   AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
-};
-
-static bool isVOP3(OperandVector &Operands) {
-  if (operandsHaveModifiers(Operands))
-    return true;
-
-  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
-
-  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
-    return true;
-
-  if (Operands.size() >= 5)
-    return true;
-
-  if (Operands.size() > 3) {
-    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
-    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
-                            Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
-      return true;
-  }
-  return false;
-}
-
-AMDGPUAsmParser::OperandMatchResultTy
-AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
-
-  // The value returned by this function may change after parsing
-  // an operand so store the original value here.
-  bool HasModifiers = operandsHaveModifiers(Operands);
-
-  bool IsVOP3 = isVOP3(Operands);
-  if (HasModifiers || IsVOP3 ||
-      getLexer().isNot(AsmToken::EndOfStatement) ||
-      getForcedEncodingSize() == 64) {
-
-    AMDGPUAsmParser::OperandMatchResultTy Res =
-        parseOptionalOps(VOP3OptionalOps, Operands);
-
-    if (!HasModifiers && Res == MatchOperand_Success) {
-      // We have added a modifier operation, so we need to make sure all
-      // previous register operands have modifiers
-      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
-        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
-        if (Op.isReg())
-          Op.setModifiers(0);
-      }
-    }
-    return Res;
-  }
-  return MatchOperand_NoMatch;
-}
-
-void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
-  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
-  unsigned i = 2;
-
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
-
-  if (operandsHaveModifiers(Operands)) {
-    for (unsigned e = Operands.size(); i != e; ++i) {
-      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
-      if (Op.isRegWithInputMods()) {
-        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
-        continue;
-      }
-      OptionalIdx[Op.getImmTy()] = i;
-    }
-
-    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
-    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
-
-    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
-    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
-  } else {
-    for (unsigned e = Operands.size(); i != e; ++i)
-      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
-  }
-}
-
-/// Force static initialization.
-extern "C" void LLVMInitializeR600AsmParser() {
-  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
-  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
-}
-
-#define GET_REGISTER_MATCHER
-#define GET_MATCHER_IMPLEMENTATION
-#include "AMDGPUGenAsmMatcher.inc"
-

Removed: llvm/trunk/lib/Target/R600/AsmParser/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AsmParser/CMakeLists.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AsmParser/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/R600/AsmParser/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMR600AsmParser
-  AMDGPUAsmParser.cpp
-  )

Removed: llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/R600/AsmParser/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/R600/AsmParser/LLVMBuild.txt -------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = R600AsmParser
-parent = R600
-required_libraries = MC MCParser R600Desc R600Info Support
-add_to_library_groups = R600

Removed: llvm/trunk/lib/Target/R600/AsmParser/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AsmParser/Makefile?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/AsmParser/Makefile (original)
+++ llvm/trunk/lib/Target/R600/AsmParser/Makefile (removed)
@@ -1,15 +0,0 @@
-##===- lib/Target/R600/AsmParser/Makefile ----------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMR600AsmParser
-
-# Hack: we need to include 'main' R600 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/R600/CIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/CIInstructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/CIInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/CIInstructions.td (removed)
@@ -1,149 +0,0 @@
-//===-- CIInstructions.td - CI Instruction Defintions ---------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Instruction definitions for CI and newer.
-//===----------------------------------------------------------------------===//
-
-
-def isCIVI : Predicate <
-  "Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS || "
-  "Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS"
->, AssemblerPredicate<"FeatureCIInsts">;
-
-def HasFlatAddressSpace : Predicate<"Subtarget->hasFlatAddressSpace()">;
-
-//===----------------------------------------------------------------------===//
-// VOP1 Instructions
-//===----------------------------------------------------------------------===//
-
-let SubtargetPredicate = isCIVI in {
-
-defm V_TRUNC_F64 : VOP1Inst <vop1<0x17>, "v_trunc_f64",
-  VOP_F64_F64, ftrunc
->;
-defm V_CEIL_F64 : VOP1Inst <vop1<0x18>, "v_ceil_f64",
-  VOP_F64_F64, fceil
->;
-defm V_FLOOR_F64 : VOP1Inst <vop1<0x1A>, "v_floor_f64",
-  VOP_F64_F64, ffloor
->;
-defm V_RNDNE_F64 : VOP1Inst <vop1<0x19>, "v_rndne_f64",
-  VOP_F64_F64, frint
->;
-defm V_LOG_LEGACY_F32 : VOP1Inst <vop1<0x45, 0x4c>, "v_log_legacy_f32",
-  VOP_F32_F32
->;
-defm V_EXP_LEGACY_F32 : VOP1Inst <vop1<0x46, 0x4b>, "v_exp_legacy_f32",
-  VOP_F32_F32
->;
-
-//===----------------------------------------------------------------------===//
-// Flat Instructions
-//===----------------------------------------------------------------------===//
-
-def FLAT_LOAD_UBYTE : FLAT_Load_Helper <0x8, "flat_load_ubyte", VGPR_32>;
-def FLAT_LOAD_SBYTE : FLAT_Load_Helper <0x9, "flat_load_sbyte", VGPR_32>;
-def FLAT_LOAD_USHORT : FLAT_Load_Helper <0xa, "flat_load_ushort", VGPR_32>;
-def FLAT_LOAD_SSHORT : FLAT_Load_Helper <0xb, "flat_load_sshort", VGPR_32>;
-def FLAT_LOAD_DWORD : FLAT_Load_Helper <0xc, "flat_load_dword", VGPR_32>;
-def FLAT_LOAD_DWORDX2 : FLAT_Load_Helper <0xd, "flat_load_dwordx2", VReg_64>;
-def FLAT_LOAD_DWORDX4 : FLAT_Load_Helper <0xe, "flat_load_dwordx4", VReg_128>;
-def FLAT_LOAD_DWORDX3 : FLAT_Load_Helper <0xf, "flat_load_dwordx3", VReg_96>;
-def FLAT_STORE_BYTE : FLAT_Store_Helper <0x18, "flat_store_byte", VGPR_32>;
-def FLAT_STORE_SHORT : FLAT_Store_Helper <0x1a, "flat_store_short", VGPR_32>;
-def FLAT_STORE_DWORD : FLAT_Store_Helper <0x1c, "flat_store_dword", VGPR_32>;
-def FLAT_STORE_DWORDX2 : FLAT_Store_Helper <
-  0x1d, "flat_store_dwordx2", VReg_64
->;
-def FLAT_STORE_DWORDX4 : FLAT_Store_Helper <
-  0x1e, "flat_store_dwordx4", VReg_128
->;
-def FLAT_STORE_DWORDX3 : FLAT_Store_Helper <
-  0x1f, "flat_store_dwordx3", VReg_96
->;
-defm FLAT_ATOMIC_SWAP : FLAT_ATOMIC <0x30, "flat_atomic_swap", VGPR_32>;
-defm FLAT_ATOMIC_CMPSWAP : FLAT_ATOMIC <
-  0x31, "flat_atomic_cmpswap", VGPR_32, VReg_64
->;
-defm FLAT_ATOMIC_ADD : FLAT_ATOMIC <0x32, "flat_atomic_add", VGPR_32>;
-defm FLAT_ATOMIC_SUB : FLAT_ATOMIC <0x33, "flat_atomic_sub", VGPR_32>;
-defm FLAT_ATOMIC_RSUB : FLAT_ATOMIC <0x34, "flat_atomic_rsub", VGPR_32>;
-defm FLAT_ATOMIC_SMIN : FLAT_ATOMIC <0x35, "flat_atomic_smin", VGPR_32>;
-defm FLAT_ATOMIC_UMIN : FLAT_ATOMIC <0x36, "flat_atomic_umin", VGPR_32>;
-defm FLAT_ATOMIC_SMAX : FLAT_ATOMIC <0x37, "flat_atomic_smax", VGPR_32>;
-defm FLAT_ATOMIC_UMAX : FLAT_ATOMIC <0x38, "flat_atomic_umax", VGPR_32>;
-defm FLAT_ATOMIC_AND : FLAT_ATOMIC <0x39, "flat_atomic_and", VGPR_32>;
-defm FLAT_ATOMIC_OR : FLAT_ATOMIC <0x3a, "flat_atomic_or", VGPR_32>;
-defm FLAT_ATOMIC_XOR : FLAT_ATOMIC <0x3b, "flat_atomic_xor", VGPR_32>;
-defm FLAT_ATOMIC_INC : FLAT_ATOMIC <0x3c, "flat_atomic_inc", VGPR_32>;
-defm FLAT_ATOMIC_DEC : FLAT_ATOMIC <0x3d, "flat_atomic_dec", VGPR_32>;
-defm FLAT_ATOMIC_FCMPSWAP : FLAT_ATOMIC <
-  0x3e, "flat_atomic_fcmpswap", VGPR_32, VReg_64
->;
-defm FLAT_ATOMIC_FMIN : FLAT_ATOMIC <0x3f, "flat_atomic_fmin", VGPR_32>;
-defm FLAT_ATOMIC_FMAX : FLAT_ATOMIC <0x40, "flat_atomic_fmax", VGPR_32>;
-defm FLAT_ATOMIC_SWAP_X2 : FLAT_ATOMIC <0x50, "flat_atomic_swap_x2", VReg_64>;
-defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_ATOMIC <
-  0x51, "flat_atomic_cmpswap_x2", VReg_64, VReg_128
->;
-defm FLAT_ATOMIC_ADD_X2 : FLAT_ATOMIC <0x52, "flat_atomic_add_x2", VReg_64>;
-defm FLAT_ATOMIC_SUB_X2 : FLAT_ATOMIC <0x53, "flat_atomic_sub_x2", VReg_64>;
-defm FLAT_ATOMIC_RSUB_X2 : FLAT_ATOMIC <0x54, "flat_atomic_rsub_x2", VReg_64>;
-defm FLAT_ATOMIC_SMIN_X2 : FLAT_ATOMIC <0x55, "flat_atomic_smin_x2", VReg_64>;
-defm FLAT_ATOMIC_UMIN_X2 : FLAT_ATOMIC <0x56, "flat_atomic_umin_x2", VReg_64>;
-defm FLAT_ATOMIC_SMAX_X2 : FLAT_ATOMIC <0x57, "flat_atomic_smax_x2", VReg_64>;
-defm FLAT_ATOMIC_UMAX_X2 : FLAT_ATOMIC <0x58, "flat_atomic_umax_x2", VReg_64>;
-defm FLAT_ATOMIC_AND_X2 : FLAT_ATOMIC <0x59, "flat_atomic_and_x2", VReg_64>;
-defm FLAT_ATOMIC_OR_X2 : FLAT_ATOMIC <0x5a, "flat_atomic_or_x2", VReg_64>;
-defm FLAT_ATOMIC_XOR_X2 : FLAT_ATOMIC <0x5b, "flat_atomic_xor_x2", VReg_64>;
-defm FLAT_ATOMIC_INC_X2 : FLAT_ATOMIC <0x5c, "flat_atomic_inc_x2", VReg_64>;
-defm FLAT_ATOMIC_DEC_X2 : FLAT_ATOMIC <0x5d, "flat_atomic_dec_x2", VReg_64>;
-defm FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_ATOMIC <
-  0x5e, "flat_atomic_fcmpswap_x2", VReg_64, VReg_128
->;
-defm FLAT_ATOMIC_FMIN_X2 : FLAT_ATOMIC <0x5f, "flat_atomic_fmin_x2", VReg_64>;
-defm FLAT_ATOMIC_FMAX_X2 : FLAT_ATOMIC <0x60, "flat_atomic_fmax_x2", VReg_64>;
-
-} // End SubtargetPredicate = isCIVI
-
-//===----------------------------------------------------------------------===//
-// Flat Patterns
-//===----------------------------------------------------------------------===//
-
-let Predicates = [HasFlatAddressSpace] in {
-
-class FLATLoad_Pattern <FLAT Instr_ADDR64, ValueType vt,
-                             PatFrag flat_ld> :
-  Pat <(vt (flat_ld i64:$ptr)),
-       (Instr_ADDR64 $ptr, 0, 0, 0)
->;
-
-def : FLATLoad_Pattern <FLAT_LOAD_SBYTE, i32, sextloadi8_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_UBYTE, i32, az_extloadi8_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_SSHORT, i32, sextloadi16_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_USHORT, i32, az_extloadi16_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORD, i32, flat_load>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, i64, flat_load>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, i64, az_extloadi32_flat>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX2, v2i32, flat_load>;
-def : FLATLoad_Pattern <FLAT_LOAD_DWORDX4, v4i32, flat_load>;
-
-class FLATStore_Pattern <FLAT Instr, ValueType vt, PatFrag st> :
-  Pat <(st vt:$value, i64:$ptr),
-        (Instr $value, $ptr, 0, 0, 0)
-  >;
-
-def : FLATStore_Pattern <FLAT_STORE_BYTE, i32, truncstorei8_flat>;
-def : FLATStore_Pattern <FLAT_STORE_SHORT, i32, truncstorei16_flat>;
-def : FLATStore_Pattern <FLAT_STORE_DWORD, i32, flat_store>;
-def : FLATStore_Pattern <FLAT_STORE_DWORDX2, i64, flat_store>;
-def : FLATStore_Pattern <FLAT_STORE_DWORDX2, v2i32, flat_store>;
-def : FLATStore_Pattern <FLAT_STORE_DWORDX4, v4i32, flat_store>;
-
-} // End HasFlatAddressSpace predicate
-

Removed: llvm/trunk/lib/Target/R600/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/CMakeLists.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/R600/CMakeLists.txt (removed)
@@ -1,64 +0,0 @@
-set(LLVM_TARGET_DEFINITIONS AMDGPU.td)
-
-tablegen(LLVM AMDGPUGenRegisterInfo.inc -gen-register-info)
-tablegen(LLVM AMDGPUGenInstrInfo.inc -gen-instr-info)
-tablegen(LLVM AMDGPUGenDAGISel.inc -gen-dag-isel)
-tablegen(LLVM AMDGPUGenCallingConv.inc -gen-callingconv)
-tablegen(LLVM AMDGPUGenSubtargetInfo.inc -gen-subtarget)
-tablegen(LLVM AMDGPUGenIntrinsics.inc -gen-tgt-intrinsic)
-tablegen(LLVM AMDGPUGenMCCodeEmitter.inc -gen-emitter)
-tablegen(LLVM AMDGPUGenDFAPacketizer.inc -gen-dfa-packetizer)
-tablegen(LLVM AMDGPUGenAsmWriter.inc -gen-asm-writer)
-tablegen(LLVM AMDGPUGenAsmMatcher.inc -gen-asm-matcher)
-add_public_tablegen_target(AMDGPUCommonTableGen)
-
-add_llvm_target(R600CodeGen
-  AMDILCFGStructurizer.cpp
-  AMDGPUAlwaysInlinePass.cpp
-  AMDGPUAsmPrinter.cpp
-  AMDGPUFrameLowering.cpp
-  AMDGPUIntrinsicInfo.cpp
-  AMDGPUISelDAGToDAG.cpp
-  AMDGPUMCInstLower.cpp
-  AMDGPUMachineFunction.cpp
-  AMDGPUSubtarget.cpp
-  AMDGPUTargetMachine.cpp
-  AMDGPUTargetTransformInfo.cpp
-  AMDGPUISelLowering.cpp
-  AMDGPUInstrInfo.cpp
-  AMDGPUPromoteAlloca.cpp
-  AMDGPURegisterInfo.cpp
-  R600ClauseMergePass.cpp
-  R600ControlFlowFinalizer.cpp
-  R600EmitClauseMarkers.cpp
-  R600ExpandSpecialInstrs.cpp
-  R600InstrInfo.cpp
-  R600ISelLowering.cpp
-  R600MachineFunctionInfo.cpp
-  R600MachineScheduler.cpp
-  R600OptimizeVectorRegisters.cpp
-  R600Packetizer.cpp
-  R600RegisterInfo.cpp
-  R600TextureIntrinsicsReplacer.cpp
-  SIAnnotateControlFlow.cpp
-  SIFixControlFlowLiveIntervals.cpp
-  SIFixSGPRCopies.cpp
-  SIFixSGPRLiveRanges.cpp
-  SIFoldOperands.cpp
-  SIInsertWaits.cpp
-  SIInstrInfo.cpp
-  SIISelLowering.cpp
-  SILoadStoreOptimizer.cpp
-  SILowerControlFlow.cpp
-  SILowerI1Copies.cpp
-  SIMachineFunctionInfo.cpp
-  SIPrepareScratchRegs.cpp
-  SIRegisterInfo.cpp
-  SIShrinkInstructions.cpp
-  SITypeRewriter.cpp
-  )
-
-add_subdirectory(AsmParser)
-add_subdirectory(InstPrinter)
-add_subdirectory(TargetInfo)
-add_subdirectory(MCTargetDesc)

Removed: llvm/trunk/lib/Target/R600/CaymanInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/CaymanInstructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/CaymanInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/CaymanInstructions.td (removed)
@@ -1,226 +0,0 @@
-//===-- CaymanInstructions.td - CM Instruction defs  -------*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// TableGen definitions for instructions which are available only on Cayman
-// family GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-def isCayman : Predicate<"Subtarget->hasCaymanISA()">;
-
-//===----------------------------------------------------------------------===//
-// Cayman Instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isCayman] in {
-
-def MULADD_INT24_cm : R600_3OP <0x08, "MULADD_INT24",
-  [(set i32:$dst, (AMDGPUmad_i24 i32:$src0, i32:$src1, i32:$src2))], VecALU
->;
-def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
-  [(set i32:$dst, (AMDGPUmul_i24 i32:$src0, i32:$src1))], VecALU
->;
-
-def : IMad24Pat<MULADD_INT24_cm>;
-
-let isVector = 1 in {
-
-def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;
-
-def MULLO_INT_cm : MULLO_INT_Common<0x8F>;
-def MULHI_INT_cm : MULHI_INT_Common<0x90>;
-def MULLO_UINT_cm : MULLO_UINT_Common<0x91>;
-def MULHI_UINT_cm : MULHI_UINT_Common<0x92>;
-def RECIPSQRT_CLAMPED_cm : RECIPSQRT_CLAMPED_Common<0x87>;
-def EXP_IEEE_cm : EXP_IEEE_Common<0x81>;
-def LOG_IEEE_cm : LOG_IEEE_Common<0x83>;
-def RECIP_CLAMPED_cm : RECIP_CLAMPED_Common<0x84>;
-def RECIPSQRT_IEEE_cm : RECIPSQRT_IEEE_Common<0x89>;
-def SIN_cm : SIN_Common<0x8D>;
-def COS_cm : COS_Common<0x8E>;
-} // End isVector = 1
-
-def : RsqPat<RECIPSQRT_IEEE_cm, f32>;
-
-def : POW_Common <LOG_IEEE_cm, EXP_IEEE_cm, MUL>;
-
-defm DIV_cm : DIV_Common<RECIP_IEEE_cm>;
-defm : Expand24UBitOps<MULLO_UINT_cm, ADD_INT>;
-
-// RECIP_UINT emulation for Cayman
-// The multiplication scales from [0,1] to the unsigned integer range
-def : Pat <
-  (AMDGPUurecip i32:$src0),
-  (FLT_TO_UINT_eg (MUL_IEEE (RECIP_IEEE_cm (UINT_TO_FLT_eg $src0)),
-                            (MOV_IMM_I32 CONST.FP_UINT_MAX_PLUS_1)))
->;
-
-  def CF_END_CM : CF_CLAUSE_EG<32, (ins), "CF_END"> {
-    let ADDR = 0;
-    let POP_COUNT = 0;
-    let COUNT = 0;
-  }
-
-
-def : Pat<(fsqrt f32:$src), (MUL R600_Reg32:$src, (RECIPSQRT_CLAMPED_cm $src))>;
-
-class RAT_STORE_DWORD <RegisterClass rc, ValueType vt, bits<4> mask> :
-  CF_MEM_RAT_CACHELESS <0x14, 0, mask,
-                        (ins rc:$rw_gpr, R600_TReg32_X:$index_gpr),
-                        "STORE_DWORD $rw_gpr, $index_gpr",
-                        [(global_store vt:$rw_gpr, i32:$index_gpr)]> {
-  let eop = 0; // This bit is not used on Cayman.
-}
-
-def RAT_STORE_DWORD32 : RAT_STORE_DWORD <R600_TReg32_X, i32, 0x1>;
-def RAT_STORE_DWORD64 : RAT_STORE_DWORD <R600_Reg64, v2i32, 0x3>;
-def RAT_STORE_DWORD128 : RAT_STORE_DWORD <R600_Reg128, v4i32, 0xf>;
-
-class VTX_READ_cm <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
-    : VTX_WORD0_cm, VTX_READ<name, buffer_id, outs, pattern> {
-
-  // Static fields
-  let VC_INST = 0;
-  let FETCH_TYPE = 2;
-  let FETCH_WHOLE_QUAD = 0;
-  let BUFFER_ID = buffer_id;
-  let SRC_REL = 0;
-  // XXX: We can infer this field based on the SRC_GPR.  This would allow us
-  // to store vertex addresses in any channel, not just X.
-  let SRC_SEL_X = 0;
-  let SRC_SEL_Y = 0;
-  let STRUCTURED_READ = 0;
-  let LDS_REQ = 0;
-  let COALESCED_READ = 0;
-
-  let Inst{31-0} = Word0;
-}
-
-class VTX_READ_8_cm <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_cm <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_TReg32_X:$dst_gpr), pattern> {
-
-  let DST_SEL_X = 0;
-  let DST_SEL_Y = 7;   // Masked
-  let DST_SEL_Z = 7;   // Masked
-  let DST_SEL_W = 7;   // Masked
-  let DATA_FORMAT = 1; // FMT_8
-}
-
-class VTX_READ_16_cm <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_cm <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_TReg32_X:$dst_gpr), pattern> {
-  let DST_SEL_X = 0;
-  let DST_SEL_Y = 7;   // Masked
-  let DST_SEL_Z = 7;   // Masked
-  let DST_SEL_W = 7;   // Masked
-  let DATA_FORMAT = 5; // FMT_16
-
-}
-
-class VTX_READ_32_cm <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_cm <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_TReg32_X:$dst_gpr), pattern> {
-
-  let DST_SEL_X        = 0;
-  let DST_SEL_Y        = 7;   // Masked
-  let DST_SEL_Z        = 7;   // Masked
-  let DST_SEL_W        = 7;   // Masked
-  let DATA_FORMAT      = 0xD; // COLOR_32
-
-  // This is not really necessary, but there were some GPU hangs that appeared
-  // to be caused by ALU instructions in the next instruction group that wrote
-  // to the $src_gpr registers of the VTX_READ.
-  // e.g.
-  // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
-  // %T2_X<def> = MOV %ZERO
-  //Adding this constraint prevents this from happening.
-  let Constraints = "$src_gpr.ptr = $dst_gpr";
-}
-
-class VTX_READ_64_cm <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_cm <"VTX_READ_64 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_Reg64:$dst_gpr), pattern> {
-
-  let DST_SEL_X        = 0;
-  let DST_SEL_Y        = 1;
-  let DST_SEL_Z        = 7;
-  let DST_SEL_W        = 7;
-  let DATA_FORMAT      = 0x1D; // COLOR_32_32
-}
-
-class VTX_READ_128_cm <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_cm <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
-                   (outs R600_Reg128:$dst_gpr), pattern> {
-
-  let DST_SEL_X        =  0;
-  let DST_SEL_Y        =  1;
-  let DST_SEL_Z        =  2;
-  let DST_SEL_W        =  3;
-  let DATA_FORMAT      =  0x22; // COLOR_32_32_32_32
-
-  // XXX: Need to force VTX_READ_128 instructions to write to the same register
-  // that holds its buffer address to avoid potential hangs.  We can't use
-  // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
-  // registers are different sizes.
-}
-
-//===----------------------------------------------------------------------===//
-// VTX Read from parameter memory space
-//===----------------------------------------------------------------------===//
-def VTX_READ_PARAM_8_cm : VTX_READ_8_cm <0,
-  [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_16_cm : VTX_READ_16_cm <0,
-  [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_32_cm : VTX_READ_32_cm <0,
-  [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_64_cm : VTX_READ_64_cm <0,
-  [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_128_cm : VTX_READ_128_cm <0,
-  [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-//===----------------------------------------------------------------------===//
-// VTX Read from global memory space
-//===----------------------------------------------------------------------===//
-
-// 8-bit reads
-def VTX_READ_GLOBAL_8_cm : VTX_READ_8_cm <1,
-  [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_GLOBAL_16_cm : VTX_READ_16_cm <1,
-  [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
->;
-
-// 32-bit reads
-def VTX_READ_GLOBAL_32_cm : VTX_READ_32_cm <1,
-  [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 64-bit reads
-def VTX_READ_GLOBAL_64_cm : VTX_READ_64_cm <1,
-  [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 128-bit reads
-def VTX_READ_GLOBAL_128_cm : VTX_READ_128_cm <1,
-  [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-} // End isCayman
-

Removed: llvm/trunk/lib/Target/R600/EvergreenInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/EvergreenInstructions.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/EvergreenInstructions.td (original)
+++ llvm/trunk/lib/Target/R600/EvergreenInstructions.td (removed)
@@ -1,670 +0,0 @@
-//===-- EvergreenInstructions.td - EG Instruction defs  ----*- tablegen -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// TableGen definitions for instructions which are:
-// - Available to Evergreen and newer VLIW4/VLIW5 GPUs
-// - Available only on Evergreen family GPUs.
-//
-//===----------------------------------------------------------------------===//
-
-def isEG : Predicate<
-  "Subtarget->getGeneration() >= AMDGPUSubtarget::EVERGREEN && "
-  "Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS && "
-  "!Subtarget->hasCaymanISA()"
->;
-
-def isEGorCayman : Predicate<
-  "Subtarget->getGeneration() == AMDGPUSubtarget::EVERGREEN ||"
-  "Subtarget->getGeneration() ==AMDGPUSubtarget::NORTHERN_ISLANDS"
->;
-
-//===----------------------------------------------------------------------===//
-// Evergreen / Cayman store instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isEGorCayman] in {
-
-class CF_MEM_RAT_CACHELESS <bits<6> rat_inst, bits<4> rat_id, bits<4> mask, dag ins,
-                           string name, list<dag> pattern>
-    : EG_CF_RAT <0x57, rat_inst, rat_id, mask, (outs), ins,
-                 "MEM_RAT_CACHELESS "#name, pattern>;
-
-class CF_MEM_RAT <bits<6> rat_inst, bits<4> rat_id, dag ins, string name,
-                  list<dag> pattern>
-    : EG_CF_RAT <0x56, rat_inst, rat_id, 0xf /* mask */, (outs), ins,
-                 "MEM_RAT "#name, pattern>;
-
-def RAT_MSKOR : CF_MEM_RAT <0x11, 0,
-  (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr),
-  "MSKOR $rw_gpr.XW, $index_gpr",
-  [(mskor_global v4i32:$rw_gpr, i32:$index_gpr)]
-> {
-  let eop = 0;
-}
-
-} // End let Predicates = [isEGorCayman]
-
-//===----------------------------------------------------------------------===//
-// Evergreen Only instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isEG] in {
-
-def RECIP_IEEE_eg : RECIP_IEEE_Common<0x86>;
-defm DIV_eg : DIV_Common<RECIP_IEEE_eg>;
-
-def MULLO_INT_eg : MULLO_INT_Common<0x8F>;
-def MULHI_INT_eg : MULHI_INT_Common<0x90>;
-def MULLO_UINT_eg : MULLO_UINT_Common<0x91>;
-def MULHI_UINT_eg : MULHI_UINT_Common<0x92>;
-def RECIP_UINT_eg : RECIP_UINT_Common<0x94>;
-def RECIPSQRT_CLAMPED_eg : RECIPSQRT_CLAMPED_Common<0x87>;
-def EXP_IEEE_eg : EXP_IEEE_Common<0x81>;
-def LOG_IEEE_eg : LOG_IEEE_Common<0x83>;
-def RECIP_CLAMPED_eg : RECIP_CLAMPED_Common<0x84>;
-def RECIPSQRT_IEEE_eg : RECIPSQRT_IEEE_Common<0x89>;
-def : RsqPat<RECIPSQRT_IEEE_eg, f32>;
-def SIN_eg : SIN_Common<0x8D>;
-def COS_eg : COS_Common<0x8E>;
-
-def : POW_Common <LOG_IEEE_eg, EXP_IEEE_eg, MUL>;
-def : Pat<(fsqrt f32:$src), (MUL $src, (RECIPSQRT_CLAMPED_eg $src))>;
-
-defm : Expand24IBitOps<MULLO_INT_eg, ADD_INT>;
-
-//===----------------------------------------------------------------------===//
-// Memory read/write instructions
-//===----------------------------------------------------------------------===//
-
-let usesCustomInserter = 1 in {
-
-// 32-bit store
-def RAT_WRITE_CACHELESS_32_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x1,
-  (ins R600_TReg32_X:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
-  "STORE_RAW $rw_gpr, $index_gpr, $eop",
-  [(global_store i32:$rw_gpr, i32:$index_gpr)]
->;
-
-// 64-bit store
-def RAT_WRITE_CACHELESS_64_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0x3,
-  (ins R600_Reg64:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
-  "STORE_RAW $rw_gpr.XY, $index_gpr, $eop",
-  [(global_store v2i32:$rw_gpr, i32:$index_gpr)]
->;
-
-//128-bit store
-def RAT_WRITE_CACHELESS_128_eg : CF_MEM_RAT_CACHELESS <0x2, 0, 0xf,
-  (ins R600_Reg128:$rw_gpr, R600_TReg32_X:$index_gpr, InstFlag:$eop),
-  "STORE_RAW $rw_gpr.XYZW, $index_gpr, $eop",
-  [(global_store v4i32:$rw_gpr, i32:$index_gpr)]
->;
-
-} // End usesCustomInserter = 1
-
-class VTX_READ_eg <string name, bits<8> buffer_id, dag outs, list<dag> pattern>
-    : VTX_WORD0_eg, VTX_READ<name, buffer_id, outs, pattern> {
-
-  // Static fields
-  let VC_INST = 0;
-  let FETCH_TYPE = 2;
-  let FETCH_WHOLE_QUAD = 0;
-  let BUFFER_ID = buffer_id;
-  let SRC_REL = 0;
-  // XXX: We can infer this field based on the SRC_GPR.  This would allow us
-  // to store vertex addresses in any channel, not just X.
-  let SRC_SEL_X = 0;
-
-  let Inst{31-0} = Word0;
-}
-
-class VTX_READ_8_eg <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_eg <"VTX_READ_8 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_TReg32_X:$dst_gpr), pattern> {
-
-  let MEGA_FETCH_COUNT = 1;
-  let DST_SEL_X = 0;
-  let DST_SEL_Y = 7;   // Masked
-  let DST_SEL_Z = 7;   // Masked
-  let DST_SEL_W = 7;   // Masked
-  let DATA_FORMAT = 1; // FMT_8
-}
-
-class VTX_READ_16_eg <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_eg <"VTX_READ_16 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_TReg32_X:$dst_gpr), pattern> {
-  let MEGA_FETCH_COUNT = 2;
-  let DST_SEL_X = 0;
-  let DST_SEL_Y = 7;   // Masked
-  let DST_SEL_Z = 7;   // Masked
-  let DST_SEL_W = 7;   // Masked
-  let DATA_FORMAT = 5; // FMT_16
-
-}
-
-class VTX_READ_32_eg <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_eg <"VTX_READ_32 $dst_gpr, $src_gpr", buffer_id,
-                   (outs R600_TReg32_X:$dst_gpr), pattern> {
-
-  let MEGA_FETCH_COUNT = 4;
-  let DST_SEL_X        = 0;
-  let DST_SEL_Y        = 7;   // Masked
-  let DST_SEL_Z        = 7;   // Masked
-  let DST_SEL_W        = 7;   // Masked
-  let DATA_FORMAT      = 0xD; // COLOR_32
-
-  // This is not really necessary, but there were some GPU hangs that appeared
-  // to be caused by ALU instructions in the next instruction group that wrote
-  // to the $src_gpr registers of the VTX_READ.
-  // e.g.
-  // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
-  // %T2_X<def> = MOV %ZERO
-  //Adding this constraint prevents this from happening.
-  let Constraints = "$src_gpr.ptr = $dst_gpr";
-}
-
-class VTX_READ_64_eg <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_eg <"VTX_READ_64 $dst_gpr.XY, $src_gpr", buffer_id,
-                   (outs R600_Reg64:$dst_gpr), pattern> {
-
-  let MEGA_FETCH_COUNT = 8;
-  let DST_SEL_X        = 0;
-  let DST_SEL_Y        = 1;
-  let DST_SEL_Z        = 7;
-  let DST_SEL_W        = 7;
-  let DATA_FORMAT      = 0x1D; // COLOR_32_32
-}
-
-class VTX_READ_128_eg <bits<8> buffer_id, list<dag> pattern>
-    : VTX_READ_eg <"VTX_READ_128 $dst_gpr.XYZW, $src_gpr", buffer_id,
-                   (outs R600_Reg128:$dst_gpr), pattern> {
-
-  let MEGA_FETCH_COUNT = 16;
-  let DST_SEL_X        =  0;
-  let DST_SEL_Y        =  1;
-  let DST_SEL_Z        =  2;
-  let DST_SEL_W        =  3;
-  let DATA_FORMAT      =  0x22; // COLOR_32_32_32_32
-
-  // XXX: Need to force VTX_READ_128 instructions to write to the same register
-  // that holds its buffer address to avoid potential hangs.  We can't use
-  // the same constraint as VTX_READ_32_eg, because the $src_gpr.ptr and $dst
-  // registers are different sizes.
-}
-
-//===----------------------------------------------------------------------===//
-// VTX Read from parameter memory space
-//===----------------------------------------------------------------------===//
-
-def VTX_READ_PARAM_8_eg : VTX_READ_8_eg <0,
-  [(set i32:$dst_gpr, (load_param_exti8 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_16_eg : VTX_READ_16_eg <0,
-  [(set i32:$dst_gpr, (load_param_exti16 ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_32_eg : VTX_READ_32_eg <0,
-  [(set i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_64_eg : VTX_READ_64_eg <0,
-  [(set v2i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_PARAM_128_eg : VTX_READ_128_eg <0,
-  [(set v4i32:$dst_gpr, (load_param ADDRVTX_READ:$src_gpr))]
->;
-
-//===----------------------------------------------------------------------===//
-// VTX Read from global memory space
-//===----------------------------------------------------------------------===//
-
-// 8-bit reads
-def VTX_READ_GLOBAL_8_eg : VTX_READ_8_eg <1,
-  [(set i32:$dst_gpr, (az_extloadi8_global ADDRVTX_READ:$src_gpr))]
->;
-
-def VTX_READ_GLOBAL_16_eg : VTX_READ_16_eg <1,
-  [(set i32:$dst_gpr, (az_extloadi16_global ADDRVTX_READ:$src_gpr))]
->;
-
-// 32-bit reads
-def VTX_READ_GLOBAL_32_eg : VTX_READ_32_eg <1,
-  [(set i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 64-bit reads
-def VTX_READ_GLOBAL_64_eg : VTX_READ_64_eg <1,
-  [(set v2i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-// 128-bit reads
-def VTX_READ_GLOBAL_128_eg : VTX_READ_128_eg <1,
-  [(set v4i32:$dst_gpr, (global_load ADDRVTX_READ:$src_gpr))]
->;
-
-} // End Predicates = [isEG]
-
-//===----------------------------------------------------------------------===//
-// Evergreen / Cayman Instructions
-//===----------------------------------------------------------------------===//
-
-let Predicates = [isEGorCayman] in {
-
-// Should be predicated on FeatureFP64
-// def FMA_64 : R600_3OP <
-//   0xA, "FMA_64",
-//   [(set f64:$dst, (fma f64:$src0, f64:$src1, f64:$src2))]
-// >;
-
-// BFE_UINT - bit_extract, an optimization for mask and shift
-// Src0 = Input
-// Src1 = Offset
-// Src2 = Width
-//
-// bit_extract = (Input << (32 - Offset - Width)) >> (32 - Width)
-//
-// Example Usage:
-// (Offset, Width)
-//
-// (0, 8)  = (Input << 24) >> 24 = (Input &  0xff)       >> 0
-// (8, 8)  = (Input << 16) >> 24 = (Input &  0xffff)     >> 8
-// (16, 8) = (Input <<  8) >> 24 = (Input &  0xffffff)   >> 16
-// (24, 8) = (Input <<  0) >> 24 = (Input &  0xffffffff) >> 24
-def BFE_UINT_eg : R600_3OP <0x4, "BFE_UINT",
-  [(set i32:$dst, (AMDGPUbfe_u32 i32:$src0, i32:$src1, i32:$src2))],
-  VecALU
->;
-
-def BFE_INT_eg : R600_3OP <0x5, "BFE_INT",
-  [(set i32:$dst, (AMDGPUbfe_i32 i32:$src0, i32:$src1, i32:$src2))],
-  VecALU
->;
-
-def : BFEPattern <BFE_UINT_eg, MOV_IMM_I32>;
-
-def BFI_INT_eg : R600_3OP <0x06, "BFI_INT",
-  [(set i32:$dst, (AMDGPUbfi i32:$src0, i32:$src1, i32:$src2))],
-  VecALU
->;
-
-def : Pat<(i32 (sext_inreg i32:$src, i1)),
-  (BFE_INT_eg i32:$src, (i32 ZERO), (i32 ONE_INT))>;
-def : Pat<(i32 (sext_inreg i32:$src, i8)),
-  (BFE_INT_eg i32:$src, (i32 ZERO), (MOV_IMM_I32 8))>;
-def : Pat<(i32 (sext_inreg i32:$src, i16)),
-  (BFE_INT_eg i32:$src, (i32 ZERO), (MOV_IMM_I32 16))>;
-
-defm : BFIPatterns <BFI_INT_eg, MOV_IMM_I32, R600_Reg64>;
-
-def BFM_INT_eg : R600_2OP <0xA0, "BFM_INT",
-  [(set i32:$dst, (AMDGPUbfm i32:$src0, i32:$src1))],
-  VecALU
->;
-
-def MULADD_UINT24_eg : R600_3OP <0x10, "MULADD_UINT24",
-  [(set i32:$dst, (AMDGPUmad_u24 i32:$src0, i32:$src1, i32:$src2))], VecALU
->;
-
-def : UMad24Pat<MULADD_UINT24_eg>;
-
-def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
-def : ROTRPattern <BIT_ALIGN_INT_eg>;
-def MULADD_eg : MULADD_Common<0x14>;
-def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;
-def FMA_eg : FMA_Common<0x7>;
-def ASHR_eg : ASHR_Common<0x15>;
-def LSHR_eg : LSHR_Common<0x16>;
-def LSHL_eg : LSHL_Common<0x17>;
-def CNDE_eg : CNDE_Common<0x19>;
-def CNDGT_eg : CNDGT_Common<0x1A>;
-def CNDGE_eg : CNDGE_Common<0x1B>;
-def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
-def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
-def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
-  [(set i32:$dst, (AMDGPUmul_u24 i32:$src0, i32:$src1))], VecALU
->;
-def DOT4_eg : DOT4_Common<0xBE>;
-defm CUBE_eg : CUBE_Common<0xC0>;
-
-def BCNT_INT : R600_1OP_Helper <0xAA, "BCNT_INT", ctpop, VecALU>;
-
-def ADDC_UINT : R600_2OP_Helper <0x52, "ADDC_UINT", AMDGPUcarry>;
-def SUBB_UINT : R600_2OP_Helper <0x53, "SUBB_UINT", AMDGPUborrow>;
-
-def FFBH_UINT : R600_1OP_Helper <0xAB, "FFBH_UINT", ctlz_zero_undef, VecALU>;
-def FFBL_INT : R600_1OP_Helper <0xAC, "FFBL_INT", cttz_zero_undef, VecALU>;
-
-let hasSideEffects = 1 in {
-  def MOVA_INT_eg : R600_1OP <0xCC, "MOVA_INT", [], VecALU>;
-}
-
-def TGSI_LIT_Z_eg : TGSI_LIT_Z_Common<MUL_LIT_eg, LOG_CLAMPED_eg, EXP_IEEE_eg>;
-
-def FLT_TO_INT_eg : FLT_TO_INT_Common<0x50> {
-  let Pattern = [];
-  let Itinerary = AnyALU;
-}
-
-def INT_TO_FLT_eg : INT_TO_FLT_Common<0x9B>;
-
-def FLT_TO_UINT_eg : FLT_TO_UINT_Common<0x9A> {
-  let Pattern = [];
-}
-
-def UINT_TO_FLT_eg : UINT_TO_FLT_Common<0x9C>;
-
-def GROUP_BARRIER : InstR600 <
-    (outs), (ins), "  GROUP_BARRIER", [(int_AMDGPU_barrier_local), (int_AMDGPU_barrier_global)], AnyALU>,
-    R600ALU_Word0,
-    R600ALU_Word1_OP2 <0x54> {
-
-  let dst = 0;
-  let dst_rel = 0;
-  let src0 = 0;
-  let src0_rel = 0;
-  let src0_neg = 0;
-  let src0_abs = 0;
-  let src1 = 0;
-  let src1_rel = 0;
-  let src1_neg = 0;
-  let src1_abs = 0;
-  let write = 0;
-  let omod = 0;
-  let clamp = 0;
-  let last = 1;
-  let bank_swizzle = 0;
-  let pred_sel = 0;
-  let update_exec_mask = 0;
-  let update_pred = 0;
-
-  let Inst{31-0}  = Word0;
-  let Inst{63-32} = Word1;
-
-  let ALUInst = 1;
-}
-
-def : Pat <
-	(int_AMDGPU_barrier_global),
-	(GROUP_BARRIER)
->;
-
-//===----------------------------------------------------------------------===//
-// LDS Instructions
-//===----------------------------------------------------------------------===//
-class R600_LDS  <bits<6> op, dag outs, dag ins, string asm,
-                 list<dag> pattern = []> :
-
-    InstR600 <outs, ins, asm, pattern, XALU>,
-    R600_ALU_LDS_Word0,
-    R600LDS_Word1 {
-
-  bits<6>  offset = 0;
-  let lds_op = op;
-
-  let Word1{27} = offset{0};
-  let Word1{12} = offset{1};
-  let Word1{28} = offset{2};
-  let Word1{31} = offset{3};
-  let Word0{12} = offset{4};
-  let Word0{25} = offset{5};
-
-
-  let Inst{31-0}  = Word0;
-  let Inst{63-32} = Word1;
-
-  let ALUInst = 1;
-  let HasNativeOperands = 1;
-  let UseNamedOperandTable = 1;
-}
-
-class R600_LDS_1A <bits<6> lds_op, string name, list<dag> pattern> : R600_LDS <
-  lds_op,
-  (outs R600_Reg32:$dst),
-  (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
-       LAST:$last, R600_Pred:$pred_sel,
-       BANK_SWIZZLE:$bank_swizzle),
-  "  "#name#" $last OQAP, $src0$src0_rel $pred_sel",
-  pattern
-  > {
-
-  let src1 = 0;
-  let src1_rel = 0;
-  let src2 = 0;
-  let src2_rel = 0;
-
-  let usesCustomInserter = 1;
-  let LDS_1A = 1;
-  let DisableEncoding = "$dst";
-}
-
-class R600_LDS_1A1D <bits<6> lds_op, dag outs, string name, list<dag> pattern,
-                     string dst =""> :
-    R600_LDS <
-  lds_op, outs,
-  (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
-       R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
-       LAST:$last, R600_Pred:$pred_sel,
-       BANK_SWIZZLE:$bank_swizzle),
-  "  "#name#" $last "#dst#"$src0$src0_rel, $src1$src1_rel, $pred_sel",
-  pattern
-  > {
-
-  field string BaseOp;
-
-  let src2 = 0;
-  let src2_rel = 0;
-  let LDS_1A1D = 1;
-}
-
-class R600_LDS_1A1D_NORET <bits<6> lds_op, string name, list<dag> pattern> :
-    R600_LDS_1A1D <lds_op, (outs), name, pattern> {
-  let BaseOp = name;
-}
-
-class R600_LDS_1A1D_RET <bits<6> lds_op, string name, list<dag> pattern> :
-    R600_LDS_1A1D <lds_op,  (outs R600_Reg32:$dst), name##"_RET", pattern, "OQAP, "> {
-
-  let BaseOp = name;
-  let usesCustomInserter = 1;
-  let DisableEncoding = "$dst";
-}
-
-class R600_LDS_1A2D <bits<6> lds_op, dag outs, string name, list<dag> pattern,
-                     string dst =""> :
-    R600_LDS <
-  lds_op, outs,
-  (ins R600_Reg32:$src0, REL:$src0_rel, SEL:$src0_sel,
-       R600_Reg32:$src1, REL:$src1_rel, SEL:$src1_sel,
-       R600_Reg32:$src2, REL:$src2_rel, SEL:$src2_sel,
-       LAST:$last, R600_Pred:$pred_sel, BANK_SWIZZLE:$bank_swizzle),
-  "  "#name# "$last "#dst#"$src0$src0_rel, $src1$src1_rel, $src2$src2_rel, $pred_sel",
-  pattern> {
-
-  field string BaseOp;
-
-  let LDS_1A1D = 0;
-  let LDS_1A2D = 1;
-}
-
-class R600_LDS_1A2D_NORET <bits<6> lds_op, string name, list<dag> pattern> :
-    R600_LDS_1A2D <lds_op, (outs), name, pattern> {
-  let BaseOp = name;
-}
-
-class R600_LDS_1A2D_RET <bits<6> lds_op, string name, list<dag> pattern> :
-    R600_LDS_1A2D <lds_op, (outs R600_Reg32:$dst), name, pattern> {
-
-  let BaseOp = name;
-  let usesCustomInserter = 1;
-  let DisableEncoding = "$dst";
-}
-
-def LDS_ADD : R600_LDS_1A1D_NORET <0x0, "LDS_ADD", [] >;
-def LDS_SUB : R600_LDS_1A1D_NORET <0x1, "LDS_SUB", [] >;
-def LDS_AND : R600_LDS_1A1D_NORET <0x9, "LDS_AND", [] >;
-def LDS_OR : R600_LDS_1A1D_NORET <0xa, "LDS_OR", [] >;
-def LDS_XOR : R600_LDS_1A1D_NORET <0xb, "LDS_XOR", [] >;
-def LDS_WRXCHG: R600_LDS_1A1D_NORET <0xd, "LDS_WRXCHG", [] >;
-def LDS_CMPST: R600_LDS_1A2D_NORET <0x10, "LDS_CMPST", [] >;
-def LDS_MIN_INT : R600_LDS_1A1D_NORET <0x5, "LDS_MIN_INT", [] >;
-def LDS_MAX_INT : R600_LDS_1A1D_NORET <0x6, "LDS_MAX_INT", [] >;
-def LDS_MIN_UINT : R600_LDS_1A1D_NORET <0x7, "LDS_MIN_UINT", [] >;
-def LDS_MAX_UINT : R600_LDS_1A1D_NORET <0x8, "LDS_MAX_UINT", [] >;
-def LDS_WRITE : R600_LDS_1A1D_NORET <0xD, "LDS_WRITE",
-  [(local_store (i32 R600_Reg32:$src1), R600_Reg32:$src0)]
->;
-def LDS_BYTE_WRITE : R600_LDS_1A1D_NORET<0x12, "LDS_BYTE_WRITE",
-  [(truncstorei8_local i32:$src1, i32:$src0)]
->;
-def LDS_SHORT_WRITE : R600_LDS_1A1D_NORET<0x13, "LDS_SHORT_WRITE",
-  [(truncstorei16_local i32:$src1, i32:$src0)]
->;
-def LDS_ADD_RET : R600_LDS_1A1D_RET <0x20, "LDS_ADD",
-  [(set i32:$dst, (atomic_load_add_local i32:$src0, i32:$src1))]
->;
-def LDS_SUB_RET : R600_LDS_1A1D_RET <0x21, "LDS_SUB",
-  [(set i32:$dst, (atomic_load_sub_local i32:$src0, i32:$src1))]
->;
-def LDS_AND_RET : R600_LDS_1A1D_RET <0x29, "LDS_AND",
-  [(set i32:$dst, (atomic_load_and_local i32:$src0, i32:$src1))]
->;
-def LDS_OR_RET : R600_LDS_1A1D_RET <0x2a, "LDS_OR",
-  [(set i32:$dst, (atomic_load_or_local i32:$src0, i32:$src1))]
->;
-def LDS_XOR_RET : R600_LDS_1A1D_RET <0x2b, "LDS_XOR",
-  [(set i32:$dst, (atomic_load_xor_local i32:$src0, i32:$src1))]
->;
-def LDS_MIN_INT_RET : R600_LDS_1A1D_RET <0x25, "LDS_MIN_INT",
-  [(set i32:$dst, (atomic_load_min_local i32:$src0, i32:$src1))]
->;
-def LDS_MAX_INT_RET : R600_LDS_1A1D_RET <0x26, "LDS_MAX_INT",
-  [(set i32:$dst, (atomic_load_max_local i32:$src0, i32:$src1))]
->;
-def LDS_MIN_UINT_RET : R600_LDS_1A1D_RET <0x27, "LDS_MIN_UINT",
-  [(set i32:$dst, (atomic_load_umin_local i32:$src0, i32:$src1))]
->;
-def LDS_MAX_UINT_RET : R600_LDS_1A1D_RET <0x28, "LDS_MAX_UINT",
-  [(set i32:$dst, (atomic_load_umax_local i32:$src0, i32:$src1))]
->;
-def LDS_WRXCHG_RET : R600_LDS_1A1D_RET <0x2d, "LDS_WRXCHG",
-  [(set i32:$dst, (atomic_swap_local i32:$src0, i32:$src1))]
->;
-def LDS_CMPST_RET : R600_LDS_1A2D_RET <0x30, "LDS_CMPST",
-  [(set i32:$dst, (atomic_cmp_swap_32_local i32:$src0, i32:$src1, i32:$src2))]
->;
-def LDS_READ_RET : R600_LDS_1A <0x32, "LDS_READ_RET",
-  [(set (i32 R600_Reg32:$dst), (local_load R600_Reg32:$src0))]
->;
-def LDS_BYTE_READ_RET : R600_LDS_1A <0x36, "LDS_BYTE_READ_RET",
-  [(set i32:$dst, (sextloadi8_local i32:$src0))]
->;
-def LDS_UBYTE_READ_RET : R600_LDS_1A <0x37, "LDS_UBYTE_READ_RET",
-  [(set i32:$dst, (az_extloadi8_local i32:$src0))]
->;
-def LDS_SHORT_READ_RET : R600_LDS_1A <0x38, "LDS_SHORT_READ_RET",
-  [(set i32:$dst, (sextloadi16_local i32:$src0))]
->;
-def LDS_USHORT_READ_RET : R600_LDS_1A <0x39, "LDS_USHORT_READ_RET",
-  [(set i32:$dst, (az_extloadi16_local i32:$src0))]
->;
-
-// TRUNC is used for the FLT_TO_INT instructions to work around a
-// perceived problem where the rounding modes are applied differently
-// depending on the instruction and the slot they are in.
-// See:
-// https://bugs.freedesktop.org/show_bug.cgi?id=50232
-// Mesa commit: a1a0974401c467cb86ef818f22df67c21774a38c
-//
-// XXX: Lowering SELECT_CC will sometimes generate fp_to_[su]int nodes,
-// which do not need to be truncated since the fp values are 0.0f or 1.0f.
-// We should look into handling these cases separately.
-def : Pat<(fp_to_sint f32:$src0), (FLT_TO_INT_eg (TRUNC $src0))>;
-
-def : Pat<(fp_to_uint f32:$src0), (FLT_TO_UINT_eg (TRUNC $src0))>;
-
-// SHA-256 Patterns
-def : SHA256MaPattern <BFI_INT_eg, XOR_INT>;
-
-def EG_ExportSwz : ExportSwzInst {
-  let Word1{19-16} = 0; // BURST_COUNT
-  let Word1{20} = 0; // VALID_PIXEL_MODE
-  let Word1{21} = eop;
-  let Word1{29-22} = inst;
-  let Word1{30} = 0; // MARK
-  let Word1{31} = 1; // BARRIER
-}
-defm : ExportPattern<EG_ExportSwz, 83>;
-
-def EG_ExportBuf : ExportBufInst {
-  let Word1{19-16} = 0; // BURST_COUNT
-  let Word1{20} = 0; // VALID_PIXEL_MODE
-  let Word1{21} = eop;
-  let Word1{29-22} = inst;
-  let Word1{30} = 0; // MARK
-  let Word1{31} = 1; // BARRIER
-}
-defm : SteamOutputExportPattern<EG_ExportBuf, 0x40, 0x41, 0x42, 0x43>;
-
-def CF_TC_EG : CF_CLAUSE_EG<1, (ins i32imm:$ADDR, i32imm:$COUNT),
-  "TEX $COUNT @$ADDR"> {
-  let POP_COUNT = 0;
-}
-def CF_VC_EG : CF_CLAUSE_EG<2, (ins i32imm:$ADDR, i32imm:$COUNT),
-  "VTX $COUNT @$ADDR"> {
-  let POP_COUNT = 0;
-}
-def WHILE_LOOP_EG : CF_CLAUSE_EG<6, (ins i32imm:$ADDR),
-  "LOOP_START_DX10 @$ADDR"> {
-  let POP_COUNT = 0;
-  let COUNT = 0;
-}
-def END_LOOP_EG : CF_CLAUSE_EG<5, (ins i32imm:$ADDR), "END_LOOP @$ADDR"> {
-  let POP_COUNT = 0;
-  let COUNT = 0;
-}
-def LOOP_BREAK_EG : CF_CLAUSE_EG<9, (ins i32imm:$ADDR),
-  "LOOP_BREAK @$ADDR"> {
-  let POP_COUNT = 0;
-  let COUNT = 0;
-}
-def CF_CONTINUE_EG : CF_CLAUSE_EG<8, (ins i32imm:$ADDR),
-  "CONTINUE @$ADDR"> {
-  let POP_COUNT = 0;
-  let COUNT = 0;
-}
-def CF_JUMP_EG : CF_CLAUSE_EG<10, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-  "JUMP @$ADDR POP:$POP_COUNT"> {
-  let COUNT = 0;
-}
-def CF_PUSH_EG : CF_CLAUSE_EG<11, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-                              "PUSH @$ADDR POP:$POP_COUNT"> {
-  let COUNT = 0;
-}
-def CF_ELSE_EG : CF_CLAUSE_EG<13, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-  "ELSE @$ADDR POP:$POP_COUNT"> {
-  let COUNT = 0;
-}
-def CF_CALL_FS_EG : CF_CLAUSE_EG<19, (ins), "CALL_FS"> {
-  let ADDR = 0;
-  let COUNT = 0;
-  let POP_COUNT = 0;
-}
-def POP_EG : CF_CLAUSE_EG<14, (ins i32imm:$ADDR, i32imm:$POP_COUNT),
-  "POP @$ADDR POP:$POP_COUNT"> {
-  let COUNT = 0;
-}
-def CF_END_EG :  CF_CLAUSE_EG<0, (ins), "CF_END"> {
-  let COUNT = 0;
-  let POP_COUNT = 0;
-  let ADDR = 0;
-  let END_OF_PROGRAM = 1;
-}
-
-} // End Predicates = [isEGorCayman]

Removed: llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp (original)
+++ llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.cpp (removed)
@@ -1,642 +0,0 @@
-//===-- AMDGPUInstPrinter.cpp - AMDGPU MC Inst -> ASM ---------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-// \file
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUInstPrinter.h"
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "SIDefines.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/Support/MathExtras.h"
-
-using namespace llvm;
-
-void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
-                                  StringRef Annot, const MCSubtargetInfo &STI) {
-  OS.flush();
-  printInstruction(MI, OS);
-
-  printAnnotation(OS, Annot);
-}
-
-void AMDGPUInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo,
-                                           raw_ostream &O) {
-  O << formatHex(MI->getOperand(OpNo).getImm() & 0xff);
-}
-
-void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
-                                           raw_ostream &O) {
-  O << formatHex(MI->getOperand(OpNo).getImm() & 0xffff);
-}
-
-void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo,
-                                           raw_ostream &O) {
-  O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff);
-}
-
-void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo,
-                                             raw_ostream &O) {
-  O << formatDec(MI->getOperand(OpNo).getImm() & 0xff);
-}
-
-void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo,
-                                              raw_ostream &O) {
-  O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff);
-}
-
-void AMDGPUInstPrinter::printOffen(const MCInst *MI, unsigned OpNo,
-                                   raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " offen";
-}
-
-void AMDGPUInstPrinter::printIdxen(const MCInst *MI, unsigned OpNo,
-                                   raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " idxen";
-}
-
-void AMDGPUInstPrinter::printAddr64(const MCInst *MI, unsigned OpNo,
-                                    raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " addr64";
-}
-
-void AMDGPUInstPrinter::printMBUFOffset(const MCInst *MI, unsigned OpNo,
-                                        raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm()) {
-    O << " offset:";
-    printU16ImmDecOperand(MI, OpNo, O);
-  }
-}
-
-void AMDGPUInstPrinter::printDSOffset(const MCInst *MI, unsigned OpNo,
-                                      raw_ostream &O) {
-  uint16_t Imm = MI->getOperand(OpNo).getImm();
-  if (Imm != 0) {
-    O << " offset:";
-    printU16ImmDecOperand(MI, OpNo, O);
-  }
-}
-
-void AMDGPUInstPrinter::printDSOffset0(const MCInst *MI, unsigned OpNo,
-                                        raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm()) {
-    O << " offset0:";
-    printU8ImmDecOperand(MI, OpNo, O);
-  }
-}
-
-void AMDGPUInstPrinter::printDSOffset1(const MCInst *MI, unsigned OpNo,
-                                        raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm()) {
-    O << " offset1:";
-    printU8ImmDecOperand(MI, OpNo, O);
-  }
-}
-
-void AMDGPUInstPrinter::printGDS(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " gds";
-}
-
-void AMDGPUInstPrinter::printGLC(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " glc";
-}
-
-void AMDGPUInstPrinter::printSLC(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " slc";
-}
-
-void AMDGPUInstPrinter::printTFE(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " tfe";
-}
-
-void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O,
-                                        const MCRegisterInfo &MRI) {
-  switch (reg) {
-  case AMDGPU::VCC:
-    O << "vcc";
-    return;
-  case AMDGPU::SCC:
-    O << "scc";
-    return;
-  case AMDGPU::EXEC:
-    O << "exec";
-    return;
-  case AMDGPU::M0:
-    O << "m0";
-    return;
-  case AMDGPU::FLAT_SCR:
-    O << "flat_scratch";
-    return;
-  case AMDGPU::VCC_LO:
-    O << "vcc_lo";
-    return;
-  case AMDGPU::VCC_HI:
-    O << "vcc_hi";
-    return;
-  case AMDGPU::EXEC_LO:
-    O << "exec_lo";
-    return;
-  case AMDGPU::EXEC_HI:
-    O << "exec_hi";
-    return;
-  case AMDGPU::FLAT_SCR_LO:
-    O << "flat_scratch_lo";
-    return;
-  case AMDGPU::FLAT_SCR_HI:
-    O << "flat_scratch_hi";
-    return;
-  default:
-    break;
-  }
-
-  char Type;
-  unsigned NumRegs;
-
-  if (MRI.getRegClass(AMDGPU::VGPR_32RegClassID).contains(reg)) {
-    Type = 'v';
-    NumRegs = 1;
-  } else  if (MRI.getRegClass(AMDGPU::SGPR_32RegClassID).contains(reg)) {
-    Type = 's';
-    NumRegs = 1;
-  } else if (MRI.getRegClass(AMDGPU::VReg_64RegClassID).contains(reg)) {
-    Type = 'v';
-    NumRegs = 2;
-  } else  if (MRI.getRegClass(AMDGPU::SReg_64RegClassID).contains(reg)) {
-    Type = 's';
-    NumRegs = 2;
-  } else if (MRI.getRegClass(AMDGPU::VReg_128RegClassID).contains(reg)) {
-    Type = 'v';
-    NumRegs = 4;
-  } else  if (MRI.getRegClass(AMDGPU::SReg_128RegClassID).contains(reg)) {
-    Type = 's';
-    NumRegs = 4;
-  } else if (MRI.getRegClass(AMDGPU::VReg_96RegClassID).contains(reg)) {
-    Type = 'v';
-    NumRegs = 3;
-  } else if (MRI.getRegClass(AMDGPU::VReg_256RegClassID).contains(reg)) {
-    Type = 'v';
-    NumRegs = 8;
-  } else if (MRI.getRegClass(AMDGPU::SReg_256RegClassID).contains(reg)) {
-    Type = 's';
-    NumRegs = 8;
-  } else if (MRI.getRegClass(AMDGPU::VReg_512RegClassID).contains(reg)) {
-    Type = 'v';
-    NumRegs = 16;
-  } else if (MRI.getRegClass(AMDGPU::SReg_512RegClassID).contains(reg)) {
-    Type = 's';
-    NumRegs = 16;
-  } else {
-    O << getRegisterName(reg);
-    return;
-  }
-
-  // The low 8 bits of the encoding value is the register index, for both VGPRs
-  // and SGPRs.
-  unsigned RegIdx = MRI.getEncodingValue(reg) & ((1 << 8) - 1);
-  if (NumRegs == 1) {
-    O << Type << RegIdx;
-    return;
-  }
-
-  O << Type << '[' << RegIdx << ':' << (RegIdx + NumRegs - 1) << ']';
-}
-
-void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo,
-                                    raw_ostream &O) {
-  if (MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3)
-    O << "_e64 ";
-  else
-    O << "_e32 ";
-
-  printOperand(MI, OpNo, O);
-}
-
-void AMDGPUInstPrinter::printImmediate32(uint32_t Imm, raw_ostream &O) {
-  int32_t SImm = static_cast<int32_t>(Imm);
-  if (SImm >= -16 && SImm <= 64) {
-    O << SImm;
-    return;
-  }
-
-  if (Imm == FloatToBits(0.0f))
-    O << "0.0";
-  else if (Imm == FloatToBits(1.0f))
-    O << "1.0";
-  else if (Imm == FloatToBits(-1.0f))
-    O << "-1.0";
-  else if (Imm == FloatToBits(0.5f))
-    O << "0.5";
-  else if (Imm == FloatToBits(-0.5f))
-    O << "-0.5";
-  else if (Imm == FloatToBits(2.0f))
-    O << "2.0";
-  else if (Imm == FloatToBits(-2.0f))
-    O << "-2.0";
-  else if (Imm == FloatToBits(4.0f))
-    O << "4.0";
-  else if (Imm == FloatToBits(-4.0f))
-    O << "-4.0";
-  else
-    O << formatHex(static_cast<uint64_t>(Imm));
-}
-
-void AMDGPUInstPrinter::printImmediate64(uint64_t Imm, raw_ostream &O) {
-  int64_t SImm = static_cast<int64_t>(Imm);
-  if (SImm >= -16 && SImm <= 64) {
-    O << SImm;
-    return;
-  }
-
-  if (Imm == DoubleToBits(0.0))
-    O << "0.0";
-  else if (Imm == DoubleToBits(1.0))
-    O << "1.0";
-  else if (Imm == DoubleToBits(-1.0))
-    O << "-1.0";
-  else if (Imm == DoubleToBits(0.5))
-    O << "0.5";
-  else if (Imm == DoubleToBits(-0.5))
-    O << "-0.5";
-  else if (Imm == DoubleToBits(2.0))
-    O << "2.0";
-  else if (Imm == DoubleToBits(-2.0))
-    O << "-2.0";
-  else if (Imm == DoubleToBits(4.0))
-    O << "4.0";
-  else if (Imm == DoubleToBits(-4.0))
-    O << "-4.0";
-  else
-    llvm_unreachable("64-bit literal constants not supported");
-}
-
-void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
-                                     raw_ostream &O) {
-
-  const MCOperand &Op = MI->getOperand(OpNo);
-  if (Op.isReg()) {
-    switch (Op.getReg()) {
-    // This is the default predicate state, so we don't need to print it.
-    case AMDGPU::PRED_SEL_OFF:
-      break;
-
-    default:
-      printRegOperand(Op.getReg(), O, MRI);
-      break;
-    }
-  } else if (Op.isImm()) {
-    const MCInstrDesc &Desc = MII.get(MI->getOpcode());
-    int RCID = Desc.OpInfo[OpNo].RegClass;
-    if (RCID != -1) {
-      const MCRegisterClass &ImmRC = MRI.getRegClass(RCID);
-      if (ImmRC.getSize() == 4)
-        printImmediate32(Op.getImm(), O);
-      else if (ImmRC.getSize() == 8)
-        printImmediate64(Op.getImm(), O);
-      else
-        llvm_unreachable("Invalid register class size");
-    } else if (Desc.OpInfo[OpNo].OperandType == MCOI::OPERAND_IMMEDIATE) {
-      printImmediate32(Op.getImm(), O);
-    } else {
-      // We hit this for the immediate instruction bits that don't yet have a
-      // custom printer.
-      // TODO: Eventually this should be unnecessary.
-      O << formatDec(Op.getImm());
-    }
-  } else if (Op.isFPImm()) {
-    // We special case 0.0 because otherwise it will be printed as an integer.
-    if (Op.getFPImm() == 0.0)
-      O << "0.0";
-    else {
-      const MCInstrDesc &Desc = MII.get(MI->getOpcode());
-      const MCRegisterClass &ImmRC = MRI.getRegClass(Desc.OpInfo[OpNo].RegClass);
-
-      if (ImmRC.getSize() == 4)
-        printImmediate32(FloatToBits(Op.getFPImm()), O);
-      else if (ImmRC.getSize() == 8)
-        printImmediate64(DoubleToBits(Op.getFPImm()), O);
-      else
-        llvm_unreachable("Invalid register class size");
-    }
-  } else if (Op.isExpr()) {
-    const MCExpr *Exp = Op.getExpr();
-    Exp->print(O, &MAI);
-  } else {
-    llvm_unreachable("unknown operand type in printOperand");
-  }
-}
-
-void AMDGPUInstPrinter::printOperandAndMods(const MCInst *MI, unsigned OpNo,
-                                            raw_ostream &O) {
-  unsigned InputModifiers = MI->getOperand(OpNo).getImm();
-  if (InputModifiers & SISrcMods::NEG)
-    O << '-';
-  if (InputModifiers & SISrcMods::ABS)
-    O << '|';
-  printOperand(MI, OpNo + 1, O);
-  if (InputModifiers & SISrcMods::ABS)
-    O << '|';
-}
-
-void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum,
-                                        raw_ostream &O) {
-  unsigned Imm = MI->getOperand(OpNum).getImm();
-
-  if (Imm == 2) {
-    O << "P0";
-  } else if (Imm == 1) {
-    O << "P20";
-  } else if (Imm == 0) {
-    O << "P10";
-  } else {
-    llvm_unreachable("Invalid interpolation parameter slot");
-  }
-}
-
-void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo,
-                                        raw_ostream &O) {
-  printOperand(MI, OpNo, O);
-  O  << ", ";
-  printOperand(MI, OpNo + 1, O);
-}
-
-void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo,
-                                   raw_ostream &O, StringRef Asm,
-                                   StringRef Default) {
-  const MCOperand &Op = MI->getOperand(OpNo);
-  assert(Op.isImm());
-  if (Op.getImm() == 1) {
-    O << Asm;
-  } else {
-    O << Default;
-  }
-}
-
-void AMDGPUInstPrinter::printAbs(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "|");
-}
-
-void AMDGPUInstPrinter::printClamp(const MCInst *MI, unsigned OpNo,
-                                   raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "_SAT");
-}
-
-void AMDGPUInstPrinter::printClampSI(const MCInst *MI, unsigned OpNo,
-                                     raw_ostream &O) {
-  if (MI->getOperand(OpNo).getImm())
-    O << " clamp";
-}
-
-void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo,
-                                     raw_ostream &O) {
-  int Imm = MI->getOperand(OpNo).getImm();
-  if (Imm == SIOutMods::MUL2)
-    O << " mul:2";
-  else if (Imm == SIOutMods::MUL4)
-    O << " mul:4";
-  else if (Imm == SIOutMods::DIV2)
-    O << " div:2";
-}
-
-void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo,
-                                     raw_ostream &O) {
-  int32_t Imm = MI->getOperand(OpNo).getImm();
-  O << Imm << '(' << BitsToFloat(Imm) << ')';
-}
-
-void AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo,
-                                  raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "*", " ");
-}
-
-void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "-");
-}
-
-void AMDGPUInstPrinter::printOMOD(const MCInst *MI, unsigned OpNo,
-                                  raw_ostream &O) {
-  switch (MI->getOperand(OpNo).getImm()) {
-  default: break;
-  case 1:
-    O << " * 2.0";
-    break;
-  case 2:
-    O << " * 4.0";
-    break;
-  case 3:
-    O << " / 2.0";
-    break;
-  }
-}
-
-void AMDGPUInstPrinter::printRel(const MCInst *MI, unsigned OpNo,
-                                 raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "+");
-}
-
-void AMDGPUInstPrinter::printUpdateExecMask(const MCInst *MI, unsigned OpNo,
-                                            raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "ExecMask,");
-}
-
-void AMDGPUInstPrinter::printUpdatePred(const MCInst *MI, unsigned OpNo,
-                                        raw_ostream &O) {
-  printIfSet(MI, OpNo, O, "Pred,");
-}
-
-void AMDGPUInstPrinter::printWrite(const MCInst *MI, unsigned OpNo,
-                                       raw_ostream &O) {
-  const MCOperand &Op = MI->getOperand(OpNo);
-  if (Op.getImm() == 0) {
-    O << " (MASKED)";
-  }
-}
-
-void AMDGPUInstPrinter::printSel(const MCInst *MI, unsigned OpNo,
-                                  raw_ostream &O) {
-  const char * chans = "XYZW";
-  int sel = MI->getOperand(OpNo).getImm();
-
-  int chan = sel & 3;
-  sel >>= 2;
-
-  if (sel >= 512) {
-    sel -= 512;
-    int cb = sel >> 12;
-    sel &= 4095;
-    O << cb << '[' << sel << ']';
-  } else if (sel >= 448) {
-    sel -= 448;
-    O << sel;
-  } else if (sel >= 0){
-    O << sel;
-  }
-
-  if (sel >= 0)
-    O << '.' << chans[chan];
-}
-
-void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo,
-                                         raw_ostream &O) {
-  int BankSwizzle = MI->getOperand(OpNo).getImm();
-  switch (BankSwizzle) {
-  case 1:
-    O << "BS:VEC_021/SCL_122";
-    break;
-  case 2:
-    O << "BS:VEC_120/SCL_212";
-    break;
-  case 3:
-    O << "BS:VEC_102/SCL_221";
-    break;
-  case 4:
-    O << "BS:VEC_201";
-    break;
-  case 5:
-    O << "BS:VEC_210";
-    break;
-  default:
-    break;
-  }
-  return;
-}
-
-void AMDGPUInstPrinter::printRSel(const MCInst *MI, unsigned OpNo,
-                                  raw_ostream &O) {
-  unsigned Sel = MI->getOperand(OpNo).getImm();
-  switch (Sel) {
-  case 0:
-    O << 'X';
-    break;
-  case 1:
-    O << 'Y';
-    break;
-  case 2:
-    O << 'Z';
-    break;
-  case 3:
-    O << 'W';
-    break;
-  case 4:
-    O << '0';
-    break;
-  case 5:
-    O << '1';
-    break;
-  case 7:
-    O << '_';
-    break;
-  default:
-    break;
-  }
-}
-
-void AMDGPUInstPrinter::printCT(const MCInst *MI, unsigned OpNo,
-                                  raw_ostream &O) {
-  unsigned CT = MI->getOperand(OpNo).getImm();
-  switch (CT) {
-  case 0:
-    O << 'U';
-    break;
-  case 1:
-    O << 'N';
-    break;
-  default:
-    break;
-  }
-}
-
-void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo,
-                                    raw_ostream &O) {
-  int KCacheMode = MI->getOperand(OpNo).getImm();
-  if (KCacheMode > 0) {
-    int KCacheBank = MI->getOperand(OpNo - 2).getImm();
-    O << "CB" << KCacheBank << ':';
-    int KCacheAddr = MI->getOperand(OpNo + 2).getImm();
-    int LineSize = (KCacheMode == 1) ? 16 : 32;
-    O << KCacheAddr * 16 << '-' << KCacheAddr * 16 + LineSize;
-  }
-}
-
-void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo,
-                                     raw_ostream &O) {
-  unsigned SImm16 = MI->getOperand(OpNo).getImm();
-  unsigned Msg = SImm16 & 0xF;
-  if (Msg == 2 || Msg == 3) {
-    unsigned Op = (SImm16 >> 4) & 0xF;
-    if (Msg == 3)
-      O << "Gs_done(";
-    else
-      O << "Gs(";
-    if (Op == 0) {
-      O << "nop";
-    } else {
-      unsigned Stream = (SImm16 >> 8) & 0x3;
-      if (Op == 1)
-	O << "cut";
-      else if (Op == 2)
-	O << "emit";
-      else if (Op == 3)
-	O << "emit-cut";
-      O << " stream " << Stream;
-    }
-    O << "), [m0] ";
-  } else if (Msg == 1)
-    O << "interrupt ";
-  else if (Msg == 15)
-    O << "system ";
-  else
-    O << "unknown(" << Msg << ") ";
-}
-
-void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo,
-                                      raw_ostream &O) {
-  // Note: Mask values are taken from SIInsertWaits.cpp and not from ISA docs
-  // SIInsertWaits.cpp bits usage does not match ISA docs description but it
-  // works so it might be a misprint in docs.
-  unsigned SImm16 = MI->getOperand(OpNo).getImm();
-  unsigned Vmcnt = SImm16 & 0xF;
-  unsigned Expcnt = (SImm16 >> 4) & 0xF;
-  unsigned Lgkmcnt = (SImm16 >> 8) & 0xF;
-
-  bool NeedSpace = false;
-
-  if (Vmcnt != 0xF) {
-    O << "vmcnt(" << Vmcnt << ')';
-    NeedSpace = true;
-  }
-
-  if (Expcnt != 0x7) {
-    if (NeedSpace)
-      O << ' ';
-    O << "expcnt(" << Expcnt << ')';
-    NeedSpace = true;
-  }
-
-  if (Lgkmcnt != 0x7) {
-    if (NeedSpace)
-      O << ' ';
-    O << "lgkmcnt(" << Lgkmcnt << ')';
-  }
-}
-
-#include "AMDGPUGenAsmWriter.inc"

Removed: llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h (original)
+++ llvm/trunk/lib/Target/R600/InstPrinter/AMDGPUInstPrinter.h (removed)
@@ -1,88 +0,0 @@
-//===-- AMDGPUInstPrinter.h - AMDGPU MC Inst -> ASM interface ---*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_INSTPRINTER_AMDGPUINSTPRINTER_H
-#define LLVM_LIB_TARGET_R600_INSTPRINTER_AMDGPUINSTPRINTER_H
-
-#include "llvm/ADT/StringRef.h"
-#include "llvm/MC/MCInstPrinter.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-
-class AMDGPUInstPrinter : public MCInstPrinter {
-public:
-  AMDGPUInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
-                     const MCRegisterInfo &MRI)
-    : MCInstPrinter(MAI, MII, MRI) {}
-
-  //Autogenerated by tblgen
-  void printInstruction(const MCInst *MI, raw_ostream &O);
-  static const char *getRegisterName(unsigned RegNo);
-
-  void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
-                 const MCSubtargetInfo &STI) override;
-  static void printRegOperand(unsigned RegNo, raw_ostream &O,
-                              const MCRegisterInfo &MRI);
-
-private:
-  void printU8ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printU8ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printU16ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printU32ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printOffen(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printIdxen(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printAddr64(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printMBUFOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printDSOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printDSOffset0(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printDSOffset1(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printGDS(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printGLC(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printSLC(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printTFE(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printRegOperand(unsigned RegNo, raw_ostream &O);
-  void printVOPDst(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printImmediate32(uint32_t I, raw_ostream &O);
-  void printImmediate64(uint64_t I, raw_ostream &O);
-  void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printOperandAndMods(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O);
-  void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O,
-                         StringRef Asm, StringRef Default = "");
-  static void printAbs(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printClamp(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printClampSI(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printOModSI(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printLast(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printNeg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printOMOD(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printRel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printUpdateExecMask(const MCInst *MI, unsigned OpNo,
-                                  raw_ostream &O);
-  static void printUpdatePred(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printSendMsg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  static void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/InstPrinter/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/InstPrinter/CMakeLists.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/InstPrinter/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/R600/InstPrinter/CMakeLists.txt (removed)
@@ -1,3 +0,0 @@
-add_llvm_library(LLVMR600AsmPrinter
-  AMDGPUInstPrinter.cpp
-  )

Removed: llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/R600/InstPrinter/LLVMBuild.txt (removed)
@@ -1,24 +0,0 @@
-;===- ./lib/Target/R600/InstPrinter/LLVMBuild.txt -----------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = R600AsmPrinter
-parent = R600
-required_libraries = MC Support
-add_to_library_groups = R600
-

Removed: llvm/trunk/lib/Target/R600/InstPrinter/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/InstPrinter/Makefile?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/InstPrinter/Makefile (original)
+++ llvm/trunk/lib/Target/R600/InstPrinter/Makefile (removed)
@@ -1,15 +0,0 @@
-#===- lib/Target/R600/AsmPrinter/Makefile ------------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
-LIBRARYNAME = LLVMR600AsmPrinter
-
-# Hack: we need to include 'main' x86 target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/R600/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/LLVMBuild.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/R600/LLVMBuild.txt (removed)
@@ -1,33 +0,0 @@
-;===- ./lib/Target/AMDIL/LLVMBuild.txt -------------------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[common]
-subdirectories = AsmParser InstPrinter MCTargetDesc TargetInfo
-
-[component_0]
-type = TargetGroup
-name = R600
-parent = Target
-has_asmparser = 1
-has_asmprinter = 1
-
-[component_1]
-type = Library
-name = R600CodeGen
-parent = R600
-required_libraries = Analysis AsmPrinter CodeGen Core IPO MC R600AsmParser R600AsmPrinter R600Desc R600Info Scalar SelectionDAG Support Target TransformUtils
-add_to_library_groups = R600

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUAsmBackend.cpp (removed)
@@ -1,144 +0,0 @@
-//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "MCTargetDesc/AMDGPUFixupKinds.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCAssembler.h"
-#include "llvm/MC/MCFixupKindInfo.h"
-#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUMCObjectWriter : public MCObjectWriter {
-public:
-  AMDGPUMCObjectWriter(raw_pwrite_stream &OS) : MCObjectWriter(OS, true) {}
-  void executePostLayoutBinding(MCAssembler &Asm,
-                                const MCAsmLayout &Layout) override {
-    //XXX: Implement if necessary.
-  }
-  void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
-                        const MCFragment *Fragment, const MCFixup &Fixup,
-                        MCValue Target, bool &IsPCRel,
-                        uint64_t &FixedValue) override {
-    assert(!"Not implemented");
-  }
-
-  void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
-
-};
-
-class AMDGPUAsmBackend : public MCAsmBackend {
-public:
-  AMDGPUAsmBackend(const Target &T)
-    : MCAsmBackend() {}
-
-  unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; };
-  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
-                  uint64_t Value, bool IsPCRel) const override;
-  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
-                            const MCRelaxableFragment *DF,
-                            const MCAsmLayout &Layout) const override {
-    return false;
-  }
-  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
-    assert(!"Not implemented");
-  }
-  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
-  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
-
-  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
-};
-
-} //End anonymous namespace
-
-void AMDGPUMCObjectWriter::writeObject(MCAssembler &Asm,
-                                       const MCAsmLayout &Layout) {
-  for (MCAssembler::iterator I = Asm.begin(), E = Asm.end(); I != E; ++I) {
-    Asm.writeSectionData(&*I, Layout);
-  }
-}
-
-void AMDGPUAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
-                                  unsigned DataSize, uint64_t Value,
-                                  bool IsPCRel) const {
-
-  switch ((unsigned)Fixup.getKind()) {
-    default: llvm_unreachable("Unknown fixup kind");
-    case AMDGPU::fixup_si_sopp_br: {
-      uint16_t *Dst = (uint16_t*)(Data + Fixup.getOffset());
-      *Dst = (Value - 4) / 4;
-      break;
-    }
-
-    case AMDGPU::fixup_si_rodata: {
-      uint32_t *Dst = (uint32_t*)(Data + Fixup.getOffset());
-      *Dst = Value;
-      break;
-    }
-
-    case AMDGPU::fixup_si_end_of_text: {
-      uint32_t *Dst = (uint32_t*)(Data + Fixup.getOffset());
-      // The value points to the last instruction in the text section, so we
-      // need to add 4 bytes to get to the start of the constants.
-      *Dst = Value + 4;
-      break;
-    }
-  }
-}
-
-const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
-                                                       MCFixupKind Kind) const {
-  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
-    // name                   offset bits  flags
-    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
-    { "fixup_si_rodata",      0,     32,   0 },
-    { "fixup_si_end_of_text", 0,     32,   MCFixupKindInfo::FKF_IsPCRel }
-  };
-
-  if (Kind < FirstTargetFixupKind)
-    return MCAsmBackend::getFixupKindInfo(Kind);
-
-  return Infos[Kind - FirstTargetFixupKind];
-}
-
-bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
-  OW->WriteZeros(Count);
-
-  return true;
-}
-
-//===----------------------------------------------------------------------===//
-// ELFAMDGPUAsmBackend class
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
-public:
-  ELFAMDGPUAsmBackend(const Target &T) : AMDGPUAsmBackend(T) { }
-
-  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
-    return createAMDGPUELFObjectWriter(OS);
-  }
-};
-
-} // end anonymous namespace
-
-MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
-                                           const MCRegisterInfo &MRI,
-                                           const Triple &TT, StringRef CPU) {
-  return new ELFAMDGPUAsmBackend(T);
-}

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUELFObjectWriter.cpp (removed)
@@ -1,39 +0,0 @@
-//===-- AMDGPUELFObjectWriter.cpp - AMDGPU ELF Writer ----------------------==//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUMCTargetDesc.h"
-#include "llvm/MC/MCELFObjectWriter.h"
-#include "llvm/MC/MCFixup.h"
-
-using namespace llvm;
-
-namespace {
-
-class AMDGPUELFObjectWriter : public MCELFObjectTargetWriter {
-public:
-  AMDGPUELFObjectWriter();
-protected:
-  unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
-                        bool IsPCRel) const override {
-    return Fixup.getKind();
-  }
-
-};
-
-
-} // End anonymous namespace
-
-AMDGPUELFObjectWriter::AMDGPUELFObjectWriter()
-  : MCELFObjectTargetWriter(false, 0, 0, false) { }
-
-MCObjectWriter *llvm::createAMDGPUELFObjectWriter(raw_pwrite_stream &OS) {
-  MCELFObjectTargetWriter *MOTW = new AMDGPUELFObjectWriter();
-  return createELFObjectWriter(MOTW, OS, true);
-}

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUFixupKinds.h (removed)
@@ -1,34 +0,0 @@
-//===-- AMDGPUFixupKinds.h - AMDGPU Specific Fixup Entries ------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUFIXUPKINDS_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUFIXUPKINDS_H
-
-#include "llvm/MC/MCFixup.h"
-
-namespace llvm {
-namespace AMDGPU {
-enum Fixups {
-  /// 16-bit PC relative fixup for SOPP branch instructions.
-  fixup_si_sopp_br = FirstTargetFixupKind,
-
-  /// fixup for global addresses with constant initializers
-  fixup_si_rodata,
-
-  /// fixup for offset from instruction to end of text section
-  fixup_si_end_of_text,
-
-  // Marker
-  LastTargetFixupKind,
-  NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
-};
-}
-}
-
-#endif

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp (removed)
@@ -1,43 +0,0 @@
-//===-- MCTargetDesc/AMDGPUMCAsmInfo.cpp - Assembly Info ------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUMCAsmInfo.h"
-
-using namespace llvm;
-AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Triple &TT) : MCAsmInfoELF() {
-  HasSingleParameterDotFile = false;
-  //===------------------------------------------------------------------===//
-  MaxInstLength = 16;
-  SeparatorString = "\n";
-  CommentString = ";";
-  PrivateLabelPrefix = "";
-  InlineAsmStart = ";#ASMSTART";
-  InlineAsmEnd = ";#ASMEND";
-
-  //===--- Data Emission Directives -------------------------------------===//
-  ZeroDirective = ".zero";
-  AsciiDirective = ".ascii\t";
-  AscizDirective = ".asciz\t";
-  Data8bitsDirective = ".byte\t";
-  Data16bitsDirective = ".short\t";
-  Data32bitsDirective = ".long\t";
-  Data64bitsDirective = ".quad\t";
-  SunStyleELFSectionSwitchSyntax = true;
-  UsesELFSectionDirectiveForBSS = true;
-
-  //===--- Global Variable Emission Directives --------------------------===//
-  HasAggressiveSymbolFolding = true;
-  COMMDirectiveAlignmentIsInBytes = false;
-  HasDotTypeDotSizeDirective = false;
-  HasNoDeadStrip = true;
-  WeakRefDirective = ".weakref\t";
-  //===--- Dwarf Emission Directives -----------------------------------===//
-  SupportsDebugInformation = true;
-}

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h (removed)
@@ -1,32 +0,0 @@
-//===-- MCTargetDesc/AMDGPUMCAsmInfo.h - AMDGPU MCAsm Interface -*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCASMINFO_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCASMINFO_H
-
-#include "llvm/MC/MCAsmInfoELF.h"
-namespace llvm {
-
-class Triple;
-
-// If you need to create another MCAsmInfo class, which inherits from MCAsmInfo,
-// you will need to make sure your new class sets PrivateGlobalPrefix to
-// a prefix that won't appeary in a fuction name.  The default value
-// for PrivateGlobalPrefix is 'L', so it will consider any function starting
-// with 'L' as a local symbol.
-class AMDGPUMCAsmInfo : public MCAsmInfoELF {
-public:
-  explicit AMDGPUMCAsmInfo(const Triple &TT);
-};
-} // namespace llvm
-#endif

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.cpp (removed)
@@ -1,21 +0,0 @@
-//===-- AMDGPUCodeEmitter.cpp - AMDGPU Code Emitter interface -------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief CodeEmitter interface for R600 and SI codegen.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUMCCodeEmitter.h"
-
-using namespace llvm;
-
-// pin vtable to this file
-void AMDGPUMCCodeEmitter::anchor() {}
-

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCCodeEmitter.h (removed)
@@ -1,50 +0,0 @@
-//===-- AMDGPUCodeEmitter.h - AMDGPU Code Emitter interface -----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief CodeEmitter interface for R600 and SI codegen.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCCODEEMITTER_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCCODEEMITTER_H
-
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/Support/raw_ostream.h"
-
-namespace llvm {
-
-class MCInst;
-class MCOperand;
-class MCSubtargetInfo;
-
-class AMDGPUMCCodeEmitter : public MCCodeEmitter {
-  virtual void anchor();
-public:
-
-  uint64_t getBinaryCodeForInstr(const MCInst &MI,
-                                 SmallVectorImpl<MCFixup> &Fixups,
-                                 const MCSubtargetInfo &STI) const;
-
-  virtual uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
-                                     SmallVectorImpl<MCFixup> &Fixups,
-                                     const MCSubtargetInfo &STI) const {
-    return 0;
-  }
-
-  virtual unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
-                                     SmallVectorImpl<MCFixup> &Fixups,
-                                     const MCSubtargetInfo &STI) const {
-    return 0;
-  }
-};
-
-} // End namespace llvm
-
-#endif

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.cpp (removed)
@@ -1,90 +0,0 @@
-//===-- AMDGPUMCTargetDesc.cpp - AMDGPU Target Descriptions ---------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief This file provides AMDGPU specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPUMCTargetDesc.h"
-#include "AMDGPUMCAsmInfo.h"
-#include "InstPrinter/AMDGPUInstPrinter.h"
-#include "SIDefines.h"
-#include "llvm/MC/MCCodeGenInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/MachineLocation.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetRegistry.h"
-
-using namespace llvm;
-
-#define GET_INSTRINFO_MC_DESC
-#include "AMDGPUGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_MC_DESC
-#include "AMDGPUGenSubtargetInfo.inc"
-
-#define GET_REGINFO_MC_DESC
-#include "AMDGPUGenRegisterInfo.inc"
-
-static MCInstrInfo *createAMDGPUMCInstrInfo() {
-  MCInstrInfo *X = new MCInstrInfo();
-  InitAMDGPUMCInstrInfo(X);
-  return X;
-}
-
-static MCRegisterInfo *createAMDGPUMCRegisterInfo(StringRef TT) {
-  MCRegisterInfo *X = new MCRegisterInfo();
-  InitAMDGPUMCRegisterInfo(X, 0);
-  return X;
-}
-
-static MCSubtargetInfo *
-createAMDGPUMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
-  MCSubtargetInfo * X = new MCSubtargetInfo();
-  InitAMDGPUMCSubtargetInfo(X, TT, CPU, FS);
-  return X;
-}
-
-static MCCodeGenInfo *createAMDGPUMCCodeGenInfo(StringRef TT, Reloc::Model RM,
-                                               CodeModel::Model CM,
-                                               CodeGenOpt::Level OL) {
-  MCCodeGenInfo *X = new MCCodeGenInfo();
-  X->initMCCodeGenInfo(RM, CM, OL);
-  return X;
-}
-
-static MCInstPrinter *createAMDGPUMCInstPrinter(const Triple &T,
-                                                unsigned SyntaxVariant,
-                                                const MCAsmInfo &MAI,
-                                                const MCInstrInfo &MII,
-                                                const MCRegisterInfo &MRI) {
-  return new AMDGPUInstPrinter(MAI, MII, MRI);
-}
-
-extern "C" void LLVMInitializeR600TargetMC() {
-  for (Target *T : {&TheAMDGPUTarget, &TheGCNTarget}) {
-    RegisterMCAsmInfo<AMDGPUMCAsmInfo> X(*T);
-
-    TargetRegistry::RegisterMCCodeGenInfo(*T, createAMDGPUMCCodeGenInfo);
-    TargetRegistry::RegisterMCInstrInfo(*T, createAMDGPUMCInstrInfo);
-    TargetRegistry::RegisterMCRegInfo(*T, createAMDGPUMCRegisterInfo);
-    TargetRegistry::RegisterMCSubtargetInfo(*T, createAMDGPUMCSubtargetInfo);
-    TargetRegistry::RegisterMCInstPrinter(*T, createAMDGPUMCInstPrinter);
-    TargetRegistry::RegisterMCAsmBackend(*T, createAMDGPUAsmBackend);
-  }
-
-  TargetRegistry::RegisterMCCodeEmitter(TheAMDGPUTarget,
-                                        createR600MCCodeEmitter);
-  TargetRegistry::RegisterMCCodeEmitter(TheGCNTarget, createSIMCCodeEmitter);
-}

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/AMDGPUMCTargetDesc.h (removed)
@@ -1,61 +0,0 @@
-//===-- AMDGPUMCTargetDesc.h - AMDGPU Target Descriptions -----*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Provides AMDGPU specific target descriptions.
-//
-//===----------------------------------------------------------------------===//
-//
-
-#ifndef LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCTARGETDESC_H
-#define LLVM_LIB_TARGET_R600_MCTARGETDESC_AMDGPUMCTARGETDESC_H
-
-#include "llvm/Support/DataTypes.h"
-#include "llvm/ADT/StringRef.h"
-
-namespace llvm {
-class MCAsmBackend;
-class MCCodeEmitter;
-class MCContext;
-class MCInstrInfo;
-class MCObjectWriter;
-class MCRegisterInfo;
-class MCSubtargetInfo;
-class Target;
-class Triple;
-class raw_pwrite_stream;
-class raw_ostream;
-
-extern Target TheAMDGPUTarget;
-extern Target TheGCNTarget;
-
-MCCodeEmitter *createR600MCCodeEmitter(const MCInstrInfo &MCII,
-                                       const MCRegisterInfo &MRI,
-                                       MCContext &Ctx);
-
-MCCodeEmitter *createSIMCCodeEmitter(const MCInstrInfo &MCII,
-                                     const MCRegisterInfo &MRI,
-                                     MCContext &Ctx);
-
-MCAsmBackend *createAMDGPUAsmBackend(const Target &T, const MCRegisterInfo &MRI,
-                                     const Triple &TT, StringRef CPU);
-
-MCObjectWriter *createAMDGPUELFObjectWriter(raw_pwrite_stream &OS);
-} // End llvm namespace
-
-#define GET_REGINFO_ENUM
-#include "AMDGPUGenRegisterInfo.inc"
-
-#define GET_INSTRINFO_ENUM
-#include "AMDGPUGenInstrInfo.inc"
-
-#define GET_SUBTARGETINFO_ENUM
-#include "AMDGPUGenSubtargetInfo.inc"
-
-#endif

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/CMakeLists.txt (removed)
@@ -1,10 +0,0 @@
-
-add_llvm_library(LLVMR600Desc
-  AMDGPUAsmBackend.cpp
-  AMDGPUELFObjectWriter.cpp
-  AMDGPUMCCodeEmitter.cpp
-  AMDGPUMCTargetDesc.cpp
-  AMDGPUMCAsmInfo.cpp
-  R600MCCodeEmitter.cpp
-  SIMCCodeEmitter.cpp
-  )

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/LLVMBuild.txt (removed)
@@ -1,23 +0,0 @@
-;===- ./lib/Target/R600/MCTargetDesc/LLVMBuild.txt -------------*- Conf -*--===;
-;
-;                     The LLVM Compiler Infrastructure
-;
-; This file is distributed under the University of Illinois Open Source
-; License. See LICENSE.TXT for details.
-;
-;===------------------------------------------------------------------------===;
-;
-; This is an LLVMBuild description file for the components in this subdirectory.
-;
-; For more information on the LLVMBuild system, please see:
-;
-;   http://llvm.org/docs/LLVMBuild.html
-;
-;===------------------------------------------------------------------------===;
-
-[component_0]
-type = Library
-name = R600Desc
-parent = R600
-required_libraries = MC R600AsmPrinter R600Info Support
-add_to_library_groups = R600

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/Makefile (removed)
@@ -1,16 +0,0 @@
-##===- lib/Target/AMDGPU/TargetDesc/Makefile ----------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../../..
-LIBRARYNAME = LLVMR600Desc
-
-# Hack: we need to include 'main' target directory to grab private headers
-CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/R600MCCodeEmitter.cpp (removed)
@@ -1,181 +0,0 @@
-//===- R600MCCodeEmitter.cpp - Code Emitter for R600->Cayman GPU families -===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-///
-/// \brief The R600 code emitter produces machine code that can be executed
-/// directly on the GPU device.
-//
-//===----------------------------------------------------------------------===//
-
-#include "R600Defines.h"
-#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/EndianStream.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-
-class R600MCCodeEmitter : public AMDGPUMCCodeEmitter {
-  R600MCCodeEmitter(const R600MCCodeEmitter &) = delete;
-  void operator=(const R600MCCodeEmitter &) = delete;
-  const MCInstrInfo &MCII;
-  const MCRegisterInfo &MRI;
-
-public:
-
-  R600MCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri)
-    : MCII(mcii), MRI(mri) { }
-
-  /// \brief Encode the instruction and write it to the OS.
-  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
-                         SmallVectorImpl<MCFixup> &Fixups,
-                         const MCSubtargetInfo &STI) const override;
-
-  /// \returns the encoding for an MCOperand.
-  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const override;
-private:
-
-  void EmitByte(unsigned int byte, raw_ostream &OS) const;
-
-  void Emit(uint32_t value, raw_ostream &OS) const;
-  void Emit(uint64_t value, raw_ostream &OS) const;
-
-  unsigned getHWRegChan(unsigned reg) const;
-  unsigned getHWReg(unsigned regNo) const;
-
-};
-
-} // End anonymous namespace
-
-enum RegElement {
-  ELEMENT_X = 0,
-  ELEMENT_Y,
-  ELEMENT_Z,
-  ELEMENT_W
-};
-
-enum FCInstr {
-  FC_IF_PREDICATE = 0,
-  FC_ELSE,
-  FC_ENDIF,
-  FC_BGNLOOP,
-  FC_ENDLOOP,
-  FC_BREAK_PREDICATE,
-  FC_CONTINUE
-};
-
-MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII,
-                                             const MCRegisterInfo &MRI,
-					     MCContext &Ctx) {
-  return new R600MCCodeEmitter(MCII, MRI);
-}
-
-void R600MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
-  if (MI.getOpcode() == AMDGPU::RETURN ||
-    MI.getOpcode() == AMDGPU::FETCH_CLAUSE ||
-    MI.getOpcode() == AMDGPU::ALU_CLAUSE ||
-    MI.getOpcode() == AMDGPU::BUNDLE ||
-    MI.getOpcode() == AMDGPU::KILL) {
-    return;
-  } else if (IS_VTX(Desc)) {
-    uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups, STI);
-    uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
-    if (!(STI.getFeatureBits()[AMDGPU::FeatureCaymanISA])) {
-      InstWord2 |= 1 << 19; // Mega-Fetch bit
-    }
-
-    Emit(InstWord01, OS);
-    Emit(InstWord2, OS);
-    Emit((uint32_t) 0, OS);
-  } else if (IS_TEX(Desc)) {
-      int64_t Sampler = MI.getOperand(14).getImm();
-
-      int64_t SrcSelect[4] = {
-        MI.getOperand(2).getImm(),
-        MI.getOperand(3).getImm(),
-        MI.getOperand(4).getImm(),
-        MI.getOperand(5).getImm()
-      };
-      int64_t Offsets[3] = {
-        MI.getOperand(6).getImm() & 0x1F,
-        MI.getOperand(7).getImm() & 0x1F,
-        MI.getOperand(8).getImm() & 0x1F
-      };
-
-      uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups, STI);
-      uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
-          SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
-          SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
-          Offsets[2] << 10;
-
-      Emit(Word01, OS);
-      Emit(Word2, OS);
-      Emit((uint32_t) 0, OS);
-  } else {
-    uint64_t Inst = getBinaryCodeForInstr(MI, Fixups, STI);
-    if ((STI.getFeatureBits()[AMDGPU::FeatureR600ALUInst]) &&
-       ((Desc.TSFlags & R600_InstFlag::OP1) ||
-         Desc.TSFlags & R600_InstFlag::OP2)) {
-      uint64_t ISAOpCode = Inst & (0x3FFULL << 39);
-      Inst &= ~(0x3FFULL << 39);
-      Inst |= ISAOpCode << 1;
-    }
-    Emit(Inst, OS);
-  }
-}
-
-void R600MCCodeEmitter::EmitByte(unsigned int Byte, raw_ostream &OS) const {
-  OS.write((uint8_t) Byte & 0xff);
-}
-
-void R600MCCodeEmitter::Emit(uint32_t Value, raw_ostream &OS) const {
-  support::endian::Writer<support::little>(OS).write(Value);
-}
-
-void R600MCCodeEmitter::Emit(uint64_t Value, raw_ostream &OS) const {
-  support::endian::Writer<support::little>(OS).write(Value);
-}
-
-unsigned R600MCCodeEmitter::getHWRegChan(unsigned reg) const {
-  return MRI.getEncodingValue(reg) >> HW_CHAN_SHIFT;
-}
-
-unsigned R600MCCodeEmitter::getHWReg(unsigned RegNo) const {
-  return MRI.getEncodingValue(RegNo) & HW_REG_MASK;
-}
-
-uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
-                                              const MCOperand &MO,
-                                        SmallVectorImpl<MCFixup> &Fixup,
-                                        const MCSubtargetInfo &STI) const {
-  if (MO.isReg()) {
-    if (HAS_NATIVE_OPERANDS(MCII.get(MI.getOpcode()).TSFlags))
-      return MRI.getEncodingValue(MO.getReg());
-    return getHWReg(MO.getReg());
-  }
-
-  assert(MO.isImm());
-  return MO.getImm();
-}
-
-#include "AMDGPUGenMCCodeEmitter.inc"

Removed: llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp (original)
+++ llvm/trunk/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp (removed)
@@ -1,289 +0,0 @@
-//===-- SIMCCodeEmitter.cpp - SI Code Emitter -------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief The SI code emitter produces machine code that can be executed
-/// directly on the GPU device.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "MCTargetDesc/AMDGPUFixupKinds.h"
-#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
-#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
-#include "SIDefines.h"
-#include "llvm/MC/MCCodeEmitter.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCFixup.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCInstrInfo.h"
-#include "llvm/MC/MCRegisterInfo.h"
-#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-
-class SIMCCodeEmitter : public  AMDGPUMCCodeEmitter {
-  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
-  void operator=(const SIMCCodeEmitter &) = delete;
-  const MCInstrInfo &MCII;
-  const MCRegisterInfo &MRI;
-  MCContext &Ctx;
-
-  /// \brief Can this operand also contain immediate values?
-  bool isSrcOperand(const MCInstrDesc &Desc, unsigned OpNo) const;
-
-  /// \brief Encode an fp or int literal
-  uint32_t getLitEncoding(const MCOperand &MO, unsigned OpSize) const;
-
-public:
-  SIMCCodeEmitter(const MCInstrInfo &mcii, const MCRegisterInfo &mri,
-                  MCContext &ctx)
-    : MCII(mcii), MRI(mri), Ctx(ctx) { }
-
-  ~SIMCCodeEmitter() override {}
-
-  /// \brief Encode the instruction and write it to the OS.
-  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
-                         SmallVectorImpl<MCFixup> &Fixups,
-                         const MCSubtargetInfo &STI) const override;
-
-  /// \returns the encoding for an MCOperand.
-  uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const override;
-
-  /// \brief Use a fixup to encode the simm16 field for SOPP branch
-  ///        instructions.
-  unsigned getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
-                             SmallVectorImpl<MCFixup> &Fixups,
-                             const MCSubtargetInfo &STI) const override;
-};
-
-} // End anonymous namespace
-
-MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
-                                           const MCRegisterInfo &MRI,
-                                           MCContext &Ctx) {
-  return new SIMCCodeEmitter(MCII, MRI, Ctx);
-}
-
-bool SIMCCodeEmitter::isSrcOperand(const MCInstrDesc &Desc,
-                                   unsigned OpNo) const {
-  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
-
-  return OpType == AMDGPU::OPERAND_REG_IMM32 ||
-         OpType == AMDGPU::OPERAND_REG_INLINE_C;
-}
-
-// Returns the encoding value to use if the given integer is an integer inline
-// immediate value, or 0 if it is not.
-template <typename IntTy>
-static uint32_t getIntInlineImmEncoding(IntTy Imm) {
-  if (Imm >= 0 && Imm <= 64)
-    return 128 + Imm;
-
-  if (Imm >= -16 && Imm <= -1)
-    return 192 + std::abs(Imm);
-
-  return 0;
-}
-
-static uint32_t getLit32Encoding(uint32_t Val) {
-  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
-  if (IntImm != 0)
-    return IntImm;
-
-  if (Val == FloatToBits(0.5f))
-    return 240;
-
-  if (Val == FloatToBits(-0.5f))
-    return 241;
-
-  if (Val == FloatToBits(1.0f))
-    return 242;
-
-  if (Val == FloatToBits(-1.0f))
-    return 243;
-
-  if (Val == FloatToBits(2.0f))
-    return 244;
-
-  if (Val == FloatToBits(-2.0f))
-    return 245;
-
-  if (Val == FloatToBits(4.0f))
-    return 246;
-
-  if (Val == FloatToBits(-4.0f))
-    return 247;
-
-  return 255;
-}
-
-static uint32_t getLit64Encoding(uint64_t Val) {
-  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
-  if (IntImm != 0)
-    return IntImm;
-
-  if (Val == DoubleToBits(0.5))
-    return 240;
-
-  if (Val == DoubleToBits(-0.5))
-    return 241;
-
-  if (Val == DoubleToBits(1.0))
-    return 242;
-
-  if (Val == DoubleToBits(-1.0))
-    return 243;
-
-  if (Val == DoubleToBits(2.0))
-    return 244;
-
-  if (Val == DoubleToBits(-2.0))
-    return 245;
-
-  if (Val == DoubleToBits(4.0))
-    return 246;
-
-  if (Val == DoubleToBits(-4.0))
-    return 247;
-
-  return 255;
-}
-
-uint32_t SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
-                                         unsigned OpSize) const {
-  if (MO.isExpr())
-    return 255;
-
-  assert(!MO.isFPImm());
-
-  if (!MO.isImm())
-    return ~0;
-
-  if (OpSize == 4)
-    return getLit32Encoding(static_cast<uint32_t>(MO.getImm()));
-
-  assert(OpSize == 8);
-
-  return getLit64Encoding(static_cast<uint64_t>(MO.getImm()));
-}
-
-void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-
-  uint64_t Encoding = getBinaryCodeForInstr(MI, Fixups, STI);
-  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
-  unsigned bytes = Desc.getSize();
-
-  for (unsigned i = 0; i < bytes; i++) {
-    OS.write((uint8_t) ((Encoding >> (8 * i)) & 0xff));
-  }
-
-  if (bytes > 4)
-    return;
-
-  // Check for additional literals in SRC0/1/2 (Op 1/2/3)
-  for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) {
-
-    // Check if this operand should be encoded as [SV]Src
-    if (!isSrcOperand(Desc, i))
-      continue;
-
-    int RCID = Desc.OpInfo[i].RegClass;
-    const MCRegisterClass &RC = MRI.getRegClass(RCID);
-
-    // Is this operand a literal immediate?
-    const MCOperand &Op = MI.getOperand(i);
-    if (getLitEncoding(Op, RC.getSize()) != 255)
-      continue;
-
-    // Yes! Encode it
-    int64_t Imm = 0;
-
-    if (Op.isImm())
-      Imm = Op.getImm();
-    else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
-      llvm_unreachable("Must be immediate or expr");
-
-    for (unsigned j = 0; j < 4; j++) {
-      OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
-    }
-
-    // Only one literal value allowed
-    break;
-  }
-}
-
-unsigned SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
-                                            SmallVectorImpl<MCFixup> &Fixups,
-                                            const MCSubtargetInfo &STI) const {
-  const MCOperand &MO = MI.getOperand(OpNo);
-
-  if (MO.isExpr()) {
-    const MCExpr *Expr = MO.getExpr();
-    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
-    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
-    return 0;
-  }
-
-  return getMachineOpValue(MI, MO, Fixups, STI);
-}
-
-uint64_t SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
-                                            const MCOperand &MO,
-                                       SmallVectorImpl<MCFixup> &Fixups,
-                                       const MCSubtargetInfo &STI) const {
-  if (MO.isReg())
-    return MRI.getEncodingValue(MO.getReg());
-
-  if (MO.isExpr()) {
-    const MCSymbolRefExpr *Expr = cast<MCSymbolRefExpr>(MO.getExpr());
-    MCFixupKind Kind;
-    const MCSymbol *Sym =
-        Ctx.getOrCreateSymbol(StringRef(END_OF_TEXT_LABEL_NAME));
-
-    if (&Expr->getSymbol() == Sym) {
-      // Add the offset to the beginning of the constant values.
-      Kind = (MCFixupKind)AMDGPU::fixup_si_end_of_text;
-    } else {
-      // This is used for constant data stored in .rodata.
-     Kind = (MCFixupKind)AMDGPU::fixup_si_rodata;
-    }
-    Fixups.push_back(MCFixup::create(4, Expr, Kind, MI.getLoc()));
-  }
-
-  // Figure out the operand number, needed for isSrcOperand check
-  unsigned OpNo = 0;
-  for (unsigned e = MI.getNumOperands(); OpNo < e; ++OpNo) {
-    if (&MO == &MI.getOperand(OpNo))
-      break;
-  }
-
-  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
-  if (isSrcOperand(Desc, OpNo)) {
-    int RCID = Desc.OpInfo[OpNo].RegClass;
-    const MCRegisterClass &RC = MRI.getRegClass(RCID);
-
-    uint32_t Enc = getLitEncoding(MO, RC.getSize());
-    if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4))
-      return Enc;
-
-  } else if (MO.isImm())
-    return MO.getImm();
-
-  llvm_unreachable("Encoding of this operand type is not supported yet.");
-  return 0;
-}
-

Removed: llvm/trunk/lib/Target/R600/Makefile
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/Makefile?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/Makefile (original)
+++ llvm/trunk/lib/Target/R600/Makefile (removed)
@@ -1,23 +0,0 @@
-##===- lib/Target/R600/Makefile ---------------------------*- Makefile -*-===##
-#
-#                     The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-##===----------------------------------------------------------------------===##
-
-LEVEL = ../../..
-LIBRARYNAME = LLVMR600CodeGen
-TARGET = AMDGPU
-
-# Make sure that tblgen is run, first thing.
-BUILT_SOURCES = AMDGPUGenRegisterInfo.inc AMDGPUGenInstrInfo.inc \
-		AMDGPUGenDAGISel.inc  AMDGPUGenSubtargetInfo.inc \
-		AMDGPUGenMCCodeEmitter.inc AMDGPUGenCallingConv.inc \
-		AMDGPUGenIntrinsics.inc AMDGPUGenDFAPacketizer.inc \
-		AMDGPUGenAsmWriter.inc AMDGPUGenAsmMatcher.inc
-
-DIRS = AsmParser InstPrinter TargetInfo MCTargetDesc
-
-include $(LEVEL)/Makefile.common

Removed: llvm/trunk/lib/Target/R600/Processors.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/Processors.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/Processors.td (original)
+++ llvm/trunk/lib/Target/R600/Processors.td (removed)
@@ -1,137 +0,0 @@
-//===-- Processors.td - R600 Processor definitions ------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-class Proc<string Name, ProcessorItineraries itin, list<SubtargetFeature> Features>
-: Processor<Name, itin, Features>;
-
-//===----------------------------------------------------------------------===//
-// R600
-//===----------------------------------------------------------------------===//
-def : Proc<"",           R600_VLIW5_Itin,
-    [FeatureR600, FeatureVertexCache]>;
-
-def : Proc<"r600",       R600_VLIW5_Itin,
-    [FeatureR600 , FeatureVertexCache, FeatureWavefrontSize64]>;
-
-def : Proc<"r630",       R600_VLIW5_Itin,
-    [FeatureR600, FeatureVertexCache, FeatureWavefrontSize32]>;
-
-def : Proc<"rs880",      R600_VLIW5_Itin,
-    [FeatureR600, FeatureWavefrontSize16]>;
-
-def : Proc<"rv670",      R600_VLIW5_Itin,
-    [FeatureR600, FeatureFP64, FeatureVertexCache, FeatureWavefrontSize64]>;
-
-//===----------------------------------------------------------------------===//
-// R700
-//===----------------------------------------------------------------------===//
-
-def : Proc<"rv710",      R600_VLIW5_Itin,
-    [FeatureR700, FeatureVertexCache, FeatureWavefrontSize32]>;
-
-def : Proc<"rv730",      R600_VLIW5_Itin,
-    [FeatureR700, FeatureVertexCache, FeatureWavefrontSize32]>;
-
-def : Proc<"rv770",      R600_VLIW5_Itin,
-    [FeatureR700, FeatureFP64, FeatureVertexCache, FeatureWavefrontSize64]>;
-
-//===----------------------------------------------------------------------===//
-// Evergreen
-//===----------------------------------------------------------------------===//
-
-def : Proc<"cedar",      R600_VLIW5_Itin,
-    [FeatureEvergreen, FeatureVertexCache, FeatureWavefrontSize32,
-     FeatureCFALUBug]>;
-
-def : Proc<"redwood",    R600_VLIW5_Itin,
-    [FeatureEvergreen, FeatureVertexCache, FeatureWavefrontSize64,
-     FeatureCFALUBug]>;
-
-def : Proc<"sumo",       R600_VLIW5_Itin,
-    [FeatureEvergreen, FeatureWavefrontSize64, FeatureCFALUBug]>;
-
-def : Proc<"juniper",    R600_VLIW5_Itin,
-    [FeatureEvergreen, FeatureVertexCache, FeatureWavefrontSize64]>;
-
-def : Proc<"cypress",    R600_VLIW5_Itin,
-    [FeatureEvergreen, FeatureFP64, FeatureVertexCache,
-     FeatureWavefrontSize64]>;
-
-//===----------------------------------------------------------------------===//
-// Northern Islands
-//===----------------------------------------------------------------------===//
-
-def : Proc<"barts",      R600_VLIW5_Itin,
-    [FeatureNorthernIslands, FeatureVertexCache, FeatureCFALUBug]>;
-
-def : Proc<"turks",      R600_VLIW5_Itin,
-    [FeatureNorthernIslands, FeatureVertexCache, FeatureCFALUBug]>;
-
-def : Proc<"caicos",     R600_VLIW5_Itin,
-    [FeatureNorthernIslands, FeatureCFALUBug]>;
-
-def : Proc<"cayman",     R600_VLIW4_Itin,
-    [FeatureNorthernIslands, FeatureFP64, FeatureCaymanISA]>;
-
-//===----------------------------------------------------------------------===//
-// Southern Islands
-//===----------------------------------------------------------------------===//
-
-def : ProcessorModel<"SI", SIFullSpeedModel,
-  [FeatureSouthernIslands, FeatureFastFMAF32]
->;
-
-def : ProcessorModel<"tahiti",   SIFullSpeedModel,
-  [FeatureSouthernIslands, FeatureFastFMAF32]
->;
-
-def : ProcessorModel<"pitcairn", SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-
-def : ProcessorModel<"verde",    SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-
-def : ProcessorModel<"oland",    SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-
-def : ProcessorModel<"hainan",   SIQuarterSpeedModel, [FeatureSouthernIslands]>;
-
-//===----------------------------------------------------------------------===//
-// Sea Islands
-//===----------------------------------------------------------------------===//
-
-def : ProcessorModel<"bonaire",    SIQuarterSpeedModel,
-  [FeatureSeaIslands, FeatureLDSBankCount32]
->;
-
-def : ProcessorModel<"kabini",     SIQuarterSpeedModel,
-  [FeatureSeaIslands, FeatureLDSBankCount16]
->;
-
-def : ProcessorModel<"kaveri",     SIQuarterSpeedModel,
-  [FeatureSeaIslands, FeatureLDSBankCount32]
->;
-
-def : ProcessorModel<"hawaii", SIFullSpeedModel,
-  [FeatureSeaIslands, FeatureFastFMAF32, FeatureLDSBankCount32]
->;
-
-def : ProcessorModel<"mullins",    SIQuarterSpeedModel,
-  [FeatureSeaIslands, FeatureLDSBankCount16]>;
-
-//===----------------------------------------------------------------------===//
-// Volcanic Islands
-//===----------------------------------------------------------------------===//
-
-def : ProcessorModel<"tonga",   SIQuarterSpeedModel,
-  [FeatureVolcanicIslands, FeatureSGPRInitBug]
->;
-
-def : ProcessorModel<"iceland", SIQuarterSpeedModel,
-  [FeatureVolcanicIslands, FeatureSGPRInitBug]
->;
-
-def : ProcessorModel<"carrizo", SIQuarterSpeedModel, [FeatureVolcanicIslands]>;

Removed: llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600ClauseMergePass.cpp (removed)
@@ -1,206 +0,0 @@
-//===-- R600ClauseMergePass - Merge consecutive CF_ALU -------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// R600EmitClauseMarker pass emits CFAlu instruction in a conservative maneer.
-/// This pass is merging consecutive CFAlus where applicable.
-/// It needs to be called after IfCvt for best results.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "R600Defines.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "R600RegisterInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "r600mergeclause"
-
-namespace {
-
-static bool isCFAlu(const MachineInstr *MI) {
-  switch (MI->getOpcode()) {
-  case AMDGPU::CF_ALU:
-  case AMDGPU::CF_ALU_PUSH_BEFORE:
-    return true;
-  default:
-    return false;
-  }
-}
-
-class R600ClauseMergePass : public MachineFunctionPass {
-
-private:
-  static char ID;
-  const R600InstrInfo *TII;
-
-  unsigned getCFAluSize(const MachineInstr *MI) const;
-  bool isCFAluEnabled(const MachineInstr *MI) const;
-
-  /// IfCvt pass can generate "disabled" ALU clause marker that need to be
-  /// removed and their content affected to the previous alu clause.
-  /// This function parse instructions after CFAlu until it find a disabled
-  /// CFAlu and merge the content, or an enabled CFAlu.
-  void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
-
-  /// Check whether LatrCFAlu can be merged into RootCFAlu and do it if
-  /// it is the case.
-  bool mergeIfPossible(MachineInstr *RootCFAlu, const MachineInstr *LatrCFAlu)
-      const;
-
-public:
-  R600ClauseMergePass(TargetMachine &tm) : MachineFunctionPass(ID) { }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override;
-};
-
-char R600ClauseMergePass::ID = 0;
-
-unsigned R600ClauseMergePass::getCFAluSize(const MachineInstr *MI) const {
-  assert(isCFAlu(MI));
-  return MI->getOperand(
-      TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::COUNT)).getImm();
-}
-
-bool R600ClauseMergePass::isCFAluEnabled(const MachineInstr *MI) const {
-  assert(isCFAlu(MI));
-  return MI->getOperand(
-      TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::Enabled)).getImm();
-}
-
-void R600ClauseMergePass::cleanPotentialDisabledCFAlu(MachineInstr *CFAlu)
-    const {
-  int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
-  MachineBasicBlock::iterator I = CFAlu, E = CFAlu->getParent()->end();
-  I++;
-  do {
-    while (I!= E && !isCFAlu(I))
-      I++;
-    if (I == E)
-      return;
-    MachineInstr *MI = I++;
-    if (isCFAluEnabled(MI))
-      break;
-    CFAlu->getOperand(CntIdx).setImm(getCFAluSize(CFAlu) + getCFAluSize(MI));
-    MI->eraseFromParent();
-  } while (I != E);
-}
-
-bool R600ClauseMergePass::mergeIfPossible(MachineInstr *RootCFAlu,
-                                          const MachineInstr *LatrCFAlu) const {
-  assert(isCFAlu(RootCFAlu) && isCFAlu(LatrCFAlu));
-  int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
-  unsigned RootInstCount = getCFAluSize(RootCFAlu),
-      LaterInstCount = getCFAluSize(LatrCFAlu);
-  unsigned CumuledInsts = RootInstCount + LaterInstCount;
-  if (CumuledInsts >= TII->getMaxAlusPerClause()) {
-    DEBUG(dbgs() << "Excess inst counts\n");
-    return false;
-  }
-  if (RootCFAlu->getOpcode() == AMDGPU::CF_ALU_PUSH_BEFORE)
-    return false;
-  // Is KCache Bank 0 compatible ?
-  int Mode0Idx =
-      TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE0);
-  int KBank0Idx =
-      TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK0);
-  int KBank0LineIdx =
-      TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR0);
-  if (LatrCFAlu->getOperand(Mode0Idx).getImm() &&
-      RootCFAlu->getOperand(Mode0Idx).getImm() &&
-      (LatrCFAlu->getOperand(KBank0Idx).getImm() !=
-       RootCFAlu->getOperand(KBank0Idx).getImm() ||
-      LatrCFAlu->getOperand(KBank0LineIdx).getImm() !=
-      RootCFAlu->getOperand(KBank0LineIdx).getImm())) {
-    DEBUG(dbgs() << "Wrong KC0\n");
-    return false;
-  }
-  // Is KCache Bank 1 compatible ?
-  int Mode1Idx =
-      TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE1);
-  int KBank1Idx =
-      TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK1);
-  int KBank1LineIdx =
-      TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR1);
-  if (LatrCFAlu->getOperand(Mode1Idx).getImm() &&
-      RootCFAlu->getOperand(Mode1Idx).getImm() &&
-      (LatrCFAlu->getOperand(KBank1Idx).getImm() !=
-      RootCFAlu->getOperand(KBank1Idx).getImm() ||
-      LatrCFAlu->getOperand(KBank1LineIdx).getImm() !=
-      RootCFAlu->getOperand(KBank1LineIdx).getImm())) {
-    DEBUG(dbgs() << "Wrong KC0\n");
-    return false;
-  }
-  if (LatrCFAlu->getOperand(Mode0Idx).getImm()) {
-    RootCFAlu->getOperand(Mode0Idx).setImm(
-        LatrCFAlu->getOperand(Mode0Idx).getImm());
-    RootCFAlu->getOperand(KBank0Idx).setImm(
-        LatrCFAlu->getOperand(KBank0Idx).getImm());
-    RootCFAlu->getOperand(KBank0LineIdx).setImm(
-        LatrCFAlu->getOperand(KBank0LineIdx).getImm());
-  }
-  if (LatrCFAlu->getOperand(Mode1Idx).getImm()) {
-    RootCFAlu->getOperand(Mode1Idx).setImm(
-        LatrCFAlu->getOperand(Mode1Idx).getImm());
-    RootCFAlu->getOperand(KBank1Idx).setImm(
-        LatrCFAlu->getOperand(KBank1Idx).getImm());
-    RootCFAlu->getOperand(KBank1LineIdx).setImm(
-        LatrCFAlu->getOperand(KBank1LineIdx).getImm());
-  }
-  RootCFAlu->getOperand(CntIdx).setImm(CumuledInsts);
-  RootCFAlu->setDesc(TII->get(LatrCFAlu->getOpcode()));
-  return true;
-}
-
-bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
-  TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
-  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
-                                                  BB != BB_E; ++BB) {
-    MachineBasicBlock &MBB = *BB;
-    MachineBasicBlock::iterator I = MBB.begin(),  E = MBB.end();
-    MachineBasicBlock::iterator LatestCFAlu = E;
-    while (I != E) {
-      MachineInstr *MI = I++;
-      if ((!TII->canBeConsideredALU(MI) && !isCFAlu(MI)) ||
-          TII->mustBeLastInClause(MI->getOpcode()))
-        LatestCFAlu = E;
-      if (!isCFAlu(MI))
-        continue;
-      cleanPotentialDisabledCFAlu(MI);
-
-      if (LatestCFAlu != E && mergeIfPossible(LatestCFAlu, MI)) {
-        MI->eraseFromParent();
-      } else {
-        assert(MI->getOperand(8).getImm() && "CF ALU instruction disabled");
-        LatestCFAlu = MI;
-      }
-    }
-  }
-  return false;
-}
-
-const char *R600ClauseMergePass::getPassName() const {
-  return "R600 Merge Clause Markers Pass";
-}
-
-} // end anonymous namespace
-
-
-llvm::FunctionPass *llvm::createR600ClauseMergePass(TargetMachine &TM) {
-  return new R600ClauseMergePass(TM);
-}

Removed: llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600ControlFlowFinalizer.cpp (removed)
@@ -1,679 +0,0 @@
-//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// This pass compute turns all control flow pseudo instructions into native one
-/// computing their address on the fly ; it also sets STACK_SIZE info.
-//===----------------------------------------------------------------------===//
-
-#include "llvm/Support/Debug.h"
-#include "AMDGPU.h"
-#include "AMDGPUSubtarget.h"
-#include "R600Defines.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "R600RegisterInfo.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "r600cf"
-
-namespace {
-
-struct CFStack {
-
-  enum StackItem {
-    ENTRY = 0,
-    SUB_ENTRY = 1,
-    FIRST_NON_WQM_PUSH = 2,
-    FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
-  };
-
-  const AMDGPUSubtarget *ST;
-  std::vector<StackItem> BranchStack;
-  std::vector<StackItem> LoopStack;
-  unsigned MaxStackSize;
-  unsigned CurrentEntries;
-  unsigned CurrentSubEntries;
-
-  CFStack(const AMDGPUSubtarget *st, unsigned ShaderType) : ST(st),
-      // We need to reserve a stack entry for CALL_FS in vertex shaders.
-      MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
-      CurrentEntries(0), CurrentSubEntries(0) { }
-
-  unsigned getLoopDepth();
-  bool branchStackContains(CFStack::StackItem);
-  bool requiresWorkAroundForInst(unsigned Opcode);
-  unsigned getSubEntrySize(CFStack::StackItem Item);
-  void updateMaxStackSize();
-  void pushBranch(unsigned Opcode, bool isWQM = false);
-  void pushLoop();
-  void popBranch();
-  void popLoop();
-};
-
-unsigned CFStack::getLoopDepth() {
-  return LoopStack.size();
-}
-
-bool CFStack::branchStackContains(CFStack::StackItem Item) {
-  for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
-       E = BranchStack.end(); I != E; ++I) {
-    if (*I == Item)
-      return true;
-  }
-  return false;
-}
-
-bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
-  if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST->hasCaymanISA() &&
-      getLoopDepth() > 1)
-    return true;
-
-  if (!ST->hasCFAluBug())
-    return false;
-
-  switch(Opcode) {
-  default: return false;
-  case AMDGPU::CF_ALU_PUSH_BEFORE:
-  case AMDGPU::CF_ALU_ELSE_AFTER:
-  case AMDGPU::CF_ALU_BREAK:
-  case AMDGPU::CF_ALU_CONTINUE:
-    if (CurrentSubEntries == 0)
-      return false;
-    if (ST->getWavefrontSize() == 64) {
-      // We are being conservative here.  We only require this work-around if
-      // CurrentSubEntries > 3 &&
-      // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
-      //
-      // We have to be conservative, because we don't know for certain that
-      // our stack allocation algorithm for Evergreen/NI is correct.  Applying this
-      // work-around when CurrentSubEntries > 3 allows us to over-allocate stack
-      // resources without any problems.
-      return CurrentSubEntries > 3;
-    } else {
-      assert(ST->getWavefrontSize() == 32);
-      // We are being conservative here.  We only require the work-around if
-      // CurrentSubEntries > 7 &&
-      // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
-      // See the comment on the wavefront size == 64 case for why we are
-      // being conservative.
-      return CurrentSubEntries > 7;
-    }
-  }
-}
-
-unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
-  switch(Item) {
-  default:
-    return 0;
-  case CFStack::FIRST_NON_WQM_PUSH:
-  assert(!ST->hasCaymanISA());
-  if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
-    // +1 For the push operation.
-    // +2 Extra space required.
-    return 3;
-  } else {
-    // Some documentation says that this is not necessary on Evergreen,
-    // but experimentation has show that we need to allocate 1 extra
-    // sub-entry for the first non-WQM push.
-    // +1 For the push operation.
-    // +1 Extra space required.
-    return 2;
-  }
-  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
-    assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
-    // +1 For the push operation.
-    // +1 Extra space required.
-    return 2;
-  case CFStack::SUB_ENTRY:
-    return 1;
-  }
-}
-
-void CFStack::updateMaxStackSize() {
-  unsigned CurrentStackSize = CurrentEntries +
-                              (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
-  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
-}
-
-void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
-  CFStack::StackItem Item = CFStack::ENTRY;
-  switch(Opcode) {
-  case AMDGPU::CF_PUSH_EG:
-  case AMDGPU::CF_ALU_PUSH_BEFORE:
-    if (!isWQM) {
-      if (!ST->hasCaymanISA() &&
-          !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
-        Item = CFStack::FIRST_NON_WQM_PUSH;  // May not be required on Evergreen/NI
-                                             // See comment in
-                                             // CFStack::getSubEntrySize()
-      else if (CurrentEntries > 0 &&
-               ST->getGeneration() > AMDGPUSubtarget::EVERGREEN &&
-               !ST->hasCaymanISA() &&
-               !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
-        Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
-      else
-        Item = CFStack::SUB_ENTRY;
-    } else
-      Item = CFStack::ENTRY;
-    break;
-  }
-  BranchStack.push_back(Item);
-  if (Item == CFStack::ENTRY)
-    CurrentEntries++;
-  else
-    CurrentSubEntries += getSubEntrySize(Item);
-  updateMaxStackSize();
-}
-
-void CFStack::pushLoop() {
-  LoopStack.push_back(CFStack::ENTRY);
-  CurrentEntries++;
-  updateMaxStackSize();
-}
-
-void CFStack::popBranch() {
-  CFStack::StackItem Top = BranchStack.back();
-  if (Top == CFStack::ENTRY)
-    CurrentEntries--;
-  else
-    CurrentSubEntries-= getSubEntrySize(Top);
-  BranchStack.pop_back();
-}
-
-void CFStack::popLoop() {
-  CurrentEntries--;
-  LoopStack.pop_back();
-}
-
-class R600ControlFlowFinalizer : public MachineFunctionPass {
-
-private:
-  typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;
-
-  enum ControlFlowInstruction {
-    CF_TC,
-    CF_VC,
-    CF_CALL_FS,
-    CF_WHILE_LOOP,
-    CF_END_LOOP,
-    CF_LOOP_BREAK,
-    CF_LOOP_CONTINUE,
-    CF_JUMP,
-    CF_ELSE,
-    CF_POP,
-    CF_END
-  };
-
-  static char ID;
-  const R600InstrInfo *TII;
-  const R600RegisterInfo *TRI;
-  unsigned MaxFetchInst;
-  const AMDGPUSubtarget *ST;
-
-  bool IsTrivialInst(MachineInstr *MI) const {
-    switch (MI->getOpcode()) {
-    case AMDGPU::KILL:
-    case AMDGPU::RETURN:
-      return true;
-    default:
-      return false;
-    }
-  }
-
-  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
-    unsigned Opcode = 0;
-    bool isEg = (ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
-    switch (CFI) {
-    case CF_TC:
-      Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
-      break;
-    case CF_VC:
-      Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
-      break;
-    case CF_CALL_FS:
-      Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
-      break;
-    case CF_WHILE_LOOP:
-      Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
-      break;
-    case CF_END_LOOP:
-      Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
-      break;
-    case CF_LOOP_BREAK:
-      Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
-      break;
-    case CF_LOOP_CONTINUE:
-      Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
-      break;
-    case CF_JUMP:
-      Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
-      break;
-    case CF_ELSE:
-      Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
-      break;
-    case CF_POP:
-      Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
-      break;
-    case CF_END:
-      if (ST->hasCaymanISA()) {
-        Opcode = AMDGPU::CF_END_CM;
-        break;
-      }
-      Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
-      break;
-    }
-    assert (Opcode && "No opcode selected");
-    return TII->get(Opcode);
-  }
-
-  bool isCompatibleWithClause(const MachineInstr *MI,
-      std::set<unsigned> &DstRegs) const {
-    unsigned DstMI, SrcMI;
-    for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
-        E = MI->operands_end(); I != E; ++I) {
-      const MachineOperand &MO = *I;
-      if (!MO.isReg())
-        continue;
-      if (MO.isDef()) {
-        unsigned Reg = MO.getReg();
-        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
-          DstMI = Reg;
-        else
-          DstMI = TRI->getMatchingSuperReg(Reg,
-              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
-              &AMDGPU::R600_Reg128RegClass);
-      }
-      if (MO.isUse()) {
-        unsigned Reg = MO.getReg();
-        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
-          SrcMI = Reg;
-        else
-          SrcMI = TRI->getMatchingSuperReg(Reg,
-              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
-              &AMDGPU::R600_Reg128RegClass);
-      }
-    }
-    if ((DstRegs.find(SrcMI) == DstRegs.end())) {
-      DstRegs.insert(DstMI);
-      return true;
-    } else
-      return false;
-  }
-
-  ClauseFile
-  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
-      const {
-    MachineBasicBlock::iterator ClauseHead = I;
-    std::vector<MachineInstr *> ClauseContent;
-    unsigned AluInstCount = 0;
-    bool IsTex = TII->usesTextureCache(ClauseHead);
-    std::set<unsigned> DstRegs;
-    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
-      if (IsTrivialInst(I))
-        continue;
-      if (AluInstCount >= MaxFetchInst)
-        break;
-      if ((IsTex && !TII->usesTextureCache(I)) ||
-          (!IsTex && !TII->usesVertexCache(I)))
-        break;
-      if (!isCompatibleWithClause(I, DstRegs))
-        break;
-      AluInstCount ++;
-      ClauseContent.push_back(I);
-    }
-    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
-        getHWInstrDesc(IsTex?CF_TC:CF_VC))
-        .addImm(0) // ADDR
-        .addImm(AluInstCount - 1); // COUNT
-    return ClauseFile(MIb, std::move(ClauseContent));
-  }
-
-  void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
-    static const unsigned LiteralRegs[] = {
-      AMDGPU::ALU_LITERAL_X,
-      AMDGPU::ALU_LITERAL_Y,
-      AMDGPU::ALU_LITERAL_Z,
-      AMDGPU::ALU_LITERAL_W
-    };
-    const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs =
-        TII->getSrcs(MI);
-    for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
-      if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
-        continue;
-      int64_t Imm = Srcs[i].second;
-      std::vector<int64_t>::iterator It =
-          std::find(Lits.begin(), Lits.end(), Imm);
-      if (It != Lits.end()) {
-        unsigned Index = It - Lits.begin();
-        Srcs[i].first->setReg(LiteralRegs[Index]);
-      } else {
-        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
-        Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
-        Lits.push_back(Imm);
-      }
-    }
-  }
-
-  MachineBasicBlock::iterator insertLiterals(
-      MachineBasicBlock::iterator InsertPos,
-      const std::vector<unsigned> &Literals) const {
-    MachineBasicBlock *MBB = InsertPos->getParent();
-    for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
-      unsigned LiteralPair0 = Literals[i];
-      unsigned LiteralPair1 = (i + 1 < e)?Literals[i + 1]:0;
-      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
-          TII->get(AMDGPU::LITERALS))
-          .addImm(LiteralPair0)
-          .addImm(LiteralPair1);
-    }
-    return InsertPos;
-  }
-
-  ClauseFile
-  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
-      const {
-    MachineBasicBlock::iterator ClauseHead = I;
-    std::vector<MachineInstr *> ClauseContent;
-    I++;
-    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
-      if (IsTrivialInst(I)) {
-        ++I;
-        continue;
-      }
-      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
-        break;
-      std::vector<int64_t> Literals;
-      if (I->isBundle()) {
-        MachineInstr *DeleteMI = I;
-        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
-        while (++BI != E && BI->isBundledWithPred()) {
-          BI->unbundleFromPred();
-          for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
-            MachineOperand &MO = BI->getOperand(i);
-            if (MO.isReg() && MO.isInternalRead())
-              MO.setIsInternalRead(false);
-          }
-          getLiteral(BI, Literals);
-          ClauseContent.push_back(BI);
-        }
-        I = BI;
-        DeleteMI->eraseFromParent();
-      } else {
-        getLiteral(I, Literals);
-        ClauseContent.push_back(I);
-        I++;
-      }
-      for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
-        unsigned literal0 = Literals[i];
-        unsigned literal2 = (i + 1 < e)?Literals[i + 1]:0;
-        MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
-            TII->get(AMDGPU::LITERALS))
-            .addImm(literal0)
-            .addImm(literal2);
-        ClauseContent.push_back(MILit);
-      }
-    }
-    assert(ClauseContent.size() < 128 && "ALU clause is too big");
-    ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
-    return ClauseFile(ClauseHead, std::move(ClauseContent));
-  }
-
-  void
-  EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
-      unsigned &CfCount) {
-    CounterPropagateAddr(Clause.first, CfCount);
-    MachineBasicBlock *BB = Clause.first->getParent();
-    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
-        .addImm(CfCount);
-    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
-      BB->splice(InsertPos, BB, Clause.second[i]);
-    }
-    CfCount += 2 * Clause.second.size();
-  }
-
-  void
-  EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
-      unsigned &CfCount) {
-    Clause.first->getOperand(0).setImm(0);
-    CounterPropagateAddr(Clause.first, CfCount);
-    MachineBasicBlock *BB = Clause.first->getParent();
-    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
-        .addImm(CfCount);
-    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
-      BB->splice(InsertPos, BB, Clause.second[i]);
-    }
-    CfCount += Clause.second.size();
-  }
-
-  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
-    MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
-  }
-  void CounterPropagateAddr(const std::set<MachineInstr *> &MIs,
-                            unsigned Addr) const {
-    for (MachineInstr *MI : MIs) {
-      CounterPropagateAddr(MI, Addr);
-    }
-  }
-
-public:
-  R600ControlFlowFinalizer(TargetMachine &tm)
-      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), ST(nullptr) {}
-
-  bool runOnMachineFunction(MachineFunction &MF) override {
-    ST = &MF.getSubtarget<AMDGPUSubtarget>();
-    MaxFetchInst = ST->getTexVTXClauseSize();
-    TII = static_cast<const R600InstrInfo *>(ST->getInstrInfo());
-    TRI = static_cast<const R600RegisterInfo *>(ST->getRegisterInfo());
-    R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
-
-    CFStack CFStack(ST, MFI->getShaderType());
-    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
-        ++MB) {
-      MachineBasicBlock &MBB = *MB;
-      unsigned CfCount = 0;
-      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
-      std::vector<MachineInstr * > IfThenElseStack;
-      if (MFI->getShaderType() == ShaderType::VERTEX) {
-        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
-            getHWInstrDesc(CF_CALL_FS));
-        CfCount++;
-      }
-      std::vector<ClauseFile> FetchClauses, AluClauses;
-      std::vector<MachineInstr *> LastAlu(1);
-      std::vector<MachineInstr *> ToPopAfter;
-      
-      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
-          I != E;) {
-        if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
-          DEBUG(dbgs() << CfCount << ":"; I->dump(););
-          FetchClauses.push_back(MakeFetchClause(MBB, I));
-          CfCount++;
-          LastAlu.back() = nullptr;
-          continue;
-        }
-
-        MachineBasicBlock::iterator MI = I;
-        if (MI->getOpcode() != AMDGPU::ENDIF)
-          LastAlu.back() = nullptr;
-        if (MI->getOpcode() == AMDGPU::CF_ALU)
-          LastAlu.back() = MI;
-        I++;
-        bool RequiresWorkAround =
-            CFStack.requiresWorkAroundForInst(MI->getOpcode());
-        switch (MI->getOpcode()) {
-        case AMDGPU::CF_ALU_PUSH_BEFORE:
-          if (RequiresWorkAround) {
-            DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
-            BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_EG))
-                .addImm(CfCount + 1)
-                .addImm(1);
-            MI->setDesc(TII->get(AMDGPU::CF_ALU));
-            CfCount++;
-            CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
-          } else
-            CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
-
-        case AMDGPU::CF_ALU:
-          I = MI;
-          AluClauses.push_back(MakeALUClause(MBB, I));
-          DEBUG(dbgs() << CfCount << ":"; MI->dump(););
-          CfCount++;
-          break;
-        case AMDGPU::WHILELOOP: {
-          CFStack.pushLoop();
-          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
-              getHWInstrDesc(CF_WHILE_LOOP))
-              .addImm(1);
-          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
-              std::set<MachineInstr *>());
-          Pair.second.insert(MIb);
-          LoopStack.push_back(std::move(Pair));
-          MI->eraseFromParent();
-          CfCount++;
-          break;
-        }
-        case AMDGPU::ENDLOOP: {
-          CFStack.popLoop();
-          std::pair<unsigned, std::set<MachineInstr *> > Pair =
-              std::move(LoopStack.back());
-          LoopStack.pop_back();
-          CounterPropagateAddr(Pair.second, CfCount);
-          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
-              .addImm(Pair.first + 1);
-          MI->eraseFromParent();
-          CfCount++;
-          break;
-        }
-        case AMDGPU::IF_PREDICATE_SET: {
-          LastAlu.push_back(nullptr);
-          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
-              getHWInstrDesc(CF_JUMP))
-              .addImm(0)
-              .addImm(0);
-          IfThenElseStack.push_back(MIb);
-          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
-          MI->eraseFromParent();
-          CfCount++;
-          break;
-        }
-        case AMDGPU::ELSE: {
-          MachineInstr * JumpInst = IfThenElseStack.back();
-          IfThenElseStack.pop_back();
-          CounterPropagateAddr(JumpInst, CfCount);
-          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
-              getHWInstrDesc(CF_ELSE))
-              .addImm(0)
-              .addImm(0);
-          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
-          IfThenElseStack.push_back(MIb);
-          MI->eraseFromParent();
-          CfCount++;
-          break;
-        }
-        case AMDGPU::ENDIF: {
-          CFStack.popBranch();
-          if (LastAlu.back()) {
-            ToPopAfter.push_back(LastAlu.back());
-          } else {
-            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
-                getHWInstrDesc(CF_POP))
-                .addImm(CfCount + 1)
-                .addImm(1);
-            (void)MIb;
-            DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
-            CfCount++;
-          }
-          
-          MachineInstr *IfOrElseInst = IfThenElseStack.back();
-          IfThenElseStack.pop_back();
-          CounterPropagateAddr(IfOrElseInst, CfCount);
-          IfOrElseInst->getOperand(1).setImm(1);
-          LastAlu.pop_back();
-          MI->eraseFromParent();
-          break;
-        }
-        case AMDGPU::BREAK: {
-          CfCount ++;
-          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
-              getHWInstrDesc(CF_LOOP_BREAK))
-              .addImm(0);
-          LoopStack.back().second.insert(MIb);
-          MI->eraseFromParent();
-          break;
-        }
-        case AMDGPU::CONTINUE: {
-          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
-              getHWInstrDesc(CF_LOOP_CONTINUE))
-              .addImm(0);
-          LoopStack.back().second.insert(MIb);
-          MI->eraseFromParent();
-          CfCount++;
-          break;
-        }
-        case AMDGPU::RETURN: {
-          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
-          CfCount++;
-          MI->eraseFromParent();
-          if (CfCount % 2) {
-            BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
-            CfCount++;
-          }
-          for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
-            EmitFetchClause(I, FetchClauses[i], CfCount);
-          for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
-            EmitALUClause(I, AluClauses[i], CfCount);
-        }
-        default:
-          if (TII->isExport(MI->getOpcode())) {
-            DEBUG(dbgs() << CfCount << ":"; MI->dump(););
-            CfCount++;
-          }
-          break;
-        }
-      }
-      for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
-        MachineInstr *Alu = ToPopAfter[i];
-        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
-            TII->get(AMDGPU::CF_ALU_POP_AFTER))
-            .addImm(Alu->getOperand(0).getImm())
-            .addImm(Alu->getOperand(1).getImm())
-            .addImm(Alu->getOperand(2).getImm())
-            .addImm(Alu->getOperand(3).getImm())
-            .addImm(Alu->getOperand(4).getImm())
-            .addImm(Alu->getOperand(5).getImm())
-            .addImm(Alu->getOperand(6).getImm())
-            .addImm(Alu->getOperand(7).getImm())
-            .addImm(Alu->getOperand(8).getImm());
-        Alu->eraseFromParent();
-      }
-      MFI->StackSize = CFStack.MaxStackSize;
-    }
-
-    return false;
-  }
-
-  const char *getPassName() const override {
-    return "R600 Control Flow Finalizer Pass";
-  }
-};
-
-char R600ControlFlowFinalizer::ID = 0;
-
-} // end anonymous namespace
-
-
-llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
-  return new R600ControlFlowFinalizer(TM);
-}

Removed: llvm/trunk/lib/Target/R600/R600Defines.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600Defines.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600Defines.h (original)
+++ llvm/trunk/lib/Target/R600/R600Defines.h (removed)
@@ -1,171 +0,0 @@
-//===-- R600Defines.h - R600 Helper Macros ----------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-/// \file
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_R600DEFINES_H
-#define LLVM_LIB_TARGET_R600_R600DEFINES_H
-
-#include "llvm/MC/MCRegisterInfo.h"
-
-// Operand Flags
-#define MO_FLAG_CLAMP (1 << 0)
-#define MO_FLAG_NEG   (1 << 1)
-#define MO_FLAG_ABS   (1 << 2)
-#define MO_FLAG_MASK  (1 << 3)
-#define MO_FLAG_PUSH  (1 << 4)
-#define MO_FLAG_NOT_LAST  (1 << 5)
-#define MO_FLAG_LAST  (1 << 6)
-#define NUM_MO_FLAGS 7
-
-/// \brief Helper for getting the operand index for the instruction flags
-/// operand.
-#define GET_FLAG_OPERAND_IDX(Flags) (((Flags) >> 7) & 0x3)
-
-namespace R600_InstFlag {
-  enum TIF {
-    TRANS_ONLY = (1 << 0),
-    TEX = (1 << 1),
-    REDUCTION = (1 << 2),
-    FC = (1 << 3),
-    TRIG = (1 << 4),
-    OP3 = (1 << 5),
-    VECTOR = (1 << 6),
-    //FlagOperand bits 7, 8
-    NATIVE_OPERANDS = (1 << 9),
-    OP1 = (1 << 10),
-    OP2 = (1 << 11),
-    VTX_INST  = (1 << 12),
-    TEX_INST = (1 << 13),
-    ALU_INST = (1 << 14),
-    LDS_1A = (1 << 15),
-    LDS_1A1D = (1 << 16),
-    IS_EXPORT = (1 << 17),
-    LDS_1A2D = (1 << 18)
-  };
-}
-
-#define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS)
-
-/// \brief Defines for extracting register information from register encoding
-#define HW_REG_MASK 0x1ff
-#define HW_CHAN_SHIFT 9
-
-#define GET_REG_CHAN(reg) ((reg) >> HW_CHAN_SHIFT)
-#define GET_REG_INDEX(reg) ((reg) & HW_REG_MASK)
-
-#define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
-#define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)
-
-namespace OpName {
-
-  enum VecOps {
-    UPDATE_EXEC_MASK_X,
-    UPDATE_PREDICATE_X,
-    WRITE_X,
-    OMOD_X,
-    DST_REL_X,
-    CLAMP_X,
-    SRC0_X,
-    SRC0_NEG_X,
-    SRC0_REL_X,
-    SRC0_ABS_X,
-    SRC0_SEL_X,
-    SRC1_X,
-    SRC1_NEG_X,
-    SRC1_REL_X,
-    SRC1_ABS_X,
-    SRC1_SEL_X,
-    PRED_SEL_X,
-    UPDATE_EXEC_MASK_Y,
-    UPDATE_PREDICATE_Y,
-    WRITE_Y,
-    OMOD_Y,
-    DST_REL_Y,
-    CLAMP_Y,
-    SRC0_Y,
-    SRC0_NEG_Y,
-    SRC0_REL_Y,
-    SRC0_ABS_Y,
-    SRC0_SEL_Y,
-    SRC1_Y,
-    SRC1_NEG_Y,
-    SRC1_REL_Y,
-    SRC1_ABS_Y,
-    SRC1_SEL_Y,
-    PRED_SEL_Y,
-    UPDATE_EXEC_MASK_Z,
-    UPDATE_PREDICATE_Z,
-    WRITE_Z,
-    OMOD_Z,
-    DST_REL_Z,
-    CLAMP_Z,
-    SRC0_Z,
-    SRC0_NEG_Z,
-    SRC0_REL_Z,
-    SRC0_ABS_Z,
-    SRC0_SEL_Z,
-    SRC1_Z,
-    SRC1_NEG_Z,
-    SRC1_REL_Z,
-    SRC1_ABS_Z,
-    SRC1_SEL_Z,
-    PRED_SEL_Z,
-    UPDATE_EXEC_MASK_W,
-    UPDATE_PREDICATE_W,
-    WRITE_W,
-    OMOD_W,
-    DST_REL_W,
-    CLAMP_W,
-    SRC0_W,
-    SRC0_NEG_W,
-    SRC0_REL_W,
-    SRC0_ABS_W,
-    SRC0_SEL_W,
-    SRC1_W,
-    SRC1_NEG_W,
-    SRC1_REL_W,
-    SRC1_ABS_W,
-    SRC1_SEL_W,
-    PRED_SEL_W,
-    IMM_0,
-    IMM_1,
-    VEC_COUNT
- };
-
-}
-
-//===----------------------------------------------------------------------===//
-// Config register definitions
-//===----------------------------------------------------------------------===//
-
-#define R_02880C_DB_SHADER_CONTROL                    0x02880C
-#define   S_02880C_KILL_ENABLE(x)                      (((x) & 0x1) << 6)
-
-// These fields are the same for all shader types and families.
-#define   S_NUM_GPRS(x)                         (((x) & 0xFF) << 0)
-#define   S_STACK_SIZE(x)                       (((x) & 0xFF) << 8)
-//===----------------------------------------------------------------------===//
-// R600, R700 Registers
-//===----------------------------------------------------------------------===//
-
-#define R_028850_SQ_PGM_RESOURCES_PS                 0x028850
-#define R_028868_SQ_PGM_RESOURCES_VS                 0x028868
-
-//===----------------------------------------------------------------------===//
-// Evergreen, Northern Islands Registers
-//===----------------------------------------------------------------------===//
-
-#define R_028844_SQ_PGM_RESOURCES_PS                 0x028844
-#define R_028860_SQ_PGM_RESOURCES_VS                 0x028860
-#define R_028878_SQ_PGM_RESOURCES_GS                 0x028878
-#define R_0288D4_SQ_PGM_RESOURCES_LS                 0x0288d4
-
-#define R_0288E8_SQ_LDS_ALLOC                        0x0288E8
-
-#endif

Removed: llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600EmitClauseMarkers.cpp (removed)
@@ -1,336 +0,0 @@
-//===-- R600EmitClauseMarkers.cpp - Emit CF_ALU ---------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Add CF_ALU. R600 Alu instructions are grouped in clause which can hold
-/// 128 Alu instructions ; these instructions can access up to 4 prefetched
-/// 4 lines of 16 registers from constant buffers. Such ALU clauses are
-/// initiated by CF_ALU instructions.
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "R600Defines.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "R600RegisterInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-
-using namespace llvm;
-
-namespace llvm {
-  void initializeR600EmitClauseMarkersPass(PassRegistry&);
-}
-
-namespace {
-
-class R600EmitClauseMarkers : public MachineFunctionPass {
-
-private:
-  const R600InstrInfo *TII;
-  int Address;
-
-  unsigned OccupiedDwords(MachineInstr *MI) const {
-    switch (MI->getOpcode()) {
-    case AMDGPU::INTERP_PAIR_XY:
-    case AMDGPU::INTERP_PAIR_ZW:
-    case AMDGPU::INTERP_VEC_LOAD:
-    case AMDGPU::DOT_4:
-      return 4;
-    case AMDGPU::KILL:
-      return 0;
-    default:
-      break;
-    }
-
-    // These will be expanded to two ALU instructions in the
-    // ExpandSpecialInstructions pass.
-    if (TII->isLDSRetInstr(MI->getOpcode()))
-      return 2;
-
-    if(TII->isVector(*MI) ||
-        TII->isCubeOp(MI->getOpcode()) ||
-        TII->isReductionOp(MI->getOpcode()))
-      return 4;
-
-    unsigned NumLiteral = 0;
-    for (MachineInstr::mop_iterator It = MI->operands_begin(),
-        E = MI->operands_end(); It != E; ++It) {
-      MachineOperand &MO = *It;
-      if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
-        ++NumLiteral;
-    }
-    return 1 + NumLiteral;
-  }
-
-  bool isALU(const MachineInstr *MI) const {
-    if (TII->isALUInstr(MI->getOpcode()))
-      return true;
-    if (TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode()))
-      return true;
-    switch (MI->getOpcode()) {
-    case AMDGPU::PRED_X:
-    case AMDGPU::INTERP_PAIR_XY:
-    case AMDGPU::INTERP_PAIR_ZW:
-    case AMDGPU::INTERP_VEC_LOAD:
-    case AMDGPU::COPY:
-    case AMDGPU::DOT_4:
-      return true;
-    default:
-      return false;
-    }
-  }
-
-  bool IsTrivialInst(MachineInstr *MI) const {
-    switch (MI->getOpcode()) {
-    case AMDGPU::KILL:
-    case AMDGPU::RETURN:
-    case AMDGPU::IMPLICIT_DEF:
-      return true;
-    default:
-      return false;
-    }
-  }
-
-  std::pair<unsigned, unsigned> getAccessedBankLine(unsigned Sel) const {
-    // Sel is (512 + (kc_bank << 12) + ConstIndex) << 2
-    // (See also R600ISelLowering.cpp)
-    // ConstIndex value is in [0, 4095];
-    return std::pair<unsigned, unsigned>(
-        ((Sel >> 2) - 512) >> 12, // KC_BANK
-        // Line Number of ConstIndex
-        // A line contains 16 constant registers however KCX bank can lock
-        // two line at the same time ; thus we want to get an even line number.
-        // Line number can be retrieved with (>>4), using (>>5) <<1 generates
-        // an even number.
-        ((((Sel >> 2) - 512) & 4095) >> 5) << 1);
-  }
-
-  bool SubstituteKCacheBank(MachineInstr *MI,
-      std::vector<std::pair<unsigned, unsigned> > &CachedConsts,
-      bool UpdateInstr = true) const {
-    std::vector<std::pair<unsigned, unsigned> > UsedKCache;
-
-    if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4)
-      return true;
-
-    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
-        TII->getSrcs(MI);
-    assert((TII->isALUInstr(MI->getOpcode()) ||
-        MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
-    for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
-      if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
-        continue;
-      unsigned Sel = Consts[i].second;
-      unsigned Chan = Sel & 3, Index = ((Sel >> 2) - 512) & 31;
-      unsigned KCacheIndex = Index * 4 + Chan;
-      const std::pair<unsigned, unsigned> &BankLine = getAccessedBankLine(Sel);
-      if (CachedConsts.empty()) {
-        CachedConsts.push_back(BankLine);
-        UsedKCache.push_back(std::pair<unsigned, unsigned>(0, KCacheIndex));
-        continue;
-      }
-      if (CachedConsts[0] == BankLine) {
-        UsedKCache.push_back(std::pair<unsigned, unsigned>(0, KCacheIndex));
-        continue;
-      }
-      if (CachedConsts.size() == 1) {
-        CachedConsts.push_back(BankLine);
-        UsedKCache.push_back(std::pair<unsigned, unsigned>(1, KCacheIndex));
-        continue;
-      }
-      if (CachedConsts[1] == BankLine) {
-        UsedKCache.push_back(std::pair<unsigned, unsigned>(1, KCacheIndex));
-        continue;
-      }
-      return false;
-    }
-
-    if (!UpdateInstr)
-      return true;
-
-    for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) {
-      if (Consts[i].first->getReg() != AMDGPU::ALU_CONST)
-        continue;
-      switch(UsedKCache[j].first) {
-      case 0:
-        Consts[i].first->setReg(
-            AMDGPU::R600_KC0RegClass.getRegister(UsedKCache[j].second));
-        break;
-      case 1:
-        Consts[i].first->setReg(
-            AMDGPU::R600_KC1RegClass.getRegister(UsedKCache[j].second));
-        break;
-      default:
-        llvm_unreachable("Wrong Cache Line");
-      }
-      j++;
-    }
-    return true;
-  }
-
-  bool canClauseLocalKillFitInClause(
-                        unsigned AluInstCount,
-                        std::vector<std::pair<unsigned, unsigned> > KCacheBanks,
-                        MachineBasicBlock::iterator Def,
-                        MachineBasicBlock::iterator BBEnd) {
-    const R600RegisterInfo &TRI = TII->getRegisterInfo();
-    for (MachineInstr::const_mop_iterator
-           MOI = Def->operands_begin(),
-           MOE = Def->operands_end(); MOI != MOE; ++MOI) {
-      if (!MOI->isReg() || !MOI->isDef() ||
-          TRI.isPhysRegLiveAcrossClauses(MOI->getReg()))
-        continue;
-
-      // Def defines a clause local register, so check that its use will fit
-      // in the clause.
-      unsigned LastUseCount = 0;
-      for (MachineBasicBlock::iterator UseI = Def; UseI != BBEnd; ++UseI) {
-        AluInstCount += OccupiedDwords(UseI);
-        // Make sure we won't need to end the clause due to KCache limitations.
-        if (!SubstituteKCacheBank(UseI, KCacheBanks, false))
-          return false;
-
-        // We have reached the maximum instruction limit before finding the
-        // use that kills this register, so we cannot use this def in the
-        // current clause.
-        if (AluInstCount >= TII->getMaxAlusPerClause())
-          return false;
-
-        // Register kill flags have been cleared by the time we get to this
-        // pass, but it is safe to assume that all uses of this register
-        // occur in the same basic block as its definition, because
-        // it is illegal for the scheduler to schedule them in
-        // different blocks.
-        if (UseI->findRegisterUseOperandIdx(MOI->getReg()))
-          LastUseCount = AluInstCount;
-
-        if (UseI != Def && UseI->findRegisterDefOperandIdx(MOI->getReg()) != -1)
-          break;
-      }
-      if (LastUseCount)
-        return LastUseCount <= TII->getMaxAlusPerClause();
-      llvm_unreachable("Clause local register live at end of clause.");
-    }
-    return true;
-  }
-
-  MachineBasicBlock::iterator
-  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) {
-    MachineBasicBlock::iterator ClauseHead = I;
-    std::vector<std::pair<unsigned, unsigned> > KCacheBanks;
-    bool PushBeforeModifier = false;
-    unsigned AluInstCount = 0;
-    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
-      if (IsTrivialInst(I))
-        continue;
-      if (!isALU(I))
-        break;
-      if (AluInstCount > TII->getMaxAlusPerClause())
-        break;
-      if (I->getOpcode() == AMDGPU::PRED_X) {
-        // We put PRED_X in its own clause to ensure that ifcvt won't create
-        // clauses with more than 128 insts.
-        // IfCvt is indeed checking that "then" and "else" branches of an if
-        // statement have less than ~60 insts thus converted clauses can't be
-        // bigger than ~121 insts (predicate setter needs to be in the same
-        // clause as predicated alus).
-        if (AluInstCount > 0)
-          break;
-        if (TII->getFlagOp(I).getImm() & MO_FLAG_PUSH)
-          PushBeforeModifier = true;
-        AluInstCount ++;
-        continue;
-      }
-      // XXX: GROUP_BARRIER instructions cannot be in the same ALU clause as:
-      //
-      // * KILL or INTERP instructions
-      // * Any instruction that sets UPDATE_EXEC_MASK or UPDATE_PRED bits
-      // * Uses waterfalling (i.e. INDEX_MODE = AR.X)
-      //
-      // XXX: These checks have not been implemented yet.
-      if (TII->mustBeLastInClause(I->getOpcode())) {
-        I++;
-        break;
-      }
-
-      // If this instruction defines a clause local register, make sure
-      // its use can fit in this clause.
-      if (!canClauseLocalKillFitInClause(AluInstCount, KCacheBanks, I, E))
-        break;
-
-      if (!SubstituteKCacheBank(I, KCacheBanks))
-        break;
-      AluInstCount += OccupiedDwords(I);
-    }
-    unsigned Opcode = PushBeforeModifier ?
-        AMDGPU::CF_ALU_PUSH_BEFORE : AMDGPU::CF_ALU;
-    BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead), TII->get(Opcode))
-    // We don't use the ADDR field until R600ControlFlowFinalizer pass, where
-    // it is safe to assume it is 0. However if we always put 0 here, the ifcvt
-    // pass may assume that identical ALU clause starter at the beginning of a 
-    // true and false branch can be factorized which is not the case.
-        .addImm(Address++) // ADDR
-        .addImm(KCacheBanks.empty()?0:KCacheBanks[0].first) // KB0
-        .addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].first) // KB1
-        .addImm(KCacheBanks.empty()?0:2) // KM0
-        .addImm((KCacheBanks.size() < 2)?0:2) // KM1
-        .addImm(KCacheBanks.empty()?0:KCacheBanks[0].second) // KLINE0
-        .addImm((KCacheBanks.size() < 2)?0:KCacheBanks[1].second) // KLINE1
-        .addImm(AluInstCount) // COUNT
-        .addImm(1); // Enabled
-    return I;
-  }
-
-public:
-  static char ID;
-  R600EmitClauseMarkers() : MachineFunctionPass(ID), TII(nullptr), Address(0) {
-
-    initializeR600EmitClauseMarkersPass(*PassRegistry::getPassRegistry());
-  }
-
-  bool runOnMachineFunction(MachineFunction &MF) override {
-    TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
-
-    for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
-                                                    BB != BB_E; ++BB) {
-      MachineBasicBlock &MBB = *BB;
-      MachineBasicBlock::iterator I = MBB.begin();
-      if (I->getOpcode() == AMDGPU::CF_ALU)
-        continue; // BB was already parsed
-      for (MachineBasicBlock::iterator E = MBB.end(); I != E;) {
-        if (isALU(I))
-          I = MakeALUClause(MBB, I);
-        else
-          ++I;
-      }
-    }
-    return false;
-  }
-
-  const char *getPassName() const override {
-    return "R600 Emit Clause Markers Pass";
-  }
-};
-
-char R600EmitClauseMarkers::ID = 0;
-
-} // end anonymous namespace
-
-INITIALIZE_PASS_BEGIN(R600EmitClauseMarkers, "emitclausemarkers",
-                      "R600 Emit Clause Markters", false, false)
-INITIALIZE_PASS_END(R600EmitClauseMarkers, "emitclausemarkers",
-                      "R600 Emit Clause Markters", false, false)
-
-llvm::FunctionPass *llvm::createR600EmitClauseMarkers() {
-  return new R600EmitClauseMarkers();
-}
-

Removed: llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600ExpandSpecialInstrs.cpp (removed)
@@ -1,349 +0,0 @@
-//===-- R600ExpandSpecialInstrs.cpp - Expand special instructions ---------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// Vector, Reduction, and Cube instructions need to fill the entire instruction
-/// group to work correctly.  This pass expands these individual instructions
-/// into several instructions that will completely fill the instruction group.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPU.h"
-#include "R600Defines.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "R600RegisterInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-
-using namespace llvm;
-
-namespace {
-
-class R600ExpandSpecialInstrsPass : public MachineFunctionPass {
-
-private:
-  static char ID;
-  const R600InstrInfo *TII;
-
-  void SetFlagInNewMI(MachineInstr *NewMI, const MachineInstr *OldMI,
-      unsigned Op);
-
-public:
-  R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
-    TII(nullptr) { }
-
-  bool runOnMachineFunction(MachineFunction &MF) override;
-
-  const char *getPassName() const override {
-    return "R600 Expand special instructions pass";
-  }
-};
-
-} // End anonymous namespace
-
-char R600ExpandSpecialInstrsPass::ID = 0;
-
-FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
-  return new R600ExpandSpecialInstrsPass(TM);
-}
-
-void R600ExpandSpecialInstrsPass::SetFlagInNewMI(MachineInstr *NewMI,
-    const MachineInstr *OldMI, unsigned Op) {
-  int OpIdx = TII->getOperandIdx(*OldMI, Op);
-  if (OpIdx > -1) {
-    uint64_t Val = OldMI->getOperand(OpIdx).getImm();
-    TII->setImmOperand(NewMI, Op, Val);
-  }
-}
-
-bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {
-  TII = static_cast<const R600InstrInfo *>(MF.getSubtarget().getInstrInfo());
-
-  const R600RegisterInfo &TRI = TII->getRegisterInfo();
-
-  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
-                                                  BB != BB_E; ++BB) {
-    MachineBasicBlock &MBB = *BB;
-    MachineBasicBlock::iterator I = MBB.begin();
-    while (I != MBB.end()) {
-      MachineInstr &MI = *I;
-      I = std::next(I);
-
-      // Expand LDS_*_RET instructions
-      if (TII->isLDSRetInstr(MI.getOpcode())) {
-        int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
-        assert(DstIdx != -1);
-        MachineOperand &DstOp = MI.getOperand(DstIdx);
-        MachineInstr *Mov = TII->buildMovInstr(&MBB, I,
-                                               DstOp.getReg(), AMDGPU::OQAP);
-        DstOp.setReg(AMDGPU::OQAP);
-        int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(),
-                                           AMDGPU::OpName::pred_sel);
-        int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(),
-                                           AMDGPU::OpName::pred_sel);
-        // Copy the pred_sel bit
-        Mov->getOperand(MovPredSelIdx).setReg(
-            MI.getOperand(LDSPredSelIdx).getReg());
-      }
-
-      switch (MI.getOpcode()) {
-      default: break;
-      // Expand PRED_X to one of the PRED_SET instructions.
-      case AMDGPU::PRED_X: {
-        uint64_t Flags = MI.getOperand(3).getImm();
-        // The native opcode used by PRED_X is stored as an immediate in the
-        // third operand.
-        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
-                                            MI.getOperand(2).getImm(), // opcode
-                                            MI.getOperand(0).getReg(), // dst
-                                            MI.getOperand(1).getReg(), // src0
-                                            AMDGPU::ZERO);             // src1
-        TII->addFlag(PredSet, 0, MO_FLAG_MASK);
-        if (Flags & MO_FLAG_PUSH) {
-          TII->setImmOperand(PredSet, AMDGPU::OpName::update_exec_mask, 1);
-        } else {
-          TII->setImmOperand(PredSet, AMDGPU::OpName::update_pred, 1);
-        }
-        MI.eraseFromParent();
-        continue;
-        }
-
-      case AMDGPU::INTERP_PAIR_XY: {
-        MachineInstr *BMI;
-        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
-                MI.getOperand(2).getImm());
-
-        for (unsigned Chan = 0; Chan < 4; ++Chan) {
-          unsigned DstReg;
-
-          if (Chan < 2)
-            DstReg = MI.getOperand(Chan).getReg();
-          else
-            DstReg = Chan == 2 ? AMDGPU::T0_Z : AMDGPU::T0_W;
-
-          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_XY,
-              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);
-
-          if (Chan > 0) {
-            BMI->bundleWithPred();
-          }
-          if (Chan >= 2)
-            TII->addFlag(BMI, 0, MO_FLAG_MASK);
-          if (Chan != 3)
-            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
-        }
-
-        MI.eraseFromParent();
-        continue;
-        }
-
-      case AMDGPU::INTERP_PAIR_ZW: {
-        MachineInstr *BMI;
-        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
-                MI.getOperand(2).getImm());
-
-        for (unsigned Chan = 0; Chan < 4; ++Chan) {
-          unsigned DstReg;
-
-          if (Chan < 2)
-            DstReg = Chan == 0 ? AMDGPU::T0_X : AMDGPU::T0_Y;
-          else
-            DstReg = MI.getOperand(Chan-2).getReg();
-
-          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_ZW,
-              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);
-
-          if (Chan > 0) {
-            BMI->bundleWithPred();
-          }
-          if (Chan < 2)
-            TII->addFlag(BMI, 0, MO_FLAG_MASK);
-          if (Chan != 3)
-            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
-        }
-
-        MI.eraseFromParent();
-        continue;
-        }
-
-      case AMDGPU::INTERP_VEC_LOAD: {
-        const R600RegisterInfo &TRI = TII->getRegisterInfo();
-        MachineInstr *BMI;
-        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
-                MI.getOperand(1).getImm());
-        unsigned DstReg = MI.getOperand(0).getReg();
-
-        for (unsigned Chan = 0; Chan < 4; ++Chan) {
-          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_LOAD_P0,
-              TRI.getSubReg(DstReg, TRI.getSubRegFromChannel(Chan)), PReg);
-          if (Chan > 0) {
-            BMI->bundleWithPred();
-          }
-          if (Chan != 3)
-            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
-        }
-
-        MI.eraseFromParent();
-        continue;
-        }
-      case AMDGPU::DOT_4: {
-
-        const R600RegisterInfo &TRI = TII->getRegisterInfo();
-
-        unsigned DstReg = MI.getOperand(0).getReg();
-        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
-
-        for (unsigned Chan = 0; Chan < 4; ++Chan) {
-          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
-          unsigned SubDstReg =
-              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
-          MachineInstr *BMI =
-              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
-          if (Chan > 0) {
-            BMI->bundleWithPred();
-          }
-          if (Mask) {
-            TII->addFlag(BMI, 0, MO_FLAG_MASK);
-          }
-          if (Chan != 3)
-            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
-          unsigned Opcode = BMI->getOpcode();
-          // While not strictly necessary from hw point of view, we force
-          // all src operands of a dot4 inst to belong to the same slot.
-          unsigned Src0 = BMI->getOperand(
-              TII->getOperandIdx(Opcode, AMDGPU::OpName::src0))
-              .getReg();
-          unsigned Src1 = BMI->getOperand(
-              TII->getOperandIdx(Opcode, AMDGPU::OpName::src1))
-              .getReg();
-          (void) Src0;
-          (void) Src1;
-          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
-              (TRI.getEncodingValue(Src1) & 0xff) < 127)
-            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
-        }
-        MI.eraseFromParent();
-        continue;
-      }
-      }
-
-      bool IsReduction = TII->isReductionOp(MI.getOpcode());
-      bool IsVector = TII->isVector(MI);
-      bool IsCube = TII->isCubeOp(MI.getOpcode());
-      if (!IsReduction && !IsVector && !IsCube) {
-        continue;
-      }
-
-      // Expand the instruction
-      //
-      // Reduction instructions:
-      // T0_X = DP4 T1_XYZW, T2_XYZW
-      // becomes:
-      // TO_X = DP4 T1_X, T2_X
-      // TO_Y (write masked) = DP4 T1_Y, T2_Y
-      // TO_Z (write masked) = DP4 T1_Z, T2_Z
-      // TO_W (write masked) = DP4 T1_W, T2_W
-      //
-      // Vector instructions:
-      // T0_X = MULLO_INT T1_X, T2_X
-      // becomes:
-      // T0_X = MULLO_INT T1_X, T2_X
-      // T0_Y (write masked) = MULLO_INT T1_X, T2_X
-      // T0_Z (write masked) = MULLO_INT T1_X, T2_X
-      // T0_W (write masked) = MULLO_INT T1_X, T2_X
-      //
-      // Cube instructions:
-      // T0_XYZW = CUBE T1_XYZW
-      // becomes:
-      // TO_X = CUBE T1_Z, T1_Y
-      // T0_Y = CUBE T1_Z, T1_X
-      // T0_Z = CUBE T1_X, T1_Z
-      // T0_W = CUBE T1_Y, T1_Z
-      for (unsigned Chan = 0; Chan < 4; Chan++) {
-        unsigned DstReg = MI.getOperand(
-                            TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg();
-        unsigned Src0 = MI.getOperand(
-                           TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg();
-        unsigned Src1 = 0;
-
-        // Determine the correct source registers
-        if (!IsCube) {
-          int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1);
-          if (Src1Idx != -1) {
-            Src1 = MI.getOperand(Src1Idx).getReg();
-          }
-        }
-        if (IsReduction) {
-          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
-          Src0 = TRI.getSubReg(Src0, SubRegIndex);
-          Src1 = TRI.getSubReg(Src1, SubRegIndex);
-        } else if (IsCube) {
-          static const int CubeSrcSwz[] = {2, 2, 0, 1};
-          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
-          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
-          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
-          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
-        }
-
-        // Determine the correct destination registers;
-        bool Mask = false;
-        bool NotLast = true;
-        if (IsCube) {
-          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
-          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
-        } else {
-          // Mask the write if the original instruction does not write to
-          // the current Channel.
-          Mask = (Chan != TRI.getHWRegChan(DstReg));
-          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
-          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
-        }
-
-        // Set the IsLast bit
-        NotLast = (Chan != 3 );
-
-        // Add the new instruction
-        unsigned Opcode = MI.getOpcode();
-        switch (Opcode) {
-        case AMDGPU::CUBE_r600_pseudo:
-          Opcode = AMDGPU::CUBE_r600_real;
-          break;
-        case AMDGPU::CUBE_eg_pseudo:
-          Opcode = AMDGPU::CUBE_eg_real;
-          break;
-        default:
-          break;
-        }
-
-        MachineInstr *NewMI =
-          TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);
-
-        if (Chan != 0)
-          NewMI->bundleWithPred();
-        if (Mask) {
-          TII->addFlag(NewMI, 0, MO_FLAG_MASK);
-        }
-        if (NotLast) {
-          TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
-        }
-        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::clamp);
-        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::literal);
-        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_abs);
-        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_abs);
-        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src0_neg);
-        SetFlagInNewMI(NewMI, &MI, AMDGPU::OpName::src1_neg);
-      }
-      MI.eraseFromParent();
-    }
-  }
-  return false;
-}

Removed: llvm/trunk/lib/Target/R600/R600ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600ISelLowering.cpp?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/R600/R600ISelLowering.cpp (removed)
@@ -1,2286 +0,0 @@
-//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation -----------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief Custom DAG lowering for R600
-//
-//===----------------------------------------------------------------------===//
-
-#include "R600ISelLowering.h"
-#include "AMDGPUFrameLowering.h"
-#include "AMDGPUIntrinsicInfo.h"
-#include "AMDGPUSubtarget.h"
-#include "R600Defines.h"
-#include "R600InstrInfo.h"
-#include "R600MachineFunctionInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/IR/Argument.h"
-#include "llvm/IR/Function.h"
-
-using namespace llvm;
-
-//===----------------------------------------------------------------------===//
-/// \brief R600TargetLowering constructor.
-///
-/// Registers the register classes for every scalar and vector type the R600
-/// hardware models (f32/i32 plus their 2- and 4-wide vector forms), then
-/// configures per-operation legalization actions: which condition codes and
-/// ISD nodes are Expanded, which are Custom-lowered (see LowerOperation),
-/// and which loads/stores of the private address space need custom handling.
-//===----------------------------------------------------------------------===//
-R600TargetLowering::R600TargetLowering(TargetMachine &TM,
-                                       const AMDGPUSubtarget &STI)
-    : AMDGPUTargetLowering(TM, STI), Gen(STI.getGeneration()) {
-  addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass);
-  addRegisterClass(MVT::f32, &AMDGPU::R600_Reg32RegClass);
-  addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass);
-  addRegisterClass(MVT::i32, &AMDGPU::R600_Reg32RegClass);
-  addRegisterClass(MVT::v2f32, &AMDGPU::R600_Reg64RegClass);
-  addRegisterClass(MVT::v2i32, &AMDGPU::R600_Reg64RegClass);
-
-  computeRegisterProperties(STI.getRegisterInfo());
-
-  // Set condition code actions
-  setCondCodeAction(ISD::SETO,   MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUO,  MVT::f32, Expand);
-  setCondCodeAction(ISD::SETLT,  MVT::f32, Expand);
-  setCondCodeAction(ISD::SETLE,  MVT::f32, Expand);
-  setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
-  setCondCodeAction(ISD::SETULE, MVT::f32, Expand);
-
-  setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
-  setCondCodeAction(ISD::SETLT, MVT::i32, Expand);
-  setCondCodeAction(ISD::SETULE, MVT::i32, Expand);
-  setCondCodeAction(ISD::SETULT, MVT::i32, Expand);
-
-  // FCOS/FSIN are custom-lowered via LowerTrig (see LowerOperation).
-  setOperationAction(ISD::FCOS, MVT::f32, Custom);
-  setOperationAction(ISD::FSIN, MVT::f32, Custom);
-
-  setOperationAction(ISD::SETCC, MVT::v4i32, Expand);
-  setOperationAction(ISD::SETCC, MVT::v2i32, Expand);
-
-  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
-  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
-  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
-
-  setOperationAction(ISD::FSUB, MVT::f32, Expand);
-
-  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
-  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);
-
-  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
-  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
-
-  setOperationAction(ISD::SETCC, MVT::i32, Expand);
-  setOperationAction(ISD::SETCC, MVT::f32, Expand);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Custom);
-  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
-  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
-
-  setOperationAction(ISD::SELECT, MVT::i32, Expand);
-  setOperationAction(ISD::SELECT, MVT::f32, Expand);
-  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
-  setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
-
-  // ADD, SUB overflow.
-  // TODO: turn these into Legal?
-  if (Subtarget->hasCARRY())
-    setOperationAction(ISD::UADDO, MVT::i32, Custom);
-
-  if (Subtarget->hasBORROW())
-    setOperationAction(ISD::USUBO, MVT::i32, Custom);
-
-  // Expand sign extension of vectors
-  if (!Subtarget->hasBFE())
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Expand);
-
-  if (!Subtarget->hasBFE())
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Expand);
-
-  if (!Subtarget->hasBFE())
-    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);
-
-  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);
-
-
-  // Legalize loads and stores to the private address space.
-  setOperationAction(ISD::LOAD, MVT::i32, Custom);
-  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
-  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-
-  // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
-  // spaces, so it is custom lowered to handle those where it isn't.
-  for (MVT VT : MVT::integer_valuetypes()) {
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Custom);
-    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Custom);
-
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Custom);
-    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Custom);
-
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Custom);
-    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Custom);
-  }
-
-  setOperationAction(ISD::STORE, MVT::i8, Custom);
-  setOperationAction(ISD::STORE, MVT::i32, Custom);
-  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
-  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
-  setTruncStoreAction(MVT::i32, MVT::i8, Custom);
-  setTruncStoreAction(MVT::i32, MVT::i16, Custom);
-
-  setOperationAction(ISD::LOAD, MVT::i32, Custom);
-  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
-  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
-
-  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Custom);
-  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
-  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
-  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
-
-  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i32, Custom);
-  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Custom);
-  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
-  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
-
-  // DAG combines handled in PerformDAGCombine for these node kinds.
-  setTargetDAGCombine(ISD::FP_ROUND);
-  setTargetDAGCombine(ISD::FP_TO_SINT);
-  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
-  setTargetDAGCombine(ISD::SELECT_CC);
-  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
-
-  // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
-  //  to be Legal/Custom in order to avoid library calls.
-  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
-  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
-  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
-
-  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
-
-  // Expand carry/borrow-style add/sub nodes for scalar integer types.
-  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
-  for (MVT VT : ScalarIntVTs) {
-    setOperationAction(ISD::ADDC, VT, Expand);
-    setOperationAction(ISD::SUBC, VT, Expand);
-    setOperationAction(ISD::ADDE, VT, Expand);
-    setOperationAction(ISD::SUBE, VT, Expand);
-  }
-
-  setSchedulingPreference(Sched::Source);
-}
-
-//===----------------------------------------------------------------------===//
-/// \brief Expand pseudo-instructions that require custom MachineInstr
-/// insertion after instruction selection.
-///
-/// Handled cases include: rewriting unused LDS_*_RET instructions to their
-/// NORET forms; CLAMP/FABS/FNEG pseudos expanded to a MOV carrying the
-/// corresponding operand flag; immediate moves; CONST_COPY; cacheless RAT
-/// writes (setting the end-of-program bit when followed by RETURN); TXD and
-/// TXD_SHADOW expansion into gradient-setup plus sample instructions;
-/// branch pseudos lowered through PRED_X + JUMP_COND; export merging; and
-/// RETURN, which gets the live-out registers added as implicit uses.
-/// Unless a case returns BB early, the original pseudo is erased at the end.
-//===----------------------------------------------------------------------===//
-MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
-    MachineInstr * MI, MachineBasicBlock * BB) const {
-  MachineFunction * MF = BB->getParent();
-  MachineRegisterInfo &MRI = MF->getRegInfo();
-  MachineBasicBlock::iterator I = *MI;
-  const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
-
-  switch (MI->getOpcode()) {
-  default:
-    // Replace LDS_*_RET instruction that don't have any uses with the
-    // equivalent LDS_*_NORET instruction.
-    if (TII->isLDSRetInstr(MI->getOpcode())) {
-      int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
-      assert(DstIdx != -1);
-      MachineInstrBuilder NewMI;
-      // FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
-      //        LDS_1A2D support and remove this special case.
-      if (!MRI.use_empty(MI->getOperand(DstIdx).getReg()) ||
-           MI->getOpcode() == AMDGPU::LDS_CMPST_RET)
-        return BB;
-
-      // Copy every operand except the (unused) destination to the NORET form.
-      NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
-                      TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
-      for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
-        NewMI.addOperand(MI->getOperand(i));
-      }
-    } else {
-      return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
-    }
-    break;
-  case AMDGPU::CLAMP_R600: {
-    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
-                                                   AMDGPU::MOV,
-                                                   MI->getOperand(0).getReg(),
-                                                   MI->getOperand(1).getReg());
-    TII->addFlag(NewMI, 0, MO_FLAG_CLAMP);
-    break;
-  }
-
-  case AMDGPU::FABS_R600: {
-    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
-                                                    AMDGPU::MOV,
-                                                    MI->getOperand(0).getReg(),
-                                                    MI->getOperand(1).getReg());
-    TII->addFlag(NewMI, 0, MO_FLAG_ABS);
-    break;
-  }
-
-  case AMDGPU::FNEG_R600: {
-    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
-                                                    AMDGPU::MOV,
-                                                    MI->getOperand(0).getReg(),
-                                                    MI->getOperand(1).getReg());
-    TII->addFlag(NewMI, 0, MO_FLAG_NEG);
-    break;
-  }
-
-  case AMDGPU::MASK_WRITE: {
-    // Propagate the mask flag onto the instruction defining the masked
-    // register rather than emitting a new instruction.
-    unsigned maskedRegister = MI->getOperand(0).getReg();
-    assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
-    MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
-    TII->addFlag(defInstr, 0, MO_FLAG_MASK);
-    break;
-  }
-
-  case AMDGPU::MOV_IMM_F32:
-    TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
-                     MI->getOperand(1).getFPImm()->getValueAPF()
-                         .bitcastToAPInt().getZExtValue());
-    break;
-  case AMDGPU::MOV_IMM_I32:
-    TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
-                     MI->getOperand(1).getImm());
-    break;
-  case AMDGPU::CONST_COPY: {
-    MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
-        MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
-    TII->setImmOperand(NewMI, AMDGPU::OpName::src0_sel,
-        MI->getOperand(1).getImm());
-    break;
-  }
-
-  case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
-  case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
-  case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
-    unsigned EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
-
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
-            .addOperand(MI->getOperand(0))
-            .addOperand(MI->getOperand(1))
-            .addImm(EOP); // Set End of program bit
-    break;
-  }
-
-  case AMDGPU::TXD: {
-    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-    MachineOperand &RID = MI->getOperand(4);
-    MachineOperand &SID = MI->getOperand(5);
-    unsigned TextureId = MI->getOperand(6).getImm();
-    unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
-    unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
-
-    // Adjust source swizzle / coordinate-type bits per texture target.
-    switch (TextureId) {
-    case 5: // Rect
-      CTX = CTY = 0;
-      break;
-    case 6: // Shadow1D
-      SrcW = SrcZ;
-      break;
-    case 7: // Shadow2D
-      SrcW = SrcZ;
-      break;
-    case 8: // ShadowRect
-      CTX = CTY = 0;
-      SrcW = SrcZ;
-      break;
-    case 9: // 1DArray
-      SrcZ = SrcY;
-      CTZ = 0;
-      break;
-    case 10: // 2DArray
-      CTZ = 0;
-      break;
-    case 11: // Shadow1DArray
-      SrcZ = SrcY;
-      CTZ = 0;
-      break;
-    case 12: // Shadow2DArray
-      CTZ = 0;
-      break;
-    }
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
-            .addOperand(MI->getOperand(3))
-            .addImm(SrcX)
-            .addImm(SrcY)
-            .addImm(SrcZ)
-            .addImm(SrcW)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(1)
-            .addImm(2)
-            .addImm(3)
-            .addOperand(RID)
-            .addOperand(SID)
-            .addImm(CTX)
-            .addImm(CTY)
-            .addImm(CTZ)
-            .addImm(CTW);
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
-            .addOperand(MI->getOperand(2))
-            .addImm(SrcX)
-            .addImm(SrcY)
-            .addImm(SrcZ)
-            .addImm(SrcW)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(1)
-            .addImm(2)
-            .addImm(3)
-            .addOperand(RID)
-            .addOperand(SID)
-            .addImm(CTX)
-            .addImm(CTY)
-            .addImm(CTZ)
-            .addImm(CTW);
-    // The sample reads the gradients through implicit uses of T0/T1.
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
-            .addOperand(MI->getOperand(0))
-            .addOperand(MI->getOperand(1))
-            .addImm(SrcX)
-            .addImm(SrcY)
-            .addImm(SrcZ)
-            .addImm(SrcW)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(1)
-            .addImm(2)
-            .addImm(3)
-            .addOperand(RID)
-            .addOperand(SID)
-            .addImm(CTX)
-            .addImm(CTY)
-            .addImm(CTZ)
-            .addImm(CTW)
-            .addReg(T0, RegState::Implicit)
-            .addReg(T1, RegState::Implicit);
-    break;
-  }
-
-  case AMDGPU::TXD_SHADOW: {
-    unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-    unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
-    MachineOperand &RID = MI->getOperand(4);
-    MachineOperand &SID = MI->getOperand(5);
-    unsigned TextureId = MI->getOperand(6).getImm();
-    unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
-    unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
-
-    // Same texture-target adjustments as the TXD case above.
-    switch (TextureId) {
-    case 5: // Rect
-      CTX = CTY = 0;
-      break;
-    case 6: // Shadow1D
-      SrcW = SrcZ;
-      break;
-    case 7: // Shadow2D
-      SrcW = SrcZ;
-      break;
-    case 8: // ShadowRect
-      CTX = CTY = 0;
-      SrcW = SrcZ;
-      break;
-    case 9: // 1DArray
-      SrcZ = SrcY;
-      CTZ = 0;
-      break;
-    case 10: // 2DArray
-      CTZ = 0;
-      break;
-    case 11: // Shadow1DArray
-      SrcZ = SrcY;
-      CTZ = 0;
-      break;
-    case 12: // Shadow2DArray
-      CTZ = 0;
-      break;
-    }
-
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
-            .addOperand(MI->getOperand(3))
-            .addImm(SrcX)
-            .addImm(SrcY)
-            .addImm(SrcZ)
-            .addImm(SrcW)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(1)
-            .addImm(2)
-            .addImm(3)
-            .addOperand(RID)
-            .addOperand(SID)
-            .addImm(CTX)
-            .addImm(CTY)
-            .addImm(CTZ)
-            .addImm(CTW);
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
-            .addOperand(MI->getOperand(2))
-            .addImm(SrcX)
-            .addImm(SrcY)
-            .addImm(SrcZ)
-            .addImm(SrcW)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(1)
-            .addImm(2)
-            .addImm(3)
-            .addOperand(RID)
-            .addOperand(SID)
-            .addImm(CTX)
-            .addImm(CTY)
-            .addImm(CTZ)
-            .addImm(CTW);
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
-            .addOperand(MI->getOperand(0))
-            .addOperand(MI->getOperand(1))
-            .addImm(SrcX)
-            .addImm(SrcY)
-            .addImm(SrcZ)
-            .addImm(SrcW)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(0)
-            .addImm(1)
-            .addImm(2)
-            .addImm(3)
-            .addOperand(RID)
-            .addOperand(SID)
-            .addImm(CTX)
-            .addImm(CTY)
-            .addImm(CTZ)
-            .addImm(CTW)
-            .addReg(T0, RegState::Implicit)
-            .addReg(T1, RegState::Implicit);
-    break;
-  }
-
-  case AMDGPU::BRANCH:
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
-              .addOperand(MI->getOperand(0));
-      break;
-
-  case AMDGPU::BRANCH_COND_f32: {
-    // Materialize the predicate with PRED_X, then branch on PREDICATE_BIT.
-    MachineInstr *NewMI =
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
-              AMDGPU::PREDICATE_BIT)
-              .addOperand(MI->getOperand(1))
-              .addImm(OPCODE_IS_NOT_ZERO)
-              .addImm(0); // Flags
-    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
-            .addOperand(MI->getOperand(0))
-            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
-    break;
-  }
-
-  case AMDGPU::BRANCH_COND_i32: {
-    // Same as the f32 case but compares with the integer is-not-zero opcode.
-    MachineInstr *NewMI =
-      BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
-            AMDGPU::PREDICATE_BIT)
-            .addOperand(MI->getOperand(1))
-            .addImm(OPCODE_IS_NOT_ZERO_INT)
-            .addImm(0); // Flags
-    TII->addFlag(NewMI, 0, MO_FLAG_PUSH);
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
-           .addOperand(MI->getOperand(0))
-            .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
-    break;
-  }
-
-  case AMDGPU::EG_ExportSwz:
-  case AMDGPU::R600_ExportSwz: {
-    // Instruction is left unmodified if its not the last one of its type
-    bool isLastInstructionOfItsType = true;
-    unsigned InstExportType = MI->getOperand(1).getImm();
-    for (MachineBasicBlock::iterator NextExportInst = std::next(I),
-         EndBlock = BB->end(); NextExportInst != EndBlock;
-         NextExportInst = std::next(NextExportInst)) {
-      if (NextExportInst->getOpcode() == AMDGPU::EG_ExportSwz ||
-          NextExportInst->getOpcode() == AMDGPU::R600_ExportSwz) {
-        unsigned CurrentInstExportType = NextExportInst->getOperand(1)
-            .getImm();
-        if (CurrentInstExportType == InstExportType) {
-          isLastInstructionOfItsType = false;
-          break;
-        }
-      }
-    }
-    bool EOP = (std::next(I)->getOpcode() == AMDGPU::RETURN) ? 1 : 0;
-    if (!EOP && !isLastInstructionOfItsType)
-      return BB;
-    // CF instruction encoding differs between Evergreen (84) and R600 (40).
-    unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40;
-    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
-            .addOperand(MI->getOperand(0))
-            .addOperand(MI->getOperand(1))
-            .addOperand(MI->getOperand(2))
-            .addOperand(MI->getOperand(3))
-            .addOperand(MI->getOperand(4))
-            .addOperand(MI->getOperand(5))
-            .addOperand(MI->getOperand(6))
-            .addImm(CfInst)
-            .addImm(EOP);
-    break;
-  }
-  case AMDGPU::RETURN: {
-    // RETURN instructions must have the live-out registers as implicit uses,
-    // otherwise they appear dead.
-    R600MachineFunctionInfo *MFI = MF->getInfo<R600MachineFunctionInfo>();
-    MachineInstrBuilder MIB(*MF, MI);
-    for (unsigned i = 0, e = MFI->LiveOuts.size(); i != e; ++i)
-      MIB.addReg(MFI->LiveOuts[i], RegState::Implicit);
-    return BB;
-  }
-  }
-
-  MI->eraseFromParent();
-  return BB;
-}
-
-//===----------------------------------------------------------------------===//
-// Custom DAG Lowering Operations
-//===----------------------------------------------------------------------===//
-
-//===----------------------------------------------------------------------===//
-/// \brief Custom-lower the operations marked Custom in the constructor.
-///
-/// Dispatches on the opcode: vector element access, multi-part shifts,
-/// overflow add/sub, trig, SELECT_CC, loads/stores, branches, and global
-/// addresses go to the dedicated Lower* helpers; INTRINSIC_VOID and
-/// INTRINSIC_WO_CHAIN expand the R600/AMDGPU intrinsics inline (I/O
-/// registers, interpolation, texture fetches, dot product, and implicit
-/// kernel parameters). Returns an empty SDValue when nothing was lowered.
-//===----------------------------------------------------------------------===//
-SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
-  MachineFunction &MF = DAG.getMachineFunction();
-  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
-  switch (Op.getOpcode()) {
-  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
-  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
-  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
-  case ISD::SHL_PARTS: return LowerSHLParts(Op, DAG);
-  case ISD::SRA_PARTS:
-  case ISD::SRL_PARTS: return LowerSRXParts(Op, DAG);
-  case ISD::UADDO: return LowerUADDSUBO(Op, DAG, ISD::ADD, AMDGPUISD::CARRY);
-  case ISD::USUBO: return LowerUADDSUBO(Op, DAG, ISD::SUB, AMDGPUISD::BORROW);
-  case ISD::FCOS:
-  case ISD::FSIN: return LowerTrig(Op, DAG);
-  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
-  case ISD::STORE: return LowerSTORE(Op, DAG);
-  case ISD::LOAD: {
-    SDValue Result = LowerLOAD(Op, DAG);
-    assert((!Result.getNode() ||
-            Result.getNode()->getNumValues() == 2) &&
-           "Load should return a value and a chain");
-    return Result;
-  }
-
-  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
-  case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
-  case ISD::INTRINSIC_VOID: {
-    SDValue Chain = Op.getOperand(0);
-    unsigned IntrinsicID =
-                         cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-    switch (IntrinsicID) {
-    case AMDGPUIntrinsic::AMDGPU_store_output: {
-      // Record the output register as a live-out and copy the value into it.
-      int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
-      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
-      MFI->LiveOuts.push_back(Reg);
-      return DAG.getCopyToReg(Chain, SDLoc(Op), Reg, Op.getOperand(2));
-    }
-    case AMDGPUIntrinsic::R600_store_swizzle: {
-      SDLoc DL(Op);
-      const SDValue Args[8] = {
-        Chain,
-        Op.getOperand(2), // Export Value
-        Op.getOperand(3), // ArrayBase
-        Op.getOperand(4), // Type
-        DAG.getConstant(0, DL, MVT::i32), // SWZ_X
-        DAG.getConstant(1, DL, MVT::i32), // SWZ_Y
-        DAG.getConstant(2, DL, MVT::i32), // SWZ_Z
-        DAG.getConstant(3, DL, MVT::i32) // SWZ_W
-      };
-      return DAG.getNode(AMDGPUISD::EXPORT, DL, Op.getValueType(), Args);
-    }
-
-    // default for switch(IntrinsicID)
-    default: break;
-    }
-    // break out of case ISD::INTRINSIC_VOID in switch(Op.getOpcode())
-    break;
-  }
-  case ISD::INTRINSIC_WO_CHAIN: {
-    unsigned IntrinsicID =
-                         cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
-    EVT VT = Op.getValueType();
-    SDLoc DL(Op);
-    switch(IntrinsicID) {
-    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
-    case AMDGPUIntrinsic::R600_load_input: {
-      // Inputs arrive in T registers; mark the register live-in and read it.
-      int64_t RegIndex = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister(RegIndex);
-      MachineFunction &MF = DAG.getMachineFunction();
-      MachineRegisterInfo &MRI = MF.getRegInfo();
-      MRI.addLiveIn(Reg);
-      return DAG.getCopyFromReg(DAG.getEntryNode(),
-          SDLoc(DAG.getEntryNode()), Reg, VT);
-    }
-
-    case AMDGPUIntrinsic::R600_interp_input: {
-      int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-      int ijb = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
-      MachineSDNode *interp;
-      // A negative i/j base selects the constant (non-interpolated) load path.
-      if (ijb < 0) {
-        const R600InstrInfo *TII =
-            static_cast<const R600InstrInfo *>(Subtarget->getInstrInfo());
-        interp = DAG.getMachineNode(AMDGPU::INTERP_VEC_LOAD, DL,
-            MVT::v4f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32));
-        return DAG.getTargetExtractSubreg(
-            TII->getRegisterInfo().getSubRegFromChannel(slot % 4),
-            DL, MVT::f32, SDValue(interp, 0));
-      }
-      MachineFunction &MF = DAG.getMachineFunction();
-      MachineRegisterInfo &MRI = MF.getRegInfo();
-      unsigned RegisterI = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb);
-      unsigned RegisterJ = AMDGPU::R600_TReg32RegClass.getRegister(2 * ijb + 1);
-      MRI.addLiveIn(RegisterI);
-      MRI.addLiveIn(RegisterJ);
-      SDValue RegisterINode = DAG.getCopyFromReg(DAG.getEntryNode(),
-          SDLoc(DAG.getEntryNode()), RegisterI, MVT::f32);
-      SDValue RegisterJNode = DAG.getCopyFromReg(DAG.getEntryNode(),
-          SDLoc(DAG.getEntryNode()), RegisterJ, MVT::f32);
-
-      // XY pairs come from INTERP_PAIR_XY, ZW pairs from INTERP_PAIR_ZW.
-      if (slot % 4 < 2)
-        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
-            MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
-            RegisterJNode, RegisterINode);
-      else
-        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
-            MVT::f32, MVT::f32, DAG.getTargetConstant(slot / 4, DL, MVT::i32),
-            RegisterJNode, RegisterINode);
-      return SDValue(interp, slot % 2);
-    }
-    case AMDGPUIntrinsic::R600_interp_xy:
-    case AMDGPUIntrinsic::R600_interp_zw: {
-      int slot = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
-      MachineSDNode *interp;
-      SDValue RegisterINode = Op.getOperand(2);
-      SDValue RegisterJNode = Op.getOperand(3);
-
-      if (IntrinsicID == AMDGPUIntrinsic::R600_interp_xy)
-        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_XY, DL,
-            MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
-            RegisterJNode, RegisterINode);
-      else
-        interp = DAG.getMachineNode(AMDGPU::INTERP_PAIR_ZW, DL,
-            MVT::f32, MVT::f32, DAG.getTargetConstant(slot, DL, MVT::i32),
-            RegisterJNode, RegisterINode);
-      return DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f32,
-          SDValue(interp, 0), SDValue(interp, 1));
-    }
-    case AMDGPUIntrinsic::R600_tex:
-    case AMDGPUIntrinsic::R600_texc:
-    case AMDGPUIntrinsic::R600_txl:
-    case AMDGPUIntrinsic::R600_txlc:
-    case AMDGPUIntrinsic::R600_txb:
-    case AMDGPUIntrinsic::R600_txbc:
-    case AMDGPUIntrinsic::R600_txf:
-    case AMDGPUIntrinsic::R600_txq:
-    case AMDGPUIntrinsic::R600_ddx:
-    case AMDGPUIntrinsic::R600_ddy:
-    case AMDGPUIntrinsic::R600_ldptr: {
-      // Map each texture intrinsic to its TEXTURE_FETCH operation code.
-      unsigned TextureOp;
-      switch (IntrinsicID) {
-      case AMDGPUIntrinsic::R600_tex:
-        TextureOp = 0;
-        break;
-      case AMDGPUIntrinsic::R600_texc:
-        TextureOp = 1;
-        break;
-      case AMDGPUIntrinsic::R600_txl:
-        TextureOp = 2;
-        break;
-      case AMDGPUIntrinsic::R600_txlc:
-        TextureOp = 3;
-        break;
-      case AMDGPUIntrinsic::R600_txb:
-        TextureOp = 4;
-        break;
-      case AMDGPUIntrinsic::R600_txbc:
-        TextureOp = 5;
-        break;
-      case AMDGPUIntrinsic::R600_txf:
-        TextureOp = 6;
-        break;
-      case AMDGPUIntrinsic::R600_txq:
-        TextureOp = 7;
-        break;
-      case AMDGPUIntrinsic::R600_ddx:
-        TextureOp = 8;
-        break;
-      case AMDGPUIntrinsic::R600_ddy:
-        TextureOp = 9;
-        break;
-      case AMDGPUIntrinsic::R600_ldptr:
-        TextureOp = 10;
-        break;
-      default:
-        // NOTE(review): "Unknow" is a typo for "Unknown" in this unreachable
-        // message; kept byte-identical here since this is an archived diff.
-        llvm_unreachable("Unknow Texture Operation");
-      }
-
-      SDValue TexArgs[19] = {
-        DAG.getConstant(TextureOp, DL, MVT::i32),
-        Op.getOperand(1),
-        DAG.getConstant(0, DL, MVT::i32),
-        DAG.getConstant(1, DL, MVT::i32),
-        DAG.getConstant(2, DL, MVT::i32),
-        DAG.getConstant(3, DL, MVT::i32),
-        Op.getOperand(2),
-        Op.getOperand(3),
-        Op.getOperand(4),
-        DAG.getConstant(0, DL, MVT::i32),
-        DAG.getConstant(1, DL, MVT::i32),
-        DAG.getConstant(2, DL, MVT::i32),
-        DAG.getConstant(3, DL, MVT::i32),
-        Op.getOperand(5),
-        Op.getOperand(6),
-        Op.getOperand(7),
-        Op.getOperand(8),
-        Op.getOperand(9),
-        Op.getOperand(10)
-      };
-      return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
-    }
-    case AMDGPUIntrinsic::AMDGPU_dp4: {
-      // Extract the four lanes of both operands and feed them to DOT4.
-      SDValue Args[8] = {
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(0, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(0, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(1, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(1, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(2, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(2, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
-          DAG.getConstant(3, DL, MVT::i32)),
-      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
-          DAG.getConstant(3, DL, MVT::i32))
-      };
-      return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
-    }
-
-    // Implicit kernel parameters live at fixed dword slots (0..8).
-    case Intrinsic::r600_read_ngroups_x:
-      return LowerImplicitParameter(DAG, VT, DL, 0);
-    case Intrinsic::r600_read_ngroups_y:
-      return LowerImplicitParameter(DAG, VT, DL, 1);
-    case Intrinsic::r600_read_ngroups_z:
-      return LowerImplicitParameter(DAG, VT, DL, 2);
-    case Intrinsic::r600_read_global_size_x:
-      return LowerImplicitParameter(DAG, VT, DL, 3);
-    case Intrinsic::r600_read_global_size_y:
-      return LowerImplicitParameter(DAG, VT, DL, 4);
-    case Intrinsic::r600_read_global_size_z:
-      return LowerImplicitParameter(DAG, VT, DL, 5);
-    case Intrinsic::r600_read_local_size_x:
-      return LowerImplicitParameter(DAG, VT, DL, 6);
-    case Intrinsic::r600_read_local_size_y:
-      return LowerImplicitParameter(DAG, VT, DL, 7);
-    case Intrinsic::r600_read_local_size_z:
-      return LowerImplicitParameter(DAG, VT, DL, 8);
-
-    case Intrinsic::AMDGPU_read_workdim:
-      return LowerImplicitParameter(DAG, VT, DL, MFI->ABIArgOffset / 4);
-
-    // Group and thread IDs are pre-loaded into fixed T0/T1 channels.
-    case Intrinsic::r600_read_tgid_x:
-      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
-                                  AMDGPU::T1_X, VT);
-    case Intrinsic::r600_read_tgid_y:
-      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
-                                  AMDGPU::T1_Y, VT);
-    case Intrinsic::r600_read_tgid_z:
-      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
-                                  AMDGPU::T1_Z, VT);
-    case Intrinsic::r600_read_tidig_x:
-      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
-                                  AMDGPU::T0_X, VT);
-    case Intrinsic::r600_read_tidig_y:
-      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
-                                  AMDGPU::T0_Y, VT);
-    case Intrinsic::r600_read_tidig_z:
-      return CreateLiveInRegister(DAG, &AMDGPU::R600_TReg32RegClass,
-                                  AMDGPU::T0_Z, VT);
-    case Intrinsic::AMDGPU_rsq:
-      // XXX - I'm assuming SI's RSQ_LEGACY matches R600's behavior.
-      return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_fract:
-    case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
-      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
-    }
-    // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
-    break;
-  }
-  } // end switch(Op.getOpcode())
-  return SDValue();
-}
-
-void R600TargetLowering::ReplaceNodeResults(SDNode *N,
-                                            SmallVectorImpl<SDValue> &Results,
-                                            SelectionDAG &DAG) const {
-  switch (N->getOpcode()) {
-  default:
-    AMDGPUTargetLowering::ReplaceNodeResults(N, Results, DAG);
-    return;
-  case ISD::FP_TO_UINT:
-    if (N->getValueType(0) == MVT::i1) {
-      Results.push_back(LowerFPTOUINT(N->getOperand(0), DAG));
-      return;
-    }
-    // Fall-through. Since we don't care about out of bounds values
-    // we can use FP_TO_SINT for uints too. The DAGLegalizer code for uint
-    // considers some extra cases which are not necessary here.
-  case ISD::FP_TO_SINT: {
-    SDValue Result;
-    if (expandFP_TO_SINT(N, Result, DAG))
-      Results.push_back(Result);
-    return;
-  }
-  case ISD::SDIVREM: {
-    SDValue Op = SDValue(N, 1);
-    SDValue RES = LowerSDIVREM(Op, DAG);
-    Results.push_back(RES);
-    Results.push_back(RES.getValue(1));
-    break;
-  }
-  case ISD::UDIVREM: {
-    SDValue Op = SDValue(N, 0);
-    LowerUDIVREM64(Op, DAG, Results);
-    break;
-  }
-  }
-}
-
-SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
-                                                   SDValue Vector) const {
-
-  SDLoc DL(Vector);
-  EVT VecVT = Vector.getValueType();
-  EVT EltVT = VecVT.getVectorElementType();
-  SmallVector<SDValue, 8> Args;
-
-  for (unsigned i = 0, e = VecVT.getVectorNumElements();
-                                                           i != e; ++i) {
-    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
-                               DAG.getConstant(i, DL, getVectorIdxTy())));
-  }
-
-  return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
-}
-
-SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
-                                                    SelectionDAG &DAG) const {
-
-  SDLoc DL(Op);
-  SDValue Vector = Op.getOperand(0);
-  SDValue Index = Op.getOperand(1);
-
-  if (isa<ConstantSDNode>(Index) ||
-      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
-    return Op;
-
-  Vector = vectorToVerticalVector(DAG, Vector);
-  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
-                     Vector, Index);
-}
-
-SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
-                                                   SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  SDValue Vector = Op.getOperand(0);
-  SDValue Value = Op.getOperand(1);
-  SDValue Index = Op.getOperand(2);
-
-  if (isa<ConstantSDNode>(Index) ||
-      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
-    return Op;
-
-  Vector = vectorToVerticalVector(DAG, Vector);
-  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(),
-                               Vector, Value, Index);
-  return vectorToVerticalVector(DAG, Insert);
-}
-
-SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
-  // On hw >= R700, COS/SIN input must be between -1. and 1.
-  // Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5)
-  EVT VT = Op.getValueType();
-  SDValue Arg = Op.getOperand(0);
-  SDLoc DL(Op);
-  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
-      DAG.getNode(ISD::FADD, DL, VT,
-        DAG.getNode(ISD::FMUL, DL, VT, Arg,
-          DAG.getConstantFP(0.15915494309, DL, MVT::f32)),
-        DAG.getConstantFP(0.5, DL, MVT::f32)));
-  unsigned TrigNode;
-  switch (Op.getOpcode()) {
-  case ISD::FCOS:
-    TrigNode = AMDGPUISD::COS_HW;
-    break;
-  case ISD::FSIN:
-    TrigNode = AMDGPUISD::SIN_HW;
-    break;
-  default:
-    llvm_unreachable("Wrong trig opcode");
-  }
-  SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
-      DAG.getNode(ISD::FADD, DL, VT, FractPart,
-        DAG.getConstantFP(-0.5, DL, MVT::f32)));
-  if (Gen >= AMDGPUSubtarget::R700)
-    return TrigVal;
-  // On R600 hw, COS/SIN input must be between -Pi and Pi.
-  return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
-      DAG.getConstantFP(3.14159265359, DL, MVT::f32));
-}
-
-SDValue R600TargetLowering::LowerSHLParts(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  SDValue Lo = Op.getOperand(0);
-  SDValue Hi = Op.getOperand(1);
-  SDValue Shift = Op.getOperand(2);
-  SDValue Zero = DAG.getConstant(0, DL, VT);
-  SDValue One  = DAG.getConstant(1, DL, VT);
-
-  SDValue Width  = DAG.getConstant(VT.getSizeInBits(), DL, VT);
-  SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
-  SDValue BigShift  = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
-  SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
-
-  // The dance around Width1 is necessary for 0 special case.
-  // Without it the CompShift might be 32, producing incorrect results in
-  // Overflow. So we do the shift in two steps, the alternative is to
-  // add a conditional to filter the special case.
-
-  SDValue Overflow = DAG.getNode(ISD::SRL, DL, VT, Lo, CompShift);
-  Overflow = DAG.getNode(ISD::SRL, DL, VT, Overflow, One);
-
-  SDValue HiSmall = DAG.getNode(ISD::SHL, DL, VT, Hi, Shift);
-  HiSmall = DAG.getNode(ISD::OR, DL, VT, HiSmall, Overflow);
-  SDValue LoSmall = DAG.getNode(ISD::SHL, DL, VT, Lo, Shift);
-
-  SDValue HiBig = DAG.getNode(ISD::SHL, DL, VT, Lo, BigShift);
-  SDValue LoBig = Zero;
-
-  Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
-  Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);
-
-  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
-}
-
-SDValue R600TargetLowering::LowerSRXParts(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  SDValue Lo = Op.getOperand(0);
-  SDValue Hi = Op.getOperand(1);
-  SDValue Shift = Op.getOperand(2);
-  SDValue Zero = DAG.getConstant(0, DL, VT);
-  SDValue One  = DAG.getConstant(1, DL, VT);
-
-  const bool SRA = Op.getOpcode() == ISD::SRA_PARTS;
-
-  SDValue Width  = DAG.getConstant(VT.getSizeInBits(), DL, VT);
-  SDValue Width1 = DAG.getConstant(VT.getSizeInBits() - 1, DL, VT);
-  SDValue BigShift  = DAG.getNode(ISD::SUB, DL, VT, Shift, Width);
-  SDValue CompShift = DAG.getNode(ISD::SUB, DL, VT, Width1, Shift);
-
-  // The dance around Width1 is necessary for 0 special case.
-  // Without it the CompShift might be 32, producing incorrect results in
-  // Overflow. So we do the shift in two steps, the alternative is to
-  // add a conditional to filter the special case.
-
-  SDValue Overflow = DAG.getNode(ISD::SHL, DL, VT, Hi, CompShift);
-  Overflow = DAG.getNode(ISD::SHL, DL, VT, Overflow, One);
-
-  SDValue HiSmall = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, Shift);
-  SDValue LoSmall = DAG.getNode(ISD::SRL, DL, VT, Lo, Shift);
-  LoSmall = DAG.getNode(ISD::OR, DL, VT, LoSmall, Overflow);
-
-  SDValue LoBig = DAG.getNode(SRA ? ISD::SRA : ISD::SRL, DL, VT, Hi, BigShift);
-  SDValue HiBig = SRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, Width1) : Zero;
-
-  Hi = DAG.getSelectCC(DL, Shift, Width, HiSmall, HiBig, ISD::SETULT);
-  Lo = DAG.getSelectCC(DL, Shift, Width, LoSmall, LoBig, ISD::SETULT);
-
-  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT,VT), Lo, Hi);
-}
-
-SDValue R600TargetLowering::LowerUADDSUBO(SDValue Op, SelectionDAG &DAG,
-                                          unsigned mainop, unsigned ovf) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  SDValue Lo = Op.getOperand(0);
-  SDValue Hi = Op.getOperand(1);
-
-  SDValue OVF = DAG.getNode(ovf, DL, VT, Lo, Hi);
-  // Extend sign.
-  OVF = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, OVF,
-                    DAG.getValueType(MVT::i1));
-
-  SDValue Res = DAG.getNode(mainop, DL, VT, Lo, Hi);
-
-  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT, VT), Res, OVF);
-}
-
-SDValue R600TargetLowering::LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  return DAG.getNode(
-      ISD::SETCC,
-      DL,
-      MVT::i1,
-      Op, DAG.getConstantFP(0.0f, DL, MVT::f32),
-      DAG.getCondCode(ISD::SETNE)
-      );
-}
-
-SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
-                                                   SDLoc DL,
-                                                   unsigned DwordOffset) const {
-  unsigned ByteOffset = DwordOffset * 4;
-  PointerType * PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
-                                      AMDGPUAS::CONSTANT_BUFFER_0);
-
-  // We shouldn't be using an offset wider than 16-bits for implicit parameters.
-  assert(isInt<16>(ByteOffset));
-
-  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
-                     DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
-                     MachinePointerInfo(ConstantPointerNull::get(PtrType)),
-                     false, false, false, 0);
-}
-
-bool R600TargetLowering::isZero(SDValue Op) const {
-  if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
-    return Cst->isNullValue();
-  } else if(ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)){
-    return CstFP->isZero();
-  } else {
-    return false;
-  }
-}
-
-SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  EVT VT = Op.getValueType();
-
-  SDValue LHS = Op.getOperand(0);
-  SDValue RHS = Op.getOperand(1);
-  SDValue True = Op.getOperand(2);
-  SDValue False = Op.getOperand(3);
-  SDValue CC = Op.getOperand(4);
-  SDValue Temp;
-
-  if (VT == MVT::f32) {
-    DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
-    SDValue MinMax = CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
-    if (MinMax)
-      return MinMax;
-  }
-
-  // LHS and RHS are guaranteed to be the same value type
-  EVT CompareVT = LHS.getValueType();
-
-  // Check if we can lower this to a native operation.
-
-  // Try to lower to a SET* instruction:
-  //
-  // SET* can match the following patterns:
-  //
-  // select_cc f32, f32, -1,  0, cc_supported
-  // select_cc f32, f32, 1.0f, 0.0f, cc_supported
-  // select_cc i32, i32, -1,  0, cc_supported
-  //
-
-  // Move hardware True/False values to the correct operand.
-  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
-  ISD::CondCode InverseCC =
-     ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
-  if (isHWTrueValue(False) && isHWFalseValue(True)) {
-    if (isCondCodeLegal(InverseCC, CompareVT.getSimpleVT())) {
-      std::swap(False, True);
-      CC = DAG.getCondCode(InverseCC);
-    } else {
-      ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InverseCC);
-      if (isCondCodeLegal(SwapInvCC, CompareVT.getSimpleVT())) {
-        std::swap(False, True);
-        std::swap(LHS, RHS);
-        CC = DAG.getCondCode(SwapInvCC);
-      }
-    }
-  }
-
-  if (isHWTrueValue(True) && isHWFalseValue(False) &&
-      (CompareVT == VT || VT == MVT::i32)) {
-    // This can be matched by a SET* instruction.
-    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
-  }
-
-  // Try to lower to a CND* instruction:
-  //
-  // CND* can match the following patterns:
-  //
-  // select_cc f32, 0.0, f32, f32, cc_supported
-  // select_cc f32, 0.0, i32, i32, cc_supported
-  // select_cc i32, 0,   f32, f32, cc_supported
-  // select_cc i32, 0,   i32, i32, cc_supported
-  //
-
-  // Try to move the zero value to the RHS
-  if (isZero(LHS)) {
-    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
-    // Try swapping the operands
-    ISD::CondCode CCSwapped = ISD::getSetCCSwappedOperands(CCOpcode);
-    if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
-      std::swap(LHS, RHS);
-      CC = DAG.getCondCode(CCSwapped);
-    } else {
-      // Try inverting the conditon and then swapping the operands
-      ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT.isInteger());
-      CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
-      if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
-        std::swap(True, False);
-        std::swap(LHS, RHS);
-        CC = DAG.getCondCode(CCSwapped);
-      }
-    }
-  }
-  if (isZero(RHS)) {
-    SDValue Cond = LHS;
-    SDValue Zero = RHS;
-    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
-    if (CompareVT != VT) {
-      // Bitcast True / False to the correct types.  This will end up being
-      // a nop, but it allows us to define only a single pattern in the
-      // .TD files for each CND* instruction rather than having to have
-      // one pattern for integer True/False and one for fp True/False
-      True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
-      False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
-    }
-
-    switch (CCOpcode) {
-    case ISD::SETONE:
-    case ISD::SETUNE:
-    case ISD::SETNE:
-      CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT == MVT::i32);
-      Temp = True;
-      True = False;
-      False = Temp;
-      break;
-    default:
-      break;
-    }
-    SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, CompareVT,
-        Cond, Zero,
-        True, False,
-        DAG.getCondCode(CCOpcode));
-    return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
-  }
-
-  // If we make it this for it means we have no native instructions to handle
-  // this SELECT_CC, so we must lower it.
-  SDValue HWTrue, HWFalse;
-
-  if (CompareVT == MVT::f32) {
-    HWTrue = DAG.getConstantFP(1.0f, DL, CompareVT);
-    HWFalse = DAG.getConstantFP(0.0f, DL, CompareVT);
-  } else if (CompareVT == MVT::i32) {
-    HWTrue = DAG.getConstant(-1, DL, CompareVT);
-    HWFalse = DAG.getConstant(0, DL, CompareVT);
-  }
-  else {
-    llvm_unreachable("Unhandled value type in LowerSELECT_CC");
-  }
-
-  // Lower this unsupported SELECT_CC into a combination of two supported
-  // SELECT_CC operations.
-  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, CompareVT, LHS, RHS, HWTrue, HWFalse, CC);
-
-  return DAG.getNode(ISD::SELECT_CC, DL, VT,
-      Cond, HWFalse,
-      True, False,
-      DAG.getCondCode(ISD::SETNE));
-}
-
-/// LLVM generates byte-addressed pointers.  For indirect addressing, we need to
-/// convert these pointers to a register index.  Each register holds
-/// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
-/// \p StackWidth, which tells us how many of the 4 sub-registrers will be used
-/// for indirect addressing.
-SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
-                                               unsigned StackWidth,
-                                               SelectionDAG &DAG) const {
-  unsigned SRLPad;
-  switch(StackWidth) {
-  case 1:
-    SRLPad = 2;
-    break;
-  case 2:
-    SRLPad = 3;
-    break;
-  case 4:
-    SRLPad = 4;
-    break;
-  default: llvm_unreachable("Invalid stack width");
-  }
-
-  SDLoc DL(Ptr);
-  return DAG.getNode(ISD::SRL, DL, Ptr.getValueType(), Ptr,
-                     DAG.getConstant(SRLPad, DL, MVT::i32));
-}
-
-void R600TargetLowering::getStackAddress(unsigned StackWidth,
-                                         unsigned ElemIdx,
-                                         unsigned &Channel,
-                                         unsigned &PtrIncr) const {
-  switch (StackWidth) {
-  default:
-  case 1:
-    Channel = 0;
-    if (ElemIdx > 0) {
-      PtrIncr = 1;
-    } else {
-      PtrIncr = 0;
-    }
-    break;
-  case 2:
-    Channel = ElemIdx % 2;
-    if (ElemIdx == 2) {
-      PtrIncr = 1;
-    } else {
-      PtrIncr = 0;
-    }
-    break;
-  case 4:
-    Channel = ElemIdx;
-    PtrIncr = 0;
-    break;
-  }
-}
-
-SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
-  SDLoc DL(Op);
-  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
-  SDValue Chain = Op.getOperand(0);
-  SDValue Value = Op.getOperand(1);
-  SDValue Ptr = Op.getOperand(2);
-
-  SDValue Result = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
-  if (Result.getNode()) {
-    return Result;
-  }
-
-  if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS) {
-    if (StoreNode->isTruncatingStore()) {
-      EVT VT = Value.getValueType();
-      assert(VT.bitsLE(MVT::i32));
-      EVT MemVT = StoreNode->getMemoryVT();
-      SDValue MaskConstant;
-      if (MemVT == MVT::i8) {
-        MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
-      } else {
-        assert(MemVT == MVT::i16);
-        MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
-      }
-      SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, VT, Ptr,
-                                      DAG.getConstant(2, DL, MVT::i32));
-      SDValue ByteIndex = DAG.getNode(ISD::AND, DL, Ptr.getValueType(), Ptr,
-                                      DAG.getConstant(0x00000003, DL, VT));
-      SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
-      SDValue Shift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
-                                   DAG.getConstant(3, DL, VT));
-      SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, Shift);
-      SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, Shift);
-      // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
-      // vector instead.
-      SDValue Src[4] = {
-        ShiftedValue,
-        DAG.getConstant(0, DL, MVT::i32),
-        DAG.getConstant(0, DL, MVT::i32),
-        Mask
-      };
-      SDValue Input = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i32, Src);
-      SDValue Args[3] = { Chain, Input, DWordAddr };
-      return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
-                                     Op->getVTList(), Args, MemVT,
-                                     StoreNode->getMemOperand());
-    } else if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR &&
-               Value.getValueType().bitsGE(MVT::i32)) {
-      // Convert pointer from byte address to dword address.
-      Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, Ptr.getValueType(),
-                        DAG.getNode(ISD::SRL, DL, Ptr.getValueType(),
-                                    Ptr, DAG.getConstant(2, DL, MVT::i32)));
-
-      if (StoreNode->isTruncatingStore() || StoreNode->isIndexed()) {
-        llvm_unreachable("Truncated and indexed stores not supported yet");
-      } else {
-        Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
-      }
-      return Chain;
-    }
-  }
-
-  EVT ValueVT = Value.getValueType();
-
-  if (StoreNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
-    return SDValue();
-  }
-
-  SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
-  if (Ret.getNode()) {
-    return Ret;
-  }
-  // Lowering for indirect addressing
-
-  const MachineFunction &MF = DAG.getMachineFunction();
-  const AMDGPUFrameLowering *TFL =
-      static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
-  unsigned StackWidth = TFL->getStackWidth(MF);
-
-  Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
-
-  if (ValueVT.isVector()) {
-    unsigned NumElemVT = ValueVT.getVectorNumElements();
-    EVT ElemVT = ValueVT.getVectorElementType();
-    SmallVector<SDValue, 4> Stores(NumElemVT);
-
-    assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
-                                      "vector width in load");
-
-    for (unsigned i = 0; i < NumElemVT; ++i) {
-      unsigned Channel, PtrIncr;
-      getStackAddress(StackWidth, i, Channel, PtrIncr);
-      Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
-                        DAG.getConstant(PtrIncr, DL, MVT::i32));
-      SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT,
-                                 Value, DAG.getConstant(i, DL, MVT::i32));
-
-      Stores[i] = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
-                              Chain, Elem, Ptr,
-                              DAG.getTargetConstant(Channel, DL, MVT::i32));
-    }
-     Chain =  DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
-   } else {
-    if (ValueVT == MVT::i8) {
-      Value = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Value);
-    }
-    Chain = DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other, Chain, Value, Ptr,
-    DAG.getTargetConstant(0, DL, MVT::i32)); // Channel
-  }
-
-  return Chain;
-}
-
-// return (512 + (kc_bank << 12)
-static int
-ConstantAddressBlock(unsigned AddressSpace) {
-  switch (AddressSpace) {
-  case AMDGPUAS::CONSTANT_BUFFER_0:
-    return 512;
-  case AMDGPUAS::CONSTANT_BUFFER_1:
-    return 512 + 4096;
-  case AMDGPUAS::CONSTANT_BUFFER_2:
-    return 512 + 4096 * 2;
-  case AMDGPUAS::CONSTANT_BUFFER_3:
-    return 512 + 4096 * 3;
-  case AMDGPUAS::CONSTANT_BUFFER_4:
-    return 512 + 4096 * 4;
-  case AMDGPUAS::CONSTANT_BUFFER_5:
-    return 512 + 4096 * 5;
-  case AMDGPUAS::CONSTANT_BUFFER_6:
-    return 512 + 4096 * 6;
-  case AMDGPUAS::CONSTANT_BUFFER_7:
-    return 512 + 4096 * 7;
-  case AMDGPUAS::CONSTANT_BUFFER_8:
-    return 512 + 4096 * 8;
-  case AMDGPUAS::CONSTANT_BUFFER_9:
-    return 512 + 4096 * 9;
-  case AMDGPUAS::CONSTANT_BUFFER_10:
-    return 512 + 4096 * 10;
-  case AMDGPUAS::CONSTANT_BUFFER_11:
-    return 512 + 4096 * 11;
-  case AMDGPUAS::CONSTANT_BUFFER_12:
-    return 512 + 4096 * 12;
-  case AMDGPUAS::CONSTANT_BUFFER_13:
-    return 512 + 4096 * 13;
-  case AMDGPUAS::CONSTANT_BUFFER_14:
-    return 512 + 4096 * 14;
-  case AMDGPUAS::CONSTANT_BUFFER_15:
-    return 512 + 4096 * 15;
-  default:
-    return -1;
-  }
-}
-
-SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
-{
-  EVT VT = Op.getValueType();
-  SDLoc DL(Op);
-  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
-  SDValue Chain = Op.getOperand(0);
-  SDValue Ptr = Op.getOperand(1);
-  SDValue LoweredLoad;
-
-  SDValue Ret = AMDGPUTargetLowering::LowerLOAD(Op, DAG);
-  if (Ret.getNode()) {
-    SDValue Ops[2] = {
-      Ret,
-      Chain
-    };
-    return DAG.getMergeValues(Ops, DL);
-  }
-
-  // Lower loads constant address space global variable loads
-  if (LoadNode->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
-      isa<GlobalVariable>(GetUnderlyingObject(
-          LoadNode->getMemOperand()->getValue(), *getDataLayout()))) {
-
-    SDValue Ptr = DAG.getZExtOrTrunc(LoadNode->getBasePtr(), DL,
-        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
-    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
-        DAG.getConstant(2, DL, MVT::i32));
-    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op->getVTList(),
-                       LoadNode->getChain(), Ptr,
-                       DAG.getTargetConstant(0, DL, MVT::i32),
-                       Op.getOperand(2));
-  }
-
-  if (LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS && VT.isVector()) {
-    SDValue MergedValues[2] = {
-      ScalarizeVectorLoad(Op, DAG),
-      Chain
-    };
-    return DAG.getMergeValues(MergedValues, DL);
-  }
-
-  int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
-  if (ConstantBlock > -1 &&
-      ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
-       (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
-    SDValue Result;
-    if (isa<ConstantExpr>(LoadNode->getMemOperand()->getValue()) ||
-        isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
-        isa<ConstantSDNode>(Ptr)) {
-      SDValue Slots[4];
-      for (unsigned i = 0; i < 4; i++) {
-        // We want Const position encoded with the following formula :
-        // (((512 + (kc_bank << 12) + const_index) << 2) + chan)
-        // const_index is Ptr computed by llvm using an alignment of 16.
-        // Thus we add (((512 + (kc_bank << 12)) + chan ) * 4 here and
-        // then div by 4 at the ISel step
-        SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
-            DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
-        Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
-      }
-      EVT NewVT = MVT::v4i32;
-      unsigned NumElements = 4;
-      if (VT.isVector()) {
-        NewVT = VT;
-        NumElements = VT.getVectorNumElements();
-      }
-      Result = DAG.getNode(ISD::BUILD_VECTOR, DL, NewVT,
-                           makeArrayRef(Slots, NumElements));
-    } else {
-      // non-constant ptr can't be folded, keeps it as a v4f32 load
-      Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
-          DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
-                      DAG.getConstant(4, DL, MVT::i32)),
-                      DAG.getConstant(LoadNode->getAddressSpace() -
-                                      AMDGPUAS::CONSTANT_BUFFER_0, DL, MVT::i32)
-          );
-    }
-
-    if (!VT.isVector()) {
-      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
-                           DAG.getConstant(0, DL, MVT::i32));
-    }
-
-    SDValue MergedValues[2] = {
-      Result,
-      Chain
-    };
-    return DAG.getMergeValues(MergedValues, DL);
-  }
-
-  // For most operations returning SDValue() will result in the node being
-  // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
-  // need to manually expand loads that may be legal in some address spaces and
-  // illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported for
-  // compute shaders, since the data is sign extended when it is uploaded to the
-  // buffer. However SEXT loads from other address spaces are not supported, so
-  // we need to expand them here.
-  if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
-    EVT MemVT = LoadNode->getMemoryVT();
-    assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
-    SDValue NewLoad = DAG.getExtLoad(ISD::EXTLOAD, DL, VT, Chain, Ptr,
-                                  LoadNode->getPointerInfo(), MemVT,
-                                  LoadNode->isVolatile(),
-                                  LoadNode->isNonTemporal(),
-                                  LoadNode->isInvariant(),
-                                  LoadNode->getAlignment());
-    SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
-                              DAG.getValueType(MemVT));
-
-    SDValue MergedValues[2] = { Res, Chain };
-    return DAG.getMergeValues(MergedValues, DL);
-  }
-
-  if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
-    return SDValue();
-  }
-
-  // Lowering for indirect addressing
-  const MachineFunction &MF = DAG.getMachineFunction();
-  const AMDGPUFrameLowering *TFL =
-      static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
-  unsigned StackWidth = TFL->getStackWidth(MF);
-
-  Ptr = stackPtrToRegIndex(Ptr, StackWidth, DAG);
-
-  if (VT.isVector()) {
-    unsigned NumElemVT = VT.getVectorNumElements();
-    EVT ElemVT = VT.getVectorElementType();
-    SDValue Loads[4];
-
-    assert(NumElemVT >= StackWidth && "Stack width cannot be greater than "
-                                      "vector width in load");
-
-    for (unsigned i = 0; i < NumElemVT; ++i) {
-      unsigned Channel, PtrIncr;
-      getStackAddress(StackWidth, i, Channel, PtrIncr);
-      Ptr = DAG.getNode(ISD::ADD, DL, MVT::i32, Ptr,
-                        DAG.getConstant(PtrIncr, DL, MVT::i32));
-      Loads[i] = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, ElemVT,
-                             Chain, Ptr,
-                             DAG.getTargetConstant(Channel, DL, MVT::i32),
-                             Op.getOperand(2));
-    }
-    for (unsigned i = NumElemVT; i < 4; ++i) {
-      Loads[i] = DAG.getUNDEF(ElemVT);
-    }
-    EVT TargetVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, 4);
-    LoweredLoad = DAG.getNode(ISD::BUILD_VECTOR, DL, TargetVT, Loads);
-  } else {
-    LoweredLoad = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, VT,
-                              Chain, Ptr,
-                              DAG.getTargetConstant(0, DL, MVT::i32), // Channel
-                              Op.getOperand(2));
-  }
-
-  SDValue Ops[2] = {
-    LoweredLoad,
-    Chain
-  };
-
-  return DAG.getMergeValues(Ops, DL);
-}
-
-SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
-  SDValue Chain = Op.getOperand(0);
-  SDValue Cond  = Op.getOperand(1);
-  SDValue Jump  = Op.getOperand(2);
-
-  return DAG.getNode(AMDGPUISD::BRANCH_COND, SDLoc(Op), Op.getValueType(),
-                     Chain, Jump, Cond);
-}
-
-/// XXX Only kernel functions are supported, so we can assume for now that
-/// every function is a kernel function, but in the future we should use
-/// separate calling conventions for kernel and non-kernel functions.
-SDValue R600TargetLowering::LowerFormalArguments(
-                                      SDValue Chain,
-                                      CallingConv::ID CallConv,
-                                      bool isVarArg,
-                                      const SmallVectorImpl<ISD::InputArg> &Ins,
-                                      SDLoc DL, SelectionDAG &DAG,
-                                      SmallVectorImpl<SDValue> &InVals) const {
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
-                 *DAG.getContext());
-  MachineFunction &MF = DAG.getMachineFunction();
-  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
-
-  SmallVector<ISD::InputArg, 8> LocalIns;
-
-  getOriginalFunctionArgs(DAG, MF.getFunction(), Ins, LocalIns);
-
-  AnalyzeFormalArguments(CCInfo, LocalIns);
-
-  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
-    CCValAssign &VA = ArgLocs[i];
-    const ISD::InputArg &In = Ins[i];
-    EVT VT = In.VT;
-    EVT MemVT = VA.getLocVT();
-    if (!VT.isVector() && MemVT.isVector()) {
-      // Get load source type if scalarized.
-      MemVT = MemVT.getVectorElementType();
-    }
-
-    if (MFI->getShaderType() != ShaderType::COMPUTE) {
-      unsigned Reg = MF.addLiveIn(VA.getLocReg(), &AMDGPU::R600_Reg128RegClass);
-      SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
-      InVals.push_back(Register);
-      continue;
-    }
-
-    PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
-                                          AMDGPUAS::CONSTANT_BUFFER_0);
-
-    // i64 isn't a legal type, so the register type used ends up as i32, which
-    // isn't expected here. It attempts to create this sextload, but it ends up
-    // being invalid. Somehow this seems to work with i64 arguments, but breaks
-    // for <1 x i64>.
-
-    // The first 36 bytes of the input buffer contains information about
-    // thread group and global sizes.
-    ISD::LoadExtType Ext = ISD::NON_EXTLOAD;
-    if (MemVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) {
-      // FIXME: This should really check the extload type, but the handling of
-      // extload vector parameters seems to be broken.
-
-      // Ext = In.Flags.isSExt() ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
-      Ext = ISD::SEXTLOAD;
-    }
-
-    // Compute the offset from the value.
-    // XXX - I think PartOffset should give you this, but it seems to give the
-    // size of the register which isn't useful.
-
-    unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
-    unsigned PartOffset = VA.getLocMemOffset();
-    unsigned Offset = 36 + VA.getLocMemOffset();
-
-    MachinePointerInfo PtrInfo(UndefValue::get(PtrTy), PartOffset - ValBase);
-    SDValue Arg = DAG.getLoad(ISD::UNINDEXED, Ext, VT, DL, Chain,
-                              DAG.getConstant(Offset, DL, MVT::i32),
-                              DAG.getUNDEF(MVT::i32),
-                              PtrInfo,
-                              MemVT, false, true, true, 4);
-
-    // 4 is the preferred alignment for the CONSTANT memory space.
-    InVals.push_back(Arg);
-    MFI->ABIArgOffset = Offset + MemVT.getStoreSize();
-  }
-  return Chain;
-}
-
-EVT R600TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
-   if (!VT.isVector())
-     return MVT::i32;
-   return VT.changeVectorElementTypeToInteger();
-}
-
-static SDValue CompactSwizzlableVector(
-  SelectionDAG &DAG, SDValue VectorEntry,
-  DenseMap<unsigned, unsigned> &RemapSwizzle) {
-  assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
-  assert(RemapSwizzle.empty());
-  SDValue NewBldVec[4] = {
-    VectorEntry.getOperand(0),
-    VectorEntry.getOperand(1),
-    VectorEntry.getOperand(2),
-    VectorEntry.getOperand(3)
-  };
-
-  for (unsigned i = 0; i < 4; i++) {
-    if (NewBldVec[i].getOpcode() == ISD::UNDEF)
-      // We mask write here to teach later passes that the ith element of this
-      // vector is undef. Thus we can use it to reduce 128 bits reg usage,
-      // break false dependencies and additionally make assembly easier to read.
-      RemapSwizzle[i] = 7; // SEL_MASK_WRITE
-    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
-      if (C->isZero()) {
-        RemapSwizzle[i] = 4; // SEL_0
-        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
-      } else if (C->isExactlyValue(1.0)) {
-        RemapSwizzle[i] = 5; // SEL_1
-        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
-      }
-    }
-
-    if (NewBldVec[i].getOpcode() == ISD::UNDEF)
-      continue;
-    for (unsigned j = 0; j < i; j++) {
-      if (NewBldVec[i] == NewBldVec[j]) {
-        NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
-        RemapSwizzle[i] = j;
-        break;
-      }
-    }
-  }
-
-  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
-                     VectorEntry.getValueType(), NewBldVec);
-}
-
-static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
-                                DenseMap<unsigned, unsigned> &RemapSwizzle) {
-  assert(VectorEntry.getOpcode() == ISD::BUILD_VECTOR);
-  assert(RemapSwizzle.empty());
-  SDValue NewBldVec[4] = {
-      VectorEntry.getOperand(0),
-      VectorEntry.getOperand(1),
-      VectorEntry.getOperand(2),
-      VectorEntry.getOperand(3)
-  };
-  bool isUnmovable[4] = { false, false, false, false };
-  for (unsigned i = 0; i < 4; i++) {
-    RemapSwizzle[i] = i;
-    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
-      unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
-          ->getZExtValue();
-      if (i == Idx)
-        isUnmovable[Idx] = true;
-    }
-  }
-
-  for (unsigned i = 0; i < 4; i++) {
-    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
-      unsigned Idx = dyn_cast<ConstantSDNode>(NewBldVec[i].getOperand(1))
-          ->getZExtValue();
-      if (isUnmovable[Idx])
-        continue;
-      // Swap i and Idx
-      std::swap(NewBldVec[Idx], NewBldVec[i]);
-      std::swap(RemapSwizzle[i], RemapSwizzle[Idx]);
-      break;
-    }
-  }
-
-  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(VectorEntry),
-                     VectorEntry.getValueType(), NewBldVec);
-}
-
-
-SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector,
-                                            SDValue Swz[4], SelectionDAG &DAG,
-                                            SDLoc DL) const {
-  assert(BuildVector.getOpcode() == ISD::BUILD_VECTOR);
-  // Old -> New swizzle values
-  DenseMap<unsigned, unsigned> SwizzleRemap;
-
-  BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
-  for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
-    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
-      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
-  }
-
-  SwizzleRemap.clear();
-  BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
-  for (unsigned i = 0; i < 4; i++) {
-    unsigned Idx = cast<ConstantSDNode>(Swz[i])->getZExtValue();
-    if (SwizzleRemap.find(Idx) != SwizzleRemap.end())
-      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
-  }
-
-  return BuildVector;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Custom DAG Optimizations
-//===----------------------------------------------------------------------===//
-
-SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
-                                              DAGCombinerInfo &DCI) const {
-  SelectionDAG &DAG = DCI.DAG;
-
-  switch (N->getOpcode()) {
-  default: return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
-  // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
-  case ISD::FP_ROUND: {
-      SDValue Arg = N->getOperand(0);
-      if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
-        return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), N->getValueType(0),
-                           Arg.getOperand(0));
-      }
-      break;
-    }
-
-  // (i32 fp_to_sint (fneg (select_cc f32, f32, 1.0, 0.0 cc))) ->
-  // (i32 select_cc f32, f32, -1, 0 cc)
-  //
-  // Mesa's GLSL frontend generates the above pattern a lot and we can lower
-  // this to one of the SET*_DX10 instructions.
-  case ISD::FP_TO_SINT: {
-    SDValue FNeg = N->getOperand(0);
-    if (FNeg.getOpcode() != ISD::FNEG) {
-      return SDValue();
-    }
-    SDValue SelectCC = FNeg.getOperand(0);
-    if (SelectCC.getOpcode() != ISD::SELECT_CC ||
-        SelectCC.getOperand(0).getValueType() != MVT::f32 || // LHS
-        SelectCC.getOperand(2).getValueType() != MVT::f32 || // True
-        !isHWTrueValue(SelectCC.getOperand(2)) ||
-        !isHWFalseValue(SelectCC.getOperand(3))) {
-      return SDValue();
-    }
-
-    SDLoc dl(N);
-    return DAG.getNode(ISD::SELECT_CC, dl, N->getValueType(0),
-                           SelectCC.getOperand(0), // LHS
-                           SelectCC.getOperand(1), // RHS
-                           DAG.getConstant(-1, dl, MVT::i32), // True
-                           DAG.getConstant(0, dl, MVT::i32),  // False
-                           SelectCC.getOperand(4)); // CC
-
-    break;
-  }
-
-  // insert_vector_elt (build_vector elt0, ... , eltN), NewEltIdx, idx
-  // => build_vector elt0, ... , NewEltIdx, ... , eltN
-  case ISD::INSERT_VECTOR_ELT: {
-    SDValue InVec = N->getOperand(0);
-    SDValue InVal = N->getOperand(1);
-    SDValue EltNo = N->getOperand(2);
-    SDLoc dl(N);
-
-    // If the inserted element is an UNDEF, just use the input vector.
-    if (InVal.getOpcode() == ISD::UNDEF)
-      return InVec;
-
-    EVT VT = InVec.getValueType();
-
-    // If we can't generate a legal BUILD_VECTOR, exit
-    if (!isOperationLegal(ISD::BUILD_VECTOR, VT))
-      return SDValue();
-
-    // Check that we know which element is being inserted
-    if (!isa<ConstantSDNode>(EltNo))
-      return SDValue();
-    unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
-
-    // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
-    // be converted to a BUILD_VECTOR).  Fill in the Ops vector with the
-    // vector elements.
-    SmallVector<SDValue, 8> Ops;
-    if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
-      Ops.append(InVec.getNode()->op_begin(),
-                 InVec.getNode()->op_end());
-    } else if (InVec.getOpcode() == ISD::UNDEF) {
-      unsigned NElts = VT.getVectorNumElements();
-      Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
-    } else {
-      return SDValue();
-    }
-
-    // Insert the element
-    if (Elt < Ops.size()) {
-      // All the operands of BUILD_VECTOR must have the same type;
-      // we enforce that here.
-      EVT OpVT = Ops[0].getValueType();
-      if (InVal.getValueType() != OpVT)
-        InVal = OpVT.bitsGT(InVal.getValueType()) ?
-          DAG.getNode(ISD::ANY_EXTEND, dl, OpVT, InVal) :
-          DAG.getNode(ISD::TRUNCATE, dl, OpVT, InVal);
-      Ops[Elt] = InVal;
-    }
-
-    // Return the new vector
-    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
-  }
-
-  // Extract_vec (Build_vector) generated by custom lowering
-  // also needs to be customly combined
-  case ISD::EXTRACT_VECTOR_ELT: {
-    SDValue Arg = N->getOperand(0);
-    if (Arg.getOpcode() == ISD::BUILD_VECTOR) {
-      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
-        unsigned Element = Const->getZExtValue();
-        return Arg->getOperand(Element);
-      }
-    }
-    if (Arg.getOpcode() == ISD::BITCAST &&
-        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
-      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
-        unsigned Element = Const->getZExtValue();
-        return DAG.getNode(ISD::BITCAST, SDLoc(N), N->getVTList(),
-            Arg->getOperand(0).getOperand(Element));
-      }
-    }
-  }
-
-  case ISD::SELECT_CC: {
-    // Try common optimizations
-    SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
-    if (Ret.getNode())
-      return Ret;
-
-    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
-    //      selectcc x, y, a, b, inv(cc)
-    //
-    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, setne ->
-    //      selectcc x, y, a, b, cc
-    SDValue LHS = N->getOperand(0);
-    if (LHS.getOpcode() != ISD::SELECT_CC) {
-      return SDValue();
-    }
-
-    SDValue RHS = N->getOperand(1);
-    SDValue True = N->getOperand(2);
-    SDValue False = N->getOperand(3);
-    ISD::CondCode NCC = cast<CondCodeSDNode>(N->getOperand(4))->get();
-
-    if (LHS.getOperand(2).getNode() != True.getNode() ||
-        LHS.getOperand(3).getNode() != False.getNode() ||
-        RHS.getNode() != False.getNode()) {
-      return SDValue();
-    }
-
-    switch (NCC) {
-    default: return SDValue();
-    case ISD::SETNE: return LHS;
-    case ISD::SETEQ: {
-      ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
-      LHSCC = ISD::getSetCCInverse(LHSCC,
-                                  LHS.getOperand(0).getValueType().isInteger());
-      if (DCI.isBeforeLegalizeOps() ||
-          isCondCodeLegal(LHSCC, LHS.getOperand(0).getSimpleValueType()))
-        return DAG.getSelectCC(SDLoc(N),
-                               LHS.getOperand(0),
-                               LHS.getOperand(1),
-                               LHS.getOperand(2),
-                               LHS.getOperand(3),
-                               LHSCC);
-      break;
-    }
-    }
-    return SDValue();
-  }
-
-  case AMDGPUISD::EXPORT: {
-    SDValue Arg = N->getOperand(1);
-    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
-      break;
-
-    SDValue NewArgs[8] = {
-      N->getOperand(0), // Chain
-      SDValue(),
-      N->getOperand(2), // ArrayBase
-      N->getOperand(3), // Type
-      N->getOperand(4), // SWZ_X
-      N->getOperand(5), // SWZ_Y
-      N->getOperand(6), // SWZ_Z
-      N->getOperand(7) // SWZ_W
-    };
-    SDLoc DL(N);
-    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG, DL);
-    return DAG.getNode(AMDGPUISD::EXPORT, DL, N->getVTList(), NewArgs);
-  }
-  case AMDGPUISD::TEXTURE_FETCH: {
-    SDValue Arg = N->getOperand(1);
-    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
-      break;
-
-    SDValue NewArgs[19] = {
-      N->getOperand(0),
-      N->getOperand(1),
-      N->getOperand(2),
-      N->getOperand(3),
-      N->getOperand(4),
-      N->getOperand(5),
-      N->getOperand(6),
-      N->getOperand(7),
-      N->getOperand(8),
-      N->getOperand(9),
-      N->getOperand(10),
-      N->getOperand(11),
-      N->getOperand(12),
-      N->getOperand(13),
-      N->getOperand(14),
-      N->getOperand(15),
-      N->getOperand(16),
-      N->getOperand(17),
-      N->getOperand(18),
-    };
-    SDLoc DL(N);
-    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
-    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
-  }
-  }
-
-  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
-}
-
-static bool
-FoldOperand(SDNode *ParentNode, unsigned SrcIdx, SDValue &Src, SDValue &Neg,
-            SDValue &Abs, SDValue &Sel, SDValue &Imm, SelectionDAG &DAG) {
-  const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(DAG.getSubtarget().getInstrInfo());
-  if (!Src.isMachineOpcode())
-    return false;
-  switch (Src.getMachineOpcode()) {
-  case AMDGPU::FNEG_R600:
-    if (!Neg.getNode())
-      return false;
-    Src = Src.getOperand(0);
-    Neg = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
-    return true;
-  case AMDGPU::FABS_R600:
-    if (!Abs.getNode())
-      return false;
-    Src = Src.getOperand(0);
-    Abs = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
-    return true;
-  case AMDGPU::CONST_COPY: {
-    unsigned Opcode = ParentNode->getMachineOpcode();
-    bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
-
-    if (!Sel.getNode())
-      return false;
-
-    SDValue CstOffset = Src.getOperand(0);
-    if (ParentNode->getValueType(0).isVector())
-      return false;
-
-    // Gather constant values
-    int SrcIndices[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
-    };
-    std::vector<unsigned> Consts;
-    for (int OtherSrcIdx : SrcIndices) {
-      int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
-      if (OtherSrcIdx < 0 || OtherSelIdx < 0)
-        continue;
-      if (HasDst) {
-        OtherSrcIdx--;
-        OtherSelIdx--;
-      }
-      if (RegisterSDNode *Reg =
-          dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
-        if (Reg->getReg() == AMDGPU::ALU_CONST) {
-          ConstantSDNode *Cst
-            = cast<ConstantSDNode>(ParentNode->getOperand(OtherSelIdx));
-          Consts.push_back(Cst->getZExtValue());
-        }
-      }
-    }
-
-    ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
-    Consts.push_back(Cst->getZExtValue());
-    if (!TII->fitsConstReadLimitations(Consts)) {
-      return false;
-    }
-
-    Sel = CstOffset;
-    Src = DAG.getRegister(AMDGPU::ALU_CONST, MVT::f32);
-    return true;
-  }
-  case AMDGPU::MOV_IMM_I32:
-  case AMDGPU::MOV_IMM_F32: {
-    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
-    uint64_t ImmValue = 0;
-
-
-    if (Src.getMachineOpcode() == AMDGPU::MOV_IMM_F32) {
-      ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(Src.getOperand(0));
-      float FloatValue = FPC->getValueAPF().convertToFloat();
-      if (FloatValue == 0.0) {
-        ImmReg = AMDGPU::ZERO;
-      } else if (FloatValue == 0.5) {
-        ImmReg = AMDGPU::HALF;
-      } else if (FloatValue == 1.0) {
-        ImmReg = AMDGPU::ONE;
-      } else {
-        ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
-      }
-    } else {
-      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src.getOperand(0));
-      uint64_t Value = C->getZExtValue();
-      if (Value == 0) {
-        ImmReg = AMDGPU::ZERO;
-      } else if (Value == 1) {
-        ImmReg = AMDGPU::ONE_INT;
-      } else {
-        ImmValue = Value;
-      }
-    }
-
-    // Check that we aren't already using an immediate.
-    // XXX: It's possible for an instruction to have more than one
-    // immediate operand, but this is not supported yet.
-    if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-      if (!Imm.getNode())
-        return false;
-      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Imm);
-      assert(C);
-      if (C->getZExtValue())
-        return false;
-      Imm = DAG.getTargetConstant(ImmValue, SDLoc(ParentNode), MVT::i32);
-    }
-    Src = DAG.getRegister(ImmReg, MVT::i32);
-    return true;
-  }
-  default:
-    return false;
-  }
-}
-
-
-/// \brief Fold the instructions after selecting them
-SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
-                                            SelectionDAG &DAG) const {
-  const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo *>(DAG.getSubtarget().getInstrInfo());
-  if (!Node->isMachineOpcode())
-    return Node;
-  unsigned Opcode = Node->getMachineOpcode();
-  SDValue FakeOp;
-
-  std::vector<SDValue> Ops(Node->op_begin(), Node->op_end());
-
-  if (Opcode == AMDGPU::DOT_4) {
-    int OperandIdx[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
-        };
-    int NegIdx[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
-    };
-    int AbsIdx[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
-    };
-    for (unsigned i = 0; i < 8; i++) {
-      if (OperandIdx[i] < 0)
-        return Node;
-      SDValue &Src = Ops[OperandIdx[i] - 1];
-      SDValue &Neg = Ops[NegIdx[i] - 1];
-      SDValue &Abs = Ops[AbsIdx[i] - 1];
-      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
-      int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
-      if (HasDst)
-        SelIdx--;
-      SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
-      if (FoldOperand(Node, i, Src, Neg, Abs, Sel, FakeOp, DAG))
-        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
-    }
-  } else if (Opcode == AMDGPU::REG_SEQUENCE) {
-    for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) {
-      SDValue &Src = Ops[i];
-      if (FoldOperand(Node, i, Src, FakeOp, FakeOp, FakeOp, FakeOp, DAG))
-        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
-    }
-  } else if (Opcode == AMDGPU::CLAMP_R600) {
-    SDValue Src = Node->getOperand(0);
-    if (!Src.isMachineOpcode() ||
-        !TII->hasInstrModifiers(Src.getMachineOpcode()))
-      return Node;
-    int ClampIdx = TII->getOperandIdx(Src.getMachineOpcode(),
-        AMDGPU::OpName::clamp);
-    if (ClampIdx < 0)
-      return Node;
-    SDLoc DL(Node);
-    std::vector<SDValue> Ops(Src->op_begin(), Src->op_end());
-    Ops[ClampIdx - 1] = DAG.getTargetConstant(1, DL, MVT::i32);
-    return DAG.getMachineNode(Src.getMachineOpcode(), DL,
-                              Node->getVTList(), Ops);
-  } else {
-    if (!TII->hasInstrModifiers(Opcode))
-      return Node;
-    int OperandIdx[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
-    };
-    int NegIdx[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
-    };
-    int AbsIdx[] = {
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
-      TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
-      -1
-    };
-    for (unsigned i = 0; i < 3; i++) {
-      if (OperandIdx[i] < 0)
-        return Node;
-      SDValue &Src = Ops[OperandIdx[i] - 1];
-      SDValue &Neg = Ops[NegIdx[i] - 1];
-      SDValue FakeAbs;
-      SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
-      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
-      int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
-      int ImmIdx = TII->getOperandIdx(Opcode, AMDGPU::OpName::literal);
-      if (HasDst) {
-        SelIdx--;
-        ImmIdx--;
-      }
-      SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
-      SDValue &Imm = Ops[ImmIdx];
-      if (FoldOperand(Node, i, Src, Neg, Abs, Sel, Imm, DAG))
-        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
-    }
-  }
-
-  return Node;
-}

Removed: llvm/trunk/lib/Target/R600/R600ISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600ISelLowering.h?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600ISelLowering.h (original)
+++ llvm/trunk/lib/Target/R600/R600ISelLowering.h (removed)
@@ -1,80 +0,0 @@
-//===-- R600ISelLowering.h - R600 DAG Lowering Interface -*- C++ -*--------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-/// \file
-/// \brief R600 DAG Lowering interface definition
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_R600_R600ISELLOWERING_H
-#define LLVM_LIB_TARGET_R600_R600ISELLOWERING_H
-
-#include "AMDGPUISelLowering.h"
-
-namespace llvm {
-
-class R600InstrInfo;
-
-class R600TargetLowering : public AMDGPUTargetLowering {
-public:
-  R600TargetLowering(TargetMachine &TM, const AMDGPUSubtarget &STI);
-  MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI,
-      MachineBasicBlock * BB) const override;
-  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
-  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
-  void ReplaceNodeResults(SDNode * N,
-                          SmallVectorImpl<SDValue> &Results,
-                          SelectionDAG &DAG) const override;
-  SDValue LowerFormalArguments(
-                              SDValue Chain,
-                              CallingConv::ID CallConv,
-                              bool isVarArg,
-                              const SmallVectorImpl<ISD::InputArg> &Ins,
-                              SDLoc DL, SelectionDAG &DAG,
-                              SmallVectorImpl<SDValue> &InVals) const override;
-  EVT getSetCCResultType(LLVMContext &, EVT VT) const override;
-private:
-  unsigned Gen;
-  /// Each OpenCL kernel has nine implicit parameters that are stored in the
-  /// first nine dwords of a Vertex Buffer.  These implicit parameters are
-  /// lowered to load instructions which retrieve the values from the Vertex
-  /// Buffer.
-  SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
-                                 SDLoc DL, unsigned DwordOffset) const;
-
-  void lowerImplicitParameter(MachineInstr *MI, MachineBasicBlock &BB,
-      MachineRegisterInfo & MRI, unsigned dword_offset) const;
-  SDValue OptimizeSwizzle(SDValue BuildVector, SDValue Swz[], SelectionDAG &DAG,
-                          SDLoc DL) const;
-  SDValue vectorToVerticalVector(SelectionDAG &DAG, SDValue Vector) const;
-
-  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFPTOUINT(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSHLParts(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerSRXParts(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerUADDSUBO(SDValue Op, SelectionDAG &DAG,
-                        unsigned mainop, unsigned ovf) const;
-
-  SDValue stackPtrToRegIndex(SDValue Ptr, unsigned StackWidth,
-                                          SelectionDAG &DAG) const;
-  void getStackAddress(unsigned StackWidth, unsigned ElemIdx,
-                       unsigned &Channel, unsigned &PtrIncr) const;
-  bool isZero(SDValue Op) const;
-  SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
-};
-
-} // End namespace llvm;
-
-#endif

Removed: llvm/trunk/lib/Target/R600/R600InstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/R600InstrFormats.td?rev=239656&view=auto
==============================================================================
--- llvm/trunk/lib/Target/R600/R600InstrFormats.td (original)
+++ llvm/trunk/lib/Target/R600/R600InstrFormats.td (removed)
@@ -1,495 +0,0 @@
-//===-- R600InstrFormats.td - R600 Instruction Encodings ------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// R600 Instruction format definitions.
-//
-//===----------------------------------------------------------------------===//
-
-class InstR600 <dag outs, dag ins, string asm, list<dag> pattern,
-                InstrItinClass itin>
-    : AMDGPUInst <outs, ins, asm, pattern> {
-
-  field bits<64> Inst;
-  bit Trig = 0;
-  bit Op3 = 0;
-  bit isVector = 0;
-  bits<2> FlagOperandIdx = 0;
-  bit Op1 = 0;
-  bit Op2 = 0;
-  bit LDS_1A = 0;
-  bit LDS_1A1D = 0;
-  bit HasNativeOperands = 0;
-  bit VTXInst = 0;
-  bit TEXInst = 0;
-  bit ALUInst = 0;
-  bit IsExport = 0;
-  bit LDS_1A2D = 0;
-
-  let Namespace = "AMDGPU";
-  let OutOperandList = outs;
-  let InOperandList = ins;
-  let AsmString = asm;
-  let Pattern = pattern;
-  let Itinerary = itin;
-
-  // No AsmMatcher support.
-  let isCodeGenOnly = 1;
-
-  let TSFlags{4} = Trig;
-  let TSFlags{5} = Op3;
-
-  // Vector instructions are instructions that must fill all slots in an
-  // instruction group
-  let TSFlags{6} = isVector;
-  let TSFlags{8-7} = FlagOperandIdx;
-  let TSFlags{9} = HasNativeOperands;
-  let TSFlags{10} = Op1;
-  let TSFlags{11} = Op2;
-  let TSFlags{12} = VTXInst;
-  let TSFlags{13} = TEXInst;
-  let TSFlags{14} = ALUInst;
-  let TSFlags{15} = LDS_1A;
-  let TSFlags{16} = LDS_1A1D;
-  let TSFlags{17} = IsExport;
-  let TSFlags{18} = LDS_1A2D;
-}
-
-//===----------------------------------------------------------------------===//
-// ALU instructions
-//===----------------------------------------------------------------------===//
-
-class R600_ALU_LDS_Word0 {
-  field bits<32> Word0;
-
-  bits<11> src0;
-  bits<1>  src0_rel;
-  bits<11> src1;
-  bits<1>  src1_rel;
-  bits<3>  index_mode = 0;
-  bits<2>  pred_sel;
-  bits<1>  last;
-
-  bits<9>  src0_sel  = src0{8-0};
-  bits<2>  src0_chan = src0{10-9};
-  bits<9>  src1_sel  = src1{8-0};
-  bits<2>  src1_chan = src1{10-9};
-
-  let Word0{8-0}   = src0_sel;
-  let Word0{9}     = src0_rel;
-  let Word0{11-10} = src0_chan;
-  let Word0{21-13} = src1_sel;
-  let Word0{22}    = src1_rel;
-  let Word0{24-23} = src1_chan;
-  let Word0{28-26} = index_mode;
-  let Word0{30-29} = pred_sel;
-  let Word0{31}    = last;
-}
-
-class R600ALU_Word0 : R600_ALU_LDS_Word0 {
-
-  bits<1>  src0_neg;
-  bits<1>  src1_neg;
-
-  let Word0{12}    = src0_neg;
-  let Word0{25}    = src1_neg;
-}
-
-class R600ALU_Word1 {
-  field bits<32> Word1;
-
-  bits<11> dst;
-  bits<3>  bank_swizzle;
-  bits<1>  dst_rel;
-  bits<1>  clamp;
-
-  bits<7>  dst_sel  = dst{6-0};
-  bits<2>  dst_chan = dst{10-9};
-
-  let Word1{20-18} = bank_swizzle;
-  let Word1{27-21} = dst_sel;
-  let Word1{28}    = dst_rel;
-  let Word1{30-29} = dst_chan;
-  let Word1{31}    = clamp;
-}
-
-class R600ALU_Word1_OP2 <bits<11> alu_inst> : R600ALU_Word1{
-
-  bits<1>  src0_abs;
-  bits<1>  src1_abs;
-  bits<1>  update_exec_mask;
-  bits<1>  update_pred;
-  bits<1>  write;
-  bits<2>  omod;
-
-  let Word1{0}     = src0_abs;
-  let Word1{1}     = src1_abs;
-  let Word1{2}     = update_exec_mask;
-  let Word1{3}     = update_pred;
-  let Word1{4}     = write;
-  let Word1{6-5}   = omod;
-  let Word1{17-7}  = alu_inst;
-}
-
-class R600ALU_Word1_OP3 <bits<5> alu_inst> : R600ALU_Word1{
-
-  bits<11> src2;
-  bits<1>  src2_rel;
-  bits<1>  src2_neg;
-
-  bits<9>  src2_sel = src2{8-0};
-  bits<2>  src2_chan = src2{10-9};
-
-  let Word1{8-0}   = src2_sel;
-  let Word1{9}     = src2_rel;
-  let Word1{11-10} = src2_chan;
-  let Word1{12}    = src2_neg;
-  let Word1{17-13} = alu_inst;
-}
-
-class R600LDS_Word1 {
-  field bits<32> Word1;
-
-  bits<11> src2;
-  bits<9>  src2_sel  = src2{8-0};
-  bits<2>  src2_chan = src2{10-9};
-  bits<1>  src2_rel;
-  // offset specifies the stride offset to the second set of data to be read
-  // from.  This is a dword offset.
-  bits<5>  alu_inst = 17; // OP3_INST_LDS_IDX_OP
-  bits<3>  bank_swizzle;
-  bits<6>  lds_op;
-  bits<2>  dst_chan = 0;
-
-  let Word1{8-0}   = src2_sel;
-  let Word1{9}     = src2_rel;
-  let Word1{11-10} = src2_chan;
-  let Word1{17-13} = alu_inst;
-  let Word1{20-18} = bank_swizzle;
-  let Word1{26-21} = lds_op;
-  let Word1{30-29} = dst_chan;
-}
-
-
-/*
-XXX: R600 subtarget uses a slightly different encoding than the other
-subtargets.  We currently handle this in R600MCCodeEmitter, but we may
-want to use these instruction classes in the future.
-
-class R600ALU_Word1_OP2_r600 : R600ALU_Word1_OP2 {
-
-  bits<1>  fog_merge;
-  bits<10> alu_inst;
-
-  let Inst{37}    = fog_merge;
-  let Inst{39-38} = omod;
-  let Inst{49-40} = alu_inst;
-}
-
-class R600ALU_Word1_OP2_r700 : R600ALU_Word1_OP2 {
-
-  bits<11> alu_inst;
-
-  let Inst{38-37} = omod;
-  let Inst{49-39} = alu_inst;
-}
-*/
-
-//===----------------------------------------------------------------------===//
-// Vertex Fetch instructions
-//===----------------------------------------------------------------------===//
-
-class VTX_WORD0 {
-  field bits<32> Word0;
-  bits<7> src_gpr;
-  bits<5> VC_INST;
-  bits<2> FETCH_TYPE;
-  bits<1> FETCH_WHOLE_QUAD;
-  bits<8> BUFFER_ID;
-  bits<1> SRC_REL;
-  bits<2> SRC_SEL_X;
-
-  let Word0{4-0}   = VC_INST;
-  let Word0{6-5}   = FETCH_TYPE;
-  let Word0{7}     = FETCH_WHOLE_QUAD;
-  let Word0{15-8}  = BUFFER_ID;
-  let Word0{22-16} = src_gpr;
-  let Word0{23}    = SRC_REL;
-  let Word0{25-24} = SRC_SEL_X;
-}
-
-class VTX_WORD0_eg : VTX_WORD0 {
-
-  bits<6> MEGA_FETCH_COUNT;
-
-  let Word0{31-26} = MEGA_FETCH_COUNT;
-}
-
-class VTX_WORD0_cm : VTX_WORD0 {
-
-  bits<2> SRC_SEL_Y;
-  bits<2> STRUCTURED_READ;
-  bits<1> LDS_REQ;
-  bits<1> COALESCED_READ;
-
-  let Word0{27-26} = SRC_SEL_Y;
-  let Word0{29-28} = STRUCTURED_READ;
-  let Word0{30}    = LDS_REQ;
-  let Word0{31}    = COALESCED_READ;
-}
-
-// Second encoding word of a vertex fetch that writes its result to a GPR:
-// destination register, per-channel destination swizzle, and data-format
-// controls.  All 32 bits are accounted for (bit 8 is an explicit reserved 0).
-class VTX_WORD1_GPR {
-  field bits<32> Word1;
-  bits<7> dst_gpr;           // destination GPR index
-  bits<1> DST_REL;           // relative addressing flag for dst_gpr
-  bits<3> DST_SEL_X;
-  bits<3> DST_SEL_Y;
-  bits<3> DST_SEL_Z;
-  bits<3> DST_SEL_W;
-  bits<1> USE_CONST_FIELDS;
-  bits<6> DATA_FORMAT;
-  bits<2> NUM_FORMAT_ALL;
-  bits<1> FORMAT_COMP_ALL;
-  bits<1> SRF_MODE_ALL;
-
-  let Word1{6-0} = dst_gpr;
-  let Word1{7}    = DST_REL;
-  let Word1{8}    = 0; // Reserved
-  let Word1{11-9} = DST_SEL_X;
-  let Word1{14-12} = DST_SEL_Y;
-  let Word1{17-15} = DST_SEL_Z;
-  let Word1{20-18} = DST_SEL_W;
-  let Word1{21}    = USE_CONST_FIELDS;
-  let Word1{27-22} = DATA_FORMAT;
-  let Word1{29-28} = NUM_FORMAT_ALL;
-  let Word1{30}    = FORMAT_COMP_ALL;
-  let Word1{31}    = SRF_MODE_ALL;
-}
-
-//===----------------------------------------------------------------------===//
-// Texture fetch instructions
-//===----------------------------------------------------------------------===//
-
-// First encoding word of a texture-fetch instruction: opcode, resource and
-// sampler indexing modes, and the source GPR.  Bits 31-29 are never assigned
-// here and therefore encode as 0 — presumably reserved; confirm against the
-// ISA manual.
-class TEX_WORD0 {
-  field bits<32> Word0;
-
-  bits<5> TEX_INST;          // texture instruction opcode
-  bits<2> INST_MOD;
-  bits<1> FETCH_WHOLE_QUAD;
-  bits<8> RESOURCE_ID;
-  bits<7> SRC_GPR;           // source coordinate GPR
-  bits<1> SRC_REL;           // relative addressing flag for SRC_GPR
-  bits<1> ALT_CONST;
-  bits<2> RESOURCE_INDEX_MODE;
-  bits<2> SAMPLER_INDEX_MODE;
-
-  let Word0{4-0} = TEX_INST;
-  let Word0{6-5} = INST_MOD;
-  let Word0{7} = FETCH_WHOLE_QUAD;
-  let Word0{15-8} = RESOURCE_ID;
-  let Word0{22-16} = SRC_GPR;
-  let Word0{23} = SRC_REL;
-  let Word0{24} = ALT_CONST;
-  let Word0{26-25} = RESOURCE_INDEX_MODE;
-  let Word0{28-27} = SAMPLER_INDEX_MODE;
-}
-
-// Second encoding word of a texture fetch: destination GPR, per-channel
-// destination swizzle, LOD bias, and coordinate-type flags.
-// NOTE(review): bit 8 is never assigned (DST_REL is bit 7, DST_SEL_X starts
-// at bit 9), so it encodes as 0 — presumably a reserved bit, matching the
-// explicit reserved bit 8 in VTX_WORD1_GPR; confirm against the ISA manual.
-class TEX_WORD1 {
-  field bits<32> Word1;
-
-  bits<7> DST_GPR;           // destination GPR index
-  bits<1> DST_REL;           // relative addressing flag for DST_GPR
-  bits<3> DST_SEL_X;
-  bits<3> DST_SEL_Y;
-  bits<3> DST_SEL_Z;
-  bits<3> DST_SEL_W;
-  bits<7> LOD_BIAS;
-  bits<1> COORD_TYPE_X;
-  bits<1> COORD_TYPE_Y;
-  bits<1> COORD_TYPE_Z;
-  bits<1> COORD_TYPE_W;
-
-  let Word1{6-0} = DST_GPR;
-  let Word1{7} = DST_REL;
-  let Word1{11-9} = DST_SEL_X;
-  let Word1{14-12} = DST_SEL_Y;
-  let Word1{17-15} = DST_SEL_Z;
-  let Word1{20-18} = DST_SEL_W;
-  let Word1{27-21} = LOD_BIAS;
-  let Word1{28} = COORD_TYPE_X;
-  let Word1{29} = COORD_TYPE_Y;
-  let Word1{30} = COORD_TYPE_Z;
-  let Word1{31} = COORD_TYPE_W;
-}
-
-// Third encoding word of a texture fetch: texel offsets, sampler id, and the
-// per-channel source swizzle.  All 32 bits are assigned.
-class TEX_WORD2 {
-  field bits<32> Word2;
-
-  bits<5> OFFSET_X;
-  bits<5> OFFSET_Y;
-  bits<5> OFFSET_Z;
-  bits<5> SAMPLER_ID;
-  bits<3> SRC_SEL_X;
-  bits<3> SRC_SEL_Y;
-  bits<3> SRC_SEL_Z;
-  bits<3> SRC_SEL_W;
-
-  let Word2{4-0} = OFFSET_X;
-  let Word2{9-5} = OFFSET_Y;
-  let Word2{14-10} = OFFSET_Z;
-  let Word2{19-15} = SAMPLER_ID;
-  let Word2{22-20} = SRC_SEL_X;
-  let Word2{25-23} = SRC_SEL_Y;
-  let Word2{28-26} = SRC_SEL_Z;
-  let Word2{31-29} = SRC_SEL_W;
-}
-
-//===----------------------------------------------------------------------===//
-// Control Flow Instructions
-//===----------------------------------------------------------------------===//
-
-// Second encoding word of a control-flow instruction, R600 layout.
-// The CF_INST field is 7 bits here vs. 8 bits in the Evergreen layout
-// (CF_WORD1_EG below).  Bit 20 is never assigned and so encodes as 0 —
-// presumably reserved; confirm against the ISA manual.
-class CF_WORD1_R600 {
-  field bits<32> Word1;
-
-  bits<3> POP_COUNT;
-  bits<5> CF_CONST;
-  bits<2> COND;
-  bits<3> COUNT;
-  bits<6> CALL_COUNT;
-  bits<1> COUNT_3;
-  bits<1> END_OF_PROGRAM;
-  bits<1> VALID_PIXEL_MODE;
-  bits<7> CF_INST;           // control-flow opcode (7 bits on R600)
-  bits<1> WHOLE_QUAD_MODE;
-  bits<1> BARRIER;
-
-  let Word1{2-0} = POP_COUNT;
-  let Word1{7-3} = CF_CONST;
-  let Word1{9-8} = COND;
-  let Word1{12-10} = COUNT;
-  let Word1{18-13} = CALL_COUNT;
-  let Word1{19} = COUNT_3;
-  let Word1{21} = END_OF_PROGRAM;
-  let Word1{22} = VALID_PIXEL_MODE;
-  let Word1{29-23} = CF_INST;
-  let Word1{30} = WHOLE_QUAD_MODE;
-  let Word1{31} = BARRIER;
-}
-
-// First encoding word of a control-flow instruction, Evergreen layout:
-// 24-bit address plus jump-table select.  Bits 31-27 are left unassigned
-// (encode as 0).
-class CF_WORD0_EG {
-  field bits<32> Word0;
-
-  bits<24> ADDR;             // CF target address
-  bits<3> JUMPTABLE_SEL;
-
-  let Word0{23-0} = ADDR;
-  let Word0{26-24} = JUMPTABLE_SEL;
-}
-
-// Second encoding word of a control-flow instruction, Evergreen layout.
-// Differences vs. CF_WORD1_R600 visible here: COUNT widens to 6 bits,
-// CF_INST widens to 8 bits, and CALL_COUNT/COUNT_3/WHOLE_QUAD_MODE are gone.
-// Bits 19-16 and 30 are never assigned and encode as 0 — presumably
-// reserved; confirm against the ISA manual.
-class CF_WORD1_EG {
-  field bits<32> Word1;
-
-  bits<3> POP_COUNT;
-  bits<5> CF_CONST;
-  bits<2> COND;
-  bits<6> COUNT;
-  bits<1> VALID_PIXEL_MODE;
-  bits<1> END_OF_PROGRAM;
-  bits<8> CF_INST;           // control-flow opcode (8 bits on Evergreen)
-  bits<1> BARRIER;
-
-  let Word1{2-0} = POP_COUNT;
-  let Word1{7-3} = CF_CONST;
-  let Word1{9-8} = COND;
-  let Word1{15-10} = COUNT;
-  let Word1{20} = VALID_PIXEL_MODE;
-  let Word1{21} = END_OF_PROGRAM;
-  let Word1{29-22} = CF_INST;
-  let Word1{31} = BARRIER;
-}
-
-// First encoding word of a CF_ALU clause instruction: 22-bit clause address
-// plus constant-cache (kcache) bank/mode selects.  All 32 bits are assigned.
-class CF_ALU_WORD0 {
-  field bits<32> Word0;
-
-  bits<22> ADDR;             // address of the ALU clause
-  bits<4> KCACHE_BANK0;
-  bits<4> KCACHE_BANK1;
-  bits<2> KCACHE_MODE0;
-
-  let Word0{21-0} = ADDR;
-  let Word0{25-22} = KCACHE_BANK0;
-  let Word0{29-26} = KCACHE_BANK1;
-  let Word0{31-30} = KCACHE_MODE0;
-}
-
-// Second encoding word of a CF_ALU clause instruction: remaining kcache
-// controls (mode1 and both addresses), clause instruction count, and the
-// 4-bit CF_ALU opcode.  All 32 bits are assigned.
-class CF_ALU_WORD1 {
-  field bits<32> Word1;
-
-  bits<2> KCACHE_MODE1;      // pairs with KCACHE_MODE0 in CF_ALU_WORD0
-  bits<8> KCACHE_ADDR0;
-  bits<8> KCACHE_ADDR1;
-  bits<7> COUNT;             // number of instructions in the clause
-  bits<1> ALT_CONST;
-  bits<4> CF_INST;
-  bits<1> WHOLE_QUAD_MODE;
-  bits<1> BARRIER;
-
-  let Word1{1-0} = KCACHE_MODE1;
-  let Word1{9-2} = KCACHE_ADDR0;
-  let Word1{17-10} = KCACHE_ADDR1;
-  let Word1{24-18} = COUNT;
-  let Word1{25} = ALT_CONST;
-  let Word1{29-26} = CF_INST;
-  let Word1{30} = WHOLE_QUAD_MODE;
-  let Word1{31} = BARRIER;
-}
-
-// First encoding word of a CF alloc/export instruction targeting a RAT
-// (random-access target): RAT id/opcode, read/write and index GPRs, element
-// size.  All 32 bits are accounted for (bit 10 is an explicit reserved 0).
-class CF_ALLOC_EXPORT_WORD0_RAT {
-  field bits<32> Word0;
-
-  bits<4> rat_id;
-  bits<6> rat_inst;
-  bits<2> rim;
-  bits<2> type;
-  bits<7> rw_gpr;            // GPR read from / written to
-  bits<1> rw_rel;            // relative addressing flag for rw_gpr
-  bits<7> index_gpr;
-  bits<2> elem_size;
-
-  let Word0{3-0}   = rat_id;
-  let Word0{9-4}   = rat_inst;
-  let Word0{10}    = 0; // Reserved
-  let Word0{12-11} = rim;
-  let Word0{14-13} = type;
-  let Word0{21-15} = rw_gpr;
-  let Word0{22}    = rw_rel;
-  let Word0{29-23} = index_gpr;
-  let Word0{31-30} = elem_size;
-}
-
-// Second encoding word of a CF alloc/export instruction in the buffer form:
-// array size, component write mask, burst count, and the 8-bit CF opcode.
-// All 32 bits are assigned.
-class CF_ALLOC_EXPORT_WORD1_BUF {
-  field bits<32> Word1;
-
-  bits<12> array_size;
-  bits<4>  comp_mask;        // per-component write mask
-  bits<4>  burst_count;
-  bits<1>  vpm;              // presumably valid-pixel-mode — confirm against ISA docs
-  bits<1>  eop;              // presumably end-of-program — confirm against ISA docs
-  bits<8>  cf_inst;
-  bits<1>  mark;
-  bits<1>  barrier;
-
-  let Word1{11-0} = array_size;
-  let Word1{15-12} = comp_mask;
-  let Word1{19-16} = burst_count;
-  let Word1{20}    = vpm;
-  let Word1{21}    = eop;
-  let Word1{29-22} = cf_inst;
-  let Word1{30}    = mark;
-  let Word1{31}    = barrier;
-}






More information about the llvm-commits mailing list