[llvm] r282667 - AMDGPU: Partially fix control flow at -O0

Galina Kistanova via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 30 11:29:28 PDT 2016


Hi Matt,

This revision introduced a warning on one of our builders:
 http://lab.llvm.org:8011/builders/clang-3stage-ubuntu/builds

 The warning:
 /home/buildbot/Buildbot/Slave1a/clang-3stage-ubuntu/llvm.src/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp:269:21:
 warning: variable ‘CopyOp’ set but not used [-Wunused-but-set-variable]
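
Looking at the new SIOptimizeExecMasking.cpp in the diff below, only OtherOp
seems to end up feeding the BuildMI call, so one possible fix (just a sketch
from reading the patch, untested) would be to drop the CopyOp pointer and keep
only the operand that is actually used:

  MachineOperand &Src0 = SaveExecInst->getOperand(1);
  MachineOperand &Src1 = SaveExecInst->getOperand(2);

  // Only the non-exec-copy source is needed to form the *_saveexec_b64, so
  // track just that operand instead of the unused CopyOp.
  MachineOperand *OtherOp = nullptr;

  if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
    OtherOp = &Src1;
  } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
    if (!SaveExecInst->isCommutable())
      break;

    OtherOp = &Src0;
  } else
    llvm_unreachable("unexpected");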

Please have a look?

Thanks

Galina



On Wed, Sep 28, 2016 at 6:44 PM, Matt Arsenault via llvm-commits <
llvm-commits at lists.llvm.org> wrote:

> Author: arsenm
> Date: Wed Sep 28 20:44:16 2016
> New Revision: 282667
>
> URL: http://llvm.org/viewvc/llvm-project?rev=282667&view=rev
> Log:
> AMDGPU: Partially fix control flow at -O0
>
> Fixes to allow spilling all registers at the end of the block to
> work with exec modifications. Don't emit s_and_saveexec_b64 for
> if lowering, and instead emit copies. Mark control flow mask
> instructions as terminators to get correct spill code placement
> with fast regalloc, and then have a separate optimization pass
> form the saveexec.
>
> This should work if SGPRs are spilled to VGPRs, but
> will likely fail in the case that an SGPR spills to memory
> and no workitem takes a divergent branch.
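
(For context, condensed from the comment in the new SIOptimizeExecMasking.cpp
and the optimize-if-exec-masking.mir test added below: the -O0 lowering now
emits the exec manipulation as separate instructions, with a terminator copy
back into exec, e.g.

    %sgpr0_sgpr1 = COPY %exec
    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
    %exec = S_MOV_B64_term killed %sgpr2_sgpr3

so spill code inserted at the end of the block lands before the exec change.
The new pass then folds the copy/and/copy-to-exec back into

    %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
    %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc

when nothing in between reads exec or clobbers the registers involved.)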
>
> Added:
>     llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
>     llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
>     llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir
> Modified:
>     llvm/trunk/lib/Target/AMDGPU/AMDGPU.h
>     llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
>     llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt
>     llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
>     llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
>     llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPU.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/AMDGPU.h?rev=282667&r1=282666&r2=282667&view=diff
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPU.h (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPU.h Wed Sep 28 20:44:16 2016
> @@ -73,6 +73,9 @@ extern char &SILowerControlFlowID;
>  void initializeSIInsertSkipsPass(PassRegistry &);
>  extern char &SIInsertSkipsPassID;
>
> +void initializeSIOptimizeExecMaskingPass(PassRegistry &);
> +extern char &SIOptimizeExecMaskingID;
> +
>  // Passes common to R600 and SI
>  FunctionPass *createAMDGPUPromoteAlloca(const TargetMachine *TM =
> nullptr);
>  void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
>
> Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/AMDGPUTargetMachine.cpp?rev=282667&r1=282666&r2=282667&view=diff
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Wed Sep 28
> 20:44:16 2016
> @@ -83,6 +83,7 @@ extern "C" void LLVMInitializeAMDGPUTarg
>    initializeSILowerControlFlowPass(*PR);
>    initializeSIInsertSkipsPass(*PR);
>    initializeSIDebuggerInsertNopsPass(*PR);
> +  initializeSIOptimizeExecMaskingPass(*PR);
>  }
>
>  static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple
> &TT) {
> @@ -333,6 +334,7 @@ public:
>    void addFastRegAlloc(FunctionPass *RegAllocPass) override;
>    void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
>    void addPreRegAlloc() override;
> +  void addPostRegAlloc() override;
>    void addPreSched2() override;
>    void addPreEmitPass() override;
>  };
> @@ -548,7 +550,6 @@ bool GCNPassConfig::addGlobalInstruction
>  #endif
>
>  void GCNPassConfig::addPreRegAlloc() {
> -
>    addPass(createSIShrinkInstructionsPass());
>    addPass(createSIWholeQuadModePass());
>  }
> @@ -556,7 +557,11 @@ void GCNPassConfig::addPreRegAlloc() {
>  void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
>    // FIXME: We have to disable the verifier here because of
> PHIElimination +
>    // TwoAddressInstructions disabling it.
> -  insertPass(&TwoAddressInstructionPassID, &SILowerControlFlowID, false);
> +
> +  // This must be run immediately after phi elimination and before
> +  // TwoAddressInstructions, otherwise the processing of the tied operand
> of
> +  // SI_ELSE will introduce a copy of the tied operand source after the
> else.
> +  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
>
>    TargetPassConfig::addFastRegAlloc(RegAllocPass);
>  }
> @@ -566,13 +571,19 @@ void GCNPassConfig::addOptimizedRegAlloc
>    // passes might recompute live intervals.
>    insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
>
> -  // TODO: It might be better to run this right after phi elimination,
> but for
> -  // now that would require not running the verifier.
> -  insertPass(&RenameIndependentSubregsID, &SILowerControlFlowID);
> +  // This must be run immediately after phi elimination and before
> +  // TwoAddressInstructions, otherwise the processing of the tied operand
> of
> +  // SI_ELSE will introduce a copy of the tied operand source after the
> else.
> +  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);
>
>    TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
>  }
>
> +void GCNPassConfig::addPostRegAlloc() {
> +  addPass(&SIOptimizeExecMaskingID);
> +  TargetPassConfig::addPostRegAlloc();
> +}
> +
>  void GCNPassConfig::addPreSched2() {
>  }
>
>
> Modified: llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/CMakeLists.txt?rev=282667&r1=282666&r2=282667&view=diff
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt (original)
> +++ llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt Wed Sep 28 20:44:16 2016
> @@ -77,6 +77,7 @@ add_llvm_target(AMDGPUCodeGen
>    SILowerI1Copies.cpp
>    SIMachineFunctionInfo.cpp
>    SIMachineScheduler.cpp
> +  SIOptimizeExecMasking.cpp
>    SIRegisterInfo.cpp
>    SIShrinkInstructions.cpp
>    SITypeRewriter.cpp
>
> Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/SIInstrInfo.cpp?rev=282667&r1=282666&r2=282667&view=diff
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Wed Sep 28 20:44:16 2016
> @@ -856,7 +856,24 @@ bool SIInstrInfo::expandPostRAPseudo(Mac
>    DebugLoc DL = MBB.findDebugLoc(MI);
>    switch (MI.getOpcode()) {
>    default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
> -
> +  case AMDGPU::S_MOV_B64_term: {
> +    // This is only a terminator to get the correct spill code placement
> during
> +    // register allocation.
> +    MI.setDesc(get(AMDGPU::S_MOV_B64));
> +    break;
> +  }
> +  case AMDGPU::S_XOR_B64_term: {
> +    // This is only a terminator to get the correct spill code placement
> during
> +    // register allocation.
> +    MI.setDesc(get(AMDGPU::S_XOR_B64));
> +    break;
> +  }
> +  case AMDGPU::S_ANDN2_B64_term: {
> +    // This is only a terminator to get the correct spill code placement
> during
> +    // register allocation.
> +    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
> +    break;
> +  }
>    case AMDGPU::V_MOV_B64_PSEUDO: {
>      unsigned Dst = MI.getOperand(0).getReg();
>      unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
>
> Modified: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/SIInstructions.td?rev=282667&r1=282666&r2=282667&view=diff
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (original)
> +++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td Wed Sep 28 20:44:16
> 2016
> @@ -112,6 +112,27 @@ def GET_GROUPSTATICSIZE : PseudoInstSI <
>    [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
>  } // End let usesCustomInserter = 1, SALU = 1
>
> +def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),
> +   (ins SSrc_b64:$src0)> {
> +  let SALU = 1;
> +  let isAsCheapAsAMove = 1;
> +  let isTerminator = 1;
> +}
> +
> +def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),
> +   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
> +  let SALU = 1;
> +  let isAsCheapAsAMove = 1;
> +  let isTerminator = 1;
> +}
> +
> +def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),
> +   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
> +  let SALU = 1;
> +  let isAsCheapAsAMove = 1;
> +  let isTerminator = 1;
> +}
> +
>  // SI pseudo instructions. These are used by the CFG structurizer pass
>  // and should be lowered to ISA instructions prior to codegen.
>
> @@ -132,9 +153,9 @@ def SI_IF: CFPseudoInstSI <
>    (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
>    [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {
>    let Constraints = "";
> -  let Size = 8;
> -  let mayStore = 1;
> +  let Size = 12;
>    let mayLoad = 1;
> +  let mayStore = 1;
>    let hasSideEffects = 1;
>  }
>
>
> Modified: llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/SILowerControlFlow.cpp?rev=282667&r1=282666&r2=282667&view=diff
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp (original)
> +++ llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp Wed Sep 28
> 20:44:16 2016
> @@ -70,6 +70,7 @@ private:
>    const SIRegisterInfo *TRI;
>    const SIInstrInfo *TII;
>    LiveIntervals *LIS;
> +  MachineRegisterInfo *MRI;
>
>    void emitIf(MachineInstr &MI);
>    void emitElse(MachineInstr &MI);
> @@ -86,7 +87,8 @@ public:
>      MachineFunctionPass(ID),
>      TRI(nullptr),
>      TII(nullptr),
> -    LIS(nullptr) {}
> +    LIS(nullptr),
> +    MRI(nullptr) {}
>
>    bool runOnMachineFunction(MachineFunction &MF) override;
>
> @@ -95,8 +97,12 @@ public:
>    }
>
>    void getAnalysisUsage(AnalysisUsage &AU) const override {
> -    AU.addPreserved<LiveIntervals>();
> +    // Should preserve the same set that TwoAddressInstructions does.
>      AU.addPreserved<SlotIndexes>();
> +    AU.addPreserved<LiveIntervals>();
> +    AU.addPreservedID(LiveVariablesID);
> +    AU.addPreservedID(MachineLoopInfoID);
> +    AU.addPreservedID(MachineDominatorsID);
>      AU.setPreservesCFG();
>      MachineFunctionPass::getAnalysisUsage(AU);
>    }
> @@ -109,6 +115,13 @@ char SILowerControlFlow::ID = 0;
>  INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
>                 "SI lower control flow", false, false)
>
> +static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
> +  MachineOperand &ImpDefSCC = MI.getOperand(3);
> +  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
> +
> +  ImpDefSCC.setIsDead(IsDead);
> +}
> +
>  char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;
>
>  void SILowerControlFlow::emitIf(MachineInstr &MI) {
> @@ -123,14 +136,36 @@ void SILowerControlFlow::emitIf(MachineI
>
>    unsigned SaveExecReg = SaveExec.getReg();
>
> -  MachineInstr *AndSaveExec =
> -    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64),
> SaveExecReg)
> -    .addOperand(Cond);
> +  MachineOperand &ImpDefSCC = MI.getOperand(4);
> +  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
> +
> +  // Add an implicit def of exec to discourage scheduling VALU after this
> which
> +  // will interfere with trying to form s_and_saveexec_b64 later.
> +  MachineInstr *CopyExec =
> +    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SaveExecReg)
> +    .addReg(AMDGPU::EXEC)
> +    .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);
> +
> +  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
> +
> +  MachineInstr *And =
> +    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)
> +    .addReg(SaveExecReg)
> +    //.addReg(AMDGPU::EXEC)
> +    .addReg(Cond.getReg());
> +  setImpSCCDefDead(*And, true);
>
>    MachineInstr *Xor =
>      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)
> -    .addReg(AMDGPU::EXEC)
> +    .addReg(Tmp)
>      .addReg(SaveExecReg);
> +  setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
> +
> +  // Use a copy that is a terminator to get correct spill code placement with
> +  // fast regalloc.
> +  MachineInstr *SetExec =
> +    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
> +    .addReg(Tmp, RegState::Kill);
>
>    // Insert a pseudo terminator to help keep the verifier happy. This
> will also
>    // be used later when inserting skips.
> @@ -143,11 +178,17 @@ void SILowerControlFlow::emitIf(MachineI
>      return;
>    }
>
> +  LIS->InsertMachineInstrInMaps(*CopyExec);
> +
> +  // Replace MI with the And so we don't need to fix the live interval for the
> +  // condition register.
> +  LIS->ReplaceMachineInstrInMaps(MI, *And);
>
> -  LIS->ReplaceMachineInstrInMaps(MI, *AndSaveExec);
>    LIS->InsertMachineInstrInMaps(*Xor);
> +  LIS->InsertMachineInstrInMaps(*SetExec);
>    LIS->InsertMachineInstrInMaps(*NewBr);
>
> +  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
>    MI.eraseFromParent();
>
>    // FIXME: Is there a better way of adjusting the liveness? It shouldn't
> be
> @@ -155,6 +196,7 @@ void SILowerControlFlow::emitIf(MachineI
>    // valno.
>    LIS->removeInterval(SaveExecReg);
>    LIS->createAndComputeVirtRegInterval(SaveExecReg);
> +  LIS->createAndComputeVirtRegInterval(Tmp);
>  }
>
>  void SILowerControlFlow::emitElse(MachineInstr &MI) {
> @@ -167,11 +209,18 @@ void SILowerControlFlow::emitElse(Machin
>    bool ExecModified = MI.getOperand(3).getImm() != 0;
>    MachineBasicBlock::iterator Start = MBB.begin();
>
> +  // We are running before TwoAddressInstructions, and si_else's operands
> are
> +  // tied. In order to correctly tie the registers, split this into a
> copy of
> +  // the src like it does.
> +  BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), DstReg)
> +    .addOperand(MI.getOperand(1)); // Saved EXEC
> +
>    // This must be inserted before phis and any spill code inserted before
> the
>    // else.
>    MachineInstr *OrSaveExec =
>      BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_SAVEEXEC_B64), DstReg)
> -    .addOperand(MI.getOperand(1)); // Saved EXEC
> +    .addReg(DstReg);
> +
>    MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();
>
>    MachineBasicBlock::iterator ElsePt(MI);
> @@ -187,14 +236,12 @@ void SILowerControlFlow::emitElse(Machin
>    }
>
>    MachineInstr *Xor =
> -    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
> +    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_term),
> AMDGPU::EXEC)
>      .addReg(AMDGPU::EXEC)
>      .addReg(DstReg);
>
> -  MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
> -  // Insert a pseudo terminator to help keep the verifier happy.
>    MachineInstr *Branch =
> -    BuildMI(MBB, Term, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
> +    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
>      .addMBB(DestBB);
>
>    if (!LIS) {
> @@ -246,7 +293,7 @@ void SILowerControlFlow::emitLoop(Machin
>    const DebugLoc &DL = MI.getDebugLoc();
>
>    MachineInstr *AndN2 =
> -    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
> +    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term),
> AMDGPU::EXEC)
>      .addReg(AMDGPU::EXEC)
>      .addOperand(MI.getOperand(0));
>
> @@ -288,6 +335,7 @@ bool SILowerControlFlow::runOnMachineFun
>
>    // This doesn't actually need LiveIntervals, but we can preserve them.
>    LIS = getAnalysisIfAvailable<LiveIntervals>();
> +  MRI = &MF.getRegInfo();
>
>    MachineFunction::iterator NextBB;
>    for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
>
> Added: llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/
> AMDGPU/SIOptimizeExecMasking.cpp?rev=282667&view=auto
> ============================================================
> ==================
> --- llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp (added)
> +++ llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp Wed Sep 28
> 20:44:16 2016
> @@ -0,0 +1,304 @@
> +//===-- SIOptimizeExecMasking.cpp ------------------------------
> -----------===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
> +//===------------------------------------------------------
> ----------------===//
> +
> +#include "AMDGPU.h"
> +#include "AMDGPUSubtarget.h"
> +#include "SIInstrInfo.h"
> +#include "llvm/CodeGen/LiveIntervalAnalysis.h"
> +#include "llvm/CodeGen/MachineFunctionPass.h"
> +#include "llvm/CodeGen/MachineInstrBuilder.h"
> +#include "llvm/CodeGen/MachineRegisterInfo.h"
> +#include "llvm/Support/Debug.h"
> +
> +using namespace llvm;
> +
> +#define DEBUG_TYPE "si-optimize-exec-masking"
> +
> +namespace {
> +
> +class SIOptimizeExecMasking : public MachineFunctionPass {
> +public:
> +  static char ID;
> +
> +public:
> +  SIOptimizeExecMasking() : MachineFunctionPass(ID) {
> +    initializeSIOptimizeExecMaskingPass(*PassRegistry::
> getPassRegistry());
> +  }
> +
> +  bool runOnMachineFunction(MachineFunction &MF) override;
> +
> +  const char *getPassName() const override {
> +    return "SI optimize exec mask operations";
> +  }
> +
> +  void getAnalysisUsage(AnalysisUsage &AU) const override {
> +    AU.setPreservesCFG();
> +    MachineFunctionPass::getAnalysisUsage(AU);
> +  }
> +};
> +
> +} // End anonymous namespace.
> +
> +INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
> +                      "SI optimize exec mask operations", false, false)
> +INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
> +INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
> +                    "SI optimize exec mask operations", false, false)
> +
> +char SIOptimizeExecMasking::ID = 0;
> +
> +char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;
> +
> +/// If \p MI is a copy from exec, return the register copied to.
> +static unsigned isCopyFromExec(const MachineInstr &MI) {
> +  switch (MI.getOpcode()) {
> +  case AMDGPU::COPY:
> +  case AMDGPU::S_MOV_B64:
> +  case AMDGPU::S_MOV_B64_term: {
> +    const MachineOperand &Src = MI.getOperand(1);
> +    if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)
> +      return MI.getOperand(0).getReg();
> +  }
> +  }
> +
> +  return AMDGPU::NoRegister;
> +}
> +
> +/// If \p MI is a copy to exec, return the register copied from.
> +static unsigned isCopyToExec(const MachineInstr &MI) {
> +  switch (MI.getOpcode()) {
> +  case AMDGPU::COPY:
> +  case AMDGPU::S_MOV_B64: {
> +    const MachineOperand &Dst = MI.getOperand(0);
> +    if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC)
> +      return MI.getOperand(1).getReg();
> +    break;
> +  }
> +  case AMDGPU::S_MOV_B64_term:
> +    llvm_unreachable("should have been replaced");
> +  }
> +
> +  return AMDGPU::NoRegister;
> +}
> +
> +static unsigned getSaveExecOp(unsigned Opc) {
> +  switch (Opc) {
> +  case AMDGPU::S_AND_B64:
> +    return AMDGPU::S_AND_SAVEEXEC_B64;
> +  case AMDGPU::S_OR_B64:
> +    return AMDGPU::S_OR_SAVEEXEC_B64;
> +  case AMDGPU::S_XOR_B64:
> +    return AMDGPU::S_XOR_SAVEEXEC_B64;
> +  case AMDGPU::S_ANDN2_B64:
> +    return AMDGPU::S_ANDN2_SAVEEXEC_B64;
> +  case AMDGPU::S_ORN2_B64:
> +    return AMDGPU::S_ORN2_SAVEEXEC_B64;
> +  case AMDGPU::S_NAND_B64:
> +    return AMDGPU::S_NAND_SAVEEXEC_B64;
> +  case AMDGPU::S_NOR_B64:
> +    return AMDGPU::S_NOR_SAVEEXEC_B64;
> +  case AMDGPU::S_XNOR_B64:
> +    return AMDGPU::S_XNOR_SAVEEXEC_B64;
> +  default:
> +    return AMDGPU::INSTRUCTION_LIST_END;
> +  }
> +}
> +
> +// These are only terminators to get correct spill code placement during
> +// register allocation, so turn them back into normal instructions. Only
> one of
> +// these is expected per block.
> +static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI)
> {
> +  switch (MI.getOpcode()) {
> +  case AMDGPU::S_MOV_B64_term: {
> +    MI.setDesc(TII.get(AMDGPU::COPY));
> +    return true;
> +  }
> +  case AMDGPU::S_XOR_B64_term: {
> +    // This is only a terminator to get the correct spill code placement
> during
> +    // register allocation.
> +    MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
> +    return true;
> +  }
> +  case AMDGPU::S_ANDN2_B64_term: {
> +    // This is only a terminator to get the correct spill code placement
> during
> +    // register allocation.
> +    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
> +    return true;
> +  }
> +  default:
> +    return false;
> +  }
> +}
> +
> +static MachineBasicBlock::reverse_iterator fixTerminators(
> +  const SIInstrInfo &TII,
> +  MachineBasicBlock &MBB) {
> +  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
> +  for (; I != E; ++I) {
> +    if (!I->isTerminator())
> +      return I;
> +
> +    if (removeTerminatorBit(TII, *I))
> +      return I;
> +  }
> +
> +  return E;
> +}
> +
> +static MachineBasicBlock::reverse_iterator findExecCopy(
> +  const SIInstrInfo &TII,
> +  MachineBasicBlock &MBB,
> +  MachineBasicBlock::reverse_iterator I,
> +  unsigned CopyToExec) {
> +  const unsigned InstLimit = 25;
> +
> +  auto E = MBB.rend();
> +  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {
> +    unsigned CopyFromExec = isCopyFromExec(*I);
> +    if (CopyFromExec != AMDGPU::NoRegister)
> +      return I;
> +  }
> +
> +  return E;
> +}
> +
> +// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
> +// report the register as unavailable because a super-register with a lane mask
> +// is unavailable.
> +static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {
> +  for (MachineBasicBlock *Succ : MBB.successors()) {
> +    if (Succ->isLiveIn(Reg))
> +      return true;
> +  }
> +
> +  return false;
> +}
> +
> +bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
> +  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
> +  const SIRegisterInfo *TRI = ST.getRegisterInfo();
> +  const SIInstrInfo *TII = ST.getInstrInfo();
> +
> +  // Optimize sequences emitted for control flow lowering. They are
> originally
> +  // emitted as the separate operations because spill code may need to be
> +  // inserted for the saved copy of exec.
> +  //
> +  //     x = copy exec
> +  //     z = s_<op>_b64 x, y
> +  //     exec = copy z
> +  // =>
> +  //     x = s_<op>_saveexec_b64 y
> +  //
> +
> +  for (MachineBasicBlock &MBB : MF) {
> +    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
> +    MachineBasicBlock::reverse_iterator E = MBB.rend();
> +    if (I == E)
> +      continue;
> +
> +    unsigned CopyToExec = isCopyToExec(*I);
> +    if (CopyToExec == AMDGPU::NoRegister)
> +      continue;
> +
> +    // Scan backwards to find the def.
> +    auto CopyToExecInst = &*I;
> +    auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec);
> +    if (CopyFromExecInst == E)
> +      continue;
> +
> +    if (isLiveOut(MBB, CopyToExec)) {
> +      // The copied register is live out and has a second use in another
> block.
> +      DEBUG(dbgs() << "Exec copy source register is live out\n");
> +      continue;
> +    }
> +
> +    unsigned CopyFromExec = CopyFromExecInst->getOperand(0).getReg();
> +    MachineInstr *SaveExecInst = nullptr;
> +    SmallVector<MachineInstr *, 4> OtherUseInsts;
> +
> +    for (MachineBasicBlock::iterator J
> +           = std::next(CopyFromExecInst->getIterator()), JE =
> I->getIterator();
> +         J != JE; ++J) {
> +      if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) {
> +        DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');
> +        // Make sure this is inserted after any VALU ops that may have
> been
> +        // scheduled in between.
> +        SaveExecInst = nullptr;
> +        break;
> +      }
> +
> +      if (J->modifiesRegister(CopyToExec, TRI)) {
> +        if (SaveExecInst) {
> +          DEBUG(dbgs() << "Multiple instructions modify "
> +                << PrintReg(CopyToExec, TRI) << '\n');
> +          SaveExecInst = nullptr;
> +          break;
> +        }
> +
> +        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());
> +        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)
> +          break;
> +
> +        if (J->readsRegister(CopyFromExec, TRI)) {
> +          SaveExecInst = &*J;
> +          DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst <<
> '\n');
> +        } else {
> +          DEBUG(dbgs() << "Instruction does not read exec copy: " << *J
> << '\n');
> +          break;
> +        }
> +      }
> +
> +      if (SaveExecInst && J->readsRegister(CopyToExec, TRI))
> +        OtherUseInsts.push_back(&*J);
> +    }
> +
> +    if (!SaveExecInst)
> +      continue;
> +
> +    DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');
> +
> +    MachineOperand &Src0 = SaveExecInst->getOperand(1);
> +    MachineOperand &Src1 = SaveExecInst->getOperand(2);
> +
> +    MachineOperand *CopyOp = nullptr;
> +    MachineOperand *OtherOp = nullptr;
> +
> +    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
> +      CopyOp = &Src0;
> +      OtherOp = &Src1;
> +    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
> +      if (!SaveExecInst->isCommutable())
> +        break;
> +
> +      CopyOp = &Src1;
> +      OtherOp = &Src0;
> +    } else
> +      llvm_unreachable("unexpected");
> +
> +    CopyFromExecInst->eraseFromParent();
> +
> +    auto InsPt = SaveExecInst->getIterator();
> +    const DebugLoc &DL = SaveExecInst->getDebugLoc();
> +
> +    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(
> SaveExecInst->getOpcode())),
> +            CopyFromExec)
> +      .addReg(OtherOp->getReg());
> +    SaveExecInst->eraseFromParent();
> +
> +    CopyToExecInst->eraseFromParent();
> +
> +    for (MachineInstr *OtherInst : OtherUseInsts) {
> +      OtherInst->substituteRegister(CopyToExec, AMDGPU::EXEC,
> +                                    AMDGPU::NoSubRegister, *TRI);
> +    }
> +  }
> +
> +  return true;
> +
> +}
>
> Added: llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/
> CodeGen/AMDGPU/control-flow-fastregalloc.ll?rev=282667&view=auto
> ============================================================
> ==================
> --- llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll (added)
> +++ llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll Wed Sep
> 28 20:44:16 2016
> @@ -0,0 +1,296 @@
> +; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn
> -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck
> -check-prefix=VMEM -check-prefix=GCN %s
> +; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn
> -amdgpu-spill-sgpr-to-vgpr=1 -verify-machineinstrs < %s | FileCheck
> -check-prefix=VGPR -check-prefix=GCN %s
> +
> +; Verify registers used for tracking exec mask changes when all
> +; registers are spilled at the end of the block. The SGPR spill
> +; placement relative to the exec modifications are important.
> +
> +; FIXME: This checks with SGPR to VGPR spilling disabled, but this may
> +; not work correctly in cases where no workitems take a branch.
> +
> +
> +; GCN-LABEL: {{^}}divergent_if_endif:
> +
> +; GCN: {{^}}; BB#0:
> +; GCN: s_mov_b32 m0, -1
> +; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
> +
> +; GCN: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,
> +; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}},
> exec
> +; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}},
> s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}}, [[CMP0]]
> +; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}},
> s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[
> SAVEEXEC_HI]]{{\]}}
> +
> +; Spill saved exec
> +; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]],
> [[SAVEEXEC_LO_LANE:[0-9]+]]
> +; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]],
> [[SAVEEXEC_HI_LANE:[0-9]+]]
> +
> +
> +; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
> +; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[8:11], s12 ; 8-byte
> Folded Spill
> +; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
> +; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[8:11], s12 offset:4
> ; 8-byte Folded Spill
> +
> +; Spill load
> +; GCN: buffer_store_dword [[LOAD0]], off, s[8:11], s12
> offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill
> +; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}
> +
> +; GCN: s_waitcnt vmcnt(0) expcnt(0)
> +; GCN: mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
> +
> +; GCN: {{^}}BB{{[0-9]+}}_1: ; %if
> +; GCN: s_mov_b32 m0, -1
> +; GCN: ds_read_b32 [[LOAD1:v[0-9]+]]
> +; GCN: buffer_load_dword [[RELOAD_LOAD0:v[0-9]+]], off, s[8:11], s12
> offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload
> +; GCN: s_waitcnt vmcnt(0)
> +
> +; Spill val register
> +; GCN: v_add_i32_e32 [[VAL:v[0-9]+]], vcc, [[LOAD1]], [[RELOAD_LOAD0]]
> +; GCN: buffer_store_dword [[VAL]], off, s[8:11], s12
> offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill
> +; GCN: s_waitcnt vmcnt(0)
> +
> +; VMEM: [[ENDIF]]:
> +; Reload and restore exec mask
> +; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]],
> [[SAVEEXEC_LO_LANE]]
> +; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]],
> [[SAVEEXEC_HI_LANE]]
> +
> +
> +
> +; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off,
> s[8:11], s12 ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]],
> v[[V_RELOAD_SAVEEXEC_LO]]
> +
> +; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off,
> s[8:11], s12 offset:4 ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]],
> v[[V_RELOAD_SAVEEXEC_HI]]
> +
> +; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]
> ]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}
> +
> +; Restore val
> +; GCN: buffer_load_dword [[RELOAD_VAL:v[0-9]+]], off, s[8:11], s12
> offset:[[VAL_OFFSET]] ; 4-byte Folded Reload
> +
> +; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RELOAD_VAL]]
> +define void @divergent_if_endif(i32 addrspace(1)* %out) #0 {
> +entry:
> +  %tid = call i32 @llvm.amdgcn.workitem.id.x()
> +  %load0 = load volatile i32, i32 addrspace(3)* undef
> +  %cmp0 = icmp eq i32 %tid, 0
> +  br i1 %cmp0, label %if, label %endif
> +
> +if:
> +  %load1 = load volatile i32, i32 addrspace(3)* undef
> +  %val = add i32 %load0, %load1
> +  br label %endif
> +
> +endif:
> +  %tmp4 = phi i32 [ %val, %if ], [ 0, %entry ]
> +  store i32 %tmp4, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}divergent_loop:
> +; GCN: {{^}}; BB#0:
> +
> +; GCN: s_mov_b32 m0, -1
> +; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]
> +
> +; GCN: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,
> +
> +; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}},
> exec
> +; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}},
> s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
> +; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}},
> s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[
> SAVEEXEC_HI]]{{\]}}
> +
> +; Spill saved exec
> +; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]],
> [[SAVEEXEC_LO_LANE:[0-9]+]]
> +; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]],
> [[SAVEEXEC_HI_LANE:[0-9]+]]
> +
> +
> +; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
> +; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[8:11], s12 ; 8-byte
> Folded Spill
> +; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
> +; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[8:11], s12 offset:4
> ; 8-byte Folded Spill
> +
> +; Spill load
> +; GCN: buffer_store_dword [[LOAD0]], off, s[8:11], s12
> offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill
> +
> +; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}
> +
> +; GCN: s_waitcnt vmcnt(0) expcnt(0)
> +; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]
> +; GCN-NEXT: s_cbranch_execz [[END]]
> +
> +
> +; GCN: [[LOOP:BB[0-9]+_[0-9]+]]:
> +; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[8:11], s12
> offset:[[VAL_OFFSET]] ; 4-byte Folded Reload
> +; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}},
> v[[VAL_LOOP_RELOAD]]
> +; GCN: v_cmp_ne_i32_e32 vcc,
> +; GCN: s_and_b64 vcc, exec, vcc
> +; GCN: buffer_store_dword [[VAL_LOOP]], off, s[8:11], s12
> offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
> +; GCN: s_waitcnt vmcnt(0) expcnt(0)
> +; GCN-NEXT: s_cbranch_vccnz [[LOOP]]
> +
> +
> +; GCN: [[END]]:
> +; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]],
> [[SAVEEXEC_LO_LANE]]
> +; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]],
> [[SAVEEXEC_HI_LANE]]
> +
> +; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off,
> s[8:11], s12 ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]],
> v[[V_RELOAD_SAVEEXEC_LO]]
> +
> +; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off,
> s[8:11], s12 offset:4 ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]],
> v[[V_RELOAD_SAVEEXEC_HI]]
> +
> +; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]
> ]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}
> +; GCN: buffer_load_dword v[[VAL_END:[0-9]+]], off, s[8:11], s12
> offset:[[VAL_SUB_OFFSET]] ; 4-byte Folded Reload
> +
> +; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[VAL_END]]
> +define void @divergent_loop(i32 addrspace(1)* %out) #0 {
> +entry:
> +  %tid = call i32 @llvm.amdgcn.workitem.id.x()
> +  %load0 = load volatile i32, i32 addrspace(3)* undef
> +  %cmp0 = icmp eq i32 %tid, 0
> +  br i1 %cmp0, label %loop, label %end
> +
> +loop:
> +  %i = phi i32 [ %i.inc, %loop ], [ 0, %entry ]
> +  %val = phi i32 [ %val.sub, %loop ], [ %load0, %entry ]
> +  %load1 = load volatile i32, i32 addrspace(3)* undef
> +  %i.inc = add i32 %i, 1
> +  %val.sub = sub i32 %val, %load1
> +  %cmp1 = icmp ne i32 %i, 256
> +  br i1 %cmp1, label %loop, label %end
> +
> +end:
> +  %tmp4 = phi i32 [ %val.sub, %loop ], [ 0, %entry ]
> +  store i32 %tmp4, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +; GCN-LABEL: {{^}}divergent_if_else_endif:
> +; GCN: {{^}}; BB#0:
> +
> +; GCN: s_mov_b32 m0, -1
> +; VMEM: ds_read_b32 [[LOAD0:v[0-9]+]]
> +
> +; GCN: v_cmp_ne_i32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,
> +
> +; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}},
> exec
> +; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[[ANDEXEC_HI:[0-9]+]]{{\]}},
> s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]
> +; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[SAVEEXEC_HI]]{{\]}},
> s{{\[}}[[ANDEXEC_LO]]:[[ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[
> SAVEEXEC_HI]]{{\]}}
> +
> +; Spill load
> +; GCN: buffer_store_dword [[LOAD0]], off, s[8:11], s12 ; 4-byte Folded
> Spill
> +
> +; Spill saved exec
> +; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]],
> [[SAVEEXEC_LO_LANE:[0-9]+]]
> +; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]],
> [[SAVEEXEC_HI_LANE:[0-9]+]]
> +
> +; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]
> +; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[8:11], s12
> offset:[[SAVEEXEC_LO_OFFSET:[0-9]+]] ; 8-byte Folded Spill
> +; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]
> +; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[8:11], s12
> offset:[[SAVEEXEC_HI_OFFSET:[0-9]+]] ; 8-byte Folded Spill
> +
> +; GCN: s_mov_b64 exec, [[CMP0]]
> +; GCN: s_waitcnt vmcnt(0) expcnt(0)
> +
> +; FIXME: It makes no sense to put this skip here
> +; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]
> +; GCN: s_cbranch_execz [[FLOW]]
> +; GCN-NEXT: s_branch [[ELSE:BB[0-9]+_[0-9]+]]
> +
> +; GCN: [[FLOW]]: ; %Flow
> +; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]],
> [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]
> +; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]],
> [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]
> +
> +
> +; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_LO:[0-9]+]], off,
> s[8:11], s12 offset:[[SAVEEXEC_LO_OFFSET]]
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[0-9]+]],
> v[[FLOW_V_RELOAD_SAVEEXEC_LO]]
> +
> +; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_HI:[0-9]+]], off,
> s[8:11], s12 offset:[[SAVEEXEC_HI_OFFSET]] ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[0-9]+]],
> v[[FLOW_V_RELOAD_SAVEEXEC_HI]]
> +
> +; GCN: s_or_saveexec_b64 s{{\[}}[[FLOW_S_RELOAD_
> SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}, s{{\[}}[[FLOW_S_RELOAD_
> SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}
> +
> +; Regular spill value restored after exec modification
> +; GCN: buffer_load_dword [[FLOW_VAL:v[0-9]+]], off, s[8:11], s12
> offset:[[FLOW_VAL_OFFSET:[0-9]+]] ; 4-byte Folded Reload
> +
> +
> +; Spill saved exec
> +; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]],
> [[FLOW_SAVEEXEC_LO_LANE:[0-9]+]]
> +; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]],
> [[FLOW_SAVEEXEC_HI_LANE:[0-9]+]]
> +
> +
> +; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_LO:[0-9]+]],
> s[[FLOW_S_RELOAD_SAVEEXEC_LO]]
> +; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_LO]], off, s[8:11], s12
> offset:[[FLOW_SAVEEXEC_LO_OFFSET:[0-9]+]] ; 8-byte Folded Spill
> +; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_HI:[0-9]+]],
> s[[FLOW_S_RELOAD_SAVEEXEC_HI]]
> +; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_HI]], off, s[8:11], s12
> offset:[[FLOW_SAVEEXEC_HI_OFFSET:[0-9]+]] ; 8-byte Folded Spill
> +
> +; GCN: buffer_store_dword [[FLOW_VAL]], off, s[8:11], s12
> offset:[[RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
> +; GCN: s_xor_b64 exec, exec, s{{\[}}[[FLOW_S_RELOAD_
> SAVEEXEC_LO]]:[[FLOW_S_RELOAD_SAVEEXEC_HI]]{{\]}}
> +; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
> +; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]
> +; GCN-NEXT: s_cbranch_execz [[ENDIF]]
> +
> +
> +; GCN: BB{{[0-9]+}}_2: ; %if
> +; GCN: ds_read_b32
> +; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[8:11], s12 ;
> 4-byte Folded Reload
> +; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]
> +; GCN: buffer_store_dword [[ADD]], off, s[8:11], s12
> offset:[[RESULT_OFFSET]] ; 4-byte Folded Spill
> +; GCN: s_waitcnt vmcnt(0) expcnt(0)
> +; GCN-NEXT: s_branch [[ENDIF:BB[0-9]+_[0-9]+]]
> +
> +; GCN: [[ELSE]]: ; %else
> +; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[8:11], s12 ;
> 4-byte Folded Reload
> +; GCN: v_subrev_i32_e32 [[SUB:v[0-9]+]], vcc, v{{[0-9]+}},
> v[[LOAD0_RELOAD]]
> +; GCN: buffer_store_dword [[ADD]], off, s[8:11], s12
> offset:[[FLOW_RESULT_OFFSET:[0-9]+]] ; 4-byte Folded Spill
> +; GCN: s_waitcnt vmcnt(0) expcnt(0)
> +; GCN-NEXT: s_branch [[FLOW]]
> +
> +; GCN: [[ENDIF]]:
> +; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]],
> [[FLOW_SAVEEXEC_LO_LANE]]
> +; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]], [[SPILL_VGPR]],
> [[FLOW_SAVEEXEC_HI_LANE]]
> +
> +
> +; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+]], off,
> s[8:11], s12 offset:[[FLOW_SAVEEXEC_LO_OFFSET]] ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]],
> v[[V_RELOAD_SAVEEXEC_LO]]
> +
> +; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+]], off,
> s[8:11], s12 offset:[[FLOW_SAVEEXEC_HI_OFFSET]] ; 8-byte Folded Reload
> +; VMEM: s_waitcnt vmcnt(0)
> +; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+]],
> v[[V_RELOAD_SAVEEXEC_HI]]
> +
> +; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]
> ]:[[S_RELOAD_SAVEEXEC_HI]]{{\]}}
> +
> +; GCN: buffer_load_dword v[[RESULT:[0-9]+]], off, s[8:11], s12
> offset:[[RESULT_OFFSET]] ; 4-byte Folded Reload
> +; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RESULT]]
> +define void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {
> +entry:
> +  %tid = call i32 @llvm.amdgcn.workitem.id.x()
> +  %load0 = load volatile i32, i32 addrspace(3)* undef
> +  %cmp0 = icmp eq i32 %tid, 0
> +  br i1 %cmp0, label %if, label %else
> +
> +if:
> +  %load1 = load volatile i32, i32 addrspace(3)* undef
> +  %val0 = add i32 %load0, %load1
> +  br label %endif
> +
> +else:
> +  %load2 = load volatile i32, i32 addrspace(3)* undef
> +  %val1 = sub i32 %load0, %load2
> +  br label %endif
> +
> +endif:
> +  %result = phi i32 [ %val0, %if ], [ %val1, %else ]
> +  store i32 %result, i32 addrspace(1)* %out
> +  ret void
> +}
> +
> +declare i32 @llvm.amdgcn.workitem.id.x() #1
> +
> +attributes #0 = { nounwind }
> +attributes #1 = { nounwind readnone }
>
> Added: llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/
> CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir?rev=282667&view=auto
> ============================================================
> ==================
> --- llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir
> (added)
> +++ llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir Wed
> Sep 28 20:44:16 2016
> @@ -0,0 +1,755 @@
> +# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass
> si-optimize-exec-masking -o -  %s | FileCheck %s
> +
> +--- |
> +  target datalayout = "e-p:32:32-p1:64:64-p2:64:64-
> p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-
> v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
> +
> +  define void @optimize_if_and_saveexec_xor(i32 %z, i32 %v) #0 {
> +  main_body:
> +    %id = call i32 @llvm.amdgcn.workitem.id.x()
> +    %cc = icmp eq i32 %id, 0
> +    %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %cc)
> +    %1 = extractvalue { i1, i64 } %0, 0
> +    %2 = extractvalue { i1, i64 } %0, 1
> +    br i1 %1, label %if, label %end
> +
> +  if:                                               ; preds = %main_body
> +    %v.if = load volatile i32, i32 addrspace(1)* undef
> +    br label %end
> +
> +  end:                                              ; preds = %if,
> %main_body
> +    %r = phi i32 [ 4, %main_body ], [ %v.if, %if ]
> +    call void @llvm.amdgcn.end.cf(i64 %2)
> +    store i32 %r, i32 addrspace(1)* undef
> +    ret void
> +  }
> +
> +  define void @optimize_if_and_saveexec(i32 %z, i32 %v)  #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  define void @optimize_if_or_saveexec(i32 %z, i32 %v)  #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +
> +  define void @optimize_if_and_saveexec_xor_valu_middle(i32 %z, i32 %v)
> #0 {
> +  main_body:
> +    %id = call i32 @llvm.amdgcn.workitem.id.x()
> +    %cc = icmp eq i32 %id, 0
> +    %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %cc)
> +    %1 = extractvalue { i1, i64 } %0, 0
> +    %2 = extractvalue { i1, i64 } %0, 1
> +    store i32 %id, i32 addrspace(1)* undef
> +    br i1 %1, label %if, label %end
> +
> +  if:                                               ; preds = %main_body
> +    %v.if = load volatile i32, i32 addrspace(1)* undef
> +    br label %end
> +
> +  end:                                              ; preds = %if,
> %main_body
> +    %r = phi i32 [ 4, %main_body ], [ %v.if, %if ]
> +    call void @llvm.amdgcn.end.cf(i64 %2)
> +    store i32 %r, i32 addrspace(1)* undef
> +    ret void
> +  }
> +
> +  define void @optimize_if_and_saveexec_xor_wrong_reg(i32 %z, i32 %v)
> #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  define void @optimize_if_and_saveexec_xor_modify_copy_to_exec(i32 %z,
> i32 %v)  #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  define void @optimize_if_and_saveexec_xor_live_out_setexec(i32 %z, i32
> %v)  #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  define void @optimize_if_unknown_saveexec(i32 %z, i32 %v)  #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  define void @optimize_if_andn2_saveexec(i32 %z, i32 %v)  #0 {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  define void @optimize_if_andn2_saveexec_no_commute(i32 %z, i32 %v)  #0
> {
> +  main_body:
> +      br i1 undef, label %if, label %end
> +
> +  if:
> +    br label %end
> +
> +  end:
> +    ret void
> +  }
> +
> +  ; Function Attrs: nounwind readnone
> +  declare i32 @llvm.amdgcn.workitem.id.x() #1
> +
> +  declare { i1, i64 } @llvm.amdgcn.if(i1)
> +
> +  declare void @llvm.amdgcn.end.cf(i64)
> +
> +
> +  attributes #0 = { nounwind }
> +  attributes #1 = { nounwind readnone }
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
> +# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec,
> implicit-def %scc, implicit %exec
> +# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1,
> implicit-def %scc
> +# CHECK-NEXT: SI_MASK_BRANCH
> +
> +name:            optimize_if_and_saveexec_xor
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1,
> implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0,
> 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0,
> 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_and_saveexec{{$}}
> +# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec,
> implicit-def %scc, implicit %exec
> +# CHECK-NEXT: SI_MASK_BRANCH
> +
> +name:            optimize_if_and_saveexec
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0,
> 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0,
> 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_or_saveexec{{$}}
> +# CHECK: %sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 %vcc, implicit-def %exec,
> implicit-def %scc, implicit %exec
> +# CHECK-NEXT: SI_MASK_BRANCH
> +
> +name:            optimize_if_or_saveexec
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0,
> 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0,
> 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle
> +# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def
> %scc
> +# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef
> %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into
> `i32 addrspace(1)* undef`)
> +# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1,
> implicit-def %scc
> +# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
> +# CHECK-NEXT: SI_MASK_BRANCH
> +name:            optimize_if_and_saveexec_xor_valu_middle
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0,
> 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1,
> implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0,
> 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0,
> 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}}
> +# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def
> %scc
> +# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed
> %sgpr0_sgpr1, implicit-def %scc
> +# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
> +# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
> +name:            optimize_if_and_saveexec_xor_wrong_reg
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr6 = S_MOV_B32 -1
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1,
> implicit-def %scc
> +    %exec = S_MOV_B64_term %sgpr0_sgpr1
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0,
> 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0,
> 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_and_saveexec_xor_
> modify_copy_to_exec{{$}}
> +# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def
> %scc
> +# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1,
> implicit-def %scc
> +# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1,
> implicit-def %scc
> +# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
> +# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
> +
> +name:            optimize_if_and_saveexec_xor_modify_copy_to_exec
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
> +    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr0 = S_MOV_B32 0
> +    %sgpr1 = S_MOV_B32 1
> +    %sgpr2 = S_MOV_B32 -1
> +    %sgpr3 = S_MOV_B32 61440
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}}
> +# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
> +# CHECK-NEXT: %exec = COPY %sgpr2_sgpr3
> +# CHECK-NEXT: SI_MASK_BRANCH
> +name:            optimize_if_and_saveexec_xor_live_out_setexec
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
> +    %exec = S_MOV_B64_term %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
> +    S_SLEEP 0, implicit %sgpr2_sgpr3
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +
> +# CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}}
> +# CHECK: %sgpr0_sgpr1 = COPY %exec
> +# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
> +# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
> +# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
> +
> +name:            optimize_if_unknown_saveexec
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}}
> +# CHECK: %sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
> +# CHECK-NEXT: SI_MASK_BRANCH
> +
> +name:            optimize_if_andn2_saveexec
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
> +---
> +# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
> +# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
> +# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
> +# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec
> +name:            optimize_if_andn2_saveexec_no_commute
> +alignment:       0
> +exposesReturnsTwice: false
> +legalized:       false
> +regBankSelected: false
> +selected:        false
> +tracksRegLiveness: true
> +liveins:
> +  - { reg: '%vgpr0' }
> +frameInfo:
> +  isFrameAddressTaken: false
> +  isReturnAddressTaken: false
> +  hasStackMap:     false
> +  hasPatchPoint:   false
> +  stackSize:       0
> +  offsetAdjustment: 0
> +  maxAlignment:    0
> +  adjustsStack:    false
> +  hasCalls:        false
> +  maxCallFrameSize: 0
> +  hasOpaqueSPAdjustment: false
> +  hasVAStart:      false
> +  hasMustTailInVarArgFunc: false
> +body:             |
> +  bb.0.main_body:
> +    successors: %bb.1.if, %bb.2.end
> +    liveins: %vgpr0
> +
> +    %sgpr0_sgpr1 = COPY %exec
> +    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
> +    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
> +    %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
> +    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
> +    SI_MASK_BRANCH %bb.2.end, implicit %exec
> +    S_BRANCH %bb.1.if
> +
> +  bb.1.if:
> +    successors: %bb.2.end
> +    liveins: %sgpr0_sgpr1
> +
> +    %sgpr7 = S_MOV_B32 61440
> +    %sgpr6 = S_MOV_B32 -1
> +    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
> +
> +  bb.2.end:
> +    liveins: %vgpr0, %sgpr0_sgpr1
> +
> +    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
> +    %sgpr3 = S_MOV_B32 61440
> +    %sgpr2 = S_MOV_B32 -1
> +    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
> +    S_ENDPGM
> +
> +...
>
>
> _______________________________________________
> llvm-commits mailing list
> llvm-commits at lists.llvm.org
> http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits
>

