Hi Matt,

This revision introduced a warning on one of our builders:
http://lab.llvm.org:8011/builders/clang-3stage-ubuntu/builds

The warning:
/home/buildbot/Buildbot/Slave1a/clang-3stage-ubuntu/llvm.src/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp:269:21:
warning: variable ‘CopyOp’ set but not used [-Wunused-but-set-variable]

Could you please have a look?

Thanks,

Galina

On Wed, Sep 28, 2016 at 6:44 PM, Matt Arsenault via llvm-commits <llvm-commits@lists.llvm.org> wrote:

Author: arsenm
Date: Wed Sep 28 20:44:16 2016<br>
New Revision: 282667<br>
<br>
URL: http://llvm.org/viewvc/llvm-project?rev=282667&view=rev
Log:<br>
AMDGPU: Partially fix control flow at -O0<br>
<br>
Fixes to allow spilling all registers at the end of the block to
work with exec modifications. Don't emit s_and_saveexec_b64 for
if lowering, and instead emit copies. Mark control flow mask
instructions as terminators to get correct spill code placement
with fast regalloc, and then have a separate optimization pass
form the saveexec.
<br>
This should work if SGPRs are spilled to VGPRs, but<br>
will likely fail in the case that an SGPR spills to memory<br>
and no workitem takes a divergent branch.<br>
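
For reference, the sequence now emitted for an if is roughly the following
(register names here are illustrative; see the included MIR test for the
exact form). SILowerControlFlow emits the mask computation as separate
instructions so spill code can be placed between them, and the new
SIOptimizeExecMasking pass folds them back together after register
allocation:

    ; Emitted by SILowerControlFlow:
    s[0:1] = COPY exec                 ; save the incoming exec mask
    s[2:3] = S_AND_B64 s[0:1], vcc     ; exec mask for the taken side
    s[0:1] = S_XOR_B64 s[2:3], s[0:1]  ; exec mask for the else side
    exec   = S_MOV_B64_term s[2:3]     ; terminator copy into exec
    SI_MASK_BRANCH %endif

    ; Recombined by SIOptimizeExecMasking after regalloc:
    s[0:1] = S_AND_SAVEEXEC_B64 vcc
    s[0:1] = S_XOR_B64 exec, s[0:1]
    SI_MASK_BRANCH %endif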
<br>
Added:<br>
    llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
    llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
    llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPU.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp
<br>
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPU.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPU.h?rev=282667&r1=282666&r2=282667&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPU.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPU.h Wed Sep 28 20:44:16 2016
@@ -73,6 +73,9 @@ extern char &SILowerControlFlowID;<br>
 void initializeSIInsertSkipsPass(PassRegistry &);
 extern char &SIInsertSkipsPassID;

+void initializeSIOptimizeExecMaskingPass(PassRegistry &);
+extern char &SIOptimizeExecMaskingID;
+
 // Passes common to R600 and SI
 FunctionPass *createAMDGPUPromoteAlloca(const TargetMachine *TM = nullptr);
 void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
<br>
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp?rev=282667&r1=282666&r2=282667&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp Wed Sep 28 20:44:16 2016
@@ -83,6 +83,7 @@ extern "C" void LLVMInitializeAMDGPUTarg<br>
   initializeSILowerControlFlowPass(*PR);
   initializeSIInsertSkipsPass(*PR);
   initializeSIDebuggerInsertNopsPass(*PR);
+  initializeSIOptimizeExecMaskingPass(*PR);
 }<br>
<br>
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -333,6 +334,7 @@ public:
   void addFastRegAlloc(FunctionPass *RegAllocPass) override;
   void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
   void addPreRegAlloc() override;<br>
+  void addPostRegAlloc() override;<br>
   void addPreSched2() override;<br>
   void addPreEmitPass() override;<br>
 };<br>
@@ -548,7 +550,6 @@ bool GCNPassConfig::addGlobalInstruction
 #endif

 void GCNPassConfig::addPreRegAlloc() {
-
   addPass(createSIShrinkInstructionsPass());
   addPass(createSIWholeQuadModePass());
 }
@@ -556,7 +557,11 @@ void GCNPassConfig::addPreRegAlloc() {
 void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
   // FIXME: We have to disable the verifier here because of PHIElimination +
   // TwoAddressInstructions disabling it.
-  insertPass(&TwoAddressInstructionPassID, &SILowerControlFlowID, false);
+<br>
+  // This must be run immediately after phi elimination and before<br>
+  // TwoAddressInstructions, otherwise the processing of the tied operand of<br>
+  // SI_ELSE will introduce a copy of the tied operand source after the else.<br>
+  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);<br>
<br>
   TargetPassConfig::addFastRegAlloc(RegAllocPass);
 }
@@ -566,13 +571,19 @@ void GCNPassConfig::addOptimizedRegAlloc
   // passes might recompute live intervals.
   insertPass(&MachineSchedulerID, &SIFixControlFlowLiveIntervalsID);
<br>
-  // TODO: It might be better to run this right after phi elimination, but for<br>
-  // now that would require not running the verifier.<br>
-  insertPass(&RenameIndependentSubregsID, &SILowerControlFlowID);
+  // This must be run immediately after phi elimination and before<br>
+  // TwoAddressInstructions, otherwise the processing of the tied operand of<br>
+  // SI_ELSE will introduce a copy of the tied operand source after the else.<br>
+  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);<br>
<br>
   TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
 }

+void GCNPassConfig::addPostRegAlloc() {
+  addPass(&SIOptimizeExecMaskingID);
+  TargetPassConfig::addPostRegAlloc();
+}<br>
+<br>
 void GCNPassConfig::addPreSched2() {<br>
 }<br>
<br>
<br>
Modified: llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt?rev=282667&r1=282666&r2=282667&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt (original)
+++ llvm/trunk/lib/Target/AMDGPU/CMakeLists.txt Wed Sep 28 20:44:16 2016
@@ -77,6 +77,7 @@ add_llvm_target(AMDGPUCodeGen<br>
   SILowerI1Copies.cpp<br>
   SIMachineFunctionInfo.cpp<br>
   SIMachineScheduler.cpp<br>
+  SIOptimizeExecMasking.cpp<br>
   SIRegisterInfo.cpp<br>
   SIShrinkInstructions.cpp<br>
   SITypeRewriter.cpp<br>
<br>
Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=282667&r1=282666&r2=282667&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Wed Sep 28 20:44:16 2016
@@ -856,7 +856,24 @@ bool SIInstrInfo::expandPostRAPseudo(Mac
   DebugLoc DL = MBB.findDebugLoc(MI);
   switch (MI.getOpcode()) {
   default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
-<br>
+  case AMDGPU::S_MOV_B64_term: {<br>
+    // This is only a terminator to get the correct spill code placement during<br>
+    // register allocation.<br>
+    MI.setDesc(get(AMDGPU::S_MOV_B64));
+    break;<br>
+  }<br>
+  case AMDGPU::S_XOR_B64_term: {<br>
+    // This is only a terminator to get the correct spill code placement during<br>
+    // register allocation.<br>
+    MI.setDesc(get(AMDGPU::S_XOR_B64));
+    break;<br>
+  }<br>
+  case AMDGPU::S_ANDN2_B64_term: {<br>
+    // This is only a terminator to get the correct spill code placement during<br>
+    // register allocation.<br>
+    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
+    break;<br>
+  }<br>
   case AMDGPU::V_MOV_B64_PSEUDO: {<br>
     unsigned Dst = MI.getOperand(0).getReg();<br>
     unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);<br>
<br>
Modified: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstructions.td?rev=282667&r1=282666&r2=282667&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td Wed Sep 28 20:44:16 2016
@@ -112,6 +112,27 @@ def GET_GROUPSTATICSIZE : PseudoInstSI <
   [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
 } // End let usesCustomInserter = 1, SALU = 1<br>
<br>
+def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),<br>
+   (ins SSrc_b64:$src0)> {<br>
+  let SALU = 1;<br>
+  let isAsCheapAsAMove = 1;<br>
+  let isTerminator = 1;<br>
+}<br>
+<br>
+def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),<br>
+   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {<br>
+  let SALU = 1;<br>
+  let isAsCheapAsAMove = 1;<br>
+  let isTerminator = 1;<br>
+}<br>
+<br>
+def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),<br>
+   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {<br>
+  let SALU = 1;<br>
+  let isAsCheapAsAMove = 1;<br>
+  let isTerminator = 1;<br>
+}<br>
+<br>
 // SI pseudo instructions. These are used by the CFG structurizer pass<br>
 // and should be lowered to ISA instructions prior to codegen.<br>
<br>
@@ -132,9 +153,9 @@ def SI_IF: CFPseudoInstSI <<br>
   (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),<br>
   [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {<br>
   let Constraints = "";<br>
-  let Size = 8;<br>
-  let mayStore = 1;<br>
+  let Size = 12;<br>
   let mayLoad = 1;<br>
+  let mayStore = 1;<br>
   let hasSideEffects = 1;<br>
 }<br>
<br>
<br>
Modified: llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp?rev=282667&r1=282666&r2=282667&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SILowerControlFlow.cpp Wed Sep 28 20:44:16 2016
@@ -70,6 +70,7 @@ private:<br>
   const SIRegisterInfo *TRI;<br>
   const SIInstrInfo *TII;<br>
   LiveIntervals *LIS;<br>
+  MachineRegisterInfo *MRI;<br>
<br>
   void emitIf(MachineInstr &MI);<br>
   void emitElse(MachineInstr &MI);<br>
@@ -86,7 +87,8 @@ public:<br>
     MachineFunctionPass(ID),<br>
     TRI(nullptr),<br>
     TII(nullptr),<br>
-    LIS(nullptr) {}<br>
+    LIS(nullptr),<br>
+    MRI(nullptr) {}<br>
<br>
   bool runOnMachineFunction(<wbr>MachineFunction &MF) override;<br>
<br>
@@ -95,8 +97,12 @@ public:<br>
   }<br>
<br>
   void getAnalysisUsage(AnalysisUsage &AU) const override {<br>
-    AU.addPreserved<LiveIntervals>();
+    // Should preserve the same set that TwoAddressInstructions does.
     AU.addPreserved<SlotIndexes>();
+    AU.addPreserved<LiveIntervals>();
+    AU.addPreservedID(LiveVariablesID);
+    AU.addPreservedID(MachineLoopInfoID);
+    AU.addPreservedID(MachineDominatorsID);
     AU.setPreservesCFG();
     MachineFunctionPass::getAnalysisUsage(AU);
   }<br>
@@ -109,6 +115,13 @@ char SILowerControlFlow::ID = 0;<br>
 INITIALIZE_PASS(<wbr>SILowerControlFlow, DEBUG_TYPE,<br>
                "SI lower control flow", false, false)<br>
<br>
+static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {<br>
+  MachineOperand &ImpDefSCC = MI.getOperand(3);<br>
+  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());<br>
+<br>
+  ImpDefSCC.setIsDead(IsDead);<br>
+}<br>
+<br>
 char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;<br>
<br>
 void SILowerControlFlow::emitIf(<wbr>MachineInstr &MI) {<br>
@@ -123,14 +136,36 @@ void SILowerControlFlow::emitIf(<wbr>MachineI<br>
<br>
   unsigned SaveExecReg = SaveExec.getReg();<br>
<br>
-  MachineInstr *AndSaveExec =<br>
-    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_<wbr>SAVEEXEC_B64), SaveExecReg)<br>
-    .addOperand(Cond);<br>
+  MachineOperand &ImpDefSCC = MI.getOperand(4);<br>
+  assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());<br>
+<br>
+  // Add an implicit def of exec to discourage scheduling VALU after this, which
+  // would interfere with trying to form s_and_saveexec_b64 later.
+  MachineInstr *CopyExec =<br>
+    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), SaveExecReg)<br>
+    .addReg(AMDGPU::EXEC)<br>
+    .addReg(AMDGPU::EXEC, RegState::ImplicitDefine);<br>
+<br>
+  unsigned Tmp = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+<br>
+  MachineInstr *And =<br>
+    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_AND_B64), Tmp)<br>
+    .addReg(SaveExecReg)<br>
+    //.addReg(AMDGPU::EXEC)<br>
+    .addReg(Cond.getReg());<br>
+  setImpSCCDefDead(*And, true);<br>
<br>
   MachineInstr *Xor =<br>
     BuildMI(MBB, I, DL, TII->get(AMDGPU::S_XOR_B64), SaveExecReg)<br>
-    .addReg(AMDGPU::EXEC)<br>
+    .addReg(Tmp)<br>
     .addReg(SaveExecReg);<br>
+  setImpSCCDefDead(*Xor, ImpDefSCC.isDead());<br>
+<br>
+  // Use a copy that is a terminator to get correct spill code placement with
+  // fast regalloc.
+  MachineInstr *SetExec =
+    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64_term), AMDGPU::EXEC)
+    .addReg(Tmp, RegState::Kill);<br>
<br>
   // Insert a pseudo terminator to help keep the verifier happy. This will also<br>
   // be used later when inserting skips.<br>
@@ -143,11 +178,17 @@ void SILowerControlFlow::emitIf(<wbr>MachineI<br>
     return;<br>
   }<br>
<br>
+  LIS->InsertMachineInstrInMaps(*CopyExec);
+
+  // Replace MI with the and so we don't need to fix the live interval for the
+  // condition register.
+  LIS->ReplaceMachineInstrInMaps(MI, *And);

-  LIS->ReplaceMachineInstrInMaps(MI, *AndSaveExec);
   LIS->InsertMachineInstrInMaps(*Xor);
+  LIS->InsertMachineInstrInMaps(*SetExec);
   LIS->InsertMachineInstrInMaps(*NewBr);

+  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
   MI.eraseFromParent();<br>
<br>
   // FIXME: Is there a better way of adjusting the liveness? It shouldn't be<br>
@@ -155,6 +196,7 @@ void SILowerControlFlow::emitIf(<wbr>MachineI<br>
   // valno.<br>
   LIS->removeInterval(SaveExecReg);
   LIS->createAndComputeVirtRegInterval(SaveExecReg);
+  LIS->createAndComputeVirtRegInterval(Tmp);
 }<br>
<br>
 void SILowerControlFlow::emitElse(<wbr>MachineInstr &MI) {<br>
@@ -167,11 +209,18 @@ void SILowerControlFlow::emitElse(<wbr>Machin<br>
   bool ExecModified = MI.getOperand(3).getImm() != 0;<br>
   MachineBasicBlock::iterator Start = MBB.begin();<br>
<br>
+  // We are running before TwoAddressInstructions, and si_else's operands are
+  // tied. In order to correctly tie the registers, split this into a copy of
+  // the source, the same way TwoAddressInstructions would.
+  BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), DstReg)<br>
+    .addOperand(MI.getOperand(1)); // Saved EXEC<br>
+<br>
   // This must be inserted before phis and any spill code inserted before the<br>
   // else.<br>
   MachineInstr *OrSaveExec =<br>
     BuildMI(MBB, Start, DL, TII->get(AMDGPU::S_OR_<wbr>SAVEEXEC_B64), DstReg)<br>
-    .addOperand(MI.getOperand(1)); // Saved EXEC<br>
+    .addReg(DstReg);<br>
+<br>
   MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();<br>
<br>
   MachineBasicBlock::iterator ElsePt(MI);<br>
@@ -187,14 +236,12 @@ void SILowerControlFlow::emitElse(<wbr>Machin<br>
   }<br>
<br>
   MachineInstr *Xor =<br>
-    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)<br>
+    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_XOR_B64_<wbr>term), AMDGPU::EXEC)<br>
     .addReg(AMDGPU::EXEC)<br>
     .addReg(DstReg);<br>
<br>
-  MachineBasicBlock::iterator Term = MBB.getFirstTerminator();<br>
-  // Insert a pseudo terminator to help keep the verifier happy.<br>
   MachineInstr *Branch =<br>
-    BuildMI(MBB, Term, DL, TII->get(AMDGPU::SI_MASK_<wbr>BRANCH))<br>
+    BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::SI_MASK_<wbr>BRANCH))<br>
     .addMBB(DestBB);<br>
<br>
   if (!LIS) {<br>
@@ -246,7 +293,7 @@ void SILowerControlFlow::emitLoop(<wbr>Machin<br>
   const DebugLoc &DL = MI.getDebugLoc();<br>
<br>
   MachineInstr *AndN2 =<br>
-    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)<br>
+    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_<wbr>term), AMDGPU::EXEC)<br>
     .addReg(AMDGPU::EXEC)<br>
     .addOperand(MI.getOperand(0));<br>
<br>
@@ -288,6 +335,7 @@ bool SILowerControlFlow::<wbr>runOnMachineFun<br>
<br>
   // This doesn't actually need LiveIntervals, but we can preserve them.<br>
   LIS = getAnalysisIfAvailable<<wbr>LiveIntervals>();<br>
+  MRI = &MF.getRegInfo();<br>
<br>
   MachineFunction::iterator NextBB;<br>
   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();<br>
<br>
Added: llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp?rev=282667&view=auto
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp (added)
+++ llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMasking.cpp Wed Sep 28 20:44:16 2016
@@ -0,0 +1,304 @@<br>
+//===-- SIOptimizeExecMasking.cpp ------------------------------<wbr>-----------===//<br>
+//<br>
+//                     The LLVM Compiler Infrastructure<br>
+//<br>
+// This file is distributed under the University of Illinois Open Source<br>
+// License. See LICENSE.TXT for details.<br>
+//<br>
+//===------------------------<wbr>------------------------------<wbr>----------------===//<br>
+<br>
+#include "AMDGPU.h"<br>
+#include "AMDGPUSubtarget.h"<br>
+#include "SIInstrInfo.h"<br>
+#include "llvm/CodeGen/<wbr>LiveIntervalAnalysis.h"<br>
+#include "llvm/CodeGen/<wbr>MachineFunctionPass.h"<br>
+#include "llvm/CodeGen/<wbr>MachineInstrBuilder.h"<br>
+#include "llvm/CodeGen/<wbr>MachineRegisterInfo.h"<br>
+#include "llvm/Support/Debug.h"<br>
+<br>
+using namespace llvm;<br>
+<br>
+#define DEBUG_TYPE "si-optimize-exec-masking"<br>
+<br>
+namespace {<br>
+<br>
+class SIOptimizeExecMasking : public MachineFunctionPass {<br>
+public:<br>
+  static char ID;<br>
+<br>
+public:<br>
+  SIOptimizeExecMasking() : MachineFunctionPass(ID) {<br>
+    initializeSIOptimizeExecMaskingPass(*PassRegistry::getPassRegistry());
+  }<br>
+<br>
+  bool runOnMachineFunction(<wbr>MachineFunction &MF) override;<br>
+<br>
+  const char *getPassName() const override {<br>
+    return "SI optimize exec mask operations";<br>
+  }<br>
+<br>
+  void getAnalysisUsage(AnalysisUsage &AU) const override {<br>
+    AU.setPreservesCFG();<br>
+    MachineFunctionPass::<wbr>getAnalysisUsage(AU);<br>
+  }<br>
+};<br>
+<br>
+} // End anonymous namespace.<br>
+<br>
+INITIALIZE_PASS_BEGIN(SIOptimizeExecMasking, DEBUG_TYPE,
+                      "SI optimize exec mask operations", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(SIOptimizeExecMasking, DEBUG_TYPE,
+                    "SI optimize exec mask operations", false, false)
+<br>
+char SIOptimizeExecMasking::ID = 0;<br>
+<br>
+char &llvm::SIOptimizeExecMaskingID = SIOptimizeExecMasking::ID;<br>
+<br>
+/// If \p MI is a copy from exec, return the register copied to.<br>
+static unsigned isCopyFromExec(const MachineInstr &MI) {<br>
+  switch (MI.getOpcode()) {<br>
+  case AMDGPU::COPY:<br>
+  case AMDGPU::S_MOV_B64:<br>
+  case AMDGPU::S_MOV_B64_term: {<br>
+    const MachineOperand &Src = MI.getOperand(1);<br>
+    if (Src.isReg() && Src.getReg() == AMDGPU::EXEC)<br>
+      return MI.getOperand(0).getReg();<br>
+  }<br>
+  }<br>
+<br>
+  return AMDGPU::NoRegister;<br>
+}<br>
+<br>
+/// If \p MI is a copy to exec, return the register copied from.<br>
+static unsigned isCopyToExec(const MachineInstr &MI) {<br>
+  switch (MI.getOpcode()) {<br>
+  case AMDGPU::COPY:<br>
+  case AMDGPU::S_MOV_B64: {<br>
+    const MachineOperand &Dst = MI.getOperand(0);<br>
+    if (Dst.isReg() && Dst.getReg() == AMDGPU::EXEC)<br>
+      return MI.getOperand(1).getReg();<br>
+    break;<br>
+  }<br>
+  case AMDGPU::S_MOV_B64_term:<br>
+    llvm_unreachable("should have been replaced");<br>
+  }<br>
+<br>
+  return AMDGPU::NoRegister;<br>
+}<br>
+<br>
+static unsigned getSaveExecOp(unsigned Opc) {<br>
+  switch (Opc) {<br>
+  case AMDGPU::S_AND_B64:<br>
+    return AMDGPU::S_AND_SAVEEXEC_B64;<br>
+  case AMDGPU::S_OR_B64:<br>
+    return AMDGPU::S_OR_SAVEEXEC_B64;<br>
+  case AMDGPU::S_XOR_B64:<br>
+    return AMDGPU::S_XOR_SAVEEXEC_B64;<br>
+  case AMDGPU::S_ANDN2_B64:<br>
+    return AMDGPU::S_ANDN2_SAVEEXEC_B64;<br>
+  case AMDGPU::S_ORN2_B64:<br>
+    return AMDGPU::S_ORN2_SAVEEXEC_B64;<br>
+  case AMDGPU::S_NAND_B64:<br>
+    return AMDGPU::S_NAND_SAVEEXEC_B64;<br>
+  case AMDGPU::S_NOR_B64:<br>
+    return AMDGPU::S_NOR_SAVEEXEC_B64;<br>
+  case AMDGPU::S_XNOR_B64:<br>
+    return AMDGPU::S_XNOR_SAVEEXEC_B64;<br>
+  default:<br>
+    return AMDGPU::INSTRUCTION_LIST_END;<br>
+  }<br>
+}<br>
+<br>
+// These are only terminators to get correct spill code placement during<br>
+// register allocation, so turn them back into normal instructions. Only one of<br>
+// these is expected per block.<br>
+static bool removeTerminatorBit(const SIInstrInfo &TII, MachineInstr &MI) {<br>
+  switch (MI.getOpcode()) {<br>
+  case AMDGPU::S_MOV_B64_term: {<br>
+    MI.setDesc(TII.get(AMDGPU::COPY));
+    return true;<br>
+  }<br>
+  case AMDGPU::S_XOR_B64_term: {<br>
+    // This is only a terminator to get the correct spill code placement during<br>
+    // register allocation.<br>
+    MI.setDesc(TII.get(AMDGPU::S_XOR_B64));
+    return true;<br>
+  }<br>
+  case AMDGPU::S_ANDN2_B64_term: {<br>
+    // This is only a terminator to get the correct spill code placement during<br>
+    // register allocation.<br>
+    MI.setDesc(TII.get(AMDGPU::S_ANDN2_B64));
+    return true;<br>
+  }<br>
+  default:<br>
+    return false;<br>
+  }<br>
+}<br>
+<br>
+static MachineBasicBlock::reverse_iterator fixTerminators(
+  const SIInstrInfo &TII,
+  MachineBasicBlock &MBB) {
+  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), E = MBB.rend();
+  for (; I != E; ++I) {<br>
+    if (!I->isTerminator())<br>
+      return I;<br>
+<br>
+    if (removeTerminatorBit(TII, *I))<br>
+      return I;<br>
+  }<br>
+<br>
+  return E;<br>
+}<br>
+<br>
+static MachineBasicBlock::reverse_iterator findExecCopy(
+  const SIInstrInfo &TII,
+  MachineBasicBlock &MBB,
+  MachineBasicBlock::reverse_iterator I,
+  unsigned CopyToExec) {
+  const unsigned InstLimit = 25;<br>
+<br>
+  auto E = MBB.rend();<br>
+  for (unsigned N = 0; N <= InstLimit && I != E; ++I, ++N) {<br>
+    unsigned CopyFromExec = isCopyFromExec(*I);<br>
+    if (CopyFromExec != AMDGPU::NoRegister)<br>
+      return I;<br>
+  }<br>
+<br>
+  return E;<br>
+}<br>
+<br>
+// XXX - Seems LivePhysRegs doesn't work correctly since it will incorrectly
+// report the register as unavailable because a super-register with a lane mask
+// is unavailable.
+static bool isLiveOut(const MachineBasicBlock &MBB, unsigned Reg) {<br>
+  for (MachineBasicBlock *Succ : MBB.successors()) {<br>
+    if (Succ->isLiveIn(Reg))<br>
+      return true;<br>
+  }<br>
+<br>
+  return false;<br>
+}<br>
+<br>
+bool SIOptimizeExecMasking::runOnMachineFunction(MachineFunction &MF) {
+  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();<br>
+  const SIInstrInfo *TII = ST.getInstrInfo();<br>
+<br>
+  // Optimize sequences emitted for control flow lowering. They are originally<br>
+  // emitted as the separate operations because spill code may need to be<br>
+  // inserted for the saved copy of exec.<br>
+  //<br>
+  //     x = copy exec<br>
+  //     z = s_<op>_b64 x, y<br>
+  //     exec = copy z<br>
+  // =><br>
+  //     x = s_<op>_saveexec_b64 y<br>
+  //<br>
+<br>
+  for (MachineBasicBlock &MBB : MF) {<br>
+    MachineBasicBlock::reverse_iterator I = fixTerminators(*TII, MBB);
+    MachineBasicBlock::reverse_iterator E = MBB.rend();
+    if (I == E)<br>
+      continue;<br>
+<br>
+    unsigned CopyToExec = isCopyToExec(*I);<br>
+    if (CopyToExec == AMDGPU::NoRegister)<br>
+      continue;<br>
+<br>
+    // Scan backwards to find the def.<br>
+    auto CopyToExecInst = &*I;<br>
+    auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec);<br>
+    if (CopyFromExecInst == E)<br>
+      continue;<br>
+<br>
+    if (isLiveOut(MBB, CopyToExec)) {<br>
+      // The copied register is live out and has a second use in another block.<br>
+      DEBUG(dbgs() << "Exec copy source register is live out\n");<br>
+      continue;<br>
+    }<br>
+<br>
+    unsigned CopyFromExec = CopyFromExecInst->getOperand(<wbr>0).getReg();<br>
+    MachineInstr *SaveExecInst = nullptr;<br>
+    SmallVector<MachineInstr *, 4> OtherUseInsts;<br>
+<br>
+    for (MachineBasicBlock::iterator J<br>
+           = std::next(CopyFromExecInst-><wbr>getIterator()), JE = I->getIterator();<br>
+         J != JE; ++J) {<br>
+      if (SaveExecInst && J->readsRegister(AMDGPU::EXEC, TRI)) {<br>
+        DEBUG(dbgs() << "exec read prevents saveexec: " << *J << '\n');<br>
+        // Make sure this is inserted after any VALU ops that may have been<br>
+        // scheduled in between.<br>
+        SaveExecInst = nullptr;<br>
+        break;<br>
+      }<br>
+<br>
+      if (J->modifiesRegister(<wbr>CopyToExec, TRI)) {<br>
+        if (SaveExecInst) {<br>
+          DEBUG(dbgs() << "Multiple instructions modify "<br>
+                << PrintReg(CopyToExec, TRI) << '\n');<br>
+          SaveExecInst = nullptr;<br>
+          break;<br>
+        }<br>
+<br>
+        unsigned SaveExecOp = getSaveExecOp(J->getOpcode());<br>
+        if (SaveExecOp == AMDGPU::INSTRUCTION_LIST_END)<br>
+          break;<br>
+<br>
+        if (J->readsRegister(<wbr>CopyFromExec, TRI)) {<br>
+          SaveExecInst = &*J;<br>
+          DEBUG(dbgs() << "Found save exec op: " << *SaveExecInst << '\n');<br>
+        } else {<br>
+          DEBUG(dbgs() << "Instruction does not read exec copy: " << *J << '\n');<br>
+          break;<br>
+        }<br>
+      }<br>
+<br>
+      if (SaveExecInst && J->readsRegister(CopyToExec, TRI))<br>
+        OtherUseInsts.push_back(&*J);<br>
+    }<br>
+<br>
+    if (!SaveExecInst)<br>
+      continue;<br>
+<br>
+    DEBUG(dbgs() << "Insert save exec op: " << *SaveExecInst << '\n');<br>
+<br>
+    MachineOperand &Src0 = SaveExecInst->getOperand(1);<br>
+    MachineOperand &Src1 = SaveExecInst->getOperand(2);<br>
+<br>
+    MachineOperand *CopyOp = nullptr;<br>
+    MachineOperand *OtherOp = nullptr;<br>
+<br>
+    if (Src0.isReg() && Src0.getReg() == CopyFromExec) {<br>
+      CopyOp = &Src0;<br>
+      OtherOp = &Src1;<br>
+    } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {<br>
+      if (!SaveExecInst->isCommutable()<wbr>)<br>
+        break;<br>
+<br>
+      CopyOp = &Src1;<br>
+      OtherOp = &Src0;<br>
+    } else<br>
+      llvm_unreachable("unexpected")<wbr>;<br>
+<br>
+    CopyFromExecInst-><wbr>eraseFromParent();<br>
+<br>
+    auto InsPt = SaveExecInst->getIterator();<br>
+    const DebugLoc &DL = SaveExecInst->getDebugLoc();<br>
+<br>
+    BuildMI(MBB, InsPt, DL, TII->get(getSaveExecOp(<wbr>SaveExecInst->getOpcode())),<br>
+            CopyFromExec)<br>
+      .addReg(OtherOp->getReg());<br>
+    SaveExecInst->eraseFromParent(<wbr>);<br>
+<br>
+    CopyToExecInst-><wbr>eraseFromParent();<br>
+<br>
+    for (MachineInstr *OtherInst : OtherUseInsts) {<br>
+      OtherInst->substituteRegister(<wbr>CopyToExec, AMDGPU::EXEC,<br>
+                                    AMDGPU::NoSubRegister, *TRI);<br>
+    }<br>
+  }<br>
+<br>
+  return true;<br>
+<br>
+}<br>
<br>
Added: llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll?rev=282667&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll Wed Sep 28 20:44:16 2016
@@ -0,0 +1,296 @@<br>
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs < %s | FileCheck -check-prefix=VMEM -check-prefix=GCN %s<br>
+; RUN: llc -O0 -mtriple=amdgcn--amdhsa -march=amdgcn -amdgpu-spill-sgpr-to-vgpr=1 -verify-machineinstrs < %s | FileCheck -check-prefix=VGPR -check-prefix=GCN %s<br>
+<br>
+; Verify registers used for tracking exec mask changes when all
+; registers are spilled at the end of the block. The SGPR spill
+; placement relative to the exec modifications is important.
+<br>
+; FIXME: This checks with SGPR to VGPR spilling disabled, but this may<br>
+; not work correctly in cases where no workitems take a branch.<br>
+<br>
+<br>
+; GCN-LABEL: {{^}}divergent_if_endif:<br>
+<br>
+; GCN: {{^}}; BB#0:<br>
+; GCN: s_mov_b32 m0, -1<br>
+; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]<br>
+<br>
+; GCN: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,<br>
+; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:<wbr>[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec<br>
+; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[<wbr>[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}, [[CMP0]]<br>
+; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[<wbr>ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}<br>
+<br>
+; Spill saved exec<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]<br>
+<br>
+<br>
+; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]<br>
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[8:11], s12 ; 8-byte Folded Spill<br>
+; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]<br>
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[8:11], s12 offset:4 ; 8-byte Folded Spill<br>
+<br>
+; Spill load<br>
+; GCN: buffer_store_dword [[LOAD0]], off, s[8:11], s12 offset:[[LOAD0_OFFSET:[0-9]+]] ; 4-byte Folded Spill<br>
+; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[<wbr>ANDEXEC_HI]]{{\]}}<br>
+<br>
+; GCN: s_waitcnt vmcnt(0) expcnt(0)<br>
+; GCN: mask branch [[ENDIF:BB[0-9]+_[0-9]+]]<br>
+<br>
+; GCN: {{^}}BB{{[0-9]+}}_1: ; %if<br>
+; GCN: s_mov_b32 m0, -1<br>
+; GCN: ds_read_b32 [[LOAD1:v[0-9]+]]<br>
+; GCN: buffer_load_dword [[RELOAD_LOAD0:v[0-9]+]], off, s[8:11], s12 offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload<br>
+; GCN: s_waitcnt vmcnt(0)<br>
+<br>
+; Spill val register<br>
+; GCN: v_add_i32_e32 [[VAL:v[0-9]+]], vcc, [[LOAD1]], [[RELOAD_LOAD0]]<br>
+; GCN: buffer_store_dword [[VAL]], off, s[8:11], s12 offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill<br>
+; GCN: s_waitcnt vmcnt(0)<br>
+<br>
+; VMEM: [[ENDIF]]:<br>
+; Reload and restore exec mask<br>
+; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]<br>
+; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]<br>
+<br>
+<br>
+<br>
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], off, s[8:11], s12 ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], v[[V_RELOAD_SAVEEXEC_LO]]<br>
+<br>
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], off, s[8:11], s12 offset:4 ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], v[[V_RELOAD_SAVEEXEC_HI]]<br>
+<br>
+; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]<wbr>]:[[S_RELOAD_SAVEEXEC_HI]]{{\]<wbr>}}<br>
+<br>
+; Restore val<br>
+; GCN: buffer_load_dword [[RELOAD_VAL:v[0-9]+]], off, s[8:11], s12 offset:[[VAL_OFFSET]] ; 4-byte Folded Reload<br>
+<br>
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RELOAD_VAL]]<br>
+define void @divergent_if_endif(i32 addrspace(1)* %out) #0 {<br>
+entry:<br>
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()<br>
+  %load0 = load volatile i32, i32 addrspace(3)* undef<br>
+  %cmp0 = icmp eq i32 %tid, 0<br>
+  br i1 %cmp0, label %if, label %endif<br>
+<br>
+if:<br>
+  %load1 = load volatile i32, i32 addrspace(3)* undef<br>
+  %val = add i32 %load0, %load1<br>
+  br label %endif<br>
+<br>
+endif:<br>
+  %tmp4 = phi i32 [ %val, %if ], [ 0, %entry ]<br>
+  store i32 %tmp4, i32 addrspace(1)* %out<br>
+  ret void<br>
+}<br>
+<br>
+; GCN-LABEL: {{^}}divergent_loop:<br>
+; GCN: {{^}}; BB#0:<br>
+<br>
+; GCN: s_mov_b32 m0, -1<br>
+; GCN: ds_read_b32 [[LOAD0:v[0-9]+]]<br>
+<br>
+; GCN: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,<br>
+<br>
+; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:<wbr>[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec<br>
+; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[<wbr>[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:<wbr>[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]<br>
+; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[<wbr>ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}<br>
+<br>
+; Spill saved exec<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]<br>
+<br>
+<br>
+; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]<br>
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[8:11], s12 ; 8-byte Folded Spill<br>
+; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]<br>
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[8:11], s12 offset:4 ; 8-byte Folded Spill<br>
+<br>
+; Spill load<br>
+; GCN: buffer_store_dword [[LOAD0]], off, s[8:11], s12 offset:[[VAL_OFFSET:[0-9]+]] ; 4-byte Folded Spill<br>
+<br>
+; GCN: s_mov_b64 exec, s{{\[}}[[ANDEXEC_LO]]:[[<wbr>ANDEXEC_HI]]{{\]}}<br>
+<br>
+; GCN: s_waitcnt vmcnt(0) expcnt(0)<br>
+; GCN-NEXT: ; mask branch [[END:BB[0-9]+_[0-9]+]]<br>
+; GCN-NEXT: s_cbranch_execz [[END]]<br>
+<br>
+<br>
+; GCN: [[LOOP:BB[0-9]+_[0-9]+]]:<br>
+; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[8:11], s12 offset:[[VAL_OFFSET]] ; 4-byte Folded Reload<br>
+; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]<br>
+; GCN: v_cmp_ne_i32_e32 vcc,<br>
+; GCN: s_and_b64 vcc, exec, vcc<br>
+; GCN: buffer_store_dword [[VAL_LOOP]], off, s[8:11], s12 offset:[[VAL_SUB_OFFSET:[0-9]+<wbr>]] ; 4-byte Folded Spill<br>
+; GCN: s_waitcnt vmcnt(0) expcnt(0)<br>
+; GCN-NEXT: s_cbranch_vccnz [[LOOP]]<br>
+<br>
+<br>
+; GCN: [[END]]:<br>
+; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]<br>
+; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]<br>
+<br>
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], off, s[8:11], s12 ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], v[[V_RELOAD_SAVEEXEC_LO]]<br>
+<br>
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], off, s[8:11], s12 offset:4 ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], v[[V_RELOAD_SAVEEXEC_HI]]<br>
+<br>
+; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]<wbr>]:[[S_RELOAD_SAVEEXEC_HI]]{{\]<wbr>}}<br>
+; GCN: buffer_load_dword v[[VAL_END:[0-9]+]], off, s[8:11], s12 offset:[[VAL_SUB_OFFSET]] ; 4-byte Folded Reload<br>
+<br>
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[VAL_END]]<br>
+define void @divergent_loop(i32 addrspace(1)* %out) #0 {<br>
+entry:<br>
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()<br>
+  %load0 = load volatile i32, i32 addrspace(3)* undef<br>
+  %cmp0 = icmp eq i32 %tid, 0<br>
+  br i1 %cmp0, label %loop, label %end<br>
+<br>
+loop:<br>
+  %i = phi i32 [ %i.inc, %loop ], [ 0, %entry ]<br>
+  %val = phi i32 [ %val.sub, %loop ], [ %load0, %entry ]<br>
+  %load1 = load volatile i32, i32 addrspace(3)* undef<br>
+  %i.inc = add i32 %i, 1<br>
+  %val.sub = sub i32 %val, %load1<br>
+  %cmp1 = icmp ne i32 %i, 256<br>
+  br i1 %cmp1, label %loop, label %end<br>
+<br>
+end:<br>
+  %tmp4 = phi i32 [ %val.sub, %loop ], [ 0, %entry ]<br>
+  store i32 %tmp4, i32 addrspace(1)* %out<br>
+  ret void<br>
+}<br>
+<br>
+; GCN-LABEL: {{^}}divergent_if_else_endif:<br>
+; GCN: {{^}}; BB#0:<br>
+<br>
+; GCN: s_mov_b32 m0, -1<br>
+; VMEM: ds_read_b32 [[LOAD0:v[0-9]+]]<br>
+<br>
+; GCN: v_cmp_ne_i32_e64 [[CMP0:s\[[0-9]+:[0-9]\]]], v0,<br>
+<br>
+; GCN: s_mov_b64 s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:<wbr>[[SAVEEXEC_HI:[0-9]+]]{{\]}}, exec<br>
+; GCN: s_and_b64 s{{\[}}[[ANDEXEC_LO:[0-9]+]]:[<wbr>[ANDEXEC_HI:[0-9]+]]{{\]}}, s{{\[}}[[SAVEEXEC_LO:[0-9]+]]:<wbr>[[SAVEEXEC_HI:[0-9]+]]{{\]}}, [[CMP0]]<br>
+; GCN: s_xor_b64 s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}, s{{\[}}[[ANDEXEC_LO]]:[[<wbr>ANDEXEC_HI]]{{\]}}, s{{\[}}[[SAVEEXEC_LO]]:[[<wbr>SAVEEXEC_HI]]{{\]}}<br>
+<br>
+; Spill load<br>
+; GCN: buffer_store_dword [[LOAD0]], off, s[8:11], s12 ; 4-byte Folded Spill<br>
+<br>
+; Spill saved exec<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR:v[0-9]+]], s[[SAVEEXEC_LO]], [[SAVEEXEC_LO_LANE:[0-9]+]]<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[SAVEEXEC_HI]], [[SAVEEXEC_HI_LANE:[0-9]+]]<br>
+<br>
+; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_LO:[0-9]+]], s[[SAVEEXEC_LO]]<br>
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_LO]], off, s[8:11], s12 offset:[[SAVEEXEC_LO_OFFSET:[<wbr>0-9]+]] ; 8-byte Folded Spill<br>
+; VMEM: v_mov_b32_e32 v[[V_SAVEEXEC_HI:[0-9]+]], s[[SAVEEXEC_HI]]<br>
+; VMEM: buffer_store_dword v[[V_SAVEEXEC_HI]], off, s[8:11], s12 offset:[[SAVEEXEC_HI_OFFSET:[<wbr>0-9]+]] ; 8-byte Folded Spill<br>
+<br>
+; GCN: s_mov_b64 exec, [[CMP0]]<br>
+; GCN: s_waitcnt vmcnt(0) expcnt(0)<br>
+<br>
+; FIXME: It makes no sense to put this skip here<br>
+; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]<br>
+; GCN: s_cbranch_execz [[FLOW]]<br>
+; GCN-NEXT: s_branch [[ELSE:BB[0-9]+_[0-9]+]]<br>
+<br>
+; GCN: [[FLOW]]: ; %Flow<br>
+; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[<wbr>0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]<br>
+; VGPR: v_readlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[<wbr>0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_HI_LANE]]<br>
+<br>
+<br>
+; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_LO:[<wbr>0-9]+]], off, s[8:11], s12 offset:[[SAVEEXEC_LO_OFFSET]]<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_LO:[<wbr>0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_LO]]<br>
+<br>
+; VMEM: buffer_load_dword v[[FLOW_V_RELOAD_SAVEEXEC_HI:[<wbr>0-9]+]], off, s[8:11], s12 offset:[[SAVEEXEC_HI_OFFSET]] ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[FLOW_S_RELOAD_SAVEEXEC_HI:[<wbr>0-9]+]], v[[FLOW_V_RELOAD_SAVEEXEC_HI]]<br>
+<br>
+; GCN: s_or_saveexec_b64 s{{\[}}[[FLOW_S_RELOAD_<wbr>SAVEEXEC_LO]]:[[FLOW_S_RELOAD_<wbr>SAVEEXEC_HI]]{{\]}}, s{{\[}}[[FLOW_S_RELOAD_<wbr>SAVEEXEC_LO]]:[[FLOW_S_RELOAD_<wbr>SAVEEXEC_HI]]{{\]}}<br>
+<br>
+; Regular spill value restored after exec modification<br>
+; GCN: buffer_load_dword [[FLOW_VAL:v[0-9]+]], off, s[8:11], s12 offset:[[FLOW_VAL_OFFSET:[0-9]<wbr>+]] ; 4-byte Folded Reload<br>
+<br>
+<br>
+; Spill saved exec<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_LO]]<wbr>, [[FLOW_SAVEEXEC_LO_LANE:[0-9]+<wbr>]]<br>
+; VGPR: v_writelane_b32 [[SPILL_VGPR]], s[[FLOW_S_RELOAD_SAVEEXEC_HI]]<wbr>, [[FLOW_SAVEEXEC_HI_LANE:[0-9]+<wbr>]]<br>
+<br>
+<br>
+; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_LO:[0-9]+]]<wbr>, s[[FLOW_S_RELOAD_SAVEEXEC_LO]]<br>
+; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_LO]], off, s[8:11], s12 offset:[[FLOW_SAVEEXEC_LO_<wbr>OFFSET:[0-9]+]] ; 8-byte Folded Spill<br>
+; VMEM: v_mov_b32_e32 v[[FLOW_V_SAVEEXEC_HI:[0-9]+]]<wbr>, s[[FLOW_S_RELOAD_SAVEEXEC_HI]]<br>
+; VMEM: buffer_store_dword v[[FLOW_V_SAVEEXEC_HI]], off, s[8:11], s12 offset:[[FLOW_SAVEEXEC_HI_<wbr>OFFSET:[0-9]+]] ; 8-byte Folded Spill<br>
+<br>
+; GCN: buffer_store_dword [[FLOW_VAL]], off, s[8:11], s12 offset:[[RESULT_OFFSET:[0-9]+]<wbr>] ; 4-byte Folded Spill<br>
+; GCN: s_xor_b64 exec, exec, s{{\[}}[[FLOW_S_RELOAD_<wbr>SAVEEXEC_LO]]:[[FLOW_S_RELOAD_<wbr>SAVEEXEC_HI]]{{\]}}<br>
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)<br>
+; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9]+_[0-9]+]]<br>
+; GCN-NEXT: s_cbranch_execz [[ENDIF]]<br>
+<br>
+<br>
+; GCN: BB{{[0-9]+}}_2: ; %if<br>
+; GCN: ds_read_b32<br>
+; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[8:11], s12 ; 4-byte Folded Reload<br>
+; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]<br>
+; GCN: buffer_store_dword [[ADD]], off, s[8:11], s12 offset:[[RESULT_OFFSET]] ; 4-byte Folded Spill<br>
+; GCN: s_waitcnt vmcnt(0) expcnt(0)<br>
+; GCN-NEXT: s_branch [[ENDIF:BB[0-9]+_[0-9]+]]<br>
+<br>
+; GCN: [[ELSE]]: ; %else<br>
+; GCN: buffer_load_dword v[[LOAD0_RELOAD:[0-9]+]], off, s[8:11], s12 ; 4-byte Folded Reload<br>
+; GCN: v_subrev_i32_e32 [[SUB:v[0-9]+]], vcc, v{{[0-9]+}}, v[[LOAD0_RELOAD]]<br>
+; GCN: buffer_store_dword [[ADD]], off, s[8:11], s12 offset:[[FLOW_RESULT_OFFSET:[<wbr>0-9]+]] ; 4-byte Folded Spill<br>
+; GCN: s_waitcnt vmcnt(0) expcnt(0)<br>
+; GCN-NEXT: s_branch [[FLOW]]<br>
+<br>
+; GCN: [[ENDIF]]:<br>
+; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_LO_LANE]]<br>
+; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], [[SPILL_VGPR]], [[FLOW_SAVEEXEC_HI_LANE]]<br>
+<br>
+<br>
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], off, s[8:11], s12 offset:[[FLOW_SAVEEXEC_LO_<wbr>OFFSET]] ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+<wbr>]], v[[V_RELOAD_SAVEEXEC_LO]]<br>
+<br>
+; VMEM: buffer_load_dword v[[V_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], off, s[8:11], s12 offset:[[FLOW_SAVEEXEC_HI_<wbr>OFFSET]] ; 8-byte Folded Reload<br>
+; VMEM: s_waitcnt vmcnt(0)<br>
+; VMEM: v_readfirstlane_b32 s[[S_RELOAD_SAVEEXEC_HI:[0-9]+<wbr>]], v[[V_RELOAD_SAVEEXEC_HI]]<br>
+<br>
+; GCN: s_or_b64 exec, exec, s{{\[}}[[S_RELOAD_SAVEEXEC_LO]<wbr>]:[[S_RELOAD_SAVEEXEC_HI]]{{\]<wbr>}}<br>
+<br>
+; GCN: buffer_load_dword v[[RESULT:[0-9]+]], off, s[8:11], s12 offset:[[RESULT_OFFSET]] ; 4-byte Folded Reload<br>
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v[[RESULT]]<br>
+define void @divergent_if_else_endif(i32 addrspace(1)* %out) #0 {<br>
+entry:<br>
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()<br>
+  %load0 = load volatile i32, i32 addrspace(3)* undef<br>
+  %cmp0 = icmp eq i32 %tid, 0<br>
+  br i1 %cmp0, label %if, label %else<br>
+<br>
+if:<br>
+  %load1 = load volatile i32, i32 addrspace(3)* undef<br>
+  %val0 = add i32 %load0, %load1<br>
+  br label %endif<br>
+<br>
+else:<br>
+  %load2 = load volatile i32, i32 addrspace(3)* undef<br>
+  %val1 = sub i32 %load0, %load2<br>
+  br label %endif<br>
+<br>
+endif:<br>
+  %result = phi i32 [ %val0, %if ], [ %val1, %else ]<br>
+  store i32 %result, i32 addrspace(1)* %out<br>
+  ret void<br>
+}<br>
+<br>
+declare i32 @llvm.amdgcn.workitem.id.x() #1<br>
+<br>
+attributes #0 = { nounwind }<br>
+attributes #1 = { nounwind readnone }<br>
<br>
Added: llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir?rev=282667&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir (added)
+++ llvm/trunk/test/CodeGen/MIR/AMDGPU/optimize-if-exec-masking.mir Wed Sep 28 20:44:16 2016
@@ -0,0 +1,755 @@<br>
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-optimize-exec-masking -o -  %s | FileCheck %s<br>
+<br>
+--- |<br>
+  target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+<br>
+  define void @optimize_if_and_saveexec_xor(i32 %z, i32 %v) #0 {
+  main_body:<br>
+    %id = call i32 @llvm.amdgcn.workitem.id.x()<br>
+    %cc = icmp eq i32 %id, 0<br>
+    %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %cc)<br>
+    %1 = extractvalue { i1, i64 } %0, 0<br>
+    %2 = extractvalue { i1, i64 } %0, 1<br>
+    br i1 %1, label %if, label %end<br>
+<br>
+  if:                                               ; preds = %main_body<br>
+    %v.if = load volatile i32, i32 addrspace(1)* undef<br>
+    br label %end<br>
+<br>
+  end:                                              ; preds = %if, %main_body<br>
+    %r = phi i32 [ 4, %main_body ], [ %v.if, %if ]<br>
+    call void @llvm.amdgcn.end.cf(i64 %2)
+    store i32 %r, i32 addrspace(1)* undef<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_and_saveexec(i32 %z, i32 %v)  #0 {<br>
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_or_saveexec(i32 %z, i32 %v)  #0 {<br>
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+<br>
+  define void @optimize_if_and_saveexec_xor_valu_middle(i32 %z, i32 %v) #0 {
+  main_body:<br>
+    %id = call i32 @llvm.amdgcn.workitem.id.x()<br>
+    %cc = icmp eq i32 %id, 0<br>
+    %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %cc)<br>
+    %1 = extractvalue { i1, i64 } %0, 0<br>
+    %2 = extractvalue { i1, i64 } %0, 1<br>
+    store i32 %id, i32 addrspace(1)* undef<br>
+    br i1 %1, label %if, label %end<br>
+<br>
+  if:                                               ; preds = %main_body<br>
+    %v.if = load volatile i32, i32 addrspace(1)* undef<br>
+    br label %end<br>
+<br>
+  end:                                              ; preds = %if, %main_body<br>
+    %r = phi i32 [ 4, %main_body ], [ %v.if, %if ]<br>
+    call void @llvm.amdgcn.end.cf(i64 %2)
+    store i32 %r, i32 addrspace(1)* undef<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_and_saveexec_xor_wrong_reg(i32 %z, i32 %v)  #0 {
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_and_saveexec_xor_modify_copy_to_exec(i32 %z, i32 %v)  #0 {
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_and_saveexec_xor_live_out_setexec(i32 %z, i32 %v)  #0 {
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_unknown_saveexec(i32 %z, i32 %v)  #0 {
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_andn2_saveexec(i32 %z, i32 %v)  #0 {
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  define void @optimize_if_andn2_saveexec_no_commute(i32 %z, i32 %v)  #0 {
+  main_body:<br>
+      br i1 undef, label %if, label %end<br>
+<br>
+  if:<br>
+    br label %end<br>
+<br>
+  end:<br>
+    ret void<br>
+  }<br>
+<br>
+  ; Function Attrs: nounwind readnone<br>
+  declare i32 @llvm.amdgcn.workitem.id.x() #1<br>
+<br>
+  declare { i1, i64 } @llvm.amdgcn.if(i1)<br>
+<br>
+  declare void @llvm.amdgcn.end.cf(i64)
+<br>
+<br>
+  attributes #0 = { nounwind }<br>
+  attributes #1 = { nounwind readnone }<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
+# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec<br>
+# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+# CHECK-NEXT: SI_MASK_BRANCH<br>
+<br>
+name:            optimize_if_and_saveexec_xor<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_and_saveexec{{$}}<br>
+# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec<br>
+# CHECK-NEXT: SI_MASK_BRANCH<br>
+<br>
+name:            optimize_if_and_saveexec<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_or_saveexec{{$}}<br>
+# CHECK: %sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec<br>
+# CHECK-NEXT: SI_MASK_BRANCH<br>
+<br>
+name:            optimize_if_or_saveexec<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle<br>
+# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3<br>
+# CHECK-NEXT: SI_MASK_BRANCH<br>
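+# The BUFFER_STORE between the mask AND and the exec update should block the saveexec fold; only the terminator move is rewritten to a plain COPY to exec.<br>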
+name:            optimize_if_and_saveexec_xor_valu_middle<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}}<br>
+# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1<br>
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
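+# The XOR reads an unrelated (undef) register instead of the AND result, so no saveexec should be formed; the terminator move becomes a plain COPY.<br>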
+name:            optimize_if_and_saveexec_xor_wrong_reg<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %exec = S_MOV_B64_term %sgpr0_sgpr1<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_and_saveexec_xor_modify_copy_to_exec{{$}}<br>
+# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc<br>
+# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3<br>
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
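+# The AND result is modified by the S_OR_B64 before it reaches exec, so the fold should be rejected and the exec update stays a plain COPY.<br>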
+<br>
+name:            optimize_if_and_saveexec_xor_modify_copy_to_exec<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc<br>
+    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr0 = S_MOV_B32 0<br>
+    %sgpr1 = S_MOV_B32 1<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}}<br>
+# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+# CHECK-NEXT: %exec = COPY %sgpr2_sgpr3<br>
+# CHECK-NEXT: SI_MASK_BRANCH<br>
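+# %sgpr2_sgpr3 is still read after the exec update (S_SLEEP in bb.1.if), so it cannot be folded into a saveexec; the copy to exec is kept without a kill.<br>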
+name:            optimize_if_and_saveexec_xor_live_out_setexec<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %exec = S_MOV_B64_term %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1, %sgpr2_sgpr3<br>
+    S_SLEEP 0, implicit %sgpr2_sgpr3<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}}<br>
+# CHECK: %sgpr0_sgpr1 = COPY %exec<br>
+# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc<br>
+# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3<br>
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
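+# S_LSHR_B64 has no *_SAVEEXEC_B64 counterpart, so the sequence is left as-is apart from turning the S_MOV_B64_term back into a plain COPY to exec.<br>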
+<br>
+name:            optimize_if_unknown_saveexec<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}}<br>
+# CHECK: %sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec<br>
+# CHECK-NEXT: SI_MASK_BRANCH<br>
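+# S_ANDN2_B64 of the saved exec and the condition should fold into S_ANDN2_SAVEEXEC_B64.<br>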
+<br>
+name:            optimize_if_andn2_saveexec<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
+---<br>
+# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}<br>
+# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc<br>
+# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3<br>
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
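+# Here the saved exec mask is the second operand of the non-commutable S_ANDN2_B64, so the saveexec form cannot be used; only the exec write is rewritten to a COPY.<br>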
+name:            optimize_if_andn2_saveexec_no_commute<br>
+alignment:       0<br>
+exposesReturnsTwice: false<br>
+legalized:       false<br>
+regBankSelected: false<br>
+selected:        false<br>
+tracksRegLiveness: true<br>
+liveins:<br>
+  - { reg: '%vgpr0' }<br>
+frameInfo:<br>
+  isFrameAddressTaken: false<br>
+  isReturnAddressTaken: false<br>
+  hasStackMap:     false<br>
+  hasPatchPoint:   false<br>
+  stackSize:       0<br>
+  offsetAdjustment: 0<br>
+  maxAlignment:    0<br>
+  adjustsStack:    false<br>
+  hasCalls:        false<br>
+  maxCallFrameSize: 0<br>
+  hasOpaqueSPAdjustment: false<br>
+  hasVAStart:      false<br>
+  hasMustTailInVarArgFunc: false<br>
+body:             |<br>
+  bb.0.main_body:<br>
+    successors: %bb.1.if, %bb.2.end<br>
+    liveins: %vgpr0<br>
+<br>
+    %sgpr0_sgpr1 = COPY %exec<br>
+    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec<br>
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec<br>
+    %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc<br>
+    %exec = S_MOV_B64_term killed %sgpr2_sgpr3<br>
+    SI_MASK_BRANCH %bb.2.end, implicit %exec<br>
+    S_BRANCH %bb.1.if<br>
+<br>
+  bb.1.if:<br>
+    successors: %bb.2.end<br>
+    liveins: %sgpr0_sgpr1<br>
+<br>
+    %sgpr7 = S_MOV_B32 61440<br>
+    %sgpr6 = S_MOV_B32 -1<br>
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)<br>
+<br>
+  bb.2.end:<br>
+    liveins: %vgpr0, %sgpr0_sgpr1<br>
+<br>
+    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc<br>
+    %sgpr3 = S_MOV_B32 61440<br>
+    %sgpr2 = S_MOV_B32 -1<br>
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)<br>
+    S_ENDPGM<br>
+<br>
+...<br>
<br>
<br>
</blockquote></div><br></div>