[llvm] 3366d81 - [AMDGPU] Save WWM registers in functions

Sebastian Neubauer via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 23 09:16:32 PDT 2021


Author: Sebastian Neubauer
Date: 2021-04-23T18:09:24+02:00
New Revision: 3366d8115366866a3a14de925b37dea637025919

URL: https://github.com/llvm/llvm-project/commit/3366d8115366866a3a14de925b37dea637025919
DIFF: https://github.com/llvm/llvm-project/commit/3366d8115366866a3a14de925b37dea637025919.diff

LOG: [AMDGPU] Save WWM registers in functions

The values of registers in inactive lanes need to be saved during
function calls.

Save all registers used for whole wave mode, similar to how it is done
for VGPRs that are used for SGPR spilling.

Differential Revision: https://reviews.llvm.org/D99429

Reapply with fixed tests on Windows.

Added: 
    llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
    llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
    llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
    llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index fc24e3fea2b6..c31bc1015584 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -735,9 +735,10 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
   Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
   Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
 
+  // VGPRs used for SGPR->VGPR spills
   for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
        FuncInfo->getSGPRSpillVGPRs()) {
-    if (!Reg.FI.hasValue())
+    if (!Reg.FI)
       continue;
 
     if (!ScratchExecCopy)
@@ -747,6 +748,20 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
     buildPrologSpill(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, Reg.VGPR, *Reg.FI);
   }
 
+  // VGPRs used for Whole Wave Mode
+  for (const auto &Reg : FuncInfo->WWMReservedRegs) {
+    auto VGPR = Reg.first;
+    auto FI = Reg.second;
+    if (!FI)
+      continue;
+
+    if (!ScratchExecCopy)
+      ScratchExecCopy =
+          buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, /*IsProlog*/ true);
+
+    buildPrologSpill(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, VGPR, *FI);
+  }
+
   if (ScratchExecCopy) {
     // FIXME: Split block and make terminator.
     unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
@@ -1026,7 +1041,7 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
   Register ScratchExecCopy;
   for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
        FuncInfo->getSGPRSpillVGPRs()) {
-    if (!Reg.FI.hasValue())
+    if (!Reg.FI)
       continue;
 
     if (!ScratchExecCopy)
@@ -1037,6 +1052,19 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                        *Reg.FI);
   }
 
+  for (const auto &Reg : FuncInfo->WWMReservedRegs) {
+    auto VGPR = Reg.first;
+    auto FI = Reg.second;
+    if (!FI)
+      continue;
+
+    if (!ScratchExecCopy)
+      ScratchExecCopy =
+          buildScratchExecCopy(LiveRegs, MF, MBB, MBBI, /*IsProlog*/ false);
+
+    buildEpilogRestore(ST, TRI, *FuncInfo, LiveRegs, MF, MBBI, VGPR, *FI);
+  }
+
   if (ScratchExecCopy) {
     // FIXME: Split block and make terminator.
     unsigned ExecMov = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;

diff  --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index cede63bcc979..99cfefad6054 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -461,9 +461,9 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
     bool FullyAllocated = false;
   };
 
-  SparseBitVector<> WWMReservedRegs;
-
-  void ReserveWWMRegister(Register Reg) { WWMReservedRegs.set(Reg); }
+  // Map WWM VGPR to a stack slot that is used to save/restore it in the
+  // prolog/epilog.
+  SmallDenseMap<Register, Optional<int>> WWMReservedRegs;
 
 private:
   // Track VGPR + wave index for each subregister of the SGPR spilled to
@@ -503,6 +503,10 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
 
   bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI);
 
+  void reserveWWMRegister(Register Reg, Optional<int> FI) {
+    WWMReservedRegs.insert(std::make_pair(Reg, FI));
+  }
+
   ArrayRef<SpilledReg> getSGPRToVGPRSpills(int FrameIndex) const {
     auto I = SGPRToVGPRSpills.find(FrameIndex);
     return (I == SGPRToVGPRSpills.end()) ?

diff  --git a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
index 07ff8ef222de..c2e2875ed6bf 100644
--- a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
@@ -142,13 +142,26 @@ void SIPreAllocateWWMRegs::rewriteRegs(MachineFunction &MF) {
   }
 
   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
+  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
 
   for (unsigned Reg : RegsToRewrite) {
     LIS->removeInterval(Reg);
 
     const Register PhysReg = VRM->getPhys(Reg);
     assert(PhysReg != 0);
-    MFI->ReserveWWMRegister(PhysReg);
+
+    // Check if PhysReg is already reserved
+    if (!MFI->WWMReservedRegs.count(PhysReg)) {
+      Optional<int> FI;
+      if (!MFI->isEntryFunction()) {
+        // Create a stack object for a possible spill in the function prologue.
+        // Note: Non-CSR VGPR also need this as we may overwrite inactive lanes.
+        const TargetRegisterClass *RC = TRI->getPhysRegClass(PhysReg);
+        FI = FrameInfo.CreateSpillStackObject(TRI->getSpillSize(*RC),
+                                              TRI->getSpillAlign(*RC));
+      }
+      MFI->reserveWWMRegister(PhysReg, FI);
+    }
   }
 
   RegsToRewrite.clear();

diff  --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index dc2aff1224b1..2db5180e04bf 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -568,8 +568,8 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
     assert(!isSubRegister(ScratchRSrcReg, BasePtrReg));
   }
 
-  for (MCRegister Reg : MFI->WWMReservedRegs) {
-    reserveRegisterTuples(Reserved, Reg);
+  for (auto Reg : MFI->WWMReservedRegs) {
+    reserveRegisterTuples(Reserved, Reg.first);
   }
 
   // FIXME: Stop using reserved registers for this.

diff  --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
new file mode 100644
index 000000000000..03956782f9cb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -0,0 +1,874 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=amdgcn- -mcpu=gfx900 -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9,GFX9-O0 %s
+; RUN: llc -mtriple=amdgcn- -mcpu=gfx900 -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9,GFX9-O3 %s
+
+define amdgpu_gfx void @strict_wwm_no_cfg(<4 x i32> inreg %tmp14) {
+; GFX9-O0-LABEL: strict_wwm_no_cfg:
+; GFX9-O0:       ; %bb.0:
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O0-NEXT:    s_mov_b32 s8, s7
+; GFX9-O0-NEXT:    s_mov_b32 s9, s6
+; GFX9-O0-NEXT:    s_mov_b32 s10, s5
+; GFX9-O0-NEXT:    ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5_sgpr6_sgpr7
+; GFX9-O0-NEXT:    s_mov_b32 s5, s10
+; GFX9-O0-NEXT:    s_mov_b32 s6, s9
+; GFX9-O0-NEXT:    s_mov_b32 s7, s8
+; GFX9-O0-NEXT:    ; kill: def $sgpr8_sgpr9_sgpr10_sgpr11 killed $sgpr4_sgpr5_sgpr6_sgpr7
+; GFX9-O0-NEXT:    s_mov_b32 s8, 0
+; GFX9-O0-NEXT:    buffer_load_dwordx2 v[5:6], off, s[4:7], s8
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    v_mov_b32_e32 v4, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s8
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s8
+; GFX9-O0-NEXT:    s_nop 1
+; GFX9-O0-NEXT:    v_mov_b32_dpp v2, v0 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O0-NEXT:    v_add_u32_e32 v0, v0, v2
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v0
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s8
+; GFX9-O0-NEXT:    s_nop 1
+; GFX9-O0-NEXT:    v_mov_b32_dpp v0, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O0-NEXT:    v_add_u32_e32 v0, v1, v0
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v4, v0
+; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[10:11], v3, v4
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[10:11]
+; GFX9-O0-NEXT:    s_mov_b32 s9, 1
+; GFX9-O0-NEXT:    v_lshlrev_b32_e32 v3, s9, v3
+; GFX9-O0-NEXT:    s_mov_b32 s9, 2
+; GFX9-O0-NEXT:    v_mov_b32_e32 v4, s9
+; GFX9-O0-NEXT:    v_and_b32_e32 v3, v3, v4
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[4:7], s8 offset:4
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-O3-LABEL: strict_wwm_no_cfg:
+; GFX9-O3:       ; %bb.0:
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    buffer_load_dwordx2 v[2:3], off, s[4:7], 0
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    v_mov_b32_e32 v3, 0
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O3-NEXT:    v_mov_b32_dpp v0, v3 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O3-NEXT:    v_add_u32_e32 v1, v2, v1
+; GFX9-O3-NEXT:    v_add_u32_e32 v0, v3, v0
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v4, v1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-O3-NEXT:    v_cmp_eq_u32_e32 vcc, v4, v5
+; GFX9-O3-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX9-O3-NEXT:    v_lshlrev_b32_e32 v4, 1, v4
+; GFX9-O3-NEXT:    v_and_b32_e32 v4, 2, v4
+; GFX9-O3-NEXT:    buffer_store_dword v4, off, s[4:7], 0 offset:4
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O3-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp100 = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %tmp14, i32 0, i32 0, i32 0)
+  %tmp101 = bitcast <2 x float> %tmp100 to <2 x i32>
+  %tmp102 = extractelement <2 x i32> %tmp101, i32 0
+  %tmp103 = extractelement <2 x i32> %tmp101, i32 1
+  %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)
+  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)
+
+
+  %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
+  %tmp121 = add i32 %tmp105, %tmp120
+  %tmp122 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp121)
+
+  %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
+  %tmp136 = add i32 %tmp107, %tmp135
+  %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
+
+  %tmp138 = icmp eq i32 %tmp122, %tmp137
+  %tmp139 = sext i1 %tmp138 to i32
+  %tmp140 = shl nsw i32 %tmp139, 1
+  %tmp141 = and i32 %tmp140, 2
+  %tmp145 = bitcast i32 %tmp141 to float
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %tmp145, <4 x i32> %tmp14, i32 4, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
+; GFX9-O0-LABEL: strict_wwm_cfg:
+; GFX9-O0:       ; %bb.0: ; %entry
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s30, 0
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s31, 1
+; GFX9-O0-NEXT:    s_mov_b32 s8, s4
+; GFX9-O0-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX9-O0-NEXT:    s_mov_b32 s9, s5
+; GFX9-O0-NEXT:    s_mov_b32 s10, s6
+; GFX9-O0-NEXT:    s_mov_b32 s11, s7
+; GFX9-O0-NEXT:    s_mov_b64 s[4:5], s[8:9]
+; GFX9-O0-NEXT:    s_mov_b64 s[6:7], s[10:11]
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s4, 2
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s5, 3
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s6, 4
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s7, 5
+; GFX9-O0-NEXT:    s_mov_b32 s4, 0
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dwordx2 v[3:4], off, s[8:11], s4
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s4
+; GFX9-O0-NEXT:    s_nop 1
+; GFX9-O0-NEXT:    v_mov_b32_dpp v2, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O0-NEXT:    v_add_u32_e32 v1, v1, v2
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[6:7], s4, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 s[4:5], exec
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s4, 6
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s5, 7
+; GFX9-O0-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    s_cbranch_execz BB1_2
+; GFX9-O0-NEXT:  ; %bb.1: ; %if
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT:    v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O0-NEXT:    v_add_u32_e32 v1, v2, v1
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:  BB1_2: ; %merge
+; GFX9-O0-NEXT:    v_readlane_b32 s6, v5, 6
+; GFX9-O0-NEXT:    v_readlane_b32 s7, v5, 7
+; GFX9-O0-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GFX9-O0-NEXT:    v_readlane_b32 s4, v5, 0
+; GFX9-O0-NEXT:    v_readlane_b32 s5, v5, 1
+; GFX9-O0-NEXT:    v_readlane_b32 s8, v5, 2
+; GFX9-O0-NEXT:    v_readlane_b32 s9, v5, 3
+; GFX9-O0-NEXT:    v_readlane_b32 s10, v5, 4
+; GFX9-O0-NEXT:    v_readlane_b32 s11, v5, 5
+; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[6:7], v0, v3
+; GFX9-O0-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; GFX9-O0-NEXT:    s_mov_b32 s6, 1
+; GFX9-O0-NEXT:    v_lshlrev_b32_e32 v0, s6, v0
+; GFX9-O0-NEXT:    s_mov_b32 s6, 2
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, s6
+; GFX9-O0-NEXT:    v_and_b32_e32 v0, v0, v3
+; GFX9-O0-NEXT:    s_mov_b32 s6, 0
+; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[8:11], s6 offset:4
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    s_setpc_b64 s[4:5]
+;
+; GFX9-O3-LABEL: strict_wwm_cfg:
+; GFX9-O3:       ; %bb.0: ; %entry
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    buffer_load_dwordx2 v[2:3], off, s[4:7], 0
+; GFX9-O3-NEXT:    v_mov_b32_e32 v5, 0
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O3-NEXT:    v_add_u32_e32 v1, v2, v1
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v4, v1
+; GFX9-O3-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX9-O3-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GFX9-O3-NEXT:  ; %bb.1: ; %if
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    v_mov_b32_e32 v3, 0
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O3-NEXT:    v_mov_b32_dpp v1, v3 row_bcast:31 row_mask:0xc bank_mask:0xf
+; GFX9-O3-NEXT:    v_add_u32_e32 v1, v3, v1
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v5, v1
+; GFX9-O3-NEXT:  ; %bb.2: ; %merge
+; GFX9-O3-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GFX9-O3-NEXT:    v_cmp_eq_u32_e32 vcc, v4, v5
+; GFX9-O3-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-O3-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; GFX9-O3-NEXT:    v_and_b32_e32 v0, 2, v0
+; GFX9-O3-NEXT:    buffer_store_dword v0, off, s[4:7], 0 offset:4
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %tmp100 = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %tmp14, i32 0, i32 0, i32 0)
+  %tmp101 = bitcast <2 x float> %tmp100 to <2 x i32>
+  %tmp102 = extractelement <2 x i32> %tmp101, i32 0
+  %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)
+
+  %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
+  %tmp121 = add i32 %tmp105, %tmp120
+  %tmp122 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp121)
+
+  %cond = icmp eq i32 %arg, 0
+  br i1 %cond, label %if, label %merge
+if:
+  %tmp103 = extractelement <2 x i32> %tmp101, i32 1
+  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)
+
+  %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
+  %tmp136 = add i32 %tmp107, %tmp135
+  %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
+  br label %merge
+
+merge:
+  %merge_value = phi i32 [ 0, %entry ], [%tmp137, %if ]
+  %tmp138 = icmp eq i32 %tmp122, %merge_value
+  %tmp139 = sext i1 %tmp138 to i32
+  %tmp140 = shl nsw i32 %tmp139, 1
+  %tmp141 = and i32 %tmp140, 2
+  %tmp145 = bitcast i32 %tmp141 to float
+  call void @llvm.amdgcn.raw.buffer.store.f32(float %tmp145, <4 x i32> %tmp14, i32 4, i32 0, i32 0)
+  ret void
+}
+
+define hidden i32 @strict_wwm_called(i32 %a) noinline {
+; GFX9-LABEL: strict_wwm_called:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_add_u32_e32 v1, v0, v0
+; GFX9:         v_mul_lo_u32 v0, v1, v0
+; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i32 %a, %a
+  %mul = mul i32 %add, %a
+  %sub = sub i32 %mul, %add
+  ret i32 %sub
+}
+
+define amdgpu_gfx void @strict_wwm_call(<4 x i32> inreg %tmp14, i32 inreg %arg) {
+; GFX9-O0-LABEL: strict_wwm_call:
+; GFX9-O0:       ; %bb.0:
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s33, 7
+; GFX9-O0-NEXT:    s_mov_b32 s33, s32
+; GFX9-O0-NEXT:    s_add_u32 s32, s32, 0x400
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s30, 0
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s31, 1
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s8, 2
+; GFX9-O0-NEXT:    s_mov_b32 s8, s4
+; GFX9-O0-NEXT:    v_readlane_b32 s4, v3, 2
+; GFX9-O0-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX9-O0-NEXT:    s_mov_b32 s9, s5
+; GFX9-O0-NEXT:    s_mov_b32 s10, s6
+; GFX9-O0-NEXT:    s_mov_b32 s11, s7
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s8, 3
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s9, 4
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s10, 5
+; GFX9-O0-NEXT:    v_writelane_b32 v3, s11, 6
+; GFX9-O0-NEXT:    s_mov_b32 s8, 0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s8
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O0-NEXT:    s_getpc_b64 s[4:5]
+; GFX9-O0-NEXT:    s_add_u32 s4, s4, strict_wwm_called at rel32@lo+4
+; GFX9-O0-NEXT:    s_addc_u32 s5, s5, strict_wwm_called at rel32@hi+12
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[2:3]
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[0:1]
+; GFX9-O0-NEXT:    s_mov_b64 s[0:1], s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GFX9-O0-NEXT:    v_readlane_b32 s4, v3, 3
+; GFX9-O0-NEXT:    v_readlane_b32 s5, v3, 4
+; GFX9-O0-NEXT:    v_readlane_b32 s6, v3, 5
+; GFX9-O0-NEXT:    v_readlane_b32 s7, v3, 6
+; GFX9-O0-NEXT:    v_readlane_b32 s30, v3, 0
+; GFX9-O0-NEXT:    v_readlane_b32 s31, v3, 1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT:    v_add_u32_e32 v1, v1, v2
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[4:7], s8 offset:4
+; GFX9-O0-NEXT:    s_sub_u32 s32, s32, 0x400
+; GFX9-O0-NEXT:    v_readlane_b32 s33, v3, 7
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-O3-LABEL: strict_wwm_call:
+; GFX9-O3:       ; %bb.0:
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O3-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O3-NEXT:    s_mov_b32 s14, s33
+; GFX9-O3-NEXT:    s_mov_b32 s33, s32
+; GFX9-O3-NEXT:    s_add_u32 s32, s32, 0x400
+; GFX9-O3-NEXT:    s_mov_b64 s[10:11], s[30:31]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v2, s8
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-O3-NEXT:    s_getpc_b64 s[12:13]
+; GFX9-O3-NEXT:    s_add_u32 s12, s12, strict_wwm_called at rel32@lo+4
+; GFX9-O3-NEXT:    s_addc_u32 s13, s13, strict_wwm_called at rel32@hi+12
+; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[12:13]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-O3-NEXT:    v_add_u32_e32 v1, v1, v2
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v1
+; GFX9-O3-NEXT:    buffer_store_dword v0, off, s[4:7], 0 offset:4
+; GFX9-O3-NEXT:    s_sub_u32 s32, s32, 0x400
+; GFX9-O3-NEXT:    s_mov_b32 s33, s14
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O3-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    s_setpc_b64 s[10:11]
+  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %arg, i32 0)
+  %tmp134 = call i32 @strict_wwm_called(i32 %tmp107)
+  %tmp136 = add i32 %tmp134, %tmp107
+  %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
+  call void @llvm.amdgcn.raw.buffer.store.i32(i32 %tmp137, <4 x i32> %tmp14, i32 4, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_gfx i64 @strict_wwm_called_i64(i64 %a) noinline {
+; GFX9-O0-LABEL: strict_wwm_called_i64:
+; GFX9-O0:       ; %bb.0:
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v0
+; GFX9-O0-NEXT:    ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v1
+; GFX9-O0-NEXT:    ; kill: def $vgpr0_vgpr1 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT:    v_add_co_u32_e64 v4, s[4:5], v2, v3
+; GFX9-O0-NEXT:    v_addc_co_u32_e64 v0, s[4:5], v0, v1, s[4:5]
+; GFX9-O0-NEXT:    ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v0
+; GFX9-O0-NEXT:    s_mov_b32 s4, 32
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT:    v_lshrrev_b64 v[0:1], s4, v[0:1]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    v_mul_lo_u32 v2, v0, v1
+; GFX9-O0-NEXT:    ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT:    v_mul_hi_u32 v1, v0, v6
+; GFX9-O0-NEXT:    v_lshrrev_b64 v[7:8], s4, v[4:5]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v7
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    v_mul_lo_u32 v3, v3, v6
+; GFX9-O0-NEXT:    v_add3_u32 v1, v1, v2, v3
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-O0-NEXT:    ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v3
+; GFX9-O0-NEXT:    v_lshlrev_b64 v[1:2], s4, v[1:2]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v2
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr5
+; GFX9-O0-NEXT:    v_mul_lo_u32 v6, v0, v6
+; GFX9-O0-NEXT:    s_mov_b32 s5, 0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-O0-NEXT:    ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT:    v_or_b32_e32 v0, v0, v3
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v6
+; GFX9-O0-NEXT:    v_or_b32_e32 v6, v1, v2
+; GFX9-O0-NEXT:    ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v4
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT:    v_sub_co_u32_e64 v1, s[6:7], v1, v3
+; GFX9-O0-NEXT:    v_subb_co_u32_e64 v0, s[6:7], v0, v2, s[6:7]
+; GFX9-O0-NEXT:    ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v1
+; GFX9-O0-NEXT:    v_lshrrev_b64 v[1:2], s4, v[1:2]
+; GFX9-O0-NEXT:    ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-O3-LABEL: strict_wwm_called_i64:
+; GFX9-O3:       ; %bb.0:
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O3-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v0
+; GFX9-O3-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v1, vcc
+; GFX9-O3-NEXT:    v_mul_lo_u32 v4, v3, v0
+; GFX9-O3-NEXT:    v_mul_hi_u32 v5, v2, v0
+; GFX9-O3-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GFX9-O3-NEXT:    v_mul_lo_u32 v0, v2, v0
+; GFX9-O3-NEXT:    v_add3_u32 v1, v5, v1, v4
+; GFX9-O3-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-O3-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-O3-NEXT:    s_setpc_b64 s[30:31]
+  %add = add i64 %a, %a
+  %mul = mul i64 %add, %a
+  %sub = sub i64 %mul, %add
+  ret i64 %sub
+}
+
+define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %arg) {
+; GFX9-O0-LABEL: strict_wwm_call_i64:
+; GFX9-O0:       ; %bb.0:
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O0-NEXT:    buffer_store_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v9, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v10, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s33, 9
+; GFX9-O0-NEXT:    s_mov_b32 s33, s32
+; GFX9-O0-NEXT:    s_add_u32 s32, s32, 0xc00
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s30, 0
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s31, 1
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s9, 2
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s8, 3
+; GFX9-O0-NEXT:    s_mov_b32 s8, s6
+; GFX9-O0-NEXT:    v_readlane_b32 s6, v11, 3
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s8, 4
+; GFX9-O0-NEXT:    s_mov_b32 s12, s5
+; GFX9-O0-NEXT:    v_readlane_b32 s5, v11, 4
+; GFX9-O0-NEXT:    s_mov_b32 s8, s4
+; GFX9-O0-NEXT:    v_readlane_b32 s4, v11, 2
+; GFX9-O0-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11
+; GFX9-O0-NEXT:    s_mov_b32 s9, s12
+; GFX9-O0-NEXT:    s_mov_b32 s10, s5
+; GFX9-O0-NEXT:    s_mov_b32 s11, s7
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s8, 5
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s9, 6
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s10, 7
+; GFX9-O0-NEXT:    v_writelane_b32 v11, s11, 8
+; GFX9-O0-NEXT:    ; kill: def $sgpr6 killed $sgpr6 def $sgpr6_sgpr7
+; GFX9-O0-NEXT:    s_mov_b32 s7, s4
+; GFX9-O0-NEXT:    ; kill: def $sgpr4_sgpr5 killed $sgpr6_sgpr7
+; GFX9-O0-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s6
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v0
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, s4
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, s5
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v9
+; GFX9-O0-NEXT:    s_mov_b32 s4, 32
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GFX9-O0-NEXT:    v_lshrrev_b64 v[3:4], s4, v[9:10]
+; GFX9-O0-NEXT:    s_getpc_b64 s[4:5]
+; GFX9-O0-NEXT:    s_add_u32 s4, s4, strict_wwm_called_i64@gotpcrel32@lo+4
+; GFX9-O0-NEXT:    s_addc_u32 s5, s5, strict_wwm_called_i64@gotpcrel32@hi+12
+; GFX9-O0-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX9-O0-NEXT:    s_mov_b64 s[14:15], s[2:3]
+; GFX9-O0-NEXT:    s_mov_b64 s[12:13], s[0:1]
+; GFX9-O0-NEXT:    s_mov_b64 s[0:1], s[12:13]
+; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[14:15]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GFX9-O0-NEXT:    v_readlane_b32 s4, v11, 5
+; GFX9-O0-NEXT:    v_readlane_b32 s5, v11, 6
+; GFX9-O0-NEXT:    v_readlane_b32 s6, v11, 7
+; GFX9-O0-NEXT:    v_readlane_b32 s7, v11, 8
+; GFX9-O0-NEXT:    v_readlane_b32 s30, v11, 0
+; GFX9-O0-NEXT:    v_readlane_b32 s31, v11, 1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v4, v9
+; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v10
+; GFX9-O0-NEXT:    v_add_co_u32_e64 v2, s[10:11], v2, v4
+; GFX9-O0-NEXT:    v_addc_co_u32_e64 v3, s[10:11], v3, v5, s[10:11]
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    s_mov_b32 s8, 0
+; GFX9-O0-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], s8 offset:4
+; GFX9-O0-NEXT:    s_sub_u32 s32, s32, 0xc00
+; GFX9-O0-NEXT:    v_readlane_b32 s33, v11, 9
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT:    buffer_load_dword v11, off, s[0:3], s32 offset:40 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:28 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v9, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v10, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-O3-LABEL: strict_wwm_call_i64:
+; GFX9-O3:       ; %bb.0:
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; GFX9-O3-NEXT:    buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[10:11]
+; GFX9-O3-NEXT:    s_mov_b32 s14, s33
+; GFX9-O3-NEXT:    s_mov_b32 s33, s32
+; GFX9-O3-NEXT:    s_add_u32 s32, s32, 0x800
+; GFX9-O3-NEXT:    s_mov_b64 s[10:11], s[30:31]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v6, s8
+; GFX9-O3-NEXT:    v_mov_b32_e32 v7, s9
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    v_mov_b32_e32 v6, 0
+; GFX9-O3-NEXT:    v_mov_b32_e32 v7, 0
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    s_getpc_b64 s[12:13]
+; GFX9-O3-NEXT:    s_add_u32 s12, s12, strict_wwm_called_i64@gotpcrel32@lo+4
+; GFX9-O3-NEXT:    s_addc_u32 s13, s13, strict_wwm_called_i64@gotpcrel32@hi+12
+; GFX9-O3-NEXT:    s_load_dwordx2 s[12:13], s[12:13], 0x0
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v6
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v7
+; GFX9-O3-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[12:13]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v2, v0
+; GFX9-O3-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-O3-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v6
+; GFX9-O3-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O3-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0 offset:4
+; GFX9-O3-NEXT:    s_sub_u32 s32, s32, 0x800
+; GFX9-O3-NEXT:    s_mov_b32 s33, s14
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O3-NEXT:    buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    s_setpc_b64 s[10:11]
+  %tmp107 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %arg, i64 0)
+  %tmp134 = call i64 @strict_wwm_called_i64(i64 %tmp107)
+  %tmp136 = add i64 %tmp134, %tmp107
+  %tmp137 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp136)
+  %tmp138 = bitcast i64 %tmp137 to <2 x i32>
+  call void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32> %tmp138, <4 x i32> %tmp14, i32 4, i32 0, i32 0)
+  ret void
+}
+
+define amdgpu_gfx void @strict_wwm_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %index) {
+; GFX9-O0-LABEL: strict_wwm_amdgpu_cs_main:
+; GFX9-O0:       ; %bb.0:
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O0-NEXT:    s_mov_b32 s8, s7
+; GFX9-O0-NEXT:    s_mov_b32 s9, s6
+; GFX9-O0-NEXT:    s_mov_b32 s10, s5
+; GFX9-O0-NEXT:    ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5_sgpr6_sgpr7
+; GFX9-O0-NEXT:    s_mov_b32 s5, s10
+; GFX9-O0-NEXT:    s_mov_b32 s6, s9
+; GFX9-O0-NEXT:    s_mov_b32 s7, s8
+; GFX9-O0-NEXT:    ; kill: def $sgpr8_sgpr9_sgpr10_sgpr11 killed $sgpr4_sgpr5_sgpr6_sgpr7
+; GFX9-O0-NEXT:    s_mov_b32 s8, 5
+; GFX9-O0-NEXT:    v_lshlrev_b32_e32 v0, s8, v0
+; GFX9-O0-NEXT:    s_mov_b32 s8, 0
+; GFX9-O0-NEXT:    buffer_load_dwordx4 v[10:13], v0, s[4:7], s8 offen
+; GFX9-O0-NEXT:    buffer_load_dwordx2 v[3:4], v0, s[4:7], s8 offen offset:16
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v11
+; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v10
+; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT:    s_brev_b32 s9, -2
+; GFX9-O0-NEXT:    s_mov_b32 s10, -1
+; GFX9-O0-NEXT:    ; kill: def $sgpr10 killed $sgpr10 def $sgpr10_sgpr11
+; GFX9-O0-NEXT:    s_mov_b32 s11, s9
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v6
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s10
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s11
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v8, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v2
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v13
+; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v12
+; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v5
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v6
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s10
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s11
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v2
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v4
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s10
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s11
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT:    v_mov_b32_e32 v11, v9
+; GFX9-O0-NEXT:    v_mov_b32_e32 v5, v8
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, v7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v10, v6
+; GFX9-O0-NEXT:    ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
+; GFX9-O0-NEXT:    v_mov_b32_e32 v6, v11
+; GFX9-O0-NEXT:    v_mov_b32_e32 v7, v10
+; GFX9-O0-NEXT:    v_mov_b32_e32 v8, v9
+; GFX9-O0-NEXT:    buffer_store_dwordx4 v[5:8], v0, s[4:7], s8 offen
+; GFX9-O0-NEXT:    buffer_store_dwordx2 v[3:4], v0, s[4:7], s8 offen offset:16
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O0-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_nop 0
+; GFX9-O0-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-O3-LABEL: strict_wwm_amdgpu_cs_main:
+; GFX9-O3:       ; %bb.0:
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[8:9], -1
+; GFX9-O3-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[8:9]
+; GFX9-O3-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
+; GFX9-O3-NEXT:    buffer_load_dwordx4 v[1:4], v0, s[4:7], 0 offen
+; GFX9-O3-NEXT:    buffer_load_dwordx2 v[5:6], v0, s[4:7], 0 offen offset:16
+; GFX9-O3-NEXT:    s_brev_b32 s9, -2
+; GFX9-O3-NEXT:    s_mov_b32 s8, -1
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(1)
+; GFX9-O3-NEXT:    v_mov_b32_e32 v1, s8
+; GFX9-O3-NEXT:    v_mov_b32_e32 v2, s9
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    v_mov_b32_e32 v3, s8
+; GFX9-O3-NEXT:    v_mov_b32_e32 v4, s9
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    v_mov_b32_e32 v5, s8
+; GFX9-O3-NEXT:    v_mov_b32_e32 v6, s9
+; GFX9-O3-NEXT:    s_not_b64 exec, exec
+; GFX9-O3-NEXT:    v_mov_b32_e32 v7, v1
+; GFX9-O3-NEXT:    v_mov_b32_e32 v9, v3
+; GFX9-O3-NEXT:    v_mov_b32_e32 v11, v5
+; GFX9-O3-NEXT:    v_mov_b32_e32 v8, v2
+; GFX9-O3-NEXT:    v_mov_b32_e32 v10, v4
+; GFX9-O3-NEXT:    v_mov_b32_e32 v12, v6
+; GFX9-O3-NEXT:    buffer_store_dwordx4 v[7:10], v0, s[4:7], 0 offen
+; GFX9-O3-NEXT:    buffer_store_dwordx2 v[11:12], v0, s[4:7], 0 offen offset:16
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; GFX9-O3-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v5, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
+; GFX9-O3-NEXT:    buffer_load_dword v6, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[4:5]
+; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-O3-NEXT:    s_setpc_b64 s[30:31]
+  %tmp17 = shl i32 %index, 5
+  %tmp18 = tail call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %desc, i32 %tmp17, i32 0)
+  %.i0.upto1.bc = bitcast <4 x i32> %tmp18 to <2 x i64>
+  %tmp19 = or i32 %tmp17, 16
+  %tmp20 = tail call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %desc, i32 %tmp19, i32 0)
+  %.i0.upto1.extract = extractelement <2 x i64> %.i0.upto1.bc, i32 0
+  %tmp22 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i0.upto1.extract, i64 9223372036854775807)
+  %tmp97 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp22)
+  %.i1.upto1.extract = extractelement <2 x i64> %.i0.upto1.bc, i32 1
+  %tmp99 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i1.upto1.extract, i64 9223372036854775807)
+  %tmp174 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp99)
+  %.i25 = bitcast <2 x i32> %tmp20 to i64
+  %tmp176 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i25, i64 9223372036854775807)
+  %tmp251 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp176)
+  %.cast = bitcast i64 %tmp97 to <2 x float>
+  %.cast6 = bitcast i64 %tmp174 to <2 x float>
+  %.cast7 = bitcast i64 %tmp251 to <2 x float>
+  %tmp254 = shufflevector <2 x float> %.cast, <2 x float> %.cast6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  tail call void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float> %tmp254, <4 x i32> %desc, i32 %tmp17, i32 0, i32 0)
+  tail call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %.cast7, <4 x i32> %desc, i32 %tmp19, i32 0, i32 0)
+  ret void
+}
+
+declare i32 @llvm.amdgcn.strict.wwm.i32(i32)
+declare i64 @llvm.amdgcn.strict.wwm.i64(i64)
+declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
+declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
+declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1)
+declare <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32>, i32, i32, i32)
+declare void @llvm.amdgcn.raw.buffer.store.f32(float, <4 x i32>, i32, i32, i32)
+declare void @llvm.amdgcn.raw.buffer.store.i32(i32, <4 x i32>, i32, i32, i32)
+declare void @llvm.amdgcn.raw.buffer.store.v2i32(<2 x i32>, <4 x i32>, i32, i32, i32)
+declare void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float>, <4 x i32>, i32, i32, i32)
+declare void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32)
+declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32)
+declare <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32>, i32, i32)


        


More information about the llvm-commits mailing list