[llvm] [AMDGPU] Refactor GFX11 VALU Mask Hazard Waitcnt Merging (PR #169213)

via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 23 03:46:55 PST 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-amdgpu

Author: Carl Ritson (perlfu)

Changes:

Move GFX11 SGPR VALU mask hazard waitcnt merging to a forward scan within the AMDGPUWaitSGPRHazards pass.
This simplifies the hazard recognizer code and allows waitcnt instructions to be merged even when SGPRs are read in between, provided those SGPRs are unaffected by pending writes.
In turn this greatly decreases the number of waits inserted in sequences of V_CMP instructions writing SGPRs, improving VALU pipeline performance.
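
For intuition, below is a minimal standalone sketch of the forward scan, mirroring the runWaitMerging logic in the diff: track SGPR writes between waits, and fold a wait into the next one when none of those writes are read in between. The Inst type and scan function are illustrative, not the pass's API, and the mergeMasks stand-in assumes the GFX11 depctr bit layout (sa_sdst at bit 0, va_vcc at bit 1, va_sdst at bits 9-11), where a zero field requests the wait.

```cpp
#include <algorithm>
#include <bitset>
#include <cstdio>
#include <optional>
#include <vector>

// Per-field minimum of the three depctr fields being merged; a stand-in
// for the AMDGPU::DepCtr encode/decode helpers used by the pass.
unsigned mergeMasks(unsigned A, unsigned B) {
  auto Get = [](unsigned M, unsigned Lo, unsigned W) {
    return (M >> Lo) & ((1u << W) - 1);
  };
  auto Put = [](unsigned M, unsigned Lo, unsigned W, unsigned V) {
    unsigned F = ((1u << W) - 1) << Lo;
    return (M & ~F) | (V << Lo);
  };
  unsigned R = B;
  R = Put(R, 0, 1, std::min(Get(A, 0, 1), Get(B, 0, 1))); // sa_sdst
  R = Put(R, 1, 1, std::min(Get(A, 1, 1), Get(B, 1, 1))); // va_vcc
  R = Put(R, 9, 3, std::min(Get(A, 9, 3), Get(B, 9, 3))); // va_sdst
  return R;
}

// Toy instruction: either a mergeable s_waitcnt_depctr (WaitMask set)
// or a plain instruction defining/using some SGPR numbers.
struct Inst {
  std::optional<unsigned> WaitMask;
  std::vector<unsigned> Defs, Uses;
};

void scan(std::vector<Inst> &Block) {
  std::bitset<128> WriteSet, NextWriteSet;
  Inst *PrevWait = nullptr;
  bool ReadWriteDep = false;
  for (Inst &I : Block) {
    if (I.WaitMask) {
      if (PrevWait && !ReadWriteDep) {
        // No read of a pending write since the previous wait: fold the
        // previous wait into this one and keep both write sets live.
        *I.WaitMask = mergeMasks(*PrevWait->WaitMask, *I.WaitMask);
        PrevWait->WaitMask.reset(); // models eraseFromParent()
        WriteSet |= NextWriteSet;
      } else {
        WriteSet = NextWriteSet; // start a fresh merging region
      }
      NextWriteSet.reset();
      PrevWait = &I;
      ReadWriteDep = false;
      continue;
    }
    for (unsigned R : I.Uses)
      ReadWriteDep |= WriteSet[R]; // read-after-write blocks merging
    for (unsigned R : I.Defs)
      NextWriteSet.set(R);
  }
}

int main() {
  // Mirrors mask_hazard_combine2 from the tests: two waits with no read
  // of the pending SGPR writes between them collapse to one mask.
  std::vector<Inst> B = {{65532u, {}, {}},  // sa_sdst=0, va_vcc=0
                         {{}, {2, 3}, {5}}, // defs s2_s3, reads s5
                         {61950u, {}, {}}}; // va_sdst=0
  scan(B);
  std::printf("merged mask = %u\n", *B[2].WaitMask); // prints 61948
}
```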

---
Full diff: https://github.com/llvm/llvm-project/pull/169213.diff


3 Files Affected:

- (modified) llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp (+91-5) 
- (modified) llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp (+21-77) 
- (modified) llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir (+106-4) 


``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp b/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp
index ded2f5ae1f8af..85ed0d88fdc10 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUWaitSGPRHazards.cpp
@@ -66,6 +66,7 @@ class AMDGPUWaitSGPRHazards {
     case AMDGPU::EXEC_HI:
     case AMDGPU::SGPR_NULL:
     case AMDGPU::SGPR_NULL64:
+    case AMDGPU::SCC:
       return {};
     default:
       break;
@@ -437,11 +438,96 @@ class AMDGPUWaitSGPRHazards {
     return Changed;
   }
 
+  bool runWaitMerging(MachineFunction &MF) {
+    // Perform per-block merging of existing s_waitcnt_depctr instructions.
+    // Track set of SGPR writes before a given wait instruction, and search
+    // for reads of these SGPRs prior to the next wait.
+    // If no reads occur then the 1st wait can be merged into the 2nd.
+    const unsigned ConstantMaskBits = AMDGPU::DepCtr::encodeFieldSaSdst(
+        AMDGPU::DepCtr::encodeFieldVaSdst(AMDGPU::DepCtr::encodeFieldVaVcc(0),
+                                          0),
+        0);
+    bool Changed = false;
+    for (auto &MBB : MF) {
+      std::bitset<128> WriteSet, NextWriteSet;
+      MachineInstr *PrevWait = nullptr;
+      bool ReadWriteDep = false;
+      for (MachineBasicBlock::instr_iterator MI = MBB.instr_begin(),
+                                             E = MBB.instr_end();
+           MI != E; ++MI) {
+        if (MI->isMetaInstruction())
+          continue;
+
+        if (MI->getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && !MI->isBundled() &&
+            (MI->getOperand(0).getImm() & ConstantMaskBits) ==
+                ConstantMaskBits) {
+          if (PrevWait && !ReadWriteDep) {
+            // Merge previous wait into this one and merge write sets.
+            MachineOperand &MaskOp = MI->getOperand(0);
+            MaskOp.setImm(
+                mergeMasks(PrevWait->getOperand(0).getImm(), MaskOp.getImm()));
+            PrevWait->eraseFromParent();
+            WriteSet |= NextWriteSet;
+          } else {
+            // Start a new merging region using fresh write set.
+            WriteSet = NextWriteSet;
+          }
+          NextWriteSet.reset();
+          PrevWait = &*MI;
+          ReadWriteDep = false;
+          Changed = true;
+          continue;
+        }
+
+        const bool IsVALU = SIInstrInfo::isVALU(*MI);
+        const bool IsSALU = SIInstrInfo::isSALU(*MI);
+        if (!IsVALU && !IsSALU)
+          continue;
+
+        for (const MachineOperand &Op : MI->operands()) {
+          if (!Op.isReg())
+            continue;
+          Register Reg = Op.getReg();
+          if (!TRI->isSGPRReg(*MRI, Reg))
+            continue;
+
+          auto RegNumber = sgprNumber(Reg, *TRI);
+          if (!RegNumber)
+            continue;
+          unsigned RegN = *RegNumber;
+
+          uint8_t SGPRCount =
+              AMDGPU::getRegBitWidth(*TRI->getRegClassForReg(*MRI, Reg)) / 32;
+
+          if (Op.isDef()) {
+            for (uint8_t RegIdx = 0; RegIdx < SGPRCount; ++RegIdx)
+              NextWriteSet.set(RegN + RegIdx);
+          } else {
+            if (ReadWriteDep)
+              continue;
+            for (uint8_t RegIdx = 0; RegIdx < SGPRCount; ++RegIdx) {
+              if (WriteSet[RegN + RegIdx]) {
+                ReadWriteDep = true;
+                break;
+              }
+            }
+          }
+        }
+      }
+    }
+    return Changed;
+  }
+
   bool run(MachineFunction &MF) {
     const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
-    if (!ST.hasVALUReadSGPRHazard())
+    if (!ST.hasVALUReadSGPRHazard() && !ST.hasVALUMaskWriteHazard())
       return false;
 
+    TII = ST.getInstrInfo();
+    TRI = ST.getRegisterInfo();
+    MRI = &MF.getRegInfo();
+    DsNopCount = ST.isWave64() ? WAVE64_NOPS : WAVE32_NOPS;
+
     // Parse settings
     EnableSGPRHazardWaits = GlobalEnableSGPRHazardWaits;
     CullSGPRHazardsOnFunctionBoundary = GlobalCullSGPRHazardsOnFunctionBoundary;
@@ -467,10 +553,10 @@ class AMDGPUWaitSGPRHazards {
     if (!EnableSGPRHazardWaits)
       return false;
 
-    TII = ST.getInstrInfo();
-    TRI = ST.getRegisterInfo();
-    MRI = &MF.getRegInfo();
-    DsNopCount = ST.isWave64() ? WAVE64_NOPS : WAVE32_NOPS;
+    // VALU mask write hazards have already been handled, but this pass
+    // performs a forward scan to optimize them.
+    if (ST.hasVALUMaskWriteHazard())
+      return runWaitMerging(MF);
 
     auto CallingConv = MF.getFunction().getCallingConv();
     if (!AMDGPU::isEntryFunctionCC(CallingConv) &&
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 29d22f27a2d8e..656eb1002fb39 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -3315,15 +3315,14 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
   };
 
   SmallVector<const MachineInstr *> WaitInstrs;
-  bool HasSGPRRead = false;
   StateType InitialState;
 
   // Look for SGPR write.
   MachineOperand *HazardDef = nullptr;
-  for (MachineOperand &Op : MI->operands()) {
+  for (MachineOperand &Op : MI->all_defs()) {
     if (!Op.isReg())
       continue;
-    if (Op.isDef() && HazardDef)
+    if (HazardDef)
       continue;
 
     Register Reg = Op.getReg();
@@ -3335,11 +3334,6 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
       if (!TRI->isSGPRReg(MRI, Reg))
         continue;
     }
-    // Also check for SGPR reads.
-    if (Op.isUse()) {
-      HasSGPRRead = true;
-      continue;
-    }
 
     assert(!HazardDef);
     HazardDef = &Op;
@@ -3403,48 +3397,31 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
     }
   };
 
-  const unsigned ConstantMaskBits = AMDGPU::DepCtr::encodeFieldSaSdst(
-      AMDGPU::DepCtr::encodeFieldVaSdst(AMDGPU::DepCtr::encodeFieldVaVcc(0), 0),
-      0);
   auto UpdateStateFn = [&](StateType &State, const MachineInstr &I) {
-    switch (I.getOpcode()) {
-    case AMDGPU::S_WAITCNT_DEPCTR:
-      // Record mergable waits within region of instructions free of SGPR reads.
-      if (!HasSGPRRead && I.getParent() == MI->getParent() && !I.isBundled() &&
-          (I.getOperand(0).getImm() & ConstantMaskBits) == ConstantMaskBits)
-        WaitInstrs.push_back(&I);
-      break;
-    default:
-      // Update tracking of SGPR reads and writes.
-      for (auto &Op : I.operands()) {
-        if (!Op.isReg())
-          continue;
+    // Update tracking of SGPR writes.
+    for (auto &Op : I.all_defs()) {
+      if (!Op.isReg())
+        continue;
 
-        Register Reg = Op.getReg();
-        if (IgnoreableSGPR(Reg))
+      Register Reg = Op.getReg();
+      if (IgnoreableSGPR(Reg))
+        continue;
+      if (!IsVCC(Reg)) {
+        if (Op.isImplicit())
           continue;
-        if (!IsVCC(Reg)) {
-          if (Op.isImplicit())
-            continue;
-          if (!TRI->isSGPRReg(MRI, Reg))
-            continue;
-        }
-        if (Op.isUse()) {
-          HasSGPRRead = true;
+        if (!TRI->isSGPRReg(MRI, Reg))
           continue;
-        }
+      }
 
-        // Stop tracking any SGPRs with writes on the basis that they will
-        // already have an appropriate wait inserted afterwards.
-        SmallVector<Register, 2> Found;
-        for (Register SGPR : State.HazardSGPRs) {
-          if (Reg == SGPR || TRI->regsOverlap(Reg, SGPR))
-            Found.push_back(SGPR);
-        }
-        for (Register SGPR : Found)
-          State.HazardSGPRs.erase(SGPR);
+      // Stop tracking any SGPRs with writes on the basis that they will
+      // already have an appropriate wait inserted afterwards.
+      SmallVector<Register, 2> Found;
+      for (Register SGPR : State.HazardSGPRs) {
+        if (Reg == SGPR || TRI->regsOverlap(Reg, SGPR))
+          Found.push_back(SGPR);
       }
-      break;
+      for (Register SGPR : Found)
+        State.HazardSGPRs.erase(SGPR);
     }
   };
 
@@ -3460,39 +3437,6 @@ bool GCNHazardRecognizer::fixVALUMaskWriteHazard(MachineInstr *MI) {
                                  : AMDGPU::DepCtr::encodeFieldVaSdst(0))
              : AMDGPU::DepCtr::encodeFieldSaSdst(0);
 
-  // Try to merge previous waits into this one for regions with no SGPR reads.
-  if (!WaitInstrs.empty()) {
-    // Note: WaitInstrs contains const pointers, so walk backward from MI to
-    // obtain a mutable pointer to each instruction to be merged.
-    // This is expected to be a very short walk within the same block.
-    SmallVector<MachineInstr *> ToErase;
-    unsigned Found = 0;
-    for (MachineBasicBlock::reverse_iterator It = MI->getReverseIterator(),
-                                             End = MI->getParent()->rend();
-         Found < WaitInstrs.size() && It != End; ++It) {
-      MachineInstr *WaitMI = &*It;
-      // Find next wait instruction.
-      if (std::as_const(WaitMI) != WaitInstrs[Found])
-        continue;
-      Found++;
-      unsigned WaitMask = WaitMI->getOperand(0).getImm();
-      assert((WaitMask & ConstantMaskBits) == ConstantMaskBits);
-      DepCtr = AMDGPU::DepCtr::encodeFieldSaSdst(
-          DepCtr, std::min(AMDGPU::DepCtr::decodeFieldSaSdst(WaitMask),
-                           AMDGPU::DepCtr::decodeFieldSaSdst(DepCtr)));
-      DepCtr = AMDGPU::DepCtr::encodeFieldVaSdst(
-          DepCtr, std::min(AMDGPU::DepCtr::decodeFieldVaSdst(WaitMask),
-                           AMDGPU::DepCtr::decodeFieldVaSdst(DepCtr)));
-      DepCtr = AMDGPU::DepCtr::encodeFieldVaVcc(
-          DepCtr, std::min(AMDGPU::DepCtr::decodeFieldVaVcc(WaitMask),
-                           AMDGPU::DepCtr::decodeFieldVaVcc(DepCtr)));
-      ToErase.push_back(WaitMI);
-    }
-    assert(Found == WaitInstrs.size());
-    for (MachineInstr *WaitMI : ToErase)
-      WaitMI->eraseFromParent();
-  }
-
   // Add s_waitcnt_depctr after SGPR write.
   auto NextMI = std::next(MI->getIterator());
   auto NewMI = BuildMI(*MI->getParent(), NextMI, MI->getDebugLoc(),
diff --git a/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir b/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir
index e1d3ebc2d35d1..7460198ec6f39 100644
--- a/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/valu-mask-write-hazard.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -verify-machineinstrs -run-pass post-RA-hazard-rec -o - %s | FileCheck -check-prefixes=GCN,GFX11 %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+wavefrontsize64 -verify-machineinstrs -run-pass post-RA-hazard-rec,amdgpu-wait-sgpr-hazards -o - %s | FileCheck -check-prefixes=GCN,GFX11 %s
 # RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+wavefrontsize64 -verify-machineinstrs -run-pass post-RA-hazard-rec,amdgpu-wait-sgpr-hazards -o - %s | FileCheck -check-prefixes=GCN,GFX12 %s
 
 --- |
@@ -54,6 +54,9 @@
   define amdgpu_gs void @mask_hazard_valu_vcmp_sgpr() { ret void }
   define amdgpu_gs void @mask_hazard_combine1() { ret void }
   define amdgpu_gs void @mask_hazard_combine2() { ret void }
+  define amdgpu_gs void @mask_hazard_combine3() { ret void }
+  define amdgpu_gs void @mask_hazard_combine4() { ret void }
+  define amdgpu_gs void @mask_hazard_combine5() { ret void }
 ...
 
 ---
@@ -782,7 +785,6 @@ body:            |
     ; GFX11-LABEL: name: mask_hazard_partial_cancel2
     ; GFX11: $vgpr1 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
     ; GFX11-NEXT: $vcc_hi = S_MOV_B32 0
-    ; GFX11-NEXT: S_WAITCNT_DEPCTR 65534
     ; GFX11-NEXT: $sgpr0 = S_MOV_B32 $vcc_lo
     ; GFX11-NEXT: $vcc = S_MOV_B64 1
     ; GFX11-NEXT: S_WAITCNT_DEPCTR 65534
@@ -1017,10 +1019,9 @@ body:            |
     ; GFX11-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
     ; GFX11-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
     ; GFX11-NEXT: $sgpr0 = S_MOV_B32 0
-    ; GFX11-NEXT: S_WAITCNT_DEPCTR 65532
     ; GFX11-NEXT: $sgpr1 = S_MOV_B32 $sgpr4
     ; GFX11-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
-    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61950
+    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61948
     ; GFX11-NEXT: S_ENDPGM 0
     ;
     ; GFX12-LABEL: name: mask_hazard_combine2
@@ -1041,3 +1042,104 @@ body:            |
     $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
     S_ENDPGM 0
 ...
+
+---
+name:            mask_hazard_combine3
+body:            |
+  bb.0:
+    ; GFX11-LABEL: name: mask_hazard_combine3
+    ; GFX11: $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    ; GFX11-NEXT: $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    ; GFX11-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    ; GFX11-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    ; GFX11-NEXT: $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    ; GFX11-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61949
+    ; GFX11-NEXT: S_ENDPGM 0
+    ;
+    ; GFX12-LABEL: name: mask_hazard_combine3
+    ; GFX12: $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    ; GFX12-NEXT: $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    ; GFX12-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    ; GFX12-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    ; GFX12-NEXT: $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    ; GFX12-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    ; GFX12-NEXT: S_ENDPGM 0
+    $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    S_ENDPGM 0
+...
+
+---
+name:            mask_hazard_combine4
+body:            |
+  bb.0:
+    ; GFX11-LABEL: name: mask_hazard_combine4
+    ; GFX11: $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    ; GFX11-NEXT: $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    ; GFX11-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    ; GFX11-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    ; GFX11-NEXT: $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61949
+    ; GFX11-NEXT: $sgpr4_sgpr5 = S_MOV_B64 $vcc
+    ; GFX11-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61951
+    ; GFX11-NEXT: S_ENDPGM 0
+    ;
+    ; GFX12-LABEL: name: mask_hazard_combine4
+    ; GFX12: $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    ; GFX12-NEXT: $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    ; GFX12-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    ; GFX12-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    ; GFX12-NEXT: $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    ; GFX12-NEXT: $sgpr4_sgpr5 = S_MOV_B64 $vcc
+    ; GFX12-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    ; GFX12-NEXT: S_ENDPGM 0
+    $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    $sgpr4_sgpr5 = S_MOV_B64 $vcc
+    $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    S_ENDPGM 0
+...
+
+---
+name:            mask_hazard_combine5
+body:            |
+  bb.0:
+    ; GFX11-LABEL: name: mask_hazard_combine5
+    ; GFX11: $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    ; GFX11-NEXT: $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    ; GFX11-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    ; GFX11-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    ; GFX11-NEXT: $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61949
+    ; GFX11-NEXT: $sgpr5 = S_MOV_B32 $sgpr1
+    ; GFX11-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    ; GFX11-NEXT: S_WAITCNT_DEPCTR 61951
+    ; GFX11-NEXT: S_ENDPGM 0
+    ;
+    ; GFX12-LABEL: name: mask_hazard_combine5
+    ; GFX12: $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    ; GFX12-NEXT: $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    ; GFX12-NEXT: $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    ; GFX12-NEXT: V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    ; GFX12-NEXT: $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    ; GFX12-NEXT: $sgpr5 = S_MOV_B32 $sgpr1
+    ; GFX12-NEXT: $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    ; GFX12-NEXT: S_ENDPGM 0
+    $vgpr3 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr0_sgpr1, implicit $exec
+    $vgpr5 = V_CNDMASK_B32_e64 0, $vgpr1, 0, $vgpr2, $sgpr2_sgpr3, implicit $exec
+    V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 2, $vgpr5, implicit $exec
+    $sgpr5 = S_MOV_B32 $sgpr1
+    $sgpr2_sgpr3 = V_CMP_EQ_U32_e64 3, $vgpr5, implicit $exec
+    S_ENDPGM 0
+...

``````````
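
As a sanity check on the immediates in the tests above, a few lines of C++ decode them, again assuming the GFX11 depctr bit layout (sa_sdst bit 0, va_vcc bit 1, va_sdst bits 9-11; a zero field requests the wait):

```cpp
#include <cstdio>

int main() {
  // Immediates appearing in the GFX11 check lines above.
  for (unsigned Imm : {65534u, 65532u, 61950u, 61948u, 61949u, 61951u})
    std::printf("%5u: sa_sdst=%u va_vcc=%u va_sdst=%u\n", Imm, Imm & 1u,
                (Imm >> 1) & 1u, (Imm >> 9) & 7u);
  // 61948 is the per-field minimum of 65532 and 61950, matching the
  // single merged wait that replaces the two waits in
  // mask_hazard_combine2.
}
```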



https://github.com/llvm/llvm-project/pull/169213


More information about the llvm-commits mailing list