[clang] [llvm] reduce max wrt divergent mask (PR #135138)

via cfe-commits cfe-commits at lists.llvm.org
Wed Apr 9 23:58:07 PDT 2025


github-actions[bot] wrote:


:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:

<details>
<summary>
You can test this locally with the following command:
</summary>

``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions cpp -- clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp llvm/lib/Target/AMDGPU/SIISelLowering.cpp
``````````

</details>
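
If you'd rather apply the fixes than just view them, note that the two-commit form above is diff-only; the in-place variant takes a single commit. A sketch, assuming it is run from the repository root on the PR branch:

``````````bash
# In-place variant (sketch): reformat the lines changed since HEAD~1 in
# these two files and write the results directly to the working tree.
git-clang-format HEAD~1 --extensions cpp -- \
    clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp \
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
``````````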

<details>
<summary>
View the diff from clang-format here.
</summary>

``````````diff
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 15c0eeab4..42f5fc374 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -286,10 +286,10 @@ void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
 
 static Intrinsic::ID getIntrinsicIDforWaveReduction(unsigned BuiltinID) {
   switch (BuiltinID) {
-    case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_wrt_divergent_mask_max_i32:
-      return Intrinsic::amdgcn_wave_reduce_wrt_divergent_mask_umax;
-    default:
-      llvm_unreachable("Unknown BuiltinID for wave reduction");
+  case clang::AMDGPU::BI__builtin_amdgcn_wave_reduce_wrt_divergent_mask_max_i32:
+    return Intrinsic::amdgcn_wave_reduce_wrt_divergent_mask_umax;
+  default:
+    llvm_unreachable("Unknown BuiltinID for wave reduction");
   }
 }
 
@@ -1151,7 +1151,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
   case AMDGPU::BI__builtin_amdgcn_s_prefetch_data:
     return emitBuiltinWithOneOverloadedType<2>(
         *this, E, Intrinsic::amdgcn_s_prefetch_data);
-  case AMDGPU::BI__builtin_amdgcn_wave_reduce_wrt_divergent_mask_max_i32:{
+  case AMDGPU::BI__builtin_amdgcn_wave_reduce_wrt_divergent_mask_max_i32: {
     Intrinsic::ID IID = getIntrinsicIDforWaveReduction(BuiltinID);
     llvm::Value *Value = EmitScalarExpr(E->getArg(0));
     llvm::Value *Mask = EmitScalarExpr(E->getArg(1));
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 83ccba7cf..5e08ee5e7 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5143,157 +5143,165 @@ static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
   return RetBB;
 }
 
-static MachineBasicBlock *lowerWaveReduceWrtDivergentMask(MachineInstr &MI,
-  MachineBasicBlock &BB,
-  const GCNSubtarget &ST,
-  unsigned Opc) {
-MachineRegisterInfo &MRI = BB.getParent()->getRegInfo();
-const SIRegisterInfo *TRI = ST.getRegisterInfo();
-const DebugLoc &DL = MI.getDebugLoc();
-const SIInstrInfo *TII = ST.getInstrInfo();
-// const MachineFunction *MF = BB.getParent();
-// const TargetRegisterInfo *TrgtRegInfo = MF->getSubtarget().getRegisterInfo();
-// Reduction operations depend on whether the input operand is SGPR or VGPR.
-Register SrcReg = MI.getOperand(1).getReg();
-auto SrcRegClass = MRI.getRegClass(SrcReg);
-// llvm::errs() << TrgtRegInfo->getRegClassName(SrcRegClass) << "\n";
-bool isSGPR = TRI->isSGPRClass(SrcRegClass);
-Register DstReg = MI.getOperand(0).getReg();
-// llvm::errs() << TrgtRegInfo->getRegClassName(MRI.getRegClass(DstReg)) << "\n";
-Register DivergentMaskReg = MI.getOperand(2).getReg();
-// llvm::errs() << TrgtRegInfo->getRegClassName(MRI.getRegClass(DivergentMaskReg)) << "\n";
-
-MachineBasicBlock *RetBB = nullptr;
-if (isSGPR) {
-BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg)
-.addReg(SrcReg);
-RetBB = &BB;
-} else {
-
-MachineBasicBlock::iterator I = BB.end();
-
-auto [ComputeLoop, ComputeEnd] = splitBlockForLoop(MI, BB, true);
-
-auto SReg32XM0RegClass = &AMDGPU::SReg_32_XM0RegClass;
-auto SReg32RegClass = &AMDGPU::SReg_32RegClass;
-
-const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
-const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
-Register ExecCopyReg = MRI.createVirtualRegister(WaveMaskRegClass);
-Register AccSGPRReg = MRI.createVirtualRegister(SReg32XM0RegClass);
-Register UpdatedAccSGPRReg = MRI.createVirtualRegister(SReg32RegClass);
-Register AccReg1 = MRI.createVirtualRegister(DstRegClass);
-Register AccReg = MRI.createVirtualRegister(DstRegClass);
-Register BPermAddrReg = MRI.createVirtualRegister(DstRegClass);
-Register UpdatedBPermAddrReg = MRI.createVirtualRegister(DstRegClass);
-Register InitialBPermAddrReg = MRI.createVirtualRegister(DstRegClass);
-Register UpdatedAccReg = MRI.createVirtualRegister(DstRegClass);
-Register ActiveLanesReg = MRI.createVirtualRegister(WaveMaskRegClass);
-Register UpdatedActiveLanesReg = MRI.createVirtualRegister(WaveMaskRegClass);
-Register FF1ActiveLanesReg = MRI.createVirtualRegister(SReg32RegClass);
-Register FF1MaskReg = MRI.createVirtualRegister(SReg32RegClass);
-Register FF1MaskX4Reg = MRI.createVirtualRegister(SReg32RegClass);
-Register ValReg = MRI.createVirtualRegister(SReg32XM0RegClass);
-Register MaskReg = MRI.createVirtualRegister(SReg32XM0RegClass);
-
-bool IsWave32 = ST.isWave32();
-
-uint32_t IdentityValue =
-(Opc == AMDGPU::S_MIN_U32) ? std::numeric_limits<uint32_t>::max() : 0;
-
-BuildMI(BB, I, DL, TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64), ExecCopyReg).addReg(IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC); 
-
-BuildMI(BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), AccReg)
-.addImm(IdentityValue);
-BuildMI(BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), InitialBPermAddrReg)
-.addImm(0);
-BuildMI(BB, I, DL, TII->get(AMDGPU::S_BRANCH))
-.addMBB(ComputeLoop);
-
-I = ComputeLoop->end();
-
-auto PhiActiveLanesInst =
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), ActiveLanesReg)
-.addReg(ExecCopyReg)
-.addMBB(&BB);
-auto PhiAccInst =
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), AccReg1)
-.addReg(AccReg)
-.addMBB(&BB);
-auto PhiBPermAddrInst =
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), BPermAddrReg)
-.addReg(InitialBPermAddrReg)
-.addMBB(&BB);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(IsWave32 ? AMDGPU::S_FF1_I32_B32 : AMDGPU::S_FF1_I32_B64), FF1ActiveLanesReg)
-.addReg(ActiveLanesReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32), ValReg)
-.addReg(SrcReg)
-.addReg(FF1ActiveLanesReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32), MaskReg)
-.addReg(DivergentMaskReg)
-.addReg(FF1ActiveLanesReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_FF1_I32_B32), FF1MaskReg).addReg(MaskReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32), AccSGPRReg)
-.addReg(AccReg1)
-.addReg(FF1MaskReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(Opc), UpdatedAccSGPRReg).addReg(AccSGPRReg).addReg(ValReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
-.addReg(FF1MaskReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_WRITELANE_B32), UpdatedAccReg)
-.addReg(UpdatedAccSGPRReg)
-.addReg(AMDGPU::M0)
-.addReg(AccReg1);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_LSHL_B32), FF1MaskX4Reg)
-.addReg(FF1MaskReg)
-.addImm(2);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
-.addReg(FF1ActiveLanesReg);
-
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_WRITELANE_B32), UpdatedBPermAddrReg)
-.addReg(FF1MaskX4Reg)
-.addReg(AMDGPU::M0)
-.addReg(BPermAddrReg);
-
-unsigned BITSETOpc =
-IsWave32 ? AMDGPU::S_BITSET0_B32 : AMDGPU::S_BITSET0_B64;
-BuildMI(*ComputeLoop, I, DL, TII->get(BITSETOpc), UpdatedActiveLanesReg)
-.addReg(FF1ActiveLanesReg)
-.addReg(ActiveLanesReg);
-
-PhiActiveLanesInst.addReg(UpdatedActiveLanesReg)
-.addMBB(ComputeLoop);
-PhiAccInst.addReg(UpdatedAccReg)
-.addMBB(ComputeLoop);
-PhiBPermAddrInst.addReg(UpdatedBPermAddrReg)
-.addMBB(ComputeLoop);
-
-unsigned CMPOpc = IsWave32 ? AMDGPU::S_CMP_LG_U32 : AMDGPU::S_CMP_LG_U64;
-BuildMI(*ComputeLoop, I, DL, TII->get(CMPOpc))
-.addReg(UpdatedActiveLanesReg)
-.addImm(0);
-BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
-.addMBB(ComputeLoop);
-
-BuildMI(*ComputeEnd, ComputeEnd->begin(), DL, TII->get(AMDGPU::DS_BPERMUTE_B32), DstReg)
-.addReg(UpdatedBPermAddrReg)
-.addReg(UpdatedAccReg)
-.addImm(0);
-
-RetBB = ComputeEnd;
+static MachineBasicBlock *
+lowerWaveReduceWrtDivergentMask(MachineInstr &MI, MachineBasicBlock &BB,
+                                const GCNSubtarget &ST, unsigned Opc) {
+  MachineRegisterInfo &MRI = BB.getParent()->getRegInfo();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  const DebugLoc &DL = MI.getDebugLoc();
+  const SIInstrInfo *TII = ST.getInstrInfo();
+  // const MachineFunction *MF = BB.getParent();
+  // const TargetRegisterInfo *TrgtRegInfo =
+  // MF->getSubtarget().getRegisterInfo(); Reduction operations depend on
+  // whether the input operand is SGPR or VGPR.
+  Register SrcReg = MI.getOperand(1).getReg();
+  auto SrcRegClass = MRI.getRegClass(SrcReg);
+  // llvm::errs() << TrgtRegInfo->getRegClassName(SrcRegClass) << "\n";
+  bool isSGPR = TRI->isSGPRClass(SrcRegClass);
+  Register DstReg = MI.getOperand(0).getReg();
+  // llvm::errs() << TrgtRegInfo->getRegClassName(MRI.getRegClass(DstReg)) <<
+  // "\n";
+  Register DivergentMaskReg = MI.getOperand(2).getReg();
+  // llvm::errs() <<
+  // TrgtRegInfo->getRegClassName(MRI.getRegClass(DivergentMaskReg)) << "\n";
 
-}
-MI.eraseFromParent();
-return RetBB;
+  MachineBasicBlock *RetBB = nullptr;
+  if (isSGPR) {
+    BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+    RetBB = &BB;
+  } else {
+
+    MachineBasicBlock::iterator I = BB.end();
+
+    auto [ComputeLoop, ComputeEnd] = splitBlockForLoop(MI, BB, true);
+
+    auto SReg32XM0RegClass = &AMDGPU::SReg_32_XM0RegClass;
+    auto SReg32RegClass = &AMDGPU::SReg_32RegClass;
+
+    const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass();
+    const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg);
+    Register ExecCopyReg = MRI.createVirtualRegister(WaveMaskRegClass);
+    Register AccSGPRReg = MRI.createVirtualRegister(SReg32XM0RegClass);
+    Register UpdatedAccSGPRReg = MRI.createVirtualRegister(SReg32RegClass);
+    Register AccReg1 = MRI.createVirtualRegister(DstRegClass);
+    Register AccReg = MRI.createVirtualRegister(DstRegClass);
+    Register BPermAddrReg = MRI.createVirtualRegister(DstRegClass);
+    Register UpdatedBPermAddrReg = MRI.createVirtualRegister(DstRegClass);
+    Register InitialBPermAddrReg = MRI.createVirtualRegister(DstRegClass);
+    Register UpdatedAccReg = MRI.createVirtualRegister(DstRegClass);
+    Register ActiveLanesReg = MRI.createVirtualRegister(WaveMaskRegClass);
+    Register UpdatedActiveLanesReg =
+        MRI.createVirtualRegister(WaveMaskRegClass);
+    Register FF1ActiveLanesReg = MRI.createVirtualRegister(SReg32RegClass);
+    Register FF1MaskReg = MRI.createVirtualRegister(SReg32RegClass);
+    Register FF1MaskX4Reg = MRI.createVirtualRegister(SReg32RegClass);
+    Register ValReg = MRI.createVirtualRegister(SReg32XM0RegClass);
+    Register MaskReg = MRI.createVirtualRegister(SReg32XM0RegClass);
+
+    bool IsWave32 = ST.isWave32();
+
+    uint32_t IdentityValue =
+        (Opc == AMDGPU::S_MIN_U32) ? std::numeric_limits<uint32_t>::max() : 0;
+
+    BuildMI(BB, I, DL,
+            TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
+            ExecCopyReg)
+        .addReg(IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC);
+
+    BuildMI(BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), AccReg)
+        .addImm(IdentityValue);
+    BuildMI(BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), InitialBPermAddrReg)
+        .addImm(0);
+    BuildMI(BB, I, DL, TII->get(AMDGPU::S_BRANCH)).addMBB(ComputeLoop);
+
+    I = ComputeLoop->end();
+
+    auto PhiActiveLanesInst =
+        BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), ActiveLanesReg)
+            .addReg(ExecCopyReg)
+            .addMBB(&BB);
+    auto PhiAccInst =
+        BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), AccReg1)
+            .addReg(AccReg)
+            .addMBB(&BB);
+    auto PhiBPermAddrInst =
+        BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), BPermAddrReg)
+            .addReg(InitialBPermAddrReg)
+            .addMBB(&BB);
+
+    BuildMI(*ComputeLoop, I, DL,
+            TII->get(IsWave32 ? AMDGPU::S_FF1_I32_B32 : AMDGPU::S_FF1_I32_B64),
+            FF1ActiveLanesReg)
+        .addReg(ActiveLanesReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32), ValReg)
+        .addReg(SrcReg)
+        .addReg(FF1ActiveLanesReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32), MaskReg)
+        .addReg(DivergentMaskReg)
+        .addReg(FF1ActiveLanesReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_FF1_I32_B32), FF1MaskReg)
+        .addReg(MaskReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_READLANE_B32), AccSGPRReg)
+        .addReg(AccReg1)
+        .addReg(FF1MaskReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(Opc), UpdatedAccSGPRReg)
+        .addReg(AccSGPRReg)
+        .addReg(ValReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+        .addReg(FF1MaskReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_WRITELANE_B32),
+            UpdatedAccReg)
+        .addReg(UpdatedAccSGPRReg)
+        .addReg(AMDGPU::M0)
+        .addReg(AccReg1);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_LSHL_B32), FF1MaskX4Reg)
+        .addReg(FF1MaskReg)
+        .addImm(2);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
+        .addReg(FF1ActiveLanesReg);
+
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::V_WRITELANE_B32),
+            UpdatedBPermAddrReg)
+        .addReg(FF1MaskX4Reg)
+        .addReg(AMDGPU::M0)
+        .addReg(BPermAddrReg);
+
+    unsigned BITSETOpc =
+        IsWave32 ? AMDGPU::S_BITSET0_B32 : AMDGPU::S_BITSET0_B64;
+    BuildMI(*ComputeLoop, I, DL, TII->get(BITSETOpc), UpdatedActiveLanesReg)
+        .addReg(FF1ActiveLanesReg)
+        .addReg(ActiveLanesReg);
+
+    PhiActiveLanesInst.addReg(UpdatedActiveLanesReg).addMBB(ComputeLoop);
+    PhiAccInst.addReg(UpdatedAccReg).addMBB(ComputeLoop);
+    PhiBPermAddrInst.addReg(UpdatedBPermAddrReg).addMBB(ComputeLoop);
+
+    unsigned CMPOpc = IsWave32 ? AMDGPU::S_CMP_LG_U32 : AMDGPU::S_CMP_LG_U64;
+    BuildMI(*ComputeLoop, I, DL, TII->get(CMPOpc))
+        .addReg(UpdatedActiveLanesReg)
+        .addImm(0);
+    BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
+        .addMBB(ComputeLoop);
+
+    BuildMI(*ComputeEnd, ComputeEnd->begin(), DL,
+            TII->get(AMDGPU::DS_BPERMUTE_B32), DstReg)
+        .addReg(UpdatedBPermAddrReg)
+        .addReg(UpdatedAccReg)
+        .addImm(0);
+
+    RetBB = ComputeEnd;
+  }
+  MI.eraseFromParent();
+  return RetBB;
 }
 
 MachineBasicBlock *
@@ -5310,9 +5318,11 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO_U32:
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_U32);
   case AMDGPU::WAVE_REDUCE_WRT_DIVERGENT_MASK_UMIN_PSEUDO_U32:
-    return lowerWaveReduceWrtDivergentMask(MI, *BB, *getSubtarget(), AMDGPU::S_MIN_U32);
+    return lowerWaveReduceWrtDivergentMask(MI, *BB, *getSubtarget(),
+                                           AMDGPU::S_MIN_U32);
   case AMDGPU::WAVE_REDUCE_WRT_DIVERGENT_MASK_UMAX_PSEUDO_U32:
-    return lowerWaveReduceWrtDivergentMask(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_U32);
+    return lowerWaveReduceWrtDivergentMask(MI, *BB, *getSubtarget(),
+                                           AMDGPU::S_MAX_U32);
   case AMDGPU::S_UADDO_PSEUDO:
   case AMDGPU::S_USUBO_PSEUDO: {
     const DebugLoc &DL = MI.getDebugLoc();

``````````

</details>


https://github.com/llvm/llvm-project/pull/135138

