[llvm] [AMDGPU] Introduce "amdgpu-uniform-intrinsic-combine" pass to combine uniform AMDGPU lane Intrinsics. (PR #116953)

Pankaj Dwivedi via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 25 05:05:04 PDT 2025


https://github.com/PankajDwivedi-25 updated https://github.com/llvm/llvm-project/pull/116953

>From be7ad7882855462e03c479c283dd3ef8c1d9ee0b Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Thu, 21 Nov 2024 12:35:56 +0530
Subject: [PATCH 01/30] [WIP][AMDGPU] combine uniform AMDGPU lane Intrinsics

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               |  11 +
 llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def |   1 +
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 183 +++++++++++++++
 llvm/lib/Target/AMDGPU/CMakeLists.txt         |   1 +
 .../amdgpu-uniform-intrinsic-combine.ll       | 221 ++++++++++++++++++
 5 files changed, 417 insertions(+)
 create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index f5c2b09c84806..f219fd4af2c48 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -491,6 +491,17 @@ extern char &GCNRewritePartialRegUsesID;
 void initializeAMDGPUWaitSGPRHazardsLegacyPass(PassRegistry &);
 extern char &AMDGPUWaitSGPRHazardsLegacyID;
 
+void initializeAMDGPUUniformIntrinsicCombinePass(PassRegistry &);
+extern char &AMDGPUUniformIntrinsicCombineID;
+FunctionPass *createAMDGPUUniformIntrinsicCombinePass();
+
+struct AMDGPUUniformIntrinsicCombinePass
+    : public PassInfoMixin<AMDGPUUniformIntrinsicCombinePass> {
+  const AMDGPUTargetMachine &TM;
+  AMDGPUUniformIntrinsicCombinePass(const AMDGPUTargetMachine &TM_) : TM(TM_) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
 namespace AMDGPU {
 enum TargetIndex {
   TI_CONSTDATA_START,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 6832a17c37177..b22083a670e20 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -67,6 +67,7 @@ FUNCTION_PASS("amdgpu-unify-divergent-exit-nodes",
               AMDGPUUnifyDivergentExitNodesPass())
 FUNCTION_PASS("amdgpu-usenative", AMDGPUUseNativeCallsPass())
 FUNCTION_PASS("si-annotate-control-flow", SIAnnotateControlFlowPass(*static_cast<const GCNTargetMachine *>(this)))
+FUNCTION_PASS("amdgpu-uniform-intrinsic-combine", AMDGPUUniformIntrinsicCombinePass(*this))
 #undef FUNCTION_PASS
 
 #ifndef FUNCTION_ANALYSIS
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
new file mode 100644
index 0000000000000..1288b70697e63
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -0,0 +1,183 @@
+//===-- AMDGPUUniformIntrinsicCombine.cpp
+//-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass combines uniform intrinsic instructions.
+/// Unifrom Intrinsic combine uses pattern match to identify and optimize
+/// redundent intrinsic instruction.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "GCNSubtarget.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/UniformityAnalysis.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+
+#define DEBUG_TYPE "amdgpu-uniform-intrinsic-combine"
+
+using namespace llvm;
+using namespace llvm::AMDGPU;
+using namespace llvm::PatternMatch;
+
+namespace {
+
+class AMDGPUUniformIntrinsicCombine : public FunctionPass {
+public:
+  static char ID;
+  AMDGPUUniformIntrinsicCombine() : FunctionPass(ID) {}
+
+  bool runOnFunction(Function &F) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.addRequired<UniformityInfoWrapperPass>();
+    AU.addRequired<TargetPassConfig>();
+  }
+};
+
+class AMDGPUUniformIntrinsicCombineImpl
+    : public InstVisitor<AMDGPUUniformIntrinsicCombineImpl> {
+private:
+  const UniformityInfo *UI;
+
+  void optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
+
+public:
+  AMDGPUUniformIntrinsicCombineImpl() = delete;
+
+  AMDGPUUniformIntrinsicCombineImpl(const UniformityInfo *UI) : UI(UI) {}
+
+  bool run(Function &F);
+};
+
+} // namespace
+
+char AMDGPUUniformIntrinsicCombine::ID = 0;
+
+char &llvm::AMDGPUUniformIntrinsicCombineID = AMDGPUUniformIntrinsicCombine::ID;
+
+bool AMDGPUUniformIntrinsicCombine::runOnFunction(Function &F) {
+  if (skipFunction(F)) {
+    return false;
+  }
+
+  const UniformityInfo *UI =
+      &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
+
+  return AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
+}
+
+PreservedAnalyses
+AMDGPUUniformIntrinsicCombinePass::run(Function &F,
+                                       FunctionAnalysisManager &AM) {
+
+  const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
+
+  // @todo check if it is required that this method must return bool, if so
+  // figure out what can be returned.
+  bool IsChanged = AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
+
+  if (!IsChanged) {
+    return PreservedAnalyses::all();
+  }
+
+  PreservedAnalyses PA;
+  PA.preserve<DominatorTreeAnalysis>();
+  return PA;
+}
+
+bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
+
+  // @todo check if it is required that this method must return bool, if so
+  // figure out what can be returned.
+  const bool IsChanged{false};
+
+  // Iterate over each instruction in the function to get the desired intrinsic
+  // inst to check for optimization.
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      if (auto *Call = dyn_cast<CallInst>(&I)) {
+        if (auto *Intrinsic = dyn_cast<IntrinsicInst>(Call)) {
+          optimizeUniformIntrinsicInst(*Intrinsic);
+        }
+      }
+    }
+  }
+
+  return IsChanged;
+}
+
+void AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
+    IntrinsicInst &II) const {
+  llvm::Intrinsic::ID IID = II.getIntrinsicID();
+
+  switch (IID) {
+  case Intrinsic::amdgcn_permlane64: {
+    Value *Src = II.getOperand(0);
+    if (UI->isUniform(Src)) {
+      return II.replaceAllUsesWith(Src);
+    }
+    break;
+  }
+  case Intrinsic::amdgcn_readfirstlane:
+  case Intrinsic::amdgcn_readlane: {
+    Value *Srcv = II.getOperand(0);
+    if (UI->isUniform(Srcv)) {
+      return II.replaceAllUsesWith(Srcv);
+    }
+
+    // The rest of these may not be safe if the exec may not be the same between
+    // the def and use.
+    Value *Src = II.getArgOperand(0);
+    Instruction *SrcInst = dyn_cast<Instruction>(Src);
+    if (SrcInst && SrcInst->getParent() != II.getParent())
+      break;
+
+    // readfirstlane (readfirstlane x) -> readfirstlane x
+    // readlane (readfirstlane x), y -> readfirstlane x
+    if (match(Src,
+              PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readfirstlane>())) {
+      return II.replaceAllUsesWith(Src);
+    }
+
+    if (IID == Intrinsic::amdgcn_readfirstlane) {
+      // readfirstlane (readlane x, y) -> readlane x, y
+      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>())) {
+        return II.replaceAllUsesWith(Src);
+      }
+    } else {
+      // readlane (readlane x, y), y -> readlane x, y
+      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>(
+                         PatternMatch::m_Value(),
+                         PatternMatch::m_Specific(II.getArgOperand(1))))) {
+        return II.replaceAllUsesWith(Src);
+      }
+    }
+    break;
+  }
+  }
+}
+
+INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,
+                      "AMDGPU uniformIntrinsic Combine", false, false)
+INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,
+                    "AMDGPU uniformIntrinsic Combine", false, false)
+
+FunctionPass *llvm::createAMDGPUUniformIntrinsicCombinePass() {
+  return new AMDGPUUniformIntrinsicCombine();
+}
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 09a3096602fc3..984f7616476be 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -62,6 +62,7 @@ add_llvm_target(AMDGPUCodeGen
   AMDGPUHSAMetadataStreamer.cpp
   AMDGPUInsertDelayAlu.cpp
   AMDGPUInstCombineIntrinsic.cpp
+  AMDGPUUniformIntrinsicCombine.cpp
   AMDGPUInstrInfo.cpp
   AMDGPUInstructionSelector.cpp
   AMDGPUISelDAGToDAG.cpp
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
new file mode 100644
index 0000000000000..6f5279bb717c7
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -0,0 +1,221 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+
+define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_constant(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; GFX-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.permlane64(i32 77)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_undef(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_sgpr(ptr addrspace(1) %out, i32 %src) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_sgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_vgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_vgpr_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid2 = add i32 %tid, 1
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_constant(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_undef(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_sgpr(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_sgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
+; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_vgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readlane_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_vgpr_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; GFX-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %tidx2 = add i32 %tidx, 1
+  %tidy2 = add i32 %tidy, 2
+  %v = call i32 @llvm.amdgcn.readlane(i32 %tidx2, i32 %tidy2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_undef(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_sgpr(ptr addrspace(1) %out, i32 %src0) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_sgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
+; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_vgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_vgpr_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid2 = add i32 %tid, 1
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10: {{.*}}
+; GFX11: {{.*}}
+; GFX12: {{.*}}

>From 8dae2e78bf60a1271c8b34c600aeb6eee40cc6eb Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Fri, 22 Nov 2024 19:42:42 +0530
Subject: [PATCH 02/30] refactored and updated intrinsics handling

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 71 +++++++------------
 1 file changed, 27 insertions(+), 44 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 1288b70697e63..00ea250586231 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -1,5 +1,4 @@
-//===-- AMDGPUUniformIntrinsicCombine.cpp
-//-----------------------------------------===//
+//===-- AMDGPUUniformIntrinsicCombine.cpp ---------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -9,13 +8,16 @@
 //
 /// \file
 /// This pass combines uniform intrinsic instructions.
-/// Unifrom Intrinsic combine uses pattern match to identify and optimize
-/// redundent intrinsic instruction.
+/// Uniform Intrinsic Combine uses pattern match to identify and optimize
+/// redundant intrinsic instructions.
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
 #include "GCNSubtarget.h"
 #include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/UniformityAnalysis.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/IRBuilder.h"
@@ -42,7 +44,7 @@ class AMDGPUUniformIntrinsicCombine : public FunctionPass {
   bool runOnFunction(Function &F) override;
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.setPreservesCFG();
     AU.addRequired<UniformityInfoWrapperPass>();
     AU.addRequired<TargetPassConfig>();
   }
@@ -53,7 +55,7 @@ class AMDGPUUniformIntrinsicCombineImpl
 private:
   const UniformityInfo *UI;
 
-  void optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
+  bool optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
 
 public:
   AMDGPUUniformIntrinsicCombineImpl() = delete;
@@ -86,8 +88,6 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
   const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
 
-  // @todo check if it is required that this method must return bool, if so
-  // figure out what can be returned.
   bool IsChanged = AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
 
   if (!IsChanged) {
@@ -96,14 +96,16 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
   PreservedAnalyses PA;
   PA.preserve<DominatorTreeAnalysis>();
+  PA.preserve<LoopAnalysis>();
+  PA.preserve<ScalarEvolutionAnalysis>();
+  PA.preserve<UniformityInfoAnalysis>();
+  PA.preserve<TargetLibraryAnalysis>();
   return PA;
 }
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
 
-  // @todo check if it is required that this method must return bool, if so
-  // figure out what can be returned.
-  const bool IsChanged{false};
+  bool IsChanged{false};
 
   // Iterate over each instruction in the function to get the desired intrinsic
   // inst to check for optimization.
@@ -111,7 +113,7 @@ bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
     for (Instruction &I : BB) {
       if (auto *Call = dyn_cast<CallInst>(&I)) {
         if (auto *Intrinsic = dyn_cast<IntrinsicInst>(Call)) {
-          optimizeUniformIntrinsicInst(*Intrinsic);
+          IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
         }
       }
     }
@@ -120,55 +122,36 @@ bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   return IsChanged;
 }
 
-void AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
+bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
     IntrinsicInst &II) const {
   llvm::Intrinsic::ID IID = II.getIntrinsicID();
 
   switch (IID) {
-  case Intrinsic::amdgcn_permlane64: {
-    Value *Src = II.getOperand(0);
-    if (UI->isUniform(Src)) {
-      return II.replaceAllUsesWith(Src);
-    }
-    break;
-  }
+  case Intrinsic::amdgcn_permlane64:
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
-    Value *Srcv = II.getOperand(0);
-    if (UI->isUniform(Srcv)) {
-      return II.replaceAllUsesWith(Srcv);
-    }
-
-    // The rest of these may not be safe if the exec may not be the same between
-    // the def and use.
     Value *Src = II.getArgOperand(0);
+    // The below part may not be safe if the exec is not same between the def
+    // and use. Is this part stilll required??
     Instruction *SrcInst = dyn_cast<Instruction>(Src);
     if (SrcInst && SrcInst->getParent() != II.getParent())
       break;
 
     // readfirstlane (readfirstlane x) -> readfirstlane x
+    // readfirstlane (readlane x, y) -> readlane x, y
     // readlane (readfirstlane x), y -> readfirstlane x
-    if (match(Src,
-              PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readfirstlane>())) {
-      return II.replaceAllUsesWith(Src);
-    }
-
-    if (IID == Intrinsic::amdgcn_readfirstlane) {
-      // readfirstlane (readlane x, y) -> readlane x, y
-      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>())) {
-        return II.replaceAllUsesWith(Src);
-      }
-    } else {
-      // readlane (readlane x, y), y -> readlane x, y
-      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>(
-                         PatternMatch::m_Value(),
-                         PatternMatch::m_Specific(II.getArgOperand(1))))) {
-        return II.replaceAllUsesWith(Src);
-      }
+    // readlane (readlane x, y), z -> readlane x, y
+    // All these cases are identical and are dependent on the inner intrinsic
+    // results value.(i.e.irrespective of the which of these case is inner
+    // intrinsic will write the same value across all output lane indexes)
+    if (UI->isUniform(II.getOperandUse(0))) {
+      II.replaceAllUsesWith(Src);
+      return true;
     }
     break;
   }
   }
+  return false;
 }
 
 INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,

>From d8d36667ae06390936af56a83fd797ad071c21a4 Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Fri, 22 Nov 2024 20:39:15 +0530
Subject: [PATCH 03/30] removed redundant casting

---
 .../Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp   | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 00ea250586231..b7321f8515750 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -111,10 +111,8 @@ bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   // inst to check for optimization.
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
-      if (auto *Call = dyn_cast<CallInst>(&I)) {
-        if (auto *Intrinsic = dyn_cast<IntrinsicInst>(Call)) {
-          IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
-        }
+      if (auto *Intrinsic = dyn_cast<IntrinsicInst>(&I)) {
+        IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
       }
     }
   }
@@ -131,11 +129,6 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
     Value *Src = II.getArgOperand(0);
-    // The below part may not be safe if the exec is not same between the def
-    // and use. Is this part stilll required??
-    Instruction *SrcInst = dyn_cast<Instruction>(Src);
-    if (SrcInst && SrcInst->getParent() != II.getParent())
-      break;
 
     // readfirstlane (readfirstlane x) -> readfirstlane x
     // readfirstlane (readlane x, y) -> readlane x, y

>From 4311e656c0b124555a3cdd14b6e4c13c166bf62a Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Fri, 6 Dec 2024 01:23:42 +0530
Subject: [PATCH 04/30] refactored, added more test

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  |  29 +--
 .../amdgpu-uniform-intrinsic-combine.ll       | 222 ++++++++++++++++--
 2 files changed, 208 insertions(+), 43 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index b7321f8515750..94f1366691929 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -7,9 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 /// \file
-/// This pass combines uniform intrinsic instructions.
-/// Uniform Intrinsic Combine uses pattern match to identify and optimize
-/// redundant intrinsic instructions.
+/// This pass simplifies certain intrinsic calls when the arguments are uniform.
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
@@ -21,6 +19,7 @@
 #include "llvm/Analysis/UniformityAnalysis.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/PatternMatch.h"
@@ -35,14 +34,11 @@ using namespace llvm::AMDGPU;
 using namespace llvm::PatternMatch;
 
 namespace {
-
 class AMDGPUUniformIntrinsicCombine : public FunctionPass {
 public:
   static char ID;
   AMDGPUUniformIntrinsicCombine() : FunctionPass(ID) {}
-
   bool runOnFunction(Function &F) override;
-
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesCFG();
     AU.addRequired<UniformityInfoWrapperPass>();
@@ -54,46 +50,36 @@ class AMDGPUUniformIntrinsicCombineImpl
     : public InstVisitor<AMDGPUUniformIntrinsicCombineImpl> {
 private:
   const UniformityInfo *UI;
-
   bool optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
 
 public:
   AMDGPUUniformIntrinsicCombineImpl() = delete;
-
   AMDGPUUniformIntrinsicCombineImpl(const UniformityInfo *UI) : UI(UI) {}
-
   bool run(Function &F);
 };
-
 } // namespace
 
 char AMDGPUUniformIntrinsicCombine::ID = 0;
-
 char &llvm::AMDGPUUniformIntrinsicCombineID = AMDGPUUniformIntrinsicCombine::ID;
 
 bool AMDGPUUniformIntrinsicCombine::runOnFunction(Function &F) {
   if (skipFunction(F)) {
     return false;
   }
-
   const UniformityInfo *UI =
       &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
-
   return AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
 }
 
 PreservedAnalyses
 AMDGPUUniformIntrinsicCombinePass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
-
   const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
-
   bool IsChanged = AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
 
   if (!IsChanged) {
     return PreservedAnalyses::all();
   }
-
   PreservedAnalyses PA;
   PA.preserve<DominatorTreeAnalysis>();
   PA.preserve<LoopAnalysis>();
@@ -104,19 +90,14 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 }
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
-
   bool IsChanged{false};
-
   // Iterate over each instruction in the function to get the desired intrinsic
   // inst to check for optimization.
-  for (BasicBlock &BB : F) {
-    for (Instruction &I : BB) {
-      if (auto *Intrinsic = dyn_cast<IntrinsicInst>(&I)) {
-        IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
-      }
+  for (Instruction &I : instructions(F)) {
+    if (auto *Intrinsic = dyn_cast<IntrinsicInst>(&I)) {
+      IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
     }
   }
-
   return IsChanged;
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index 6f5279bb717c7..8f4b70c632e44 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -25,18 +25,20 @@ define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @permlane64_sgpr(ptr addrspace(1) %out, i32 %src) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_sgpr(
+define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_uniform(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC]])
+; GFX-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
 ; GFX-NEXT:    ret void
 ;
-  %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
   store i32 %v, ptr addrspace(1) %out
   ret void
 }
 
-define amdgpu_kernel void @permlane64_vgpr(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_vgpr(
+define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
@@ -52,8 +54,8 @@ define amdgpu_kernel void @permlane64_vgpr(i32 addrspace(1)* %out) {
   ret void
 }
 
-define amdgpu_kernel void @permlane64_vgpr_expression(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_vgpr_expression(
+define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
@@ -92,8 +94,8 @@ define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @readlane_sgpr(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_sgpr(
+define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
 ; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
@@ -104,8 +106,8 @@ define amdgpu_kernel void @readlane_sgpr(ptr addrspace(1) %out, i32 %src0, i32 %
   ret void
 }
 
-define amdgpu_kernel void @readlane_vgpr(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_vgpr(
+define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -123,8 +125,8 @@ define amdgpu_kernel void @readlane_vgpr(i32 addrspace(1)* %out) {
   ret void
 }
 
-define amdgpu_kernel void @readlane_vgpr_expression(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_vgpr_expression(
+define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
@@ -167,8 +169,8 @@ define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @readfirstlane_sgpr(ptr addrspace(1) %out, i32 %src0) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_sgpr(
+define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
 ; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
@@ -179,8 +181,8 @@ define amdgpu_kernel void @readfirstlane_sgpr(ptr addrspace(1) %out, i32 %src0)
   ret void
 }
 
-define amdgpu_kernel void @readfirstlane_vgpr(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_vgpr(
+define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
@@ -196,8 +198,8 @@ define amdgpu_kernel void @readfirstlane_vgpr(i32 addrspace(1)* %out) {
   ret void
 }
 
-define amdgpu_kernel void @readfirstlane_vgpr_expression(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_vgpr_expression(
+define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_expression(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
@@ -215,6 +217,188 @@ define amdgpu_kernel void @readfirstlane_vgpr_expression(i32 addrspace(1)* %out)
   ret void
 }
 
+define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
+  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
+  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_readlane(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+
+define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; GFX-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; GFX-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; GFX-NEXT:    ret void
+;
+  %min_v = call i32 @llvm.amdgcn.permlane64(i32 -2147483648)
+  store i32 %min_v, ptr addrspace(1) %out_min
+  %max_v = call i32 @llvm.amdgcn.permlane64(i32 2147483647)
+  store i32 %max_v, ptr addrspace(1) %out_max
+  ret void
+}
+
+define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = add i32 %tidx, 5
+  %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %random = xor i32 123, 456
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %random)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_invalid(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %undef_v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+  store i32 %undef_v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %idx1 = call i32 @llvm.amdgcn.workitem.id.x()
+  %idx2 = mul i32 %idx1, 2
+  %v = call i32 @llvm.amdgcn.readlane(i32 %idx1, i32 %idx2)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+; Test case: Ensure that a loop with a divergent exit and a uniform value
+; used by an intrinsic outside the loop is not optimized due to temporal divergence.
+
+define amdgpu_kernel void @test_divergent_exit(ptr addrspace(1) %out, i32 %max_iter, i32 %div_cond) {
+; GFX-LABEL: define amdgpu_kernel void @test_divergent_exit(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MAX_ITER:%.*]], i32 [[DIV_COND:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:  [[ENTRY:.*:]]
+; GFX-NEXT:    [[ITER:%.*]] = alloca i32, align 4
+; GFX-NEXT:    store i32 0, ptr [[ITER]], align 4
+; GFX-NEXT:    br label %[[LOOP:.*]]
+; GFX:       [[LOOP]]:
+; GFX-NEXT:    [[ITER_VAL:%.*]] = load i32, ptr [[ITER]], align 4
+; GFX-NEXT:    [[NEW_ITER:%.*]] = add i32 [[ITER_VAL]], 1
+; GFX-NEXT:    store i32 [[NEW_ITER]], ptr [[ITER]], align 4
+; GFX-NEXT:    [[COND1:%.*]] = icmp sgt i32 [[NEW_ITER]], [[MAX_ITER]]
+; GFX-NEXT:    [[COND2:%.*]] = icmp eq i32 [[DIV_COND]], 0
+; GFX-NEXT:    [[EXIT:%.*]] = or i1 [[COND1]], [[COND2]]
+; GFX-NEXT:    br i1 [[EXIT]], label %[[EXIT_BLOCK:.*]], label %[[LOOP]]
+; GFX:       [[EXIT_BLOCK]]:
+; GFX-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[ITER]], align 4
+; GFX-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[FINAL_VAL]])
+; GFX-NEXT:    store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+entry:
+  %iter = alloca i32, align 4
+  store i32 0, ptr %iter, align 4
+  br label %loop
+
+loop:
+  ; Increment loop counter
+  %iter_val = load i32, ptr %iter, align 4
+  %new_iter = add i32 %iter_val, 1
+  store i32 %new_iter, ptr %iter, align 4
+
+  ; Check exit conditions
+  %cond1 = icmp sgt i32 %new_iter, %max_iter
+  %cond2 = icmp eq i32 %div_cond, 0
+  %exit = or i1 %cond1, %cond2
+  br i1 %exit, label %exit_block, label %loop
+
+exit_block:
+  ; Use the uniform value in an intrinsic outside the loop
+  %final_val = load i32, ptr %iter, align 4
+  %result = call i32 @llvm.amdgcn.permlane64(i32 %final_val)
+  store i32 %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.permlane64(i32)
+
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX10: {{.*}}
 ; GFX11: {{.*}}

>From 1bed57f7fe30738aa72634a11be7f09538a9dedb Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Thu, 9 Jan 2025 23:24:16 +0530
Subject: [PATCH 05/30] integrated into pipeline, more tests added

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               | 10 ++--
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 11 +++-
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 25 +++++---
 .../amdgpu-simplify-trivial-waterfall-loop.ll | 49 +++++++++++++++
 .../amdgpu-uniform-intrinsic-combine.ll       | 59 ++++++++++++++++---
 5 files changed, 132 insertions(+), 22 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index f219fd4af2c48..f5c663cca46bd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -491,14 +491,14 @@ extern char &GCNRewritePartialRegUsesID;
 void initializeAMDGPUWaitSGPRHazardsLegacyPass(PassRegistry &);
 extern char &AMDGPUWaitSGPRHazardsLegacyID;
 
-void initializeAMDGPUUniformIntrinsicCombinePass(PassRegistry &);
-extern char &AMDGPUUniformIntrinsicCombineID;
-FunctionPass *createAMDGPUUniformIntrinsicCombinePass();
-
+void initializeAMDGPUUniformIntrinsicCombineLegacyPass(PassRegistry &);
+extern char &AMDGPUUniformIntrinsicCombineLegacyPassID;
+FunctionPass *createAMDGPUUniformIntrinsicCombineLegacyPass(
+    const AMDGPUTargetMachine *TM = nullptr);
 struct AMDGPUUniformIntrinsicCombinePass
     : public PassInfoMixin<AMDGPUUniformIntrinsicCombinePass> {
   const AMDGPUTargetMachine &TM;
-  AMDGPUUniformIntrinsicCombinePass(const AMDGPUTargetMachine &TM_) : TM(TM_) {}
+  AMDGPUUniformIntrinsicCombinePass(const AMDGPUTargetMachine &TM) : TM(TM) {}
   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
 };
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index d0454cce15756..3f7995722be64 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -477,6 +477,11 @@ static cl::opt<bool> HasClosedWorldAssumption(
     "amdgpu-link-time-closed-world",
     cl::desc("Whether has closed-world assumption at link time"),
     cl::init(false), cl::Hidden);
+
+static cl::opt<bool> EnableUniformIntrinsicCombine(
+    "amdgpu-enable-uniform-intrinsic-combine",
+    cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
+    cl::init(true), cl::Hidden);
 
 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
   // Register the target
@@ -561,6 +566,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
   initializeGCNRegPressurePrinterPass(*PR);
   initializeAMDGPUPreloadKernArgPrologLegacyPass(*PR);
   initializeAMDGPUWaitSGPRHazardsLegacyPass(*PR);
+  initializeAMDGPUUniformIntrinsicCombineLegacyPass(*PR);
 }
 
 static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -830,13 +836,16 @@ void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
       });
 
   PB.registerPeepholeEPCallback(
-      [](FunctionPassManager &FPM, OptimizationLevel Level) {
+      [this](FunctionPassManager &FPM, OptimizationLevel Level) {
         if (Level == OptimizationLevel::O0)
           return;
 
         FPM.addPass(AMDGPUUseNativeCallsPass());
         if (EnableLibCallSimplify)
           FPM.addPass(AMDGPUSimplifyLibCallsPass());
+
+        if (EnableUniformIntrinsicCombine)
+          FPM.addPass(AMDGPUUniformIntrinsicCombinePass(*this));
       });
 
   PB.registerCGSCCOptimizerLateEPCallback(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 94f1366691929..ad2c5e223aa89 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -34,10 +34,15 @@ using namespace llvm::AMDGPU;
 using namespace llvm::PatternMatch;
 
 namespace {
-class AMDGPUUniformIntrinsicCombine : public FunctionPass {
+class AMDGPUUniformIntrinsicCombineLegacy : public FunctionPass {
 public:
   static char ID;
-  AMDGPUUniformIntrinsicCombine() : FunctionPass(ID) {}
+  const AMDGPUTargetMachine *AMDGPUTM;
+  AMDGPUUniformIntrinsicCombineLegacy(const AMDGPUTargetMachine *TM)
+      : FunctionPass(ID), AMDGPUTM(TM) {
+    initializeAMDGPUUniformIntrinsicCombineLegacyPass(
+        *PassRegistry::getPassRegistry());
+  }
   bool runOnFunction(Function &F) override;
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesCFG();
@@ -59,10 +64,11 @@ class AMDGPUUniformIntrinsicCombineImpl
 };
 } // namespace
 
-char AMDGPUUniformIntrinsicCombine::ID = 0;
-char &llvm::AMDGPUUniformIntrinsicCombineID = AMDGPUUniformIntrinsicCombine::ID;
+char AMDGPUUniformIntrinsicCombineLegacy::ID = 0;
+char &llvm::AMDGPUUniformIntrinsicCombineLegacyPassID =
+    AMDGPUUniformIntrinsicCombineLegacy::ID;
 
-bool AMDGPUUniformIntrinsicCombine::runOnFunction(Function &F) {
+bool AMDGPUUniformIntrinsicCombineLegacy::runOnFunction(Function &F) {
   if (skipFunction(F)) {
     return false;
   }
@@ -128,13 +134,14 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   return false;
 }
 
-INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,
+INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombineLegacy, DEBUG_TYPE,
                       "AMDGPU uniformIntrinsic Combine", false, false)
 INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
-INITIALIZE_PASS_END(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,
+INITIALIZE_PASS_END(AMDGPUUniformIntrinsicCombineLegacy, DEBUG_TYPE,
                     "AMDGPU uniformIntrinsic Combine", false, false)
 
-FunctionPass *llvm::createAMDGPUUniformIntrinsicCombinePass() {
-  return new AMDGPUUniformIntrinsicCombine();
+FunctionPass *llvm::createAMDGPUUniformIntrinsicCombineLegacyPass(
+    const AMDGPUTargetMachine *TM) {
+  return new AMDGPUUniformIntrinsicCombineLegacy(TM);
 }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
new file mode 100644
index 0000000000000..2a1fddff5f3c8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+
+define amdgpu_kernel void @trivial_waterfall_loop(ptr addrspace(1) %out, i32 %src) {
+; CHECK-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
+; CHECK:       store i32 %src, ptr addrspace(1) %out, align 4
+; CHECK-NOT:   br label %loop
+; GFX-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
+; GFX-SAME: ptr addrspace(1) nocapture writeonly [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; GFX-NEXT:  [[ENTRY:.*:]]
+; GFX-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+entry:
+  ; Initialize result to zero
+  %result = alloca i32, align 4
+  store i32 0, ptr %result, align 4
+  br label %loop
+
+loop:
+  ; Load the current result
+  %cur_result = load i32, ptr %result, align 4
+
+  ; Compute the next value
+  %next_value = add i32 %cur_result, %src
+
+  ; Apply the readfirstlane intrinsic for uniformity
+  %uniform_value = call i32 @llvm.amdgcn.readfirstlane(i32 %next_value)
+
+  ; Store the uniform result back
+  store i32 %uniform_value, ptr %result, align 4
+
+  ; This is a trivial loop that always exits after one iteration
+  br i1 true, label %exit, label %loop
+
+exit:
+  ; Store the result to the output pointer
+  %final_result = load i32, ptr %result, align 4
+  store i32 %final_result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10: {{.*}}
+; GFX11: {{.*}}
+; GFX12: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index 8f4b70c632e44..40c0d11c68e5e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
 
 define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 ; GFX-LABEL: define amdgpu_kernel void @permlane64_constant(
@@ -28,7 +28,6 @@ define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
 define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 ; GFX-LABEL: define amdgpu_kernel void @permlane64_uniform(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC]])
 ; GFX-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
 ; GFX-NEXT:    ret void
 ;
@@ -97,7 +96,6 @@ define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
 ; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
 ; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
 ; GFX-NEXT:    ret void
 ;
@@ -172,7 +170,6 @@ define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
 ; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
 ; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
 ; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
 ; GFX-NEXT:    ret void
 ;
@@ -396,8 +393,56 @@ exit_block:
   ret void
 }
 
-declare i32 @llvm.amdgcn.permlane64(i32)
+; Define the kernel function
+define amdgpu_kernel void @trivial_waterfall_loop(ptr addrspace(1) %out, i32 %src) {
+; CHECK-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
+; CHECK:       store i32 %src, ptr addrspace(1) %out, align 4
+; CHECK-NOT:   br label %loop
+; GFX-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:  [[ENTRY:.*:]]
+; GFX-NEXT:    [[RESULT:%.*]] = alloca i32, align 4
+; GFX-NEXT:    store i32 0, ptr [[RESULT]], align 4
+; GFX-NEXT:    br label %[[LOOP:.*]]
+; GFX:       [[LOOP]]:
+; GFX-NEXT:    [[CUR_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
+; GFX-NEXT:    [[NEXT_VALUE:%.*]] = add i32 [[CUR_RESULT]], [[SRC]]
+; GFX-NEXT:    [[UNIFORM_VALUE:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[NEXT_VALUE]])
+; GFX-NEXT:    store i32 [[UNIFORM_VALUE]], ptr [[RESULT]], align 4
+; GFX-NEXT:    br i1 true, label %[[EXIT:.*]], label %[[LOOP]]
+; GFX:       [[EXIT]]:
+; GFX-NEXT:    [[FINAL_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
+; GFX-NEXT:    store i32 [[FINAL_RESULT]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+entry:
+  ; Initialize result to zero
+  %result = alloca i32, align 4
+  store i32 0, ptr %result, align 4
+  br label %loop
 
+loop:
+  ; Load the current result
+  %cur_result = load i32, ptr %result, align 4
+
+  ; Compute the next value
+  %next_value = add i32 %cur_result, %src
+
+  ; Apply the readfirstlane intrinsic for uniformity
+  %uniform_value = call i32 @llvm.amdgcn.readfirstlane(i32 %next_value)
+
+  ; Store the uniform result back
+  store i32 %uniform_value, ptr %result, align 4
+
+  ; This is a trivial loop that always exits after one iteration
+  br i1 true, label %exit, label %loop
+
+exit:
+  ; Store the result to the output pointer
+  %final_result = load i32, ptr %result, align 4
+  store i32 %final_result, ptr addrspace(1) %out, align 4
+  ret void
+}
 
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX10: {{.*}}

>From ed204b9568fe8213608b48f42491633d60d438ff Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 15 Jan 2025 16:58:15 +0530
Subject: [PATCH 06/30] removed unused gfx checks

---
 .../amdgpu-simplify-trivial-waterfall-loop.ll |  21 +-
 .../amdgpu-uniform-intrinsic-combine.ll       | 355 +++++++++---------
 2 files changed, 179 insertions(+), 197 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
index 2a1fddff5f3c8..56ba117ce1d30 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
@@ -1,18 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX10
 
 define amdgpu_kernel void @trivial_waterfall_loop(ptr addrspace(1) %out, i32 %src) {
-; CHECK-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
-; CHECK:       store i32 %src, ptr addrspace(1) %out, align 4
-; CHECK-NOT:   br label %loop
-; GFX-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
-; GFX-SAME: ptr addrspace(1) nocapture writeonly [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-; GFX-NEXT:  [[ENTRY:.*:]]
-; GFX-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
+; GFX10-SAME: ptr addrspace(1) nocapture writeonly [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
 entry:
   ; Initialize result to zero
@@ -43,7 +38,3 @@ exit:
   ret void
 }
 
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10: {{.*}}
-; GFX11: {{.*}}
-; GFX12: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index 40c0d11c68e5e..ddedd435d35aa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -1,14 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX10
 
 define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_constant(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
-; GFX-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_constant(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; GFX10-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 77)
   store i32 %v, ptr addrspace(1) %out
@@ -16,9 +14,9 @@ define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_undef(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_undef(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -26,10 +24,10 @@ define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_uniform(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
   store i32 %v, ptr addrspace(1) %out
@@ -37,14 +35,14 @@ define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 }
 
 define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
-; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
@@ -54,15 +52,15 @@ define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
 }
 
 define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
-; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid2 = add i32 %tid, 1
@@ -73,10 +71,10 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %o
 }
 
 define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_constant(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_constant(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
   store i32 %v, ptr addrspace(1) %out
@@ -84,9 +82,9 @@ define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_undef(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_undef(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -94,10 +92,10 @@ define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
   store i32 %v, ptr addrspace(1) %out
@@ -105,15 +103,15 @@ define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
-; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -124,17 +122,17 @@ define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out)
 }
 
 define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
-; GFX-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
-; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
-; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX10-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; GFX10-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -147,10 +145,10 @@ define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out
 }
 
 define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_constant(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
   store i32 %v, ptr addrspace(1) %out
@@ -158,9 +156,9 @@ define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_undef(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_undef(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -168,10 +166,10 @@ define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
   store i32 %v, ptr addrspace(1) %out
@@ -179,14 +177,14 @@ define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
-; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
@@ -196,15 +194,15 @@ define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out
 }
 
 define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_expression(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
-; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
-; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
+; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid2 = add i32 %tid, 1
@@ -215,10 +213,10 @@ define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
   %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
@@ -227,13 +225,13 @@ define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %ou
 }
 
 define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX10-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX10-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -244,12 +242,12 @@ define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; GFX10-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
@@ -259,13 +257,13 @@ define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_readlane(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_readlane(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX10-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX10-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -277,11 +275,11 @@ define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
 
 
 define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_boundary(
-; GFX-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
-; GFX-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; GFX10-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; GFX10-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; GFX10-NEXT:    ret void
 ;
   %min_v = call i32 @llvm.amdgcn.permlane64(i32 -2147483648)
   store i32 %min_v, ptr addrspace(1) %out_min
@@ -291,13 +289,13 @@ define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr ad
 }
 
 define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_cross_lane(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = add i32 %tidx, 5
@@ -307,10 +305,10 @@ define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readfirstlane_random(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %random = xor i32 123, 456
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %random)
@@ -319,9 +317,9 @@ define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @permlane64_invalid(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @permlane64_invalid(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    ret void
 ;
   %undef_v = call i32 @llvm.amdgcn.permlane64(i32 undef)
   store i32 %undef_v, ptr addrspace(1) %out
@@ -329,13 +327,13 @@ define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
-; GFX-LABEL: define amdgpu_kernel void @readlane_expression(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
-; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
-; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @readlane_expression(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX10-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
+; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
   %idx1 = call i32 @llvm.amdgcn.workitem.id.x()
   %idx2 = mul i32 %idx1, 2
@@ -348,25 +346,25 @@ define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
 ; used by an intrinsic outside the loop is not optimized due to temporal divergence.
 
 define amdgpu_kernel void @test_divergent_exit(ptr addrspace(1) %out, i32 %max_iter, i32 %div_cond) {
-; GFX-LABEL: define amdgpu_kernel void @test_divergent_exit(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MAX_ITER:%.*]], i32 [[DIV_COND:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:  [[ENTRY:.*:]]
-; GFX-NEXT:    [[ITER:%.*]] = alloca i32, align 4
-; GFX-NEXT:    store i32 0, ptr [[ITER]], align 4
-; GFX-NEXT:    br label %[[LOOP:.*]]
-; GFX:       [[LOOP]]:
-; GFX-NEXT:    [[ITER_VAL:%.*]] = load i32, ptr [[ITER]], align 4
-; GFX-NEXT:    [[NEW_ITER:%.*]] = add i32 [[ITER_VAL]], 1
-; GFX-NEXT:    store i32 [[NEW_ITER]], ptr [[ITER]], align 4
-; GFX-NEXT:    [[COND1:%.*]] = icmp sgt i32 [[NEW_ITER]], [[MAX_ITER]]
-; GFX-NEXT:    [[COND2:%.*]] = icmp eq i32 [[DIV_COND]], 0
-; GFX-NEXT:    [[EXIT:%.*]] = or i1 [[COND1]], [[COND2]]
-; GFX-NEXT:    br i1 [[EXIT]], label %[[EXIT_BLOCK:.*]], label %[[LOOP]]
-; GFX:       [[EXIT_BLOCK]]:
-; GFX-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[ITER]], align 4
-; GFX-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[FINAL_VAL]])
-; GFX-NEXT:    store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @test_divergent_exit(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MAX_ITER:%.*]], i32 [[DIV_COND:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[ITER:%.*]] = alloca i32, align 4
+; GFX10-NEXT:    store i32 0, ptr [[ITER]], align 4
+; GFX10-NEXT:    br label %[[LOOP:.*]]
+; GFX10:       [[LOOP]]:
+; GFX10-NEXT:    [[ITER_VAL:%.*]] = load i32, ptr [[ITER]], align 4
+; GFX10-NEXT:    [[NEW_ITER:%.*]] = add i32 [[ITER_VAL]], 1
+; GFX10-NEXT:    store i32 [[NEW_ITER]], ptr [[ITER]], align 4
+; GFX10-NEXT:    [[COND1:%.*]] = icmp sgt i32 [[NEW_ITER]], [[MAX_ITER]]
+; GFX10-NEXT:    [[COND2:%.*]] = icmp eq i32 [[DIV_COND]], 0
+; GFX10-NEXT:    [[EXIT:%.*]] = or i1 [[COND1]], [[COND2]]
+; GFX10-NEXT:    br i1 [[EXIT]], label %[[EXIT_BLOCK:.*]], label %[[LOOP]]
+; GFX10:       [[EXIT_BLOCK]]:
+; GFX10-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[ITER]], align 4
+; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[FINAL_VAL]])
+; GFX10-NEXT:    store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
 entry:
   %iter = alloca i32, align 4
@@ -395,25 +393,22 @@ exit_block:
 
 ; Define the kernel function
 define amdgpu_kernel void @trivial_waterfall_loop(ptr addrspace(1) %out, i32 %src) {
-; CHECK-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
-; CHECK:       store i32 %src, ptr addrspace(1) %out, align 4
-; CHECK-NOT:   br label %loop
-; GFX-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
-; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
-; GFX-NEXT:  [[ENTRY:.*:]]
-; GFX-NEXT:    [[RESULT:%.*]] = alloca i32, align 4
-; GFX-NEXT:    store i32 0, ptr [[RESULT]], align 4
-; GFX-NEXT:    br label %[[LOOP:.*]]
-; GFX:       [[LOOP]]:
-; GFX-NEXT:    [[CUR_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
-; GFX-NEXT:    [[NEXT_VALUE:%.*]] = add i32 [[CUR_RESULT]], [[SRC]]
-; GFX-NEXT:    [[UNIFORM_VALUE:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[NEXT_VALUE]])
-; GFX-NEXT:    store i32 [[UNIFORM_VALUE]], ptr [[RESULT]], align 4
-; GFX-NEXT:    br i1 true, label %[[EXIT:.*]], label %[[LOOP]]
-; GFX:       [[EXIT]]:
-; GFX-NEXT:    [[FINAL_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
-; GFX-NEXT:    store i32 [[FINAL_RESULT]], ptr addrspace(1) [[OUT]], align 4
-; GFX-NEXT:    ret void
+; GFX10-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
+; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; GFX10-NEXT:  [[ENTRY:.*:]]
+; GFX10-NEXT:    [[RESULT:%.*]] = alloca i32, align 4
+; GFX10-NEXT:    store i32 0, ptr [[RESULT]], align 4
+; GFX10-NEXT:    br label %[[LOOP:.*]]
+; GFX10:       [[LOOP]]:
+; GFX10-NEXT:    [[CUR_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
+; GFX10-NEXT:    [[NEXT_VALUE:%.*]] = add i32 [[CUR_RESULT]], [[SRC]]
+; GFX10-NEXT:    [[UNIFORM_VALUE:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[NEXT_VALUE]])
+; GFX10-NEXT:    store i32 [[UNIFORM_VALUE]], ptr [[RESULT]], align 4
+; GFX10-NEXT:    br i1 true, label %[[EXIT:.*]], label %[[LOOP]]
+; GFX10:       [[EXIT]]:
+; GFX10-NEXT:    [[FINAL_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
+; GFX10-NEXT:    store i32 [[FINAL_RESULT]], ptr addrspace(1) [[OUT]], align 4
+; GFX10-NEXT:    ret void
 ;
 entry:
   ; Initialize result to zero
@@ -444,7 +439,3 @@ exit:
   ret void
 }
 
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10: {{.*}}
-; GFX11: {{.*}}
-; GFX12: {{.*}}

>From 847fef424390778d9cb8dafab4a69741d94d34c7 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Fri, 7 Feb 2025 20:08:45 +0530
Subject: [PATCH 07/30] added pass to llc pipeline, more test added

---
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   2 +
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  |  11 +-
 ...amdgpu-miscellaneous-uniform-intrinsics.ll | 132 +++++++
 .../amdgpu-simplify-uniform-waterfall.ll      | 155 ++++++++
 .../amdgpu-uniform-intrinsic-combine.ll       | 374 +++++++-----------
 5 files changed, 429 insertions(+), 245 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 3f7995722be64..487590cb1f00b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1222,6 +1222,8 @@ void AMDGPUPassConfig::addIRPasses() {
   if (isPassEnabled(EnableImageIntrinsicOptimizer))
     addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
 
+  if (EnableUniformIntrinsicCombine)
+    addPass(createAMDGPUUniformIntrinsicCombineLegacyPass(&TM));
   // This can be disabled by passing ::Disable here or on the command line
   // with --expand-variadics-override=disable.
   addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index ad2c5e223aa89..8779466831c45 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -114,16 +114,9 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   switch (IID) {
   case Intrinsic::amdgcn_permlane64:
   case Intrinsic::amdgcn_readfirstlane:
-  case Intrinsic::amdgcn_readlane: {
+  case Intrinsic::amdgcn_readlane:
+  case Intrinsic::amdgcn_ballot: {
     Value *Src = II.getArgOperand(0);
-
-    // readfirstlane (readfirstlane x) -> readfirstlane x
-    // readfirstlane (readlane x, y) -> readlane x, y
-    // readlane (readfirstlane x), y -> readfirstlane x
-    // readlane (readlane x, y), z -> readlane x, y
-    // All these cases are identical and are dependent on the inner intrinsic
-    // results value.(i.e.irrespective of the which of these case is inner
-    // intrinsic will write the same value across all output lane indexes)
     if (UI->isUniform(II.getOperandUse(0))) {
       II.replaceAllUsesWith(Src);
       return true;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
new file mode 100644
index 0000000000000..82f92d2ccb550
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -o - %s | FileCheck %s
+
+define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readfirstlane_with_readfirstlane:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
+  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readfirstlane_with_readlane:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_bfe_u32 v1, v0, 10, 10
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v1
+; CHECK-NEXT:    v_readlane_b32 s2, v0, s2
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readlane_with_firstlane:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v0
+; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
+  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
+; CHECK-LABEL: readlane_readlane:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_bfe_u32 v1, v0, 10, 10
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v1
+; CHECK-NEXT:    v_readlane_b32 s2, v0, s2
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2)
+  store i32 %v2, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
+; CHECK-LABEL: permlane64_uniform:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s2, s[4:5], 0x8
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
+; CHECK-LABEL: permlane64_nonuniform:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT:    v_permlane64_b32 v1, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; CHECK-LABEL: permlane64_nonuniform_expression:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; CHECK-NEXT:    v_add_nc_u32_e32 v1, 1, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT:    v_permlane64_b32 v1, v1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT:    s_endpgm
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid2 = add i32 %tid, 1
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
new file mode 100644
index 0000000000000..63bc10c49a161
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -0,0 +1,155 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O3 -S < %s | FileCheck %s --check-prefixes=PASS-CHECK
+
+; Test case: Ensure that a loop with a divergent exit and a uniform value
+; used by an intrinsic outside the loop is not optimized due to temporal divergence.
+
+define amdgpu_kernel void @test_divergent_exit(ptr addrspace(1) %out, i32 %max_iter, i32 %div_cond) {
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_divergent_exit(
+; PASS-CHECK-SAME: ptr addrspace(1) nocapture writeonly [[OUT:%.*]], i32 [[MAX_ITER:%.*]], i32 [[DIV_COND:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    [[COND2:%.*]] = icmp eq i32 [[DIV_COND]], 0
+; PASS-CHECK-NEXT:    br label %[[LOOP:.*]]
+; PASS-CHECK:       [[LOOP]]:
+; PASS-CHECK-NEXT:    [[ITER_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[NEW_ITER:%.*]], %[[LOOP]] ]
+; PASS-CHECK-NEXT:    [[NEW_ITER]] = add i32 [[ITER_VAL]], 1
+; PASS-CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i32 [[NEW_ITER]], [[MAX_ITER]]
+; PASS-CHECK-NEXT:    [[EXIT:%.*]] = or i1 [[COND2]], [[COND1]]
+; PASS-CHECK-NEXT:    br i1 [[EXIT]], label %[[EXIT_BLOCK:.*]], label %[[LOOP]]
+; PASS-CHECK:       [[EXIT_BLOCK]]:
+; PASS-CHECK-NEXT:    store i32 [[NEW_ITER]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
+;
+entry:
+  %iter = alloca i32, align 4
+  store i32 0, ptr %iter, align 4
+  br label %loop
+
+loop:
+  ; Increment loop counter
+  %iter_val = load i32, ptr %iter, align 4
+  %new_iter = add i32 %iter_val, 1
+  store i32 %new_iter, ptr %iter, align 4
+
+  ; Check exit conditions
+  %cond1 = icmp sgt i32 %new_iter, %max_iter
+  %cond2 = icmp eq i32 %div_cond, 0
+  %exit = or i1 %cond1, %cond2
+  br i1 %exit, label %exit_block, label %loop
+
+exit_block:
+  ; Use the loop-defined value (uniform within each iteration, but temporally divergent at this use) in an intrinsic outside the loop
+  %final_val = load i32, ptr %iter, align 4
+  %result = call i32 @llvm.amdgcn.permlane64(i32 %final_val)
+  store i32 %result, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall() local_unnamed_addr #0 {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
+; PASS-CHECK-SAME: ) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; PASS-CHECK-NEXT:  [[_PEEL_BEGIN:.*:]]
+; PASS-CHECK-NEXT:    [[TMP5:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 1) #[[ATTR5:[0-9]+]], !srcloc [[META0:![0-9]+]]
+; PASS-CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
+; PASS-CHECK-NEXT:    [[TMP7:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP6]])
+; PASS-CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
+; PASS-CHECK-NEXT:    br i1 [[TMP8]], [[DOTLOOPEXIT:label %.*]], [[DOTPEEL_NEWPH:label %.*]]
+; PASS-CHECK:       [[_PEEL_NEWPH:.*:]]
+; PASS-CHECK-NEXT:    [[TMP4:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 0) #[[ATTR5]], !srcloc [[META0]]
+; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP4]], 0
+; PASS-CHECK-NEXT:    [[TMP10:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP9]])
+; PASS-CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP10]], 0
+; PASS-CHECK-NEXT:    br i1 [[TMP11]], [[DOTLOOPEXIT]], [[DOTPEEL_NEWPH]], !llvm.loop [[LOOP1:![0-9]+]]
+; PASS-CHECK:       [[_LOOPEXIT:.*:]]
+; PASS-CHECK-NEXT:    ret void
+;
+  br label %1
+
+1:                                                ; preds = %10, %0
+  %2 = phi i8 [ 0, %0 ], [ %12, %10 ]
+  %3 = and i8 %2, 1
+  %4 = xor i8 %3, 1
+  %5 = zext nneg i8 %4 to i32
+  %6 = tail call i32 asm sideeffect "", "=v,0"(i32 %5) #2, !srcloc !6
+  %7 = icmp ne i32 %6, 0
+  %8 = tail call i64 @llvm.amdgcn.ballot.i64(i1 %7)
+  %9 = icmp eq i64 %8, 0
+  br i1 %9, label %13, label %10
+
+10:                                               ; preds = %1
+  %11 = icmp eq i8 %3, 0
+  %12 = select i1 %11, i8 1, i8 %2
+  br label %1, !llvm.loop !7
+
+13:                                               ; preds = %1
+  ret void
+}
+
+define protected amdgpu_kernel void @waterfall() local_unnamed_addr #0 {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
+; PASS-CHECK-SAME: ) local_unnamed_addr #[[ATTR1]] {
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; PASS-CHECK-NEXT:    br label %[[BB2:.*]]
+; PASS-CHECK:       [[BB2]]:
+; PASS-CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ false, [[TMP0:%.*]] ], [ [[DOTBE:%.*]], %[[DOTBACKEDGE:.*]] ]
+; PASS-CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
+; PASS-CHECK-NEXT:    [[TMP5:%.*]] = zext i1 [[TMP4]] to i32
+; PASS-CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 [[TMP5]]) #[[ATTR5]], !srcloc [[META0]]
+; PASS-CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0
+; PASS-CHECK-NEXT:    [[TMP8:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP7]])
+; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; PASS-CHECK-NEXT:    br i1 [[TMP9]], label %[[BB14:.*]], label %[[BB10:.*]]
+; PASS-CHECK:       [[BB10]]:
+; PASS-CHECK-NEXT:    br i1 [[TMP3]], label %[[DOTBACKEDGE]], label %[[BB11:.*]]
+; PASS-CHECK:       [[BB11]]:
+; PASS-CHECK-NEXT:    [[TMP12:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP1]])
+; PASS-CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP1]], [[TMP12]]
+; PASS-CHECK-NEXT:    br label %[[DOTBACKEDGE]]
+; PASS-CHECK:       [[_BACKEDGE:.*:]]
+; PASS-CHECK-NEXT:    [[DOTBE]] = phi i1 [ true, %[[BB10]] ], [ [[TMP13]], %[[BB11]] ]
+; PASS-CHECK-NEXT:    br label %[[BB2]], !llvm.loop [[LOOP4:![0-9]+]]
+; PASS-CHECK:       [[BB14]]:
+; PASS-CHECK-NEXT:    ret void
+;
+  %1 = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %2 = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %1)
+  br label %3
+
+3:                                                ; preds = %15, %0
+  %4 = phi i1 [ false, %0 ], [ %16, %15 ]
+  %5 = xor i1 %4, true
+  %6 = zext i1 %5 to i32
+  %7 = tail call i32 asm sideeffect "", "=v,0"(i32 %6) #3, !srcloc !6
+  %8 = icmp ne i32 %7, 0
+  %9 = tail call i64 @llvm.amdgcn.ballot.i64(i1 %8)
+  %10 = icmp eq i64 %9, 0
+  br i1 %10, label %17, label %11
+
+11:                                               ; preds = %3
+  br i1 %4, label %15, label %12
+
+12:                                               ; preds = %11
+  %13 = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %2)
+  %14 = icmp eq i32 %2, %13
+  br label %15
+
+15:                                               ; preds = %12, %11
+  %16 = phi i1 [ true, %11 ], [ %14, %12 ]
+  br label %3, !llvm.loop !7
+
+17:                                               ; preds = %3
+  ret void
+}
+
+
+declare i64 @llvm.amdgcn.ballot.i64(i1) #1
+!6 = !{i64 690}
+!7 = distinct !{!7, !8}
+!8 = !{!"llvm.loop.mustprogress"}
+;.
+; PASS-CHECK: [[META0]] = !{i64 690}
+; PASS-CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
+; PASS-CHECK: [[META2]] = !{!"llvm.loop.mustprogress"}
+; PASS-CHECK: [[META3]] = !{!"llvm.loop.peeled.count", i32 1}
+; PASS-CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]]}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index ddedd435d35aa..c6a639a761f75 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -1,12 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=GFX10
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=PASS-CHECK
 
 define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_constant(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
-; GFX10-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 77)
   store i32 %v, ptr addrspace(1) %out
@@ -14,9 +13,9 @@ define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_undef(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_undef(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -24,10 +23,10 @@ define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_uniform(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
   store i32 %v, ptr addrspace(1) %out
@@ -35,14 +34,14 @@ define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 }
 
 define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
-; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
@@ -52,15 +51,15 @@ define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
 }
 
 define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
-; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid2 = add i32 %tid, 1
@@ -71,10 +70,10 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %o
 }
 
 define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_constant(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
   store i32 %v, ptr addrspace(1) %out
@@ -82,9 +81,9 @@ define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_undef(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_undef(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -92,10 +91,10 @@ define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
   store i32 %v, ptr addrspace(1) %out
@@ -103,15 +102,15 @@ define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
-; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -122,17 +121,17 @@ define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out)
 }
 
 define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX10-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
-; GFX10-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
-; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
-; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; PASS-CHECK-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -145,10 +144,10 @@ define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out
 }
 
 define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_constant(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
   store i32 %v, ptr addrspace(1) %out
@@ -156,9 +155,9 @@ define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_undef(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_undef(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -166,10 +165,10 @@ define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
   store i32 %v, ptr addrspace(1) %out
@@ -177,14 +176,14 @@ define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
-; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
@@ -194,15 +193,15 @@ define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out
 }
 
 define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_expression(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
-; GFX10-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
-; GFX10-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid2 = add i32 %tid, 1
@@ -213,10 +212,10 @@ define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
   %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
@@ -225,13 +224,13 @@ define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %ou
 }
 
 define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX10-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX10-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -242,12 +241,12 @@ define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
-; GFX10-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
@@ -257,13 +256,13 @@ define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_readlane(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
-; GFX10-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX10-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -275,11 +274,11 @@ define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
 
 
 define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_boundary(
-; GFX10-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
-; GFX10-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; PASS-CHECK-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %min_v = call i32 @llvm.amdgcn.permlane64(i32 -2147483648)
   store i32 %min_v, ptr addrspace(1) %out_min
@@ -289,13 +288,13 @@ define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr ad
 }
 
 define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_cross_lane(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = add i32 %tidx, 5
@@ -305,10 +304,10 @@ define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readfirstlane_random(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %random = xor i32 123, 456
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %random)
@@ -317,9 +316,9 @@ define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @permlane64_invalid(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_invalid(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    ret void
 ;
   %undef_v = call i32 @llvm.amdgcn.permlane64(i32 undef)
   store i32 %undef_v, ptr addrspace(1) %out
@@ -327,13 +326,13 @@ define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
-; GFX10-LABEL: define amdgpu_kernel void @readlane_expression(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; GFX10-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
-; GFX10-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
-; GFX10-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
 ;
   %idx1 = call i32 @llvm.amdgcn.workitem.id.x()
   %idx2 = mul i32 %idx1, 2
@@ -342,100 +341,3 @@ define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
   ret void
 }
 
-; Test case: Ensure that a loop with a divergent exit and a uniform value
-; used by an intrinsic outside the loop is not optimized due to temporal divergence.
-
-define amdgpu_kernel void @test_divergent_exit(ptr addrspace(1) %out, i32 %max_iter, i32 %div_cond) {
-; GFX10-LABEL: define amdgpu_kernel void @test_divergent_exit(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MAX_ITER:%.*]], i32 [[DIV_COND:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[ITER:%.*]] = alloca i32, align 4
-; GFX10-NEXT:    store i32 0, ptr [[ITER]], align 4
-; GFX10-NEXT:    br label %[[LOOP:.*]]
-; GFX10:       [[LOOP]]:
-; GFX10-NEXT:    [[ITER_VAL:%.*]] = load i32, ptr [[ITER]], align 4
-; GFX10-NEXT:    [[NEW_ITER:%.*]] = add i32 [[ITER_VAL]], 1
-; GFX10-NEXT:    store i32 [[NEW_ITER]], ptr [[ITER]], align 4
-; GFX10-NEXT:    [[COND1:%.*]] = icmp sgt i32 [[NEW_ITER]], [[MAX_ITER]]
-; GFX10-NEXT:    [[COND2:%.*]] = icmp eq i32 [[DIV_COND]], 0
-; GFX10-NEXT:    [[EXIT:%.*]] = or i1 [[COND1]], [[COND2]]
-; GFX10-NEXT:    br i1 [[EXIT]], label %[[EXIT_BLOCK:.*]], label %[[LOOP]]
-; GFX10:       [[EXIT_BLOCK]]:
-; GFX10-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[ITER]], align 4
-; GFX10-NEXT:    [[RESULT:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[FINAL_VAL]])
-; GFX10-NEXT:    store i32 [[RESULT]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
-;
-entry:
-  %iter = alloca i32, align 4
-  store i32 0, ptr %iter, align 4
-  br label %loop
-
-loop:
-  ; Increment loop counter
-  %iter_val = load i32, ptr %iter, align 4
-  %new_iter = add i32 %iter_val, 1
-  store i32 %new_iter, ptr %iter, align 4
-
-  ; Check exit conditions
-  %cond1 = icmp sgt i32 %new_iter, %max_iter
-  %cond2 = icmp eq i32 %div_cond, 0
-  %exit = or i1 %cond1, %cond2
-  br i1 %exit, label %exit_block, label %loop
-
-exit_block:
-  ; Use the uniform value in an intrinsic outside the loop
-  %final_val = load i32, ptr %iter, align 4
-  %result = call i32 @llvm.amdgcn.permlane64(i32 %final_val)
-  store i32 %result, ptr addrspace(1) %out, align 4
-  ret void
-}
-
-; Define the kernel function
-define amdgpu_kernel void @trivial_waterfall_loop(ptr addrspace(1) %out, i32 %src) {
-; GFX10-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
-; GFX10-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    [[RESULT:%.*]] = alloca i32, align 4
-; GFX10-NEXT:    store i32 0, ptr [[RESULT]], align 4
-; GFX10-NEXT:    br label %[[LOOP:.*]]
-; GFX10:       [[LOOP]]:
-; GFX10-NEXT:    [[CUR_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
-; GFX10-NEXT:    [[NEXT_VALUE:%.*]] = add i32 [[CUR_RESULT]], [[SRC]]
-; GFX10-NEXT:    [[UNIFORM_VALUE:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[NEXT_VALUE]])
-; GFX10-NEXT:    store i32 [[UNIFORM_VALUE]], ptr [[RESULT]], align 4
-; GFX10-NEXT:    br i1 true, label %[[EXIT:.*]], label %[[LOOP]]
-; GFX10:       [[EXIT]]:
-; GFX10-NEXT:    [[FINAL_RESULT:%.*]] = load i32, ptr [[RESULT]], align 4
-; GFX10-NEXT:    store i32 [[FINAL_RESULT]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
-;
-entry:
-  ; Initialize result to zero
-  %result = alloca i32, align 4
-  store i32 0, ptr %result, align 4
-  br label %loop
-
-loop:
-  ; Load the current result
-  %cur_result = load i32, ptr %result, align 4
-
-  ; Compute the next value
-  %next_value = add i32 %cur_result, %src
-
-  ; Apply the readfirstlane intrinsic for uniformity
-  %uniform_value = call i32 @llvm.amdgcn.readfirstlane(i32 %next_value)
-
-  ; Store the uniform result back
-  store i32 %uniform_value, ptr %result, align 4
-
-  ; This is a trivial loop that always exits after one iteration
-  br i1 true, label %exit, label %loop
-
-exit:
-  ; Store the result to the output pointer
-  %final_result = load i32, ptr %result, align 4
-  store i32 %final_result, ptr addrspace(1) %out, align 4
-  ret void
-}
-

>From e0fc6fcf07999621b5df8c47f21158be13311aaa Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Tue, 18 Feb 2025 09:58:43 +0530
Subject: [PATCH 08/30] handled ballot with icmp for trivial waterfall

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               |   4 +-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   2 +-
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  |  53 ++++-
 ...amdgpu-miscellaneous-uniform-intrinsics.ll |   3 +-
 .../amdgpu-simplify-trivial-waterfall-loop.ll |  40 ----
 .../amdgpu-simplify-uniform-waterfall.ll      | 185 ++++++++--------
 .../amdgpu-uniform-intrinsic-combine.ll       | 208 ++++++++++++++++--
 .../amdgpu-uniform-temporal-divergence.ll     |  90 ++++++++
 llvm/test/CodeGen/AMDGPU/llc-pipeline.ll      |  23 ++
 9 files changed, 442 insertions(+), 166 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index f5c663cca46bd..b1bf0e49f3f3b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -493,8 +493,8 @@ extern char &AMDGPUWaitSGPRHazardsLegacyID;
 
 void initializeAMDGPUUniformIntrinsicCombineLegacyPass(PassRegistry &);
 extern char &AMDGPUUniformIntrinsicCombineLegacyPassID;
-FunctionPass *createAMDGPUUniformIntrinsicCombineLegacyPass(
-    const AMDGPUTargetMachine *TM = nullptr);
+FunctionPass *createAMDGPUUniformIntrinsicCombineLegacyPass();
+
 struct AMDGPUUniformIntrinsicCombinePass
     : public PassInfoMixin<AMDGPUUniformIntrinsicCombinePass> {
   const AMDGPUTargetMachine &TM;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 487590cb1f00b..5b5def0977aa6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1223,7 +1223,7 @@ void AMDGPUPassConfig::addIRPasses() {
     addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
 
   if (EnableUniformIntrinsicCombine)
-    addPass(createAMDGPUUniformIntrinsicCombineLegacyPass(&TM));
+    addPass(createAMDGPUUniformIntrinsicCombineLegacyPass());
   // This can be disabled by passing ::Disable here or on the command line
   // with --expand-variadics-override=disable.
   addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 8779466831c45..4e02c16c61b09 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -37,9 +37,7 @@ namespace {
 class AMDGPUUniformIntrinsicCombineLegacy : public FunctionPass {
 public:
   static char ID;
-  const AMDGPUTargetMachine *AMDGPUTM;
-  AMDGPUUniformIntrinsicCombineLegacy(const AMDGPUTargetMachine *TM)
-      : FunctionPass(ID), AMDGPUTM(TM) {
+  AMDGPUUniformIntrinsicCombineLegacy() : FunctionPass(ID) {
     initializeAMDGPUUniformIntrinsicCombineLegacyPass(
         *PassRegistry::getPassRegistry());
   }
@@ -114,15 +112,55 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   switch (IID) {
   case Intrinsic::amdgcn_permlane64:
   case Intrinsic::amdgcn_readfirstlane:
-  case Intrinsic::amdgcn_readlane:
-  case Intrinsic::amdgcn_ballot: {
+  case Intrinsic::amdgcn_readlane: {
     Value *Src = II.getArgOperand(0);
+    // Check if the argument is uniform
     if (UI->isUniform(II.getOperandUse(0))) {
+      LLVM_DEBUG(dbgs() << "Replacing " << II << " with " << *Src << "\n");
       II.replaceAllUsesWith(Src);
       return true;
     }
     break;
   }
+  case Intrinsic::amdgcn_ballot: {
+    Value *Src = II.getArgOperand(0);
+    // Check if the argument is uniform and has a direct `icmp eq` use of the
+    // ballot result.
+    // %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)
+    // %is_done = icmp eq i64 %ballot, 0
+    // This means we are checking if *all lanes* in the ballot result are
+    // inactive.
+    if (UI->isUniform(II.getOperandUse(0))) {
+      LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
+
+      // Look for a direct `icmp eq` use of the ballot result.
+      auto It = llvm::find_if(II.users(), [&](User *U) {
+        return match(U, m_ICmp(m_Specific(&II), m_Zero()));
+      });
+
+      // Check if a match was found
+      if (It != II.user_end()) {
+        // Extract the matching `icmp` instruction
+        ICmpInst *ICmp = dyn_cast<ICmpInst>(*It);
+        if (!ICmp)
+          break; // Safety check
+
+        IRBuilder<> Builder(ICmp);
+
+        // Convert ballot argument to match `icmp` operand type (i64)
+        Value *ConvertedSrc =
+            Builder.CreateZExtOrTrunc(Src, ICmp->getOperand(1)->getType());
+
+        LLVM_DEBUG(dbgs() << "Replacing ballot result in icmp: " << *ICmp
+                          << " with " << *ConvertedSrc << "\n");
+
+        // Replace `%ballot` in `icmp` with `ConvertedSrc`
+        ICmp->setOperand(0, ConvertedSrc);
+        return true;
+      }
+    }
+    break;
+  }
   }
   return false;
 }
@@ -134,7 +172,6 @@ INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
 INITIALIZE_PASS_END(AMDGPUUniformIntrinsicCombineLegacy, DEBUG_TYPE,
                     "AMDGPU uniformIntrinsic Combine", false, false)
 
-FunctionPass *llvm::createAMDGPUUniformIntrinsicCombineLegacyPass(
-    const AMDGPUTargetMachine *TM) {
-  return new AMDGPUUniformIntrinsicCombineLegacy(TM);
+FunctionPass *llvm::createAMDGPUUniformIntrinsicCombineLegacyPass() {
+  return new AMDGPUUniformIntrinsicCombineLegacy();
 }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
index 82f92d2ccb550..f450b0e6763c4 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs - %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -o - %s | FileCheck %s
 
 define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
 ; CHECK-LABEL: readfirstlane_with_readfirstlane:
@@ -129,4 +129,3 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %o
   store i32 %v, i32 addrspace(1)* %out_ptr
   ret void
 }
-
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
deleted file mode 100644
index 56ba117ce1d30..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-trivial-waterfall-loop.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="default<O1>" -S < %s | FileCheck %s --check-prefixes=GFX10
-
-define amdgpu_kernel void @trivial_waterfall_loop(ptr addrspace(1) %out, i32 %src) {
-; GFX10-LABEL: define amdgpu_kernel void @trivial_waterfall_loop(
-; GFX10-SAME: ptr addrspace(1) nocapture writeonly [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
-; GFX10-NEXT:  [[ENTRY:.*:]]
-; GFX10-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
-; GFX10-NEXT:    ret void
-;
-entry:
-  ; Initialize result to zero
-  %result = alloca i32, align 4
-  store i32 0, ptr %result, align 4
-  br label %loop
-
-loop:
-  ; Load the current result
-  %cur_result = load i32, ptr %result, align 4
-
-  ; Compute the next value
-  %next_value = add i32 %cur_result, %src
-
-  ; Apply the readfirstlane intrinsic for uniformity
-  %uniform_value = call i32 @llvm.amdgcn.readfirstlane(i32 %next_value)
-
-  ; Store the uniform result back
-  store i32 %uniform_value, ptr %result, align 4
-
-  ; This is a trivial loop that always exits after one iteration
-  br i1 true, label %exit, label %loop
-
-exit:
-  ; Store the result to the output pointer
-  %final_result = load i32, ptr %result, align 4
-  store i32 %final_result, ptr addrspace(1) %out, align 4
-  ret void
-}
-
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 63bc10c49a161..f43d3163efd5f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -1,122 +1,109 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O3 -S < %s | FileCheck %s --check-prefixes=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
-; Test case: Ensure that a loop with a divergent exit and a uniform value
-; used by an intrinsic outside the loop is not optimized due to temporal divergence.
-
-define amdgpu_kernel void @test_divergent_exit(ptr addrspace(1) %out, i32 %max_iter, i32 %div_cond) {
-; PASS-CHECK-LABEL: define amdgpu_kernel void @test_divergent_exit(
-; PASS-CHECK-SAME: ptr addrspace(1) nocapture writeonly [[OUT:%.*]], i32 [[MAX_ITER:%.*]], i32 [[DIV_COND:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+define protected amdgpu_kernel void @trivial_waterfall() {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
+; PASS-CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    [[COND2:%.*]] = icmp eq i32 [[DIV_COND]], 0
-; PASS-CHECK-NEXT:    br label %[[LOOP:.*]]
-; PASS-CHECK:       [[LOOP]]:
-; PASS-CHECK-NEXT:    [[ITER_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[NEW_ITER:%.*]], %[[LOOP]] ]
-; PASS-CHECK-NEXT:    [[NEW_ITER]] = add i32 [[ITER_VAL]], 1
-; PASS-CHECK-NEXT:    [[COND1:%.*]] = icmp sgt i32 [[NEW_ITER]], [[MAX_ITER]]
-; PASS-CHECK-NEXT:    [[EXIT:%.*]] = or i1 [[COND2]], [[COND1]]
-; PASS-CHECK-NEXT:    br i1 [[EXIT]], label %[[EXIT_BLOCK:.*]], label %[[LOOP]]
-; PASS-CHECK:       [[EXIT_BLOCK]]:
-; PASS-CHECK-NEXT:    store i32 [[NEW_ITER]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ true, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = zext i1 [[DONE]] to i64
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[TMP0]], 0
+; PASS-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    [[IS_ONE:%.*]] = icmp eq i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[NEW_DONE]] = select i1 [[IS_ONE]], i1 false, i1 [[DONE]]
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
-entry:
-  %iter = alloca i32, align 4
-  store i32 0, ptr %iter, align 4
-  br label %loop
-
-loop:
-  ; Increment loop counter
-  %iter_val = load i32, ptr %iter, align 4
-  %new_iter = add i32 %iter_val, 1
-  store i32 %new_iter, ptr %iter, align 4
-
-  ; Check exit conditions
-  %cond1 = icmp sgt i32 %new_iter, %max_iter
-  %cond2 = icmp eq i32 %div_cond, 0
-  %exit = or i1 %cond1, %cond2
-  br i1 %exit, label %exit_block, label %loop
-
-exit_block:
-  ; Use the uniform value in an intrinsic outside the loop
-  %final_val = load i32, ptr %iter, align 4
-  %result = call i32 @llvm.amdgcn.permlane64(i32 %final_val)
-  store i32 %result, ptr addrspace(1) %out, align 4
-  ret void
-}
-
-define protected amdgpu_kernel void @trivial_waterfall() local_unnamed_addr #0 {
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
-; PASS-CHECK-SAME: ) local_unnamed_addr #[[ATTR1:[0-9]+]] {
-; PASS-CHECK-NEXT:  [[_PEEL_BEGIN:.*:]]
-; PASS-CHECK-NEXT:    [[TMP5:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 1) #[[ATTR5:[0-9]+]], !srcloc [[META0:![0-9]+]]
-; PASS-CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
-; PASS-CHECK-NEXT:    [[TMP7:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP6]])
-; PASS-CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
-; PASS-CHECK-NEXT:    br i1 [[TMP8]], [[DOTLOOPEXIT:label %.*]], [[DOTPEEL_NEWPH:label %.*]]
-; PASS-CHECK:       [[_PEEL_NEWPH:.*:]]
-; PASS-CHECK-NEXT:    [[TMP4:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 0) #[[ATTR5]], !srcloc [[META0]]
-; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP4]], 0
-; PASS-CHECK-NEXT:    [[TMP10:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP9]])
-; PASS-CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[TMP10]], 0
-; PASS-CHECK-NEXT:    br i1 [[TMP11]], [[DOTLOOPEXIT]], [[DOTPEEL_NEWPH]], !llvm.loop [[LOOP1:![0-9]+]]
-; PASS-CHECK:       [[_LOOPEXIT:.*:]]
-; PASS-CHECK-NEXT:    ret void
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
+; DCE-CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    ret void
 ;
-  br label %1
+entry:
+  br label %while
 
-1:                                                ; preds = %10, %0
-  %2 = phi i8 [ 0, %0 ], [ %12, %10 ]
-  %3 = and i8 %2, 1
-  %4 = xor i8 %3, 1
-  %5 = zext nneg i8 %4 to i32
-  %6 = tail call i32 asm sideeffect "", "=v,0"(i32 %5) #2, !srcloc !6
-  %7 = icmp ne i32 %6, 0
-  %8 = tail call i64 @llvm.amdgcn.ballot.i64(i1 %7)
-  %9 = icmp eq i64 %8, 0
-  br i1 %9, label %13, label %10
+while:
+  %done = phi i1 [ 1, %entry ], [ %new_done, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp eq i64 %ballot, 0
+  br i1 %is_done, label %exit, label %if
 
-10:                                               ; preds = %1
-  %11 = icmp eq i8 %3, 0
-  %12 = select i1 %11, i8 1, i8 %2
-  br label %1, !llvm.loop !7
+if:
+  %is_one = icmp eq i1 %done, 1
+  %new_done = select i1 %is_one, i1 0, i1 %done
+  br label %while
 
-13:                                               ; preds = %1
+exit:
   ret void
 }
 
-define protected amdgpu_kernel void @waterfall() local_unnamed_addr #0 {
+define protected amdgpu_kernel void @waterfall() {
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
-; PASS-CHECK-SAME: ) local_unnamed_addr #[[ATTR1]] {
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; PASS-CHECK-SAME: ) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[TMP0]])
 ; PASS-CHECK-NEXT:    br label %[[BB2:.*]]
 ; PASS-CHECK:       [[BB2]]:
-; PASS-CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ false, [[TMP0:%.*]] ], [ [[DOTBE:%.*]], %[[DOTBACKEDGE:.*]] ]
+; PASS-CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[TMP15:%.*]], %[[TMP14:.*]] ]
 ; PASS-CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
 ; PASS-CHECK-NEXT:    [[TMP5:%.*]] = zext i1 [[TMP4]] to i32
-; PASS-CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 [[TMP5]]) #[[ATTR5]], !srcloc [[META0]]
+; PASS-CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 [[TMP5]]), !srcloc [[META0:![0-9]+]]
 ; PASS-CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0
-; PASS-CHECK-NEXT:    [[TMP8:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP7]])
-; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; PASS-CHECK-NEXT:    br i1 [[TMP9]], label %[[BB14:.*]], label %[[BB10:.*]]
+; PASS-CHECK-NEXT:    [[TMP8:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP7]])
+; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[TMP8]], 0
+; PASS-CHECK-NEXT:    br i1 [[TMP9]], label %[[BB16:.*]], label %[[BB10:.*]]
 ; PASS-CHECK:       [[BB10]]:
-; PASS-CHECK-NEXT:    br i1 [[TMP3]], label %[[DOTBACKEDGE]], label %[[BB11:.*]]
+; PASS-CHECK-NEXT:    br i1 [[TMP3]], label %[[TMP14]], label %[[BB11:.*]]
 ; PASS-CHECK:       [[BB11]]:
 ; PASS-CHECK-NEXT:    [[TMP12:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP1]])
 ; PASS-CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP1]], [[TMP12]]
-; PASS-CHECK-NEXT:    br label %[[DOTBACKEDGE]]
-; PASS-CHECK:       [[_BACKEDGE:.*:]]
-; PASS-CHECK-NEXT:    [[DOTBE]] = phi i1 [ true, %[[BB10]] ], [ [[TMP13]], %[[BB11]] ]
-; PASS-CHECK-NEXT:    br label %[[BB2]], !llvm.loop [[LOOP4:![0-9]+]]
-; PASS-CHECK:       [[BB14]]:
+; PASS-CHECK-NEXT:    br label %[[TMP14]]
+; PASS-CHECK:       [[TMP14]]:
+; PASS-CHECK-NEXT:    [[TMP15]] = phi i1 [ true, %[[BB10]] ], [ [[TMP13]], %[[BB11]] ]
+; PASS-CHECK-NEXT:    br label %[[BB2]], !llvm.loop [[LOOP1:![0-9]+]]
+; PASS-CHECK:       [[BB16]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
+; DCE-CHECK-SAME: ) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*]]:
+; DCE-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; DCE-CHECK-NEXT:    br label %[[BB1:.*]]
+; DCE-CHECK:       [[BB1]]:
+; DCE-CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[TMP14:%.*]], %[[TMP13:.*]] ]
+; DCE-CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; DCE-CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
+; DCE-CHECK-NEXT:    [[TMP5:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 [[TMP4]]) #[[ATTR4:[0-9]+]], !srcloc [[META0:![0-9]+]]
+; DCE-CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
+; DCE-CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP6]])
+; DCE-CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
+; DCE-CHECK-NEXT:    br i1 [[TMP8]], label %[[BB15:.*]], label %[[BB9:.*]]
+; DCE-CHECK:       [[BB9]]:
+; DCE-CHECK-NEXT:    br i1 [[TMP2]], label %[[TMP13]], label %[[BB10:.*]]
+; DCE-CHECK:       [[BB10]]:
+; DCE-CHECK-NEXT:    [[TMP11:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
+; DCE-CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP0]], [[TMP11]]
+; DCE-CHECK-NEXT:    br label %[[TMP13]]
+; DCE-CHECK:       [[TMP13]]:
+; DCE-CHECK-NEXT:    [[TMP14]] = phi i1 [ true, %[[BB9]] ], [ [[TMP12]], %[[BB10]] ]
+; DCE-CHECK-NEXT:    br label %[[BB1]], !llvm.loop [[LOOP1:![0-9]+]]
+; DCE-CHECK:       [[BB15]]:
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
   %1 = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
   %2 = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %1)
   br label %3
 
-3:                                                ; preds = %15, %0
-  %4 = phi i1 [ false, %0 ], [ %16, %15 ]
+3:
+  %4 = phi i1 [ false, %entry ], [ %16, %15 ]
   %5 = xor i1 %4, true
   %6 = zext i1 %5 to i32
   %7 = tail call i32 asm sideeffect "", "=v,0"(i32 %6) #3, !srcloc !6
@@ -125,19 +112,19 @@ define protected amdgpu_kernel void @waterfall() local_unnamed_addr #0 {
   %10 = icmp eq i64 %9, 0
   br i1 %10, label %17, label %11
 
-11:                                               ; preds = %3
+11:
   br i1 %4, label %15, label %12
 
-12:                                               ; preds = %11
+12:
   %13 = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %2)
   %14 = icmp eq i32 %2, %13
   br label %15
 
-15:                                               ; preds = %12, %11
+15:
   %16 = phi i1 [ true, %11 ], [ %14, %12 ]
   br label %3, !llvm.loop !7
 
-17:                                               ; preds = %3
+17:
   ret void
 }
 
@@ -148,8 +135,10 @@ declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !8 = !{!"llvm.loop.mustprogress"}
 ;.
 ; PASS-CHECK: [[META0]] = !{i64 690}
-; PASS-CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]], [[META3:![0-9]+]]}
+; PASS-CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]]}
 ; PASS-CHECK: [[META2]] = !{!"llvm.loop.mustprogress"}
-; PASS-CHECK: [[META3]] = !{!"llvm.loop.peeled.count", i32 1}
-; PASS-CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]]}
+;.
+; DCE-CHECK: [[META0]] = !{i64 690}
+; DCE-CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]]}
+; DCE-CHECK: [[META2]] = !{!"llvm.loop.mustprogress"}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index c6a639a761f75..ee54aff64f25d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -1,11 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine,amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s --check-prefixes=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
 define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 77)
 ; PASS-CHECK-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; DCE-CHECK-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 77)
   store i32 %v, ptr addrspace(1) %out
@@ -15,7 +22,14 @@ define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_undef(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 undef)
+; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_undef(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -25,8 +39,14 @@ define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
 define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC]])
 ; PASS-CHECK-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
   store i32 %v, ptr addrspace(1) %out
@@ -38,10 +58,17 @@ define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; DCE-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
@@ -56,10 +83,18 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %o
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; DCE-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid2 = add i32 %tid, 1
@@ -72,8 +107,14 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %o
 define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 7, i32 5)
 ; PASS-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
   store i32 %v, ptr addrspace(1) %out
@@ -83,7 +124,14 @@ define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_undef(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 undef, i32 undef)
+; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_undef(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -93,8 +141,14 @@ define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
 ; PASS-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
   store i32 %v, ptr addrspace(1) %out
@@ -107,10 +161,18 @@ define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out)
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
-; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -128,10 +190,20 @@ define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out
 ; PASS-CHECK-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
 ; PASS-CHECK-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
-; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; DCE-CHECK-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; DCE-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -146,8 +218,14 @@ define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out
 define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 7)
 ; PASS-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
   store i32 %v, ptr addrspace(1) %out
@@ -157,7 +235,14 @@ define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_undef(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 undef)
+; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_undef(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
   store i32 %v, ptr addrspace(1) %out
@@ -167,8 +252,14 @@ define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
 ; PASS-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
   store i32 %v, ptr addrspace(1) %out
@@ -180,10 +271,17 @@ define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
-; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; DCE-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
@@ -198,10 +296,18 @@ define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
-; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; PASS-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID2]]
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; DCE-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID2]]
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid2 = add i32 %tid, 1
@@ -214,8 +320,15 @@ define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
 define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 5)
+; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 5)
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
   %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
@@ -229,8 +342,17 @@ define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[V1]])
 ; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -245,8 +367,16 @@ define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[V1]], i32 3)
 ; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; DCE-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
@@ -261,8 +391,17 @@ define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[V1]], i32 2)
 ; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = call i32 @llvm.amdgcn.workitem.id.y()
@@ -276,9 +415,17 @@ define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
 define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[MIN_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 -2147483648)
 ; PASS-CHECK-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; PASS-CHECK-NEXT:    [[MAX_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 2147483647)
 ; PASS-CHECK-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; DCE-CHECK-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %min_v = call i32 @llvm.amdgcn.permlane64(i32 -2147483648)
   store i32 %min_v, ptr addrspace(1) %out_min
@@ -295,6 +442,14 @@ define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %tidx = call i32 @llvm.amdgcn.workitem.id.x()
   %tidy = add i32 %tidx, 5
@@ -306,8 +461,16 @@ define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
 define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    [[RANDOM:%.*]] = xor i32 123, 456
+; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[RANDOM]])
+; PASS-CHECK-NEXT:    store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[RANDOM:%.*]] = xor i32 123, 456
+; DCE-CHECK-NEXT:    store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %random = xor i32 123, 456
   %v = call i32 @llvm.amdgcn.readfirstlane(i32 %random)
@@ -318,7 +481,14 @@ define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
 define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_invalid(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[UNDEF_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 undef)
+; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_invalid(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %undef_v = call i32 @llvm.amdgcn.permlane64(i32 undef)
   store i32 %undef_v, ptr addrspace(1) %out
@@ -329,10 +499,18 @@ define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; PASS-CHECK-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
+; PASS-CHECK-NEXT:    [[IDX2:%.*]] = mul i32 [[IDX1]], 2
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
 ; PASS-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[IDX2:%.*]] = mul i32 [[IDX1]], 2
+; DCE-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; DCE-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
 ;
   %idx1 = call i32 @llvm.amdgcn.workitem.id.x()
   %idx2 = mul i32 %idx1, 2
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
new file mode 100644
index 0000000000000..a467bba7973ef
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
+
+; The readfirstlane in exit.loop must not be combined: %final_val is temporally divergent (defined under a divergent loop and used after it), so it is not uniform at the intrinsic.
+define amdgpu_cs void @temporal_divergence(ptr addrspace(1) %out, i32 %n) {
+; PASS-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    [[VAL:%.*]] = alloca i32, align 4
+; PASS-CHECK-NEXT:    store i32 0, ptr [[VAL]], align 4
+; PASS-CHECK-NEXT:    [[TID_MOD:%.*]] = urem i32 [[TID]], 2
+; PASS-CHECK-NEXT:    [[IS_EVEN:%.*]] = icmp eq i32 [[TID_MOD]], 0
+; PASS-CHECK-NEXT:    br i1 [[IS_EVEN]], label %[[EXIT_LOOP:.*]], label %[[LOOP:.*]]
+; PASS-CHECK:       [[LOOP]]:
+; PASS-CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; PASS-CHECK-NEXT:    [[VAL_LOADED:%.*]] = load i32, ptr [[VAL]], align 4
+; PASS-CHECK-NEXT:    [[VAL_UPDATED:%.*]] = add i32 [[VAL_LOADED]], [[I]]
+; PASS-CHECK-NEXT:    store i32 [[VAL_UPDATED]], ptr [[VAL]], align 4
+; PASS-CHECK-NEXT:    [[I_NEXT]] = add i32 [[I]], 1
+; PASS-CHECK-NEXT:    [[LOOP_COND:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
+; PASS-CHECK-NEXT:    br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT_LOOP]]
+; PASS-CHECK:       [[EXIT_LOOP]]:
+; PASS-CHECK-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[VAL]], align 4
+; PASS-CHECK-NEXT:    [[FIRST_LANE_VAL:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[FINAL_VAL]])
+; PASS-CHECK-NEXT:    store i32 [[FIRST_LANE_VAL]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*]]:
+; DCE-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    [[VAL:%.*]] = alloca i32, align 4
+; DCE-CHECK-NEXT:    store i32 0, ptr [[VAL]], align 4
+; DCE-CHECK-NEXT:    [[TID_MOD:%.*]] = and i32 [[TID]], 1
+; DCE-CHECK-NEXT:    [[IS_EVEN:%.*]] = icmp eq i32 [[TID_MOD]], 0
+; DCE-CHECK-NEXT:    br i1 [[IS_EVEN]], label %[[EXIT_LOOP:.*]], label %[[LOOP:.*]]
+; DCE-CHECK:       [[LOOP]]:
+; DCE-CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
+; DCE-CHECK-NEXT:    [[VAL_LOADED:%.*]] = load i32, ptr [[VAL]], align 4
+; DCE-CHECK-NEXT:    [[VAL_UPDATED:%.*]] = add i32 [[VAL_LOADED]], [[I]]
+; DCE-CHECK-NEXT:    store i32 [[VAL_UPDATED]], ptr [[VAL]], align 4
+; DCE-CHECK-NEXT:    [[I_NEXT]] = add i32 [[I]], 1
+; DCE-CHECK-NEXT:    [[LOOP_COND:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
+; DCE-CHECK-NEXT:    br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT_LOOP]]
+; DCE-CHECK:       [[EXIT_LOOP]]:
+; DCE-CHECK-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[VAL]], align 4
+; DCE-CHECK-NEXT:    [[FIRST_LANE_VAL:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[FINAL_VAL]])
+; DCE-CHECK-NEXT:    store i32 [[FIRST_LANE_VAL]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %val = alloca i32, align 4
+  store i32 0, ptr %val, align 4
+
+  ; Compute (tid % 2) to check if it is even
+  %tid_mod = urem i32 %tid, 2
+  %is_even = icmp eq i32 %tid_mod, 0
+
+  ; If tid is even, jump directly to exit.loop
+  br i1 %is_even, label %exit.loop, label %loop
+
+loop:
+  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
+  %val.loaded = load i32, ptr %val, align 4
+
+  ; Update value
+  %val.updated = add i32 %val.loaded, %i
+  store i32 %val.updated, ptr %val, align 4
+
+  ; Loop iteration
+  %i.next = add i32 %i, 1
+  %loop.cond = icmp ult i32 %i.next, %n
+  br i1 %loop.cond, label %loop, label %exit.loop
+
+exit.loop:
+  ; Read first lane's value
+  %final_val = load i32, ptr %val, align 4
+  %first_lane_val = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 %final_val)
+
+  ; Store result in memory
+  store i32 %first_lane_val, ptr addrspace(1) %out, align 4
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.readfirstlane.i32(i32)
+
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 4b6cc32522f5b..ea8a22de76119 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -31,6 +31,11 @@
 ; GCN-O0-NEXT:    AMDGPU Remove Incompatible Functions
 ; GCN-O0-NEXT:    AMDGPU Printf lowering
 ; GCN-O0-NEXT:    Lower ctors and dtors for AMDGPU
+; GCN-O0-NEXT:    FunctionPass Manager
+; GCN-O0-NEXT:      Dominator Tree Construction
+; GCN-O0-NEXT:      Cycle Info Analysis
+; GCN-O0-NEXT:      Uniformity Analysis
+; GCN-O0-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O0-NEXT:    Expand variadic functions
 ; GCN-O0-NEXT:    AMDGPU Inline All Functions
 ; GCN-O0-NEXT:    Inliner for always_inline functions
@@ -181,6 +186,11 @@
 ; GCN-O1-NEXT:    AMDGPU Remove Incompatible Functions
 ; GCN-O1-NEXT:    AMDGPU Printf lowering
 ; GCN-O1-NEXT:    Lower ctors and dtors for AMDGPU
+; GCN-O1-NEXT:    FunctionPass Manager
+; GCN-O1-NEXT:      Dominator Tree Construction
+; GCN-O1-NEXT:      Cycle Info Analysis
+; GCN-O1-NEXT:      Uniformity Analysis
+; GCN-O1-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O1-NEXT:    Expand variadic functions
 ; GCN-O1-NEXT:    AMDGPU Inline All Functions
 ; GCN-O1-NEXT:    Inliner for always_inline functions
@@ -466,6 +476,11 @@
 ; GCN-O1-OPTS-NEXT:    AMDGPU Remove Incompatible Functions
 ; GCN-O1-OPTS-NEXT:    AMDGPU Printf lowering
 ; GCN-O1-OPTS-NEXT:    Lower ctors and dtors for AMDGPU
+; GCN-O1-OPTS-NEXT:    FunctionPass Manager
+; GCN-O1-OPTS-NEXT:      Dominator Tree Construction
+; GCN-O1-OPTS-NEXT:      Cycle Info Analysis
+; GCN-O1-OPTS-NEXT:      Uniformity Analysis
+; GCN-O1-OPTS-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O1-OPTS-NEXT:    Expand variadic functions
 ; GCN-O1-OPTS-NEXT:    AMDGPU Inline All Functions
 ; GCN-O1-OPTS-NEXT:    Inliner for always_inline functions
@@ -781,6 +796,10 @@
 ; GCN-O2-NEXT:    Lower ctors and dtors for AMDGPU
 ; GCN-O2-NEXT:    FunctionPass Manager
 ; GCN-O2-NEXT:      AMDGPU Image Intrinsic Optimizer
+; GCN-O2-NEXT:      Dominator Tree Construction
+; GCN-O2-NEXT:      Cycle Info Analysis
+; GCN-O2-NEXT:      Uniformity Analysis
+; GCN-O2-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O2-NEXT:    Expand variadic functions
 ; GCN-O2-NEXT:    AMDGPU Inline All Functions
 ; GCN-O2-NEXT:    Inliner for always_inline functions
@@ -1100,6 +1119,10 @@
 ; GCN-O3-NEXT:    Lower ctors and dtors for AMDGPU
 ; GCN-O3-NEXT:    FunctionPass Manager
 ; GCN-O3-NEXT:      AMDGPU Image Intrinsic Optimizer
+; GCN-O3-NEXT:      Dominator Tree Construction
+; GCN-O3-NEXT:      Cycle Info Analysis
+; GCN-O3-NEXT:      Uniformity Analysis
+; GCN-O3-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O3-NEXT:    Expand variadic functions
 ; GCN-O3-NEXT:    AMDGPU Inline All Functions
 ; GCN-O3-NEXT:    Inliner for always_inline functions

>From 661ce9664c94d99c73d3141354e6ad5113132992 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Tue, 18 Feb 2025 19:28:51 +0530
Subject: [PATCH 09/30] updated test

---
 .../amdgpu-simplify-uniform-waterfall.ll      | 137 ++++++++----------
 .../amdgpu-uniform-temporal-divergence.ll     |  96 ++++--------
 2 files changed, 89 insertions(+), 144 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index f43d3163efd5f..f5d3aa176449a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -2,129 +2,123 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
-define protected amdgpu_kernel void @trivial_waterfall() {
+define protected amdgpu_kernel void @trivial_waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
-; PASS-CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ true, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[DONE1:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = xor i1 [[DONE1]], true
 ; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = zext i1 [[DONE]] to i64
 ; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[TMP0]], 0
 ; PASS-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    [[IS_ONE:%.*]] = icmp eq i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[NEW_DONE]] = select i1 [[IS_ONE]], i1 false, i1 [[DONE]]
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    br label %[[WHILE]]
 ; PASS-CHECK:       [[EXIT]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
 ; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
-; DCE-CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; DCE-CHECK-NEXT:    ret void
 ;
 entry:
   br label %while
 
 while:
-  %done = phi i1 [ 1, %entry ], [ %new_done, %if ]
-  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
   %is_done = icmp eq i64 %ballot, 0
   br i1 %is_done, label %exit, label %if
 
 if:
-  %is_one = icmp eq i1 %done, 1
-  %new_done = select i1 %is_one, i1 0, i1 %done
+  store i32 5, ptr addrspace(1) %out
   br label %while
 
 exit:
   ret void
 }
 
-define protected amdgpu_kernel void @waterfall() {
+define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
-; PASS-CHECK-SAME: ) #[[ATTR0]] {
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; PASS-CHECK-NEXT:    [[TMP1:%.*]] = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[TMP0]])
-; PASS-CHECK-NEXT:    br label %[[BB2:.*]]
-; PASS-CHECK:       [[BB2]]:
-; PASS-CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[TMP15:%.*]], %[[TMP14:.*]] ]
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
 ; PASS-CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
-; PASS-CHECK-NEXT:    [[TMP5:%.*]] = zext i1 [[TMP4]] to i32
-; PASS-CHECK-NEXT:    [[TMP6:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 [[TMP5]]), !srcloc [[META0:![0-9]+]]
-; PASS-CHECK-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0
-; PASS-CHECK-NEXT:    [[TMP8:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP7]])
+; PASS-CHECK-NEXT:    [[TMP8:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP4]])
 ; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[TMP8]], 0
-; PASS-CHECK-NEXT:    br i1 [[TMP9]], label %[[BB16:.*]], label %[[BB10:.*]]
-; PASS-CHECK:       [[BB10]]:
-; PASS-CHECK-NEXT:    br i1 [[TMP3]], label %[[TMP14]], label %[[BB11:.*]]
-; PASS-CHECK:       [[BB11]]:
+; PASS-CHECK-NEXT:    br i1 [[TMP9]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    [[TMP12:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP1]])
 ; PASS-CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP1]], [[TMP12]]
-; PASS-CHECK-NEXT:    br label %[[TMP14]]
-; PASS-CHECK:       [[TMP14]]:
-; PASS-CHECK-NEXT:    [[TMP15]] = phi i1 [ true, %[[BB10]] ], [ [[TMP13]], %[[BB11]] ]
-; PASS-CHECK-NEXT:    br label %[[BB2]], !llvm.loop [[LOOP1:![0-9]+]]
-; PASS-CHECK:       [[BB16]]:
+; PASS-CHECK-NEXT:    br i1 [[TMP13]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK:       [[WORK]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[TAIL]]
+; PASS-CHECK:       [[TAIL]]:
+; PASS-CHECK-NEXT:    [[NEW_DONE]] = phi i1 [ true, %[[WORK]] ], [ false, %[[IF]] ]
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
 ; DCE-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
-; DCE-CHECK-SAME: ) #[[ATTR0]] {
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; DCE-CHECK-NEXT:  [[ENTRY:.*]]:
 ; DCE-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; DCE-CHECK-NEXT:    br label %[[BB1:.*]]
-; DCE-CHECK:       [[BB1]]:
-; DCE-CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[TMP14:%.*]], %[[TMP13:.*]] ]
+; DCE-CHECK-NEXT:    br label %[[WHILE:.*]]
+; DCE-CHECK:       [[WHILE]]:
+; DCE-CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[TMP12:%.*]], %[[TAIL:.*]] ]
 ; DCE-CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
-; DCE-CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i32
-; DCE-CHECK-NEXT:    [[TMP5:%.*]] = tail call i32 asm sideeffect "", "=v,0"(i32 [[TMP4]]) #[[ATTR4:[0-9]+]], !srcloc [[META0:![0-9]+]]
-; DCE-CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
-; DCE-CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP6]])
+; DCE-CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP3]])
 ; DCE-CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
-; DCE-CHECK-NEXT:    br i1 [[TMP8]], label %[[BB15:.*]], label %[[BB9:.*]]
-; DCE-CHECK:       [[BB9]]:
-; DCE-CHECK-NEXT:    br i1 [[TMP2]], label %[[TMP13]], label %[[BB10:.*]]
-; DCE-CHECK:       [[BB10]]:
+; DCE-CHECK-NEXT:    br i1 [[TMP8]], label %[[EXIT:.*]], label %[[IF:.*]]
+; DCE-CHECK:       [[IF]]:
 ; DCE-CHECK-NEXT:    [[TMP11:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
-; DCE-CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP0]], [[TMP11]]
-; DCE-CHECK-NEXT:    br label %[[TMP13]]
-; DCE-CHECK:       [[TMP13]]:
-; DCE-CHECK-NEXT:    [[TMP14]] = phi i1 [ true, %[[BB9]] ], [ [[TMP12]], %[[BB10]] ]
-; DCE-CHECK-NEXT:    br label %[[BB1]], !llvm.loop [[LOOP1:![0-9]+]]
-; DCE-CHECK:       [[BB15]]:
+; DCE-CHECK-NEXT:    [[TMP12]] = icmp eq i32 [[TMP0]], [[TMP11]]
+; DCE-CHECK-NEXT:    br i1 [[TMP12]], label %[[WORK:.*]], label %[[TAIL]]
+; DCE-CHECK:       [[WORK]]:
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    br label %[[TAIL]]
+; DCE-CHECK:       [[TAIL]]:
+; DCE-CHECK-NEXT:    br label %[[WHILE]]
+; DCE-CHECK:       [[EXIT]]:
 ; DCE-CHECK-NEXT:    ret void
 ;
 entry:
   %1 = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %2 = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %1)
-  br label %3
+  %tid = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %1)
+  br label %while
 
-3:
-  %4 = phi i1 [ false, %entry ], [ %16, %15 ]
-  %5 = xor i1 %4, true
-  %6 = zext i1 %5 to i32
-  %7 = tail call i32 asm sideeffect "", "=v,0"(i32 %6) #3, !srcloc !6
-  %8 = icmp ne i32 %7, 0
-  %9 = tail call i64 @llvm.amdgcn.ballot.i64(i1 %8)
-  %10 = icmp eq i64 %9, 0
-  br i1 %10, label %17, label %11
+while:
+  %done = phi i1 [ false, %entry ], [ %new_done, %tail ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+  %is_done = icmp eq i64 %ballot, 0
+  br i1 %is_done, label %exit, label %if
 
-11:
-  br i1 %4, label %15, label %12
+if:
+  %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %tid)
+  %is_first_active_id = icmp eq i32 %tid, %first_active_id
+  br i1 %is_first_active_id, label %work, label %tail
 
-12:
-  %13 = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %2)
-  %14 = icmp eq i32 %2, %13
-  br label %15
+work:
+  store i32 5, ptr addrspace(1) %out
+  br label %tail
 
-15:
-  %16 = phi i1 [ true, %11 ], [ %14, %12 ]
-  br label %3, !llvm.loop !7
+tail:
+  %new_done = phi i1 [ true, %work ], [ false, %if ]
+  br label %while
 
-17:
+exit:
   ret void
 }
 
@@ -133,12 +127,3 @@ declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !6 = !{i64 690}
 !7 = distinct !{!7, !8}
 !8 = !{!"llvm.loop.mustprogress"}
-;.
-; PASS-CHECK: [[META0]] = !{i64 690}
-; PASS-CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]]}
-; PASS-CHECK: [[META2]] = !{!"llvm.loop.mustprogress"}
-;.
-; DCE-CHECK: [[META0]] = !{i64 690}
-; DCE-CHECK: [[LOOP1]] = distinct !{[[LOOP1]], [[META2:![0-9]+]]}
-; DCE-CHECK: [[META2]] = !{!"llvm.loop.mustprogress"}
-;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
index a467bba7973ef..3061669edc0b3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
@@ -7,84 +7,44 @@ define amdgpu_cs void @temporal_divergence(ptr addrspace(1) %out, i32 %n) {
 ; PASS-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
-; PASS-CHECK-NEXT:    [[VAL:%.*]] = alloca i32, align 4
-; PASS-CHECK-NEXT:    store i32 0, ptr [[VAL]], align 4
-; PASS-CHECK-NEXT:    [[TID_MOD:%.*]] = urem i32 [[TID]], 2
-; PASS-CHECK-NEXT:    [[IS_EVEN:%.*]] = icmp eq i32 [[TID_MOD]], 0
-; PASS-CHECK-NEXT:    br i1 [[IS_EVEN]], label %[[EXIT_LOOP:.*]], label %[[LOOP:.*]]
-; PASS-CHECK:       [[LOOP]]:
-; PASS-CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; PASS-CHECK-NEXT:    [[VAL_LOADED:%.*]] = load i32, ptr [[VAL]], align 4
-; PASS-CHECK-NEXT:    [[VAL_UPDATED:%.*]] = add i32 [[VAL_LOADED]], [[I]]
-; PASS-CHECK-NEXT:    store i32 [[VAL_UPDATED]], ptr [[VAL]], align 4
+; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT:    br label %[[H:.*]]
+; PASS-CHECK:       [[H]]:
+; PASS-CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[H]] ]
 ; PASS-CHECK-NEXT:    [[I_NEXT]] = add i32 [[I]], 1
-; PASS-CHECK-NEXT:    [[LOOP_COND:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
-; PASS-CHECK-NEXT:    br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT_LOOP]]
-; PASS-CHECK:       [[EXIT_LOOP]]:
-; PASS-CHECK-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[VAL]], align 4
-; PASS-CHECK-NEXT:    [[FIRST_LANE_VAL:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[FINAL_VAL]])
-; PASS-CHECK-NEXT:    store i32 [[FIRST_LANE_VAL]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; PASS-CHECK-NEXT:    br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; PASS-CHECK:       [[X]]:
+; PASS-CHECK-NEXT:    [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[I_NEXT]])
+; PASS-CHECK-NEXT:    [[JOIN_USER:%.*]] = add i32 [[I_NEXT]], 5
 ; PASS-CHECK-NEXT:    ret void
 ;
 ; DCE-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
 ; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*]]:
-; DCE-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
-; DCE-CHECK-NEXT:    [[VAL:%.*]] = alloca i32, align 4
-; DCE-CHECK-NEXT:    store i32 0, ptr [[VAL]], align 4
-; DCE-CHECK-NEXT:    [[TID_MOD:%.*]] = and i32 [[TID]], 1
-; DCE-CHECK-NEXT:    [[IS_EVEN:%.*]] = icmp eq i32 [[TID_MOD]], 0
-; DCE-CHECK-NEXT:    br i1 [[IS_EVEN]], label %[[EXIT_LOOP:.*]], label %[[LOOP:.*]]
-; DCE-CHECK:       [[LOOP]]:
-; DCE-CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[LOOP]] ]
-; DCE-CHECK-NEXT:    [[VAL_LOADED:%.*]] = load i32, ptr [[VAL]], align 4
-; DCE-CHECK-NEXT:    [[VAL_UPDATED:%.*]] = add i32 [[VAL_LOADED]], [[I]]
-; DCE-CHECK-NEXT:    store i32 [[VAL_UPDATED]], ptr [[VAL]], align 4
-; DCE-CHECK-NEXT:    [[I_NEXT]] = add i32 [[I]], 1
-; DCE-CHECK-NEXT:    [[LOOP_COND:%.*]] = icmp ult i32 [[I_NEXT]], [[N]]
-; DCE-CHECK-NEXT:    br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT_LOOP]]
-; DCE-CHECK:       [[EXIT_LOOP]]:
-; DCE-CHECK-NEXT:    [[FINAL_VAL:%.*]] = load i32, ptr [[VAL]], align 4
-; DCE-CHECK-NEXT:    [[FIRST_LANE_VAL:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[FINAL_VAL]])
-; DCE-CHECK-NEXT:    store i32 [[FIRST_LANE_VAL]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT:    br label %[[H:.*]]
+; DCE-CHECK:       [[H]]:
+; DCE-CHECK-NEXT:    [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; DCE-CHECK-NEXT:    br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; DCE-CHECK:       [[X]]:
 ; DCE-CHECK-NEXT:    ret void
 ;
 entry:
-  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
-  %val = alloca i32, align 4
-  store i32 0, ptr %val, align 4
-
-  ; Compute (tid % 2) to check if it is even
-  %tid_mod = urem i32 %tid, 2
-  %is_even = icmp eq i32 %tid_mod, 0
-
-  ; If tid is even, jump directly to exit.loop
-  br i1 %is_even, label %exit.loop, label %loop
-
-loop:
-  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
-  %val.loaded = load i32, ptr %val, align 4
-
-  ; Update value
-  %val.updated = add i32 %val.loaded, %i
-  store i32 %val.updated, ptr %val, align 4
-
-  ; Loop iteration
-  %i.next = add i32 %i, 1
-  %loop.cond = icmp ult i32 %i.next, %n
-  br i1 %loop.cond, label %loop, label %exit.loop
-
-exit.loop:
-  ; Read first lane's value
-  %final_val = load i32, ptr %val, align 4
-  %first_lane_val = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 %final_val)
-
-  ; Store result in memory
-  store i32 %first_lane_val, ptr addrspace(1) %out, align 4
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  br label %H
+
+H:
+  %uni.merge.h = phi i32 [ 0, %entry ], [ %uni.inc, %H ]
+  %uni.inc = add i32 %uni.merge.h, 1
+  %div.exitx = icmp eq i32 %tid, 0
+  br i1 %div.exitx, label %X, label %H ; divergent branch
+
+X:
+  %uni.join = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %uni.inc)
+  %join.user = add i32 %uni.join, 5
   ret void
 }
 
 declare i32 @llvm.amdgcn.workitem.id.x()
 declare i32 @llvm.amdgcn.readfirstlane.i32(i32)
-

>From 8963961549d3cb8baac5cd756c2f0d78c9cd2a3b Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 19 Feb 2025 17:50:35 +0530
Subject: [PATCH 10/30] Fix: use isDivergentUse instead of isUniform

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 15 ++++----
 .../amdgpu-uniform-temporal-divergence.ll     | 37 +++++++++++--------
 2 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 4e02c16c61b09..e083ca26b7c6b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -114,8 +114,8 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
     Value *Src = II.getArgOperand(0);
-    // Check if the argument is uniform
-    if (UI->isUniform(II.getOperandUse(0))) {
+    // Check if the argument use is uniform
+    if (!UI->isDivergentUse(II.getOperandUse(0))) {
       LLVM_DEBUG(dbgs() << "Replacing " << II << " with " << *Src << "\n");
       II.replaceAllUsesWith(Src);
       return true;
@@ -124,13 +124,14 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   }
   case Intrinsic::amdgcn_ballot: {
     Value *Src = II.getArgOperand(0);
-    // Check if the argument is uniform and has a direct `icmp eq` use of the
-    // ballot result.
+    // Check if the argument use is uniform and has a direct `icmp eq` use of
+    // the ballot result. If exists pull the ballot argument to the use place.
     // %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)
     // %is_done = icmp eq i64 %ballot, 0
-    // This means we are checking if *all lanes* in the ballot result are
-    // inactive.
-    if (UI->isUniform(II.getOperandUse(0))) {
+    // transformed IR should look like.
+    // %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)
+    // %is_done = icmp eq i64 %cond, 0
+    if (!UI->isDivergentUse(II.getOperandUse(0))) {
       LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
 
       // Look for a direct `icmp eq` use of the ballot result.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
index 3061669edc0b3..2fde3e3759f47 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=COMB-CHECK
 
 ; This should not be optimized
 define amdgpu_cs void @temporal_divergence(ptr addrspace(1) %out, i32 %n) {
@@ -10,25 +10,31 @@ define amdgpu_cs void @temporal_divergence(ptr addrspace(1) %out, i32 %n) {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    br label %[[H:.*]]
 ; PASS-CHECK:       [[H]]:
-; PASS-CHECK-NEXT:    [[I:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[I_NEXT:%.*]], %[[H]] ]
-; PASS-CHECK-NEXT:    [[I_NEXT]] = add i32 [[I]], 1
+; PASS-CHECK-NEXT:    [[UNI_MERGE_H:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[UNI_INC:%.*]], %[[H]] ]
+; PASS-CHECK-NEXT:    [[UNI_INC]] = add i32 [[UNI_MERGE_H]], 1
 ; PASS-CHECK-NEXT:    [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
 ; PASS-CHECK-NEXT:    br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
 ; PASS-CHECK:       [[X]]:
-; PASS-CHECK-NEXT:    [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[I_NEXT]])
-; PASS-CHECK-NEXT:    [[JOIN_USER:%.*]] = add i32 [[I_NEXT]], 5
+; PASS-CHECK-NEXT:    [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[UNI_INC]])
+; PASS-CHECK-NEXT:    [[JOIN_USER:%.*]] = add i32 [[UNI_JOIN]], 5
+; PASS-CHECK-NEXT:    store i32 [[JOIN_USER]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
-; DCE-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
-; DCE-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
-; DCE-CHECK-NEXT:    br label %[[H:.*]]
-; DCE-CHECK:       [[H]]:
-; DCE-CHECK-NEXT:    [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
-; DCE-CHECK-NEXT:    br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
-; DCE-CHECK:       [[X]]:
-; DCE-CHECK-NEXT:    ret void
+; COMB-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; COMB-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; COMB-CHECK-NEXT:  [[ENTRY:.*]]:
+; COMB-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; COMB-CHECK-NEXT:    br label %[[H:.*]]
+; COMB-CHECK:       [[H]]:
+; COMB-CHECK-NEXT:    [[UNI_MERGE_H:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[UNI_INC:%.*]], %[[H]] ]
+; COMB-CHECK-NEXT:    [[UNI_INC]] = add i32 [[UNI_MERGE_H]], 1
+; COMB-CHECK-NEXT:    [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; COMB-CHECK-NEXT:    br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; COMB-CHECK:       [[X]]:
+; COMB-CHECK-NEXT:    [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[UNI_INC]])
+; COMB-CHECK-NEXT:    [[JOIN_USER:%.*]] = add i32 [[UNI_JOIN]], 5
+; COMB-CHECK-NEXT:    store i32 [[JOIN_USER]], ptr addrspace(1) [[OUT]], align 4
+; COMB-CHECK-NEXT:    ret void
 ;
 entry:
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -43,6 +49,7 @@ H:
 X:
   %uni.join = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %uni.inc)
   %join.user = add i32 %uni.join, 5
+  store i32 %join.user, ptr addrspace(1) %out
   ret void
 }
 

>From 5fb5f8cf319268a1819483b38d5337a20e9198f9 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 19 Feb 2025 19:39:01 +0530
Subject: [PATCH 11/30] Addressed review comments

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 88 +++++++++----------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index e083ca26b7c6b..1525c9a491f65 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -95,6 +95,16 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   bool IsChanged{false};
+  Module *M = F.getParent();
+
+  // If none of the relevant intrinsics are declared, return early.
+  // if (!M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_permlane64)) &&
+  //     !M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_readfirstlane)) &&
+  //     !M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_readlane)) &&
+  //     !M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_ballot))) {
+  //   return false;
+  // }
+
   // Iterate over each instruction in the function to get the desired intrinsic
   // inst to check for optimization.
   for (Instruction &I : instructions(F)) {
@@ -114,53 +124,43 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
     Value *Src = II.getArgOperand(0);
-    // Check if the argument use is uniform
-    if (!UI->isDivergentUse(II.getOperandUse(0))) {
-      LLVM_DEBUG(dbgs() << "Replacing " << II << " with " << *Src << "\n");
-      II.replaceAllUsesWith(Src);
-      return true;
-    }
-    break;
+    // Check if the argument use is divergent
+    if (UI->isDivergentUse(II.getOperandUse(0)))
+      return false;
+    LLVM_DEBUG(dbgs() << "Replacing " << II << " with " << *Src << "\n");
+    II.replaceAllUsesWith(Src);
+    return true;
   }
   case Intrinsic::amdgcn_ballot: {
     Value *Src = II.getArgOperand(0);
-    // Check if the argument use is uniform and has a direct `icmp eq` use of
-    // the ballot result. If exists pull the ballot argument to the use place.
-    // %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)
-    // %is_done = icmp eq i64 %ballot, 0
-    // transformed IR should look like.
-    // %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)
-    // %is_done = icmp eq i64 %cond, 0
-    if (!UI->isDivergentUse(II.getOperandUse(0))) {
-      LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
-
-      // Look for a direct `icmp eq` use of the ballot result.
-      auto It = llvm::find_if(II.users(), [&](User *U) {
-        return match(U, m_ICmp(m_Specific(&II), m_Zero()));
-      });
-
-      // Check if a match was found
-      if (It != II.user_end()) {
-        // Extract the matching `icmp` instruction
-        ICmpInst *ICmp = dyn_cast<ICmpInst>(*It);
-        if (!ICmp)
-          break; // Safety check
-
-        IRBuilder<> Builder(ICmp);
-
-        // Convert ballot argument to match `icmp` operand type (i64)
-        Value *ConvertedSrc =
-            Builder.CreateZExtOrTrunc(Src, ICmp->getOperand(1)->getType());
-
-        LLVM_DEBUG(dbgs() << "Replacing ballot result in icmp: " << *ICmp
-                          << " with " << *ConvertedSrc << "\n");
-
-        // Replace `%ballot` in `icmp` with `ConvertedSrc`
-        ICmp->setOperand(0, ConvertedSrc);
-        return true;
-      }
-    }
-    break;
+    if (UI->isDivergentUse(II.getOperandUse(0)))
+      return false;
+
+    LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
+
+    // Look for a direct `icmp eq` use of the ballot result.
+    // FIXME: replace all the uses?
+    auto It = llvm::find_if(II.users(), [&](User *U) {
+      return match(U, m_ICmp(m_Specific(&II), m_Zero()));
+    });
+
+    // Check if a match was found
+    if (It == II.user_end())
+      return false;
+
+    // Extract the matching `icmp` instruction
+    ICmpInst *ICmp = dyn_cast<ICmpInst>(*It);
+    IRBuilder<> Builder(ICmp);
+
+    // Convert ballot argument to match `icmp` operand type (i64)
+    Value *ConvertedSrc = Builder.CreateZExtOrTrunc(Src, II.getType());
+
+    LLVM_DEBUG(dbgs() << "Replacing ballot result in icmp: " << *ICmp
+                      << " with " << *ConvertedSrc << "\n");
+
+    // Replace `%ballot` in `icmp` with `ConvertedSrc`
+    ICmp->setOperand(0, ConvertedSrc);
+    return true;
   }
   }
   return false;

>From 982096c2501705a626319ac86e53d1275c516c40 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Thu, 20 Feb 2025 18:46:37 +0530
Subject: [PATCH 12/30] Pull the ballot argument to all the matching icmp users

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 47 ++++++++-----------
 .../amdgpu-simplify-uniform-waterfall.ll      | 47 +++++++++++++++++++
 2 files changed, 67 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 1525c9a491f65..d4f91bb4c2572 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -96,12 +96,12 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   bool IsChanged{false};
   Module *M = F.getParent();
-
+  
   // If none of the relevant intrinsics are declared, return early.
-  // if (!M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_permlane64)) &&
-  //     !M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_readfirstlane)) &&
-  //     !M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_readlane)) &&
-  //     !M->getFunction(Intrinsic::getName(Intrinsic::amdgcn_ballot))) {
+  // if (!Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_permlane64, {}) &&
+  //     !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readfirstlane, {}) &&
+  //     !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readlane, {}) &&
+  //     !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_ballot, {})) {
   //   return false;
   // }
 
@@ -139,28 +139,21 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
     LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
 
     // Look for a direct `icmp eq` use of the ballot result.
-    // FIXME: replace all the uses?
-    auto It = llvm::find_if(II.users(), [&](User *U) {
-      return match(U, m_ICmp(m_Specific(&II), m_Zero()));
-    });
-
-    // Check if a match was found
-    if (It == II.user_end())
-      return false;
-
-    // Extract the matching `icmp` instruction
-    ICmpInst *ICmp = dyn_cast<ICmpInst>(*It);
-    IRBuilder<> Builder(ICmp);
-
-    // Convert ballot argument to match `icmp` operand type (i64)
-    Value *ConvertedSrc = Builder.CreateZExtOrTrunc(Src, II.getType());
-
-    LLVM_DEBUG(dbgs() << "Replacing ballot result in icmp: " << *ICmp
-                      << " with " << *ConvertedSrc << "\n");
-
-    // Replace `%ballot` in `icmp` with `ConvertedSrc`
-    ICmp->setOperand(0, ConvertedSrc);
-    return true;
+    bool Changed = false;
+    for (User *U : make_early_inc_range(II.users())) {
+      if (match(U, m_ICmp(m_Specific(&II), m_Zero()))) {
+        ICmpInst *ICmp = dyn_cast<ICmpInst>(U);
+        IRBuilder<> Builder(ICmp);
+        Value *ConvertedSrc = Builder.CreateZExtOrTrunc(Src, II.getType());
+
+        LLVM_DEBUG(dbgs() << "Replacing ballot result in icmp: " << *ICmp
+                          << " with " << *ConvertedSrc << "\n");
+
+        ICmp->setOperand(0, ConvertedSrc);
+        Changed = true;
+      }
+    }
+    return Changed;
   }
   }
   return false;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index f5d3aa176449a..69346d14a79fb 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -122,6 +122,53 @@ exit:
   ret void
 }
 
+define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
+; PASS-CHECK-NEXT:    [[TMP1:%.*]] = zext i1 [[NOT_DONE]] to i64
+; PASS-CHECK-NEXT:    [[IS_DONE_1:%.*]] = icmp eq i64 [[TMP1]], 0
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = zext i1 [[NOT_DONE]] to i64
+; PASS-CHECK-NEXT:    [[IS_DONE_3:%.*]] = icmp eq i64 [[TMP0]], 0
+; PASS-CHECK-NEXT:    br i1 [[IS_DONE_1]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[NOT_DONE]] to i64
+; PASS-CHECK-NEXT:    [[IS_DONE_4:%.*]] = icmp eq i64 [[TMP2]], 0
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+  %is_done_1 = icmp eq i64 %ballot, 0
+  %is_done_2 = icmp eq i64 %ballot, 0
+  br i1 %is_done_1, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  %is_done_3 = icmp eq i64 %ballot, 0
+  br label %while
+
+exit:
+  ret void
+}
 
 declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !6 = !{i64 690}

>From d4b7ec02fc87f57bbe58abd6953660bee8f484a1 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Fri, 21 Feb 2025 16:27:20 +0530
Subject: [PATCH 13/30] Match and replace icmp ballot,0 with XOR

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 37 ++++------
 .../amdgpu-simplify-uniform-waterfall.ll      | 70 +++++++++----------
 2 files changed, 47 insertions(+), 60 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index d4f91bb4c2572..6074eaeac3bf0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -95,19 +95,10 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   bool IsChanged{false};
-  Module *M = F.getParent();
-  
-  // If none of the relevant intrinsics are declared, return early.
-  // if (!Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_permlane64, {}) &&
-  //     !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readfirstlane, {}) &&
-  //     !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readlane, {}) &&
-  //     !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_ballot, {})) {
-  //   return false;
-  // }
 
   // Iterate over each instruction in the function to get the desired intrinsic
   // inst to check for optimization.
-  for (Instruction &I : instructions(F)) {
+  for (Instruction &I : make_early_inc_range(instructions(F))) {
     if (auto *Intrinsic = dyn_cast<IntrinsicInst>(&I)) {
       IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
     }
@@ -135,22 +126,24 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
     Value *Src = II.getArgOperand(0);
     if (UI->isDivergentUse(II.getOperandUse(0)))
       return false;
-
     LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
 
-    // Look for a direct `icmp eq` use of the ballot result.
     bool Changed = false;
     for (User *U : make_early_inc_range(II.users())) {
-      if (match(U, m_ICmp(m_Specific(&II), m_Zero()))) {
-        ICmpInst *ICmp = dyn_cast<ICmpInst>(U);
-        IRBuilder<> Builder(ICmp);
-        Value *ConvertedSrc = Builder.CreateZExtOrTrunc(Src, II.getType());
-
-        LLVM_DEBUG(dbgs() << "Replacing ballot result in icmp: " << *ICmp
-                          << " with " << *ConvertedSrc << "\n");
-
-        ICmp->setOperand(0, ConvertedSrc);
-        Changed = true;
+      if (auto *ICmp = dyn_cast<ICmpInst>(U)) {
+        Value *Op0 = ICmp->getOperand(0);
+        Value *Op1 = ICmp->getOperand(1);
+
+        if (ICmp->getPredicate() == ICmpInst::ICMP_EQ &&
+            ((Op0 == &II && match(Op1, m_Zero())) ||
+             (Op1 == &II && match(Op0, m_Zero())))) {
+
+          IRBuilder<> Builder(ICmp);
+          Value *Xor = Builder.CreateXor(Src, Builder.getTrue());
+          LLVM_DEBUG(dbgs() << "Replacing with XOR: " << *Xor << "\n");
+          ICmp->replaceAllUsesWith(Xor);
+          Changed = true;
+        }
       }
     }
     return Changed;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 69346d14a79fb..6898b35920ef0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,early-cse,instcombine,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
 define protected amdgpu_kernel void @trivial_waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
@@ -8,12 +8,12 @@ define protected amdgpu_kernel void @trivial_waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE1:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = xor i1 [[DONE1]], true
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[TMP0:%.*]] = zext i1 [[DONE]] to i64
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[TMP0]], 0
-; PASS-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    br label %[[WHILE]]
@@ -49,18 +49,18 @@ define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[TMP0]])
+; PASS-CHECK-NEXT:    [[TID:%.*]] = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[TMP0]])
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
-; PASS-CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[TMP3]], true
-; PASS-CHECK-NEXT:    [[TMP8:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP4]])
-; PASS-CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[TMP8]], 0
-; PASS-CHECK-NEXT:    br i1 [[TMP9]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
+; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
+; PASS-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
 ; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    [[TMP12:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP1]])
-; PASS-CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i32 [[TMP1]], [[TMP12]]
-; PASS-CHECK-NEXT:    br i1 [[TMP13]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; PASS-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[TID]], [[FIRST_ACTIVE_ID]]
+; PASS-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
 ; PASS-CHECK:       [[WORK]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    br label %[[TAIL]]
@@ -76,15 +76,15 @@ define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
 ; DCE-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
 ; DCE-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; DCE-CHECK:       [[WHILE]]:
-; DCE-CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[TMP12:%.*]], %[[TAIL:.*]] ]
-; DCE-CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP2]], true
-; DCE-CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[TMP3]])
-; DCE-CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 0
-; DCE-CHECK-NEXT:    br i1 [[TMP8]], label %[[EXIT:.*]], label %[[IF:.*]]
+; DCE-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[IS_FIRST_ACTIVE_ID:%.*]], %[[TAIL:.*]] ]
+; DCE-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; DCE-CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]])
+; DCE-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP1]], 0
+; DCE-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
 ; DCE-CHECK:       [[IF]]:
-; DCE-CHECK-NEXT:    [[TMP11:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
-; DCE-CHECK-NEXT:    [[TMP12]] = icmp eq i32 [[TMP0]], [[TMP11]]
-; DCE-CHECK-NEXT:    br i1 [[TMP12]], label %[[WORK:.*]], label %[[TAIL]]
+; DCE-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
+; DCE-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID]] = icmp eq i32 [[TMP0]], [[FIRST_ACTIVE_ID]]
+; DCE-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
 ; DCE-CHECK:       [[WORK]]:
 ; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; DCE-CHECK-NEXT:    br label %[[TAIL]]
@@ -122,8 +122,8 @@ exit:
   ret void
 }
 
-define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(
+define protected amdgpu_kernel void @trivial_waterfall_swap_op(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_swap_op(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
@@ -131,20 +131,16 @@ define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(ptr addrspa
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
 ; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
 ; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
-; PASS-CHECK-NEXT:    [[TMP1:%.*]] = zext i1 [[NOT_DONE]] to i64
-; PASS-CHECK-NEXT:    [[IS_DONE_1:%.*]] = icmp eq i64 [[TMP1]], 0
-; PASS-CHECK-NEXT:    [[TMP0:%.*]] = zext i1 [[NOT_DONE]] to i64
-; PASS-CHECK-NEXT:    [[IS_DONE_3:%.*]] = icmp eq i64 [[TMP0]], 0
-; PASS-CHECK-NEXT:    br i1 [[IS_DONE_1]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 0, [[BALLOT]]
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[NOT_DONE]] to i64
-; PASS-CHECK-NEXT:    [[IS_DONE_4:%.*]] = icmp eq i64 [[TMP2]], 0
 ; PASS-CHECK-NEXT:    br label %[[WHILE]]
 ; PASS-CHECK:       [[EXIT]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_multiple_icmp(
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_swap_op(
 ; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
 ; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -157,13 +153,11 @@ while:
   %done = phi i1 [ 0, %entry ], [ 1, %if ]
   %not_done = xor i1 %done, true
   %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
-  %is_done_1 = icmp eq i64 %ballot, 0
-  %is_done_2 = icmp eq i64 %ballot, 0
-  br i1 %is_done_1, label %exit, label %if
+  %is_done = icmp eq i64 0, %ballot
+  br i1 %is_done, label %exit, label %if
 
 if:
   store i32 5, ptr addrspace(1) %out
-  %is_done_3 = icmp eq i64 %ballot, 0
   br label %while
 
 exit:

>From 6297b9d3da2fdeddb82ad41092f717b640b3d178 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Mon, 24 Feb 2025 15:43:13 +0530
Subject: [PATCH 14/30] Rebase: resolve merge

---
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |   2 -
 ...amdgpu-miscellaneous-uniform-intrinsics.ll | 131 ------------------
 llvm/test/CodeGen/AMDGPU/llc-pipeline.ll      |  23 ---
 3 files changed, 156 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 5b5def0977aa6..3f7995722be64 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1222,8 +1222,6 @@ void AMDGPUPassConfig::addIRPasses() {
   if (isPassEnabled(EnableImageIntrinsicOptimizer))
     addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));
 
-  if (EnableUniformIntrinsicCombine)
-    addPass(createAMDGPUUniformIntrinsicCombineLegacyPass());
   // This can be disabled by passing ::Disable here or on the command line
   // with --expand-variadics-override=disable.
   addPass(createExpandVariadicsPass(ExpandVariadicsMode::Lowering));
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
deleted file mode 100644
index f450b0e6763c4..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-miscellaneous-uniform-intrinsics.ll
+++ /dev/null
@@ -1,131 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -verify-machineinstrs -o - %s | FileCheck %s
-
-define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
-; CHECK-LABEL: readfirstlane_with_readfirstlane:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 5
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
-  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
-  store i32 %v2, ptr addrspace(1) %out
-  ret void
-}
-
-define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
-; CHECK-LABEL: readfirstlane_with_readlane:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    v_bfe_u32 v1, v0, 10, 10
-; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v1
-; CHECK-NEXT:    v_readlane_b32 s2, v0, s2
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
-  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
-  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
-  %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
-  store i32 %v2, ptr addrspace(1) %out
-  ret void
-}
-
-define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
-; CHECK-LABEL: readlane_with_firstlane:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v0
-; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
-  %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
-  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3)
-  store i32 %v2, ptr addrspace(1) %out
-  ret void
-}
-
-define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
-; CHECK-LABEL: readlane_readlane:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    v_bfe_u32 v1, v0, 10, 10
-; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v1
-; CHECK-NEXT:    v_readlane_b32 s2, v0, s2
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
-  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
-  %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
-  %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2)
-  store i32 %v2, ptr addrspace(1) %out
-  ret void
-}
-
-define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
-; CHECK-LABEL: permlane64_uniform:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_clause 0x1
-; CHECK-NEXT:    s_load_b32 s2, s[4:5], 0x8
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
-  store i32 %v, ptr addrspace(1) %out
-  ret void
-}
-
-define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
-; CHECK-LABEL: permlane64_nonuniform:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT:    v_permlane64_b32 v1, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %tid = call i32 @llvm.amdgcn.workitem.id.x()
-  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
-  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  store i32 %v, i32 addrspace(1)* %out_ptr
-  ret void
-}
-
-define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
-; CHECK-LABEL: permlane64_nonuniform_expression:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_load_b64 s[0:1], s[4:5], 0x0
-; CHECK-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; CHECK-NEXT:    v_add_nc_u32_e32 v1, 1, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; CHECK-NEXT:    v_permlane64_b32 v1, v1
-; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    global_store_b32 v0, v1, s[0:1]
-; CHECK-NEXT:    s_endpgm
-  %tid = call i32 @llvm.amdgcn.workitem.id.x()
-  %tid2 = add i32 %tid, 1
-  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
-  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
-  store i32 %v, i32 addrspace(1)* %out_ptr
-  ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index ea8a22de76119..4b6cc32522f5b 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -31,11 +31,6 @@
 ; GCN-O0-NEXT:    AMDGPU Remove Incompatible Functions
 ; GCN-O0-NEXT:    AMDGPU Printf lowering
 ; GCN-O0-NEXT:    Lower ctors and dtors for AMDGPU
-; GCN-O0-NEXT:    FunctionPass Manager
-; GCN-O0-NEXT:      Dominator Tree Construction
-; GCN-O0-NEXT:      Cycle Info Analysis
-; GCN-O0-NEXT:      Uniformity Analysis
-; GCN-O0-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O0-NEXT:    Expand variadic functions
 ; GCN-O0-NEXT:    AMDGPU Inline All Functions
 ; GCN-O0-NEXT:    Inliner for always_inline functions
@@ -186,11 +181,6 @@
 ; GCN-O1-NEXT:    AMDGPU Remove Incompatible Functions
 ; GCN-O1-NEXT:    AMDGPU Printf lowering
 ; GCN-O1-NEXT:    Lower ctors and dtors for AMDGPU
-; GCN-O1-NEXT:    FunctionPass Manager
-; GCN-O1-NEXT:      Dominator Tree Construction
-; GCN-O1-NEXT:      Cycle Info Analysis
-; GCN-O1-NEXT:      Uniformity Analysis
-; GCN-O1-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O1-NEXT:    Expand variadic functions
 ; GCN-O1-NEXT:    AMDGPU Inline All Functions
 ; GCN-O1-NEXT:    Inliner for always_inline functions
@@ -476,11 +466,6 @@
 ; GCN-O1-OPTS-NEXT:    AMDGPU Remove Incompatible Functions
 ; GCN-O1-OPTS-NEXT:    AMDGPU Printf lowering
 ; GCN-O1-OPTS-NEXT:    Lower ctors and dtors for AMDGPU
-; GCN-O1-OPTS-NEXT:    FunctionPass Manager
-; GCN-O1-OPTS-NEXT:      Dominator Tree Construction
-; GCN-O1-OPTS-NEXT:      Cycle Info Analysis
-; GCN-O1-OPTS-NEXT:      Uniformity Analysis
-; GCN-O1-OPTS-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O1-OPTS-NEXT:    Expand variadic functions
 ; GCN-O1-OPTS-NEXT:    AMDGPU Inline All Functions
 ; GCN-O1-OPTS-NEXT:    Inliner for always_inline functions
@@ -796,10 +781,6 @@
 ; GCN-O2-NEXT:    Lower ctors and dtors for AMDGPU
 ; GCN-O2-NEXT:    FunctionPass Manager
 ; GCN-O2-NEXT:      AMDGPU Image Intrinsic Optimizer
-; GCN-O2-NEXT:      Dominator Tree Construction
-; GCN-O2-NEXT:      Cycle Info Analysis
-; GCN-O2-NEXT:      Uniformity Analysis
-; GCN-O2-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O2-NEXT:    Expand variadic functions
 ; GCN-O2-NEXT:    AMDGPU Inline All Functions
 ; GCN-O2-NEXT:    Inliner for always_inline functions
@@ -1119,10 +1100,6 @@
 ; GCN-O3-NEXT:    Lower ctors and dtors for AMDGPU
 ; GCN-O3-NEXT:    FunctionPass Manager
 ; GCN-O3-NEXT:      AMDGPU Image Intrinsic Optimizer
-; GCN-O3-NEXT:      Dominator Tree Construction
-; GCN-O3-NEXT:      Cycle Info Analysis
-; GCN-O3-NEXT:      Uniformity Analysis
-; GCN-O3-NEXT:      AMDGPU uniformIntrinsic Combine
 ; GCN-O3-NEXT:    Expand variadic functions
 ; GCN-O3-NEXT:    AMDGPU Inline All Functions
 ; GCN-O3-NEXT:    Inliner for always_inline functions

>From 603d5f6f8b341351d065231e7ff484ee4e81440b Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Mon, 24 Feb 2025 16:21:16 +0530
Subject: [PATCH 15/30] remove undef test

---
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |  2 +-
 .../amdgpu-uniform-intrinsic-combine.ll       | 68 -------------------
 2 files changed, 1 insertion(+), 69 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 3f7995722be64..e7da24329f21c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -477,7 +477,7 @@ static cl::opt<bool> HasClosedWorldAssumption(
     "amdgpu-link-time-closed-world",
     cl::desc("Whether has closed-world assumption at link time"),
     cl::init(false), cl::Hidden);
-    
+
 static cl::opt<bool> EnableUniformIntrinsicCombine(
     "amdgpu-enable-uniform-intrinsic-combine",
     cl::desc("Enable/Disable the Uniform Intrinsic Combine Pass"),
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index ee54aff64f25d..e182319a1faad 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -19,23 +19,6 @@ define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_undef(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 undef)
-; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_undef(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-  %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
-  store i32 %v, ptr addrspace(1) %out
-  ret void
-}
-
 define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
@@ -121,23 +104,6 @@ define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_undef(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 undef, i32 undef)
-; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_undef(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-  %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
-  store i32 %v, ptr addrspace(1) %out
-  ret void
-}
-
 define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
@@ -232,23 +198,6 @@ define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_undef(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 undef)
-; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_undef(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-  %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
-  store i32 %v, ptr addrspace(1) %out
-  ret void
-}
-
 define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
@@ -478,23 +427,6 @@ define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
   ret void
 }
 
-define amdgpu_kernel void @permlane64_invalid(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_invalid(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[UNDEF_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 undef)
-; PASS-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_invalid(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:    store i32 undef, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-  %undef_v = call i32 @llvm.amdgcn.permlane64(i32 undef)
-  store i32 %undef_v, ptr addrspace(1) %out
-  ret void
-}
-
 define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {

>From 2781457618f9383d1fc3007c953aa17e84fd733d Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Mon, 3 Mar 2025 18:59:48 +0530
Subject: [PATCH 16/30] add icmp ne case, and test

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  |  34 +-
 .../amdgpu-simplify-uniform-waterfall.ll      | 328 +++++++++++++++---
 2 files changed, 309 insertions(+), 53 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 6074eaeac3bf0..e5b61063fd7b8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -133,17 +133,37 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
       if (auto *ICmp = dyn_cast<ICmpInst>(U)) {
         Value *Op0 = ICmp->getOperand(0);
         Value *Op1 = ICmp->getOperand(1);
-
-        if (ICmp->getPredicate() == ICmpInst::ICMP_EQ &&
-            ((Op0 == &II && match(Op1, m_Zero())) ||
-             (Op1 == &II && match(Op0, m_Zero())))) {
-
-          IRBuilder<> Builder(ICmp);
+        ICmpInst::Predicate Pred = ICmp->getPredicate();
+        IRBuilder<> Builder(ICmp);
+
+        // Ensure ballot is one of the operands
+        Value *OtherOp = nullptr;
+        if (Op0 == &II)
+          OtherOp = Op1;
+        else if (Op1 == &II)
+          OtherOp = Op0;
+        else
+          continue; // Skip if ballot isn't involved
+
+        // Case (icmp eq %ballot, 0) OR (icmp ne %ballot, 1)  -->  xor
+        // %ballot_arg, 1
+        if ((Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_Zero())) ||
+            (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_One()))) {
           Value *Xor = Builder.CreateXor(Src, Builder.getTrue());
-          LLVM_DEBUG(dbgs() << "Replacing with XOR: " << *Xor << "\n");
+          LLVM_DEBUG(dbgs()
+                     << "Replacing ICMP_EQ/ICMP_NE with XOR: " << *Xor << "\n");
           ICmp->replaceAllUsesWith(Xor);
           Changed = true;
         }
+        // Case (icmp eq %ballot, 1) OR (icmp ne %ballot, 0)  -->  %ballot_arg
+        else if ((Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_One())) ||
+                 (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_Zero()))) {
+          LLVM_DEBUG(dbgs()
+                     << "Replacing ICMP_EQ/ICMP_NE with ballot argument: "
+                     << *Src << "\n");
+          ICmp->replaceAllUsesWith(Src);
+          Changed = true;
+        }
       }
     }
     return Changed;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 6898b35920ef0..975aa66fcd7dd 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -2,8 +2,8 @@
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,early-cse,instcombine,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
-define protected amdgpu_kernel void @trivial_waterfall(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
@@ -20,7 +20,7 @@ define protected amdgpu_kernel void @trivial_waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK:       [[EXIT]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall(
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
 ; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
 ; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -33,7 +33,285 @@ while:
   %done = phi i1 [ 0, %entry ], [ 1, %if ]
   %not_done = xor i1 %done, true
   %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
-  %is_done = icmp eq i64 %ballot, 0
+  %is_done = icmp eq i64 %ballot, 0 ; in this case is_done = !not_done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 0, [[BALLOT]]
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+  %is_done = icmp eq i64 0, %ballot ; in this case is_done = !not_done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_one(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 [[BALLOT]], 1
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[IF]], label %[[EXIT:.*]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp ne i64 %ballot, 1 ; in this case is_done = !done
+  br i1 %is_done, label %if, label %exit
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 1, [[BALLOT]]
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[IF]], label %[[EXIT:.*]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp ne i64 1, %ballot ; in this case is_done = !done
+  br i1 %is_done, label %if, label %exit
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_one(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 1
+; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp eq i64 %ballot, 1 ; in this case is_done = done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 1, [[BALLOT]]
+; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp eq i64 1, %ballot ; in this case is_done = done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 0, [[BALLOT]]
+; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp ne i64 0, %ballot ; in this case is_done = done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspace(1) %out) {
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 [[BALLOT]], 0
+; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+  %is_done = icmp ne i64 %ballot, 0 ; in this case is_done = done
   br i1 %is_done, label %exit, label %if
 
 if:
@@ -122,48 +400,6 @@ exit:
   ret void
 }
 
-define protected amdgpu_kernel void @trivial_waterfall_swap_op(ptr addrspace(1) %out) {
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_swap_op(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
-; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
-; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 0, [[BALLOT]]
-; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
-; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    br label %[[WHILE]]
-; PASS-CHECK:       [[EXIT]]:
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_swap_op(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
-; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-entry:
-  br label %while
-
-while:
-  %done = phi i1 [ 0, %entry ], [ 1, %if ]
-  %not_done = xor i1 %done, true
-  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
-  %is_done = icmp eq i64 0, %ballot
-  br i1 %is_done, label %exit, label %if
-
-if:
-  store i32 5, ptr addrspace(1) %out
-  br label %while
-
-exit:
-  ret void
-}
-
 declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !6 = !{i64 690}
 !7 = distinct !{!7, !8}

>From 261b4ffc12c81face5ec06ee061c73988c533b66 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Tue, 4 Mar 2025 13:01:04 +0530
Subject: [PATCH 17/30] Add a run line that shows the difference without this
 pass in the pipeline

---
 .../amdgpu-simplify-uniform-waterfall.ll      | 129 +++++++++++++++
 .../amdgpu-uniform-intrinsic-combine.ll       | 149 ++++++++++++++++++
 2 files changed, 278 insertions(+)

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 975aa66fcd7dd..4ef3cfa23b7f5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -1,8 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,early-cse,instcombine,simplifycfg -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
 define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK:       [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[EXIT]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -45,6 +58,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK:       [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[EXIT]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -87,6 +112,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_ne_one(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 1
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP0:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -127,6 +164,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 1
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP2:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -167,6 +216,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_eq_one(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP0]], 1
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP3:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -206,6 +267,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP0]], 1
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP4:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -245,6 +318,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP5:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -284,6 +369,18 @@ exit:
 }
 
 define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP6:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -323,6 +420,29 @@ exit:
 }
 
 define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*]]:
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[IS_FIRST_ACTIVE_ID:%.*]], %[[WHILE_BACKEDGE:.*]] ]
+; CURRENT-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]])
+; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP1]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
+; CURRENT-CHECK:       [[IF]]:
+; CURRENT-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
+; CURRENT-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID]] = icmp eq i32 [[TMP0]], [[FIRST_ACTIVE_ID]]
+; CURRENT-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[WHILE_BACKEDGE]]
+; CURRENT-CHECK:       [[WORK]]:
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE_BACKEDGE]]
+; CURRENT-CHECK:       [[WHILE_BACKEDGE]]:
+; CURRENT-CHECK-NEXT:    br label %[[WHILE]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
@@ -404,3 +524,12 @@ declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !6 = !{i64 690}
 !7 = distinct !{!7, !8}
 !8 = !{!"llvm.loop.mustprogress"}
+;.
+; CURRENT-CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; CURRENT-CHECK: [[META1]] = !{!"llvm.loop.peeled.count", i32 1}
+; CURRENT-CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CURRENT-CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+; CURRENT-CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]}
+; CURRENT-CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
+; CURRENT-CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index e182319a1faad..0b0dd66ccc829 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -1,8 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s -check-prefix=DCE-CHECK
 
 define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CURRENT-CHECK-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 77)
@@ -20,6 +26,12 @@ define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC]])
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC]])
@@ -37,6 +49,15 @@ define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 }
 
 define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -61,6 +82,16 @@ define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
 }
 
 define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -88,6 +119,11 @@ define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %o
 }
 
 define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 7, i32 5)
@@ -105,6 +141,12 @@ define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
@@ -122,6 +164,16 @@ define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
+; CURRENT-CHECK-NEXT:    [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; CURRENT-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -149,6 +201,18 @@ define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out)
 }
 
 define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT:    [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; CURRENT-CHECK-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; CURRENT-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -182,6 +246,11 @@ define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out
 }
 
 define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 7)
@@ -199,6 +268,12 @@ define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC0:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
@@ -216,6 +291,15 @@ define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i3
 }
 
 define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -240,6 +324,16 @@ define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out
 }
 
 define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
+; CURRENT-CHECK-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -267,6 +361,11 @@ define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 5)
@@ -286,6 +385,14 @@ define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %ou
 }
 
 define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT:    [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT:    [[V1:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -312,6 +419,13 @@ define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[V1:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; CURRENT-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -335,6 +449,14 @@ define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT:    [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT:    [[V1:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -362,6 +484,12 @@ define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
 
 
 define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT_MIN:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT_MAX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; CURRENT-CHECK-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[MIN_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 -2147483648)
@@ -384,6 +512,14 @@ define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr ad
 }
 
 define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
@@ -408,6 +544,11 @@ define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:    store i32 435, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[RANDOM:%.*]] = xor i32 123, 456
@@ -428,6 +569,14 @@ define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
 }
 
 define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[IDX1:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT:    [[IDX2:%.*]] = shl i32 [[IDX1]], 1
+; CURRENT-CHECK-NEXT:    [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; CURRENT-CHECK-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    ret void
+;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()

>From c34d392f50b562d728a7ceff916a5d3f79a2112a Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 5 Mar 2025 19:43:30 +0530
Subject: [PATCH 18/30] addressed reviews

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 10 ++--
 .../amdgpu-simplify-uniform-waterfall.ll      | 57 +++++--------------
 2 files changed, 18 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index e5b61063fd7b8..df9349ab113e4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -134,7 +134,6 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
         Value *Op0 = ICmp->getOperand(0);
         Value *Op1 = ICmp->getOperand(1);
         ICmpInst::Predicate Pred = ICmp->getPredicate();
-        IRBuilder<> Builder(ICmp);
 
         // Ensure ballot is one of the operands
         Value *OtherOp = nullptr;
@@ -149,10 +148,11 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
         // %ballot_arg, 1
         if ((Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_Zero())) ||
             (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_One()))) {
-          Value *Xor = Builder.CreateXor(Src, Builder.getTrue());
-          LLVM_DEBUG(dbgs()
-                     << "Replacing ICMP_EQ/ICMP_NE with XOR: " << *Xor << "\n");
-          ICmp->replaceAllUsesWith(Xor);
+          Instruction *NotOp = BinaryOperator::CreateNot(Src);
+          NotOp->insertInto(ICmp->getParent(), ICmp->getIterator());
+          LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ/ICMP_NE with NOT: " << *NotOp
+                            << "\n");
+          ICmp->replaceAllUsesWith(NotOp);
           Changed = true;
         }
         // Case (icmp eq %ballot, 1) OR (icmp ne %ballot, 0)  -->  %ballot_arg
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 4ef3cfa23b7f5..b3024ec5f54b6 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -421,43 +421,31 @@ exit:
 
 define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
 ; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
-; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
-; CURRENT-CHECK-NEXT:  [[ENTRY:.*]]:
-; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
-; CURRENT-CHECK:       [[WHILE]]:
-; CURRENT-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[IS_FIRST_ACTIVE_ID:%.*]], %[[WHILE_BACKEDGE:.*]] ]
-; CURRENT-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]])
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
 ; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP1]], 0
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
-; CURRENT-CHECK:       [[IF]]:
-; CURRENT-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
-; CURRENT-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID]] = icmp eq i32 [[TMP0]], [[FIRST_ACTIVE_ID]]
-; CURRENT-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[WHILE_BACKEDGE]]
-; CURRENT-CHECK:       [[WORK]]:
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
+; CURRENT-CHECK:       [[WORK_PEEL]]:
 ; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; CURRENT-CHECK-NEXT:    br label %[[WHILE_BACKEDGE]]
-; CURRENT-CHECK:       [[WHILE_BACKEDGE]]:
-; CURRENT-CHECK-NEXT:    br label %[[WHILE]]
+; CURRENT-CHECK-NEXT:    br label %[[EXIT]]
 ; CURRENT-CHECK:       [[EXIT]]:
 ; CURRENT-CHECK-NEXT:    ret void
 ;
 ; PASS-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; PASS-CHECK-NEXT:    [[TID:%.*]] = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[TMP0]])
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
 ; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
 ; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
 ; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
-; PASS-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
 ; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
-; PASS-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[TID]], [[FIRST_ACTIVE_ID]]
+; PASS-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 0)
+; PASS-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 0, 0
 ; PASS-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
 ; PASS-CHECK:       [[WORK]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -470,30 +458,11 @@ define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
 ;
 ; DCE-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
 ; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*]]:
-; DCE-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-; DCE-CHECK-NEXT:    br label %[[WHILE:.*]]
-; DCE-CHECK:       [[WHILE]]:
-; DCE-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[IS_FIRST_ACTIVE_ID:%.*]], %[[TAIL:.*]] ]
-; DCE-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; DCE-CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]])
-; DCE-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP1]], 0
-; DCE-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
-; DCE-CHECK:       [[IF]]:
-; DCE-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP0]])
-; DCE-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID]] = icmp eq i32 [[TMP0]], [[FIRST_ACTIVE_ID]]
-; DCE-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
-; DCE-CHECK:       [[WORK]]:
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
 ; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    br label %[[TAIL]]
-; DCE-CHECK:       [[TAIL]]:
-; DCE-CHECK-NEXT:    br label %[[WHILE]]
-; DCE-CHECK:       [[EXIT]]:
 ; DCE-CHECK-NEXT:    ret void
 ;
 entry:
-  %1 = tail call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
-  %tid = tail call noundef i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %1)
   br label %while
 
 while:
@@ -504,8 +473,8 @@ while:
   br i1 %is_done, label %exit, label %if
 
 if:
-  %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %tid)
-  %is_first_active_id = icmp eq i32 %tid, %first_active_id
+  %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 0)
+  %is_first_active_id = icmp eq i32 0, %first_active_id
   br i1 %is_first_active_id, label %work, label %tail
 
 work:

>From eb73c6a6036d7f56a13804d5cd9f802e2789add3 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Thu, 6 Mar 2025 17:33:08 +0530
Subject: [PATCH 19/30] address reviews

---
 .../amdgpu-simplify-uniform-waterfall.ll      | 94 +++++++++++++++++--
 1 file changed, 87 insertions(+), 7 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index b3024ec5f54b6..5c0c5f7bcba77 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -419,20 +419,20 @@ exit:
   ret void
 }
 
-define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
-; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
+define protected amdgpu_kernel void @trivial_uniform_waterfall(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
 ; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
 ; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
-; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
-; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP1]], 0
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
 ; CURRENT-CHECK:       [[WORK_PEEL]]:
 ; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; CURRENT-CHECK-NEXT:    br label %[[EXIT]]
 ; CURRENT-CHECK:       [[EXIT]]:
 ; CURRENT-CHECK-NEXT:    ret void
 ;
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
@@ -456,7 +456,7 @@ define protected amdgpu_kernel void @waterfall(ptr addrspace(1) %out) {
 ; PASS-CHECK:       [[EXIT]]:
 ; PASS-CHECK-NEXT:    ret void
 ;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @waterfall(
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
 ; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
 ; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -489,6 +489,86 @@ exit:
   ret void
 }
 
+define protected amdgpu_kernel void @uniform_waterfall(ptr addrspace(1) %out, i32 %mymask) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]], i32 [[MYMASK:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*]]:
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[IS_FIRST_ACTIVE_ID:%.*]], %[[WHILE_BACKEDGE:.*]] ]
+; CURRENT-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[NOT_DONE]])
+; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[IF:.*]]
+; CURRENT-CHECK:       [[IF]]:
+; CURRENT-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[MYMASK]])
+; CURRENT-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID]] = icmp eq i32 [[MYMASK]], [[FIRST_ACTIVE_ID]]
+; CURRENT-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[WHILE_BACKEDGE]]
+; CURRENT-CHECK:       [[WORK]]:
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE_BACKEDGE]]
+; CURRENT-CHECK:       [[WHILE_BACKEDGE]]:
+; CURRENT-CHECK-NEXT:    br label %[[WHILE]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MYMASK:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
+; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[MYMASK]])
+; PASS-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[MYMASK]], [[MYMASK]]
+; PASS-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK:       [[WORK]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[TAIL]]
+; PASS-CHECK:       [[TAIL]]:
+; PASS-CHECK-NEXT:    [[NEW_DONE]] = phi i1 [ true, %[[WORK]] ], [ false, %[[IF]] ]
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MYMASK:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ false, %entry ], [ %new_done, %tail ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+  %is_done = icmp eq i64 %ballot, 0
+  br i1 %is_done, label %exit, label %if
+
+if:
+  %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %mymask)
+  %is_first_active_id = icmp eq i32 %mymask, %first_active_id
+  br i1 %is_first_active_id, label %work, label %tail
+
+work:
+  store i32 5, ptr addrspace(1) %out
+  br label %tail
+
+tail:
+  %new_done = phi i1 [ true, %work ], [ false, %if ]
+  br label %while
+
+exit:
+  ret void
+}
+
 declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !6 = !{i64 690}
 !7 = distinct !{!7, !8}

>From 1bca2e7b45c15eea16f7f87f9ca7427964e1da09 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 12 Mar 2025 14:34:31 +0530
Subject: [PATCH 20/30] remove icmp ballot,1

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  |  19 +-
 .../amdgpu-simplify-uniform-waterfall.ll      | 216 +-----------------
 2 files changed, 10 insertions(+), 225 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index df9349ab113e4..513024f620e13 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -144,23 +144,18 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
         else
           continue; // Skip if ballot isn't involved
 
-        // Case (icmp eq %ballot, 0) OR (icmp ne %ballot, 1)  -->  xor
-        // %ballot_arg, 1
-        if ((Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_Zero())) ||
-            (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_One()))) {
+        // Case (icmp eq %ballot, 0) -->  xor %ballot_arg, 1
+        if (Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_Zero())) {
           Instruction *NotOp = BinaryOperator::CreateNot(Src);
           NotOp->insertInto(ICmp->getParent(), ICmp->getIterator());
-          LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ/ICMP_NE with NOT: " << *NotOp
-                            << "\n");
+          LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ: " << *NotOp << "\n");
           ICmp->replaceAllUsesWith(NotOp);
           Changed = true;
         }
-        // Case (icmp eq %ballot, 1) OR (icmp ne %ballot, 0)  -->  %ballot_arg
-        else if ((Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_One())) ||
-                 (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_Zero()))) {
-          LLVM_DEBUG(dbgs()
-                     << "Replacing ICMP_EQ/ICMP_NE with ballot argument: "
-                     << *Src << "\n");
+        // (icmp ne %ballot, 0)  -->  %ballot_arg
+        else if (Pred == ICmpInst::ICMP_NE && match(OtherOp, m_Zero())) {
+          LLVM_DEBUG(dbgs() << "Replacing ICMP_NE with ballot argument: "
+                            << *Src << "\n");
           ICmp->replaceAllUsesWith(Src);
           Changed = true;
         }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 5c0c5f7bcba77..06309275c37d0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -111,222 +111,16 @@ exit:
   ret void
 }
 
-define protected amdgpu_kernel void @trivial_waterfall_ne_one(ptr addrspace(1) %out) {
-; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
-; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
-; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
-; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
-; CURRENT-CHECK:       [[WHILE]]:
-; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
-; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 1
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP0:![0-9]+]]
-; CURRENT-CHECK:       [[EXIT]]:
-; CURRENT-CHECK-NEXT:    ret void
-;
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
-; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 [[BALLOT]], 1
-; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[IF]], label %[[EXIT:.*]]
-; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    br label %[[WHILE]]
-; PASS-CHECK:       [[EXIT]]:
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
-; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-entry:
-  br label %while
-
-while:
-  %done = phi i1 [ 0, %entry ], [ 1, %if ]
-  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
-  %is_done = icmp ne i64 %ballot, 1 ; in this case is_done = !done
-  br i1 %is_done, label %if, label %exit
-
-if:
-  store i32 5, ptr addrspace(1) %out
-  br label %while
-
-exit:
-  ret void
-}
-
-define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(ptr addrspace(1) %out) {
-; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
-; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
-; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
-; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
-; CURRENT-CHECK:       [[WHILE]]:
-; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
-; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 1
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP2:![0-9]+]]
-; CURRENT-CHECK:       [[EXIT]]:
-; CURRENT-CHECK-NEXT:    ret void
-;
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
-; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 1, [[BALLOT]]
-; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[IF]], label %[[EXIT:.*]]
-; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    br label %[[WHILE]]
-; PASS-CHECK:       [[EXIT]]:
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_one_swap_op(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
-; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-entry:
-  br label %while
-
-while:
-  %done = phi i1 [ 0, %entry ], [ 1, %if ]
-  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
-  %is_done = icmp ne i64 1, %ballot ; in this case is_done = !done
-  br i1 %is_done, label %if, label %exit
-
-if:
-  store i32 5, ptr addrspace(1) %out
-  br label %while
-
-exit:
-  ret void
-}
-
-define protected amdgpu_kernel void @trivial_waterfall_eq_one(ptr addrspace(1) %out) {
-; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
-; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
-; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
-; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
-; CURRENT-CHECK:       [[WHILE]]:
-; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
-; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP0]], 1
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP3:![0-9]+]]
-; CURRENT-CHECK:       [[EXIT]]:
-; CURRENT-CHECK-NEXT:    ret void
-;
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
-; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 1
-; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
-; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    br label %[[WHILE]]
-; PASS-CHECK:       [[EXIT]]:
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
-; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-entry:
-  br label %while
-
-while:
-  %done = phi i1 [ 0, %entry ], [ 1, %if ]
-  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
-  %is_done = icmp eq i64 %ballot, 1 ; in this case is_done = done
-  br i1 %is_done, label %exit, label %if
-
-if:
-  store i32 5, ptr addrspace(1) %out
-  br label %while
-
-exit:
-  ret void
-}
-
-define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(ptr addrspace(1) %out) {
-; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
-; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
-; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
-; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
-; CURRENT-CHECK:       [[WHILE]]:
-; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
-; CURRENT-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i32 [[TMP0]], 1
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE]], label %[[EXIT:.*]], label %[[WHILE]], !llvm.loop [[LOOP4:![0-9]+]]
-; CURRENT-CHECK:       [[EXIT]]:
-; CURRENT-CHECK-NEXT:    ret void
-;
-; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
-; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
-; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
-; PASS-CHECK:       [[WHILE]]:
-; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 1, [[BALLOT]]
-; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
-; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; PASS-CHECK-NEXT:    br label %[[WHILE]]
-; PASS-CHECK:       [[EXIT]]:
-; PASS-CHECK-NEXT:    ret void
-;
-; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_one_swap_op(
-; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
-; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
-; DCE-CHECK-NEXT:    ret void
-;
-entry:
-  br label %while
-
-while:
-  %done = phi i1 [ 0, %entry ], [ 1, %if ]
-  %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
-  %is_done = icmp eq i64 1, %ballot ; in this case is_done = done
-  br i1 %is_done, label %exit, label %if
-
-if:
-  store i32 5, ptr addrspace(1) %out
-  br label %while
-
-exit:
-  ret void
-}
-
 define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1) %out) {
 ; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
-; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
 ; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
 ; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; CURRENT-CHECK:       [[WHILE]]:
 ; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
 ; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP5:![0-9]+]]
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CURRENT-CHECK:       [[EXIT]]:
 ; CURRENT-CHECK-NEXT:    ret void
 ;
@@ -377,7 +171,7 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspac
 ; CURRENT-CHECK:       [[WHILE]]:
 ; CURRENT-CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
 ; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
-; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP6:![0-9]+]]
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP2:![0-9]+]]
 ; CURRENT-CHECK:       [[EXIT]]:
 ; CURRENT-CHECK-NEXT:    ret void
 ;
@@ -577,8 +371,4 @@ declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 ; CURRENT-CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
 ; CURRENT-CHECK: [[META1]] = !{!"llvm.loop.peeled.count", i32 1}
 ; CURRENT-CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
-; CURRENT-CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
-; CURRENT-CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]]}
-; CURRENT-CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
-; CURRENT-CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]]}
 ;.

>From 01e3ed6b8955e09c85182bff4b29e6977b2ec61f Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Thu, 13 Mar 2025 16:56:45 +0530
Subject: [PATCH 21/30] address review

---
 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 513024f620e13..d60ebc35967a5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -135,14 +135,7 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
         Value *Op1 = ICmp->getOperand(1);
         ICmpInst::Predicate Pred = ICmp->getPredicate();
 
-        // Ensure ballot is one of the operands
-        Value *OtherOp = nullptr;
-        if (Op0 == &II)
-          OtherOp = Op1;
-        else if (Op1 == &II)
-          OtherOp = Op0;
-        else
-          continue; // Skip if ballot isn't involved
+        Value *OtherOp = (Op0 == &II ? Op1 : Op0);
 
         // Case (icmp eq %ballot, 0) -->  xor %ballot_arg, 1
         if (Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_Zero())) {

>From 51cb723436968c4ef688d6f07e3ee1f1dd995b3d Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Thu, 13 Mar 2025 17:56:20 +0530
Subject: [PATCH 22/30] Exit early if module doesn't have an intrinsic
 declaration in it

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp       | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index d60ebc35967a5..db7674d8ab4eb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -95,6 +95,20 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   bool IsChanged{false};
+  Module *M = F.getParent();
+  llvm::LLVMContext &Ctx = M->getContext();
+  llvm::Type *IntrinsicTy = llvm::Type::getInt32Ty(Ctx);
+
+  if (!Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_permlane64,
+                                         {IntrinsicTy}) &&
+      !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readfirstlane,
+                                         {IntrinsicTy}) &&
+      !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readlane,
+                                         {IntrinsicTy}) &&
+      !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_ballot,
+                                         {IntrinsicTy})) {
+    return false;
+  }
 
   // Iterate over each instruction in the function to get the desired intrinsic
   // inst to check for optimization.

>From c9ace74ba447bea1e848488437246e368471ba4e Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Sat, 15 Mar 2025 12:28:43 +0530
Subject: [PATCH 23/30] pass insertion point into createNot

---
 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index db7674d8ab4eb..34d9c960b3eb4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -153,8 +153,8 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
 
         // Case (icmp eq %ballot, 0) -->  xor %ballot_arg, 1
         if (Pred == ICmpInst::ICMP_EQ && match(OtherOp, m_Zero())) {
-          Instruction *NotOp = BinaryOperator::CreateNot(Src);
-          NotOp->insertInto(ICmp->getParent(), ICmp->getIterator());
+          Instruction *NotOp =
+              BinaryOperator::CreateNot(Src, "", ICmp->getIterator());
           LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ: " << *NotOp << "\n");
           ICmp->replaceAllUsesWith(NotOp);
           Changed = true;

>From a1a07068cadcf12ee5322257a64dcce671bb6e80 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Mon, 17 Mar 2025 16:03:50 +0530
Subject: [PATCH 24/30] avoiding unnecessary function-wide scanning

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 46 +++++++++++--------
 1 file changed, 27 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 34d9c960b3eb4..a1f5b5232bb24 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -94,27 +94,35 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 }
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
-  bool IsChanged{false};
   Module *M = F.getParent();
   llvm::LLVMContext &Ctx = M->getContext();
-  llvm::Type *IntrinsicTy = llvm::Type::getInt32Ty(Ctx);
-
-  if (!Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_permlane64,
-                                         {IntrinsicTy}) &&
-      !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readfirstlane,
-                                         {IntrinsicTy}) &&
-      !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_readlane,
-                                         {IntrinsicTy}) &&
-      !Intrinsic::getDeclarationIfExists(M, Intrinsic::amdgcn_ballot,
-                                         {IntrinsicTy})) {
-    return false;
-  }
-
-  // Iterate over each instruction in the function to get the desired intrinsic
-  // inst to check for optimization.
-  for (Instruction &I : make_early_inc_range(instructions(F))) {
-    if (auto *Intrinsic = dyn_cast<IntrinsicInst>(&I)) {
-      IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
+  // List of AMDGPU intrinsics to optimize if their arguments are uniform.
+  std::vector<Intrinsic::ID> Intrinsics = {
+      Intrinsic::amdgcn_permlane64, Intrinsic::amdgcn_readfirstlane,
+      Intrinsic::amdgcn_readlane, Intrinsic::amdgcn_ballot};
+
+  bool IsChanged = false;
+
+  // Iterate over each intrinsic in the list and process its uses within F.
+  for (Intrinsic::ID IID : Intrinsics) {
+    // Determine the correct return type for the intrinsic.
+    // Most intrinsics return i32, but amdgcn_ballot returns i64.
+    llvm::Type *IntrinsicTy = (IID == Intrinsic::amdgcn_ballot)
+                                  ? llvm::Type::getInt64Ty(Ctx)
+                                  : llvm::Type::getInt32Ty(Ctx);
+
+    // Check if the intrinsic is declared in the module with the expected type.
+    if (Function *Intr =
+            Intrinsic::getDeclarationIfExists(M, IID, {IntrinsicTy})) {
+      // Iterate over all users of the intrinsic.
+      for (User *U : Intr->users()) {
+        // Ensure the user is an intrinsic call within function F.
+        if (auto *II = dyn_cast<IntrinsicInst>(U)) {
+          if (II->getFunction() == &F) {
+            IsChanged |= optimizeUniformIntrinsicInst(*II);
+          }
+        }
+      }
     }
   }
   return IsChanged;

>From 316472c4610e8d284e27426d9dfdb1898a72a684 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Tue, 18 Mar 2025 13:04:04 +0530
Subject: [PATCH 25/30] Erase dead intrinsic call

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp       |  5 +++++
 .../AMDGPU/amdgpu-simplify-uniform-waterfall.ll    | 14 --------------
 .../AMDGPU/amdgpu-uniform-intrinsic-combine.ll     | 14 --------------
 3 files changed, 5 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index a1f5b5232bb24..28f080279404f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -142,6 +142,7 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
       return false;
     LLVM_DEBUG(dbgs() << "Replacing " << II << " with " << *Src << "\n");
     II.replaceAllUsesWith(Src);
+    II.eraseFromParent();
     return true;
   }
   case Intrinsic::amdgcn_ballot: {
@@ -165,6 +166,8 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
               BinaryOperator::CreateNot(Src, "", ICmp->getIterator());
           LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ: " << *NotOp << "\n");
           ICmp->replaceAllUsesWith(NotOp);
+          ICmp->eraseFromParent();
+          II.eraseFromParent();
           Changed = true;
         }
         // (icmp ne %ballot, 0)  -->  %ballot_arg
@@ -172,6 +175,8 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
           LLVM_DEBUG(dbgs() << "Replacing ICMP_NE with ballot argument: "
                             << *Src << "\n");
           ICmp->replaceAllUsesWith(Src);
+          ICmp->eraseFromParent();
+          II.eraseFromParent();
           Changed = true;
         }
       }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index 06309275c37d0..a72d18015d5fa 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -23,9 +23,7 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1)
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
 ; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
 ; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -77,9 +75,7 @@ define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrs
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
 ; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 0, [[BALLOT]]
 ; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -130,8 +126,6 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1)
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 0, [[BALLOT]]
 ; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -181,8 +175,6 @@ define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspac
 ; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[DONE]])
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp ne i64 [[BALLOT]], 0
 ; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
 ; PASS-CHECK:       [[IF]]:
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
@@ -233,12 +225,9 @@ define protected amdgpu_kernel void @trivial_uniform_waterfall(ptr addrspace(1)
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
 ; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
 ; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
 ; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 0)
 ; PASS-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 0, 0
 ; PASS-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
 ; PASS-CHECK:       [[WORK]]:
@@ -313,12 +302,9 @@ define protected amdgpu_kernel void @uniform_waterfall(ptr addrspace(1) %out, i3
 ; PASS-CHECK:       [[WHILE]]:
 ; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
 ; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
-; PASS-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i64 @llvm.amdgcn.ballot.i64(i1 [[NOT_DONE]])
 ; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
-; PASS-CHECK-NEXT:    [[IS_DONE:%.*]] = icmp eq i64 [[BALLOT]], 0
 ; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
 ; PASS-CHECK:       [[IF]]:
-; PASS-CHECK-NEXT:    [[FIRST_ACTIVE_ID:%.*]] = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 [[MYMASK]])
 ; PASS-CHECK-NEXT:    [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[MYMASK]], [[MYMASK]]
 ; PASS-CHECK-NEXT:    br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
 ; PASS-CHECK:       [[WORK]]:
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index 0b0dd66ccc829..09971e55786e1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -11,7 +11,6 @@ define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 77)
 ; PASS-CHECK-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -34,7 +33,6 @@ define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC]])
 ; PASS-CHECK-NEXT:    store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -126,7 +124,6 @@ define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 7, i32 5)
 ; PASS-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -149,7 +146,6 @@ define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i3
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
 ; PASS-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -253,7 +249,6 @@ define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 7)
 ; PASS-CHECK-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -276,7 +271,6 @@ define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i3
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
 ; PASS-CHECK-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -368,8 +362,6 @@ define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %ou
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 5)
-; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 5)
 ; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -398,7 +390,6 @@ define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[V1]])
 ; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -430,7 +421,6 @@ define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
-; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[V1]], i32 3)
 ; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -462,7 +452,6 @@ define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
 ; PASS-CHECK-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
 ; PASS-CHECK-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
 ; PASS-CHECK-NEXT:    [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
-; PASS-CHECK-NEXT:    [[V2:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[V1]], i32 2)
 ; PASS-CHECK-NEXT:    store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -492,9 +481,7 @@ define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr ad
 ;
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
-; PASS-CHECK-NEXT:    [[MIN_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 -2147483648)
 ; PASS-CHECK-NEXT:    store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
-; PASS-CHECK-NEXT:    [[MAX_V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 2147483647)
 ; PASS-CHECK-NEXT:    store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;
@@ -552,7 +539,6 @@ define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
 ; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
 ; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
 ; PASS-CHECK-NEXT:    [[RANDOM:%.*]] = xor i32 123, 456
-; PASS-CHECK-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[RANDOM]])
 ; PASS-CHECK-NEXT:    store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
 ; PASS-CHECK-NEXT:    ret void
 ;

>From f22d719ed2fafd31844156c2d2920032ebf07e3c Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 19 Mar 2025 12:22:21 +0530
Subject: [PATCH 26/30] Bug: Possible issue if II still has users

---
 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 28f080279404f..12dc745289350 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -167,7 +167,6 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
           LLVM_DEBUG(dbgs() << "Replacing ICMP_EQ: " << *NotOp << "\n");
           ICmp->replaceAllUsesWith(NotOp);
           ICmp->eraseFromParent();
-          II.eraseFromParent();
           Changed = true;
         }
         // (icmp ne %ballot, 0)  -->  %ballot_arg
@@ -176,11 +175,13 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
                             << *Src << "\n");
           ICmp->replaceAllUsesWith(Src);
           ICmp->eraseFromParent();
-          II.eraseFromParent();
           Changed = true;
         }
       }
     }
+    // Erase the intrinsic if it has no remaining uses.
+    if (II.use_empty())
+      II.eraseFromParent();
     return Changed;
   }
   }

>From f8da0bc2940cf495c57aa331044f7fbe0d28e2b5 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 19 Mar 2025 12:50:21 +0530
Subject: [PATCH 27/30] Exit early if ballot has no Icmp use

---
 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 12dc745289350..350a1f9b389cc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -151,6 +151,12 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
       return false;
     LLVM_DEBUG(dbgs() << "Found uniform ballot intrinsic: " << II << "\n");
 
+    // If there are no ICmp users, return early.
+    if (II.user_empty() ||
+        none_of(II.users(), [](User *U) { return isa<ICmpInst>(U); })) {
+      return false;
+    }
+
     bool Changed = false;
     for (User *U : make_early_inc_range(II.users())) {
       if (auto *ICmp = dyn_cast<ICmpInst>(U)) {

>From 5e3c8fa37b9862ab29537164e41f9c1bd45e26ef Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Wed, 19 Mar 2025 14:12:35 +0530
Subject: [PATCH 28/30] Refactor: address reviews

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 178 ++++++++----------
 1 file changed, 81 insertions(+), 97 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 350a1f9b389cc..e12b58bd4e420 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -33,103 +33,9 @@ using namespace llvm;
 using namespace llvm::AMDGPU;
 using namespace llvm::PatternMatch;
 
-namespace {
-class AMDGPUUniformIntrinsicCombineLegacy : public FunctionPass {
-public:
-  static char ID;
-  AMDGPUUniformIntrinsicCombineLegacy() : FunctionPass(ID) {
-    initializeAMDGPUUniformIntrinsicCombineLegacyPass(
-        *PassRegistry::getPassRegistry());
-  }
-  bool runOnFunction(Function &F) override;
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.setPreservesCFG();
-    AU.addRequired<UniformityInfoWrapperPass>();
-    AU.addRequired<TargetPassConfig>();
-  }
-};
-
-class AMDGPUUniformIntrinsicCombineImpl
-    : public InstVisitor<AMDGPUUniformIntrinsicCombineImpl> {
-private:
-  const UniformityInfo *UI;
-  bool optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
-
-public:
-  AMDGPUUniformIntrinsicCombineImpl() = delete;
-  AMDGPUUniformIntrinsicCombineImpl(const UniformityInfo *UI) : UI(UI) {}
-  bool run(Function &F);
-};
-} // namespace
-
-char AMDGPUUniformIntrinsicCombineLegacy::ID = 0;
-char &llvm::AMDGPUUniformIntrinsicCombineLegacyPassID =
-    AMDGPUUniformIntrinsicCombineLegacy::ID;
-
-bool AMDGPUUniformIntrinsicCombineLegacy::runOnFunction(Function &F) {
-  if (skipFunction(F)) {
-    return false;
-  }
-  const UniformityInfo *UI =
-      &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
-  return AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
-}
-
-PreservedAnalyses
-AMDGPUUniformIntrinsicCombinePass::run(Function &F,
-                                       FunctionAnalysisManager &AM) {
-  const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
-  bool IsChanged = AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
-
-  if (!IsChanged) {
-    return PreservedAnalyses::all();
-  }
-  PreservedAnalyses PA;
-  PA.preserve<DominatorTreeAnalysis>();
-  PA.preserve<LoopAnalysis>();
-  PA.preserve<ScalarEvolutionAnalysis>();
-  PA.preserve<UniformityInfoAnalysis>();
-  PA.preserve<TargetLibraryAnalysis>();
-  return PA;
-}
-
-bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
-  Module *M = F.getParent();
-  llvm::LLVMContext &Ctx = M->getContext();
-  // List of AMDGPU intrinsics to optimize if their arguments are uniform.
-  std::vector<Intrinsic::ID> Intrinsics = {
-      Intrinsic::amdgcn_permlane64, Intrinsic::amdgcn_readfirstlane,
-      Intrinsic::amdgcn_readlane, Intrinsic::amdgcn_ballot};
-
-  bool IsChanged = false;
-
-  // Iterate over each intrinsic in the list and process its uses within F.
-  for (Intrinsic::ID IID : Intrinsics) {
-    // Determine the correct return type for the intrinsic.
-    // Most intrinsics return i32, but amdgcn_ballot returns i64.
-    llvm::Type *IntrinsicTy = (IID == Intrinsic::amdgcn_ballot)
-                                  ? llvm::Type::getInt64Ty(Ctx)
-                                  : llvm::Type::getInt32Ty(Ctx);
-
-    // Check if the intrinsic is declared in the module with the expected type.
-    if (Function *Intr =
-            Intrinsic::getDeclarationIfExists(M, IID, {IntrinsicTy})) {
-      // Iterate over all users of the intrinsic.
-      for (User *U : Intr->users()) {
-        // Ensure the user is an intrinsic call within function F.
-        if (auto *II = dyn_cast<IntrinsicInst>(U)) {
-          if (II->getFunction() == &F) {
-            IsChanged |= optimizeUniformIntrinsicInst(*II);
-          }
-        }
-      }
-    }
-  }
-  return IsChanged;
-}
-
-bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
-    IntrinsicInst &II) const {
+/// Optimizes uniform intrinsics.
+static bool optimizeUniformIntrinsic(IntrinsicInst &II,
+                                     const UniformityInfo *UI) {
   llvm::Intrinsic::ID IID = II.getIntrinsicID();
 
   switch (IID) {
@@ -194,6 +100,84 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   return false;
 }
 
+/// Iterates over the intrinsic uses in the function to optimize.
+static bool runUniformIntrinsicCombine(Function &F, const UniformityInfo *UI) {
+  Module *M = F.getParent();
+  llvm::LLVMContext &Ctx = M->getContext();
+  // List of AMDGPU intrinsics to optimize if their arguments are uniform.
+  std::vector<Intrinsic::ID> Intrinsics = {
+      Intrinsic::amdgcn_permlane64, Intrinsic::amdgcn_readfirstlane,
+      Intrinsic::amdgcn_readlane, Intrinsic::amdgcn_ballot};
+
+  bool IsChanged = false;
+
+  // Iterate over each intrinsic in the list and process its uses within F.
+  for (Intrinsic::ID IID : Intrinsics) {
+    // Determine the correct return type for the intrinsic.
+    // Most intrinsics return i32, but amdgcn_ballot returns i64.
+    llvm::Type *IntrinsicTy = (IID == Intrinsic::amdgcn_ballot)
+                                  ? llvm::Type::getInt64Ty(Ctx)
+                                  : llvm::Type::getInt32Ty(Ctx);
+
+    // Check if the intrinsic is declared in the module with the expected type.
+    if (Function *Intr =
+            Intrinsic::getDeclarationIfExists(M, IID, {IntrinsicTy})) {
+      // Iterate over all users of the intrinsic.
+      for (User *U : Intr->users()) {
+        // Ensure the user is an intrinsic call within function F.
+        if (auto *II = dyn_cast<IntrinsicInst>(U)) {
+          if (II->getFunction() == &F) {
+            IsChanged |= optimizeUniformIntrinsic(*II, UI);
+          }
+        }
+      }
+    }
+  }
+  return IsChanged;
+}
+class AMDGPUUniformIntrinsicCombineLegacy : public FunctionPass {
+public:
+  static char ID;
+  AMDGPUUniformIntrinsicCombineLegacy() : FunctionPass(ID) {
+    initializeAMDGPUUniformIntrinsicCombineLegacyPass(
+        *PassRegistry::getPassRegistry());
+  }
+  bool runOnFunction(Function &F) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addRequired<UniformityInfoWrapperPass>();
+    AU.addRequired<TargetPassConfig>();
+    AU.addPreserved<UniformityInfoWrapperPass>();
+  }
+};
+
+char AMDGPUUniformIntrinsicCombineLegacy::ID = 0;
+char &llvm::AMDGPUUniformIntrinsicCombineLegacyPassID =
+    AMDGPUUniformIntrinsicCombineLegacy::ID;
+
+bool AMDGPUUniformIntrinsicCombineLegacy::runOnFunction(Function &F) {
+  if (skipFunction(F)) {
+    return false;
+  }
+  const UniformityInfo *UI =
+      &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
+  return runUniformIntrinsicCombine(F, UI);
+}
+
+PreservedAnalyses
+AMDGPUUniformIntrinsicCombinePass::run(Function &F,
+                                       FunctionAnalysisManager &AM) {
+  const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
+  bool IsChanged = runUniformIntrinsicCombine(F, UI);
+
+  if (!IsChanged) {
+    return PreservedAnalyses::all();
+  }
+  PreservedAnalyses PA;
+  PA.preserve<UniformityInfoAnalysis>();
+  return PA;
+}
+
 INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombineLegacy, DEBUG_TYPE,
                       "AMDGPU uniformIntrinsic Combine", false, false)
 INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)

>From 8a8f4f41b01b56e6f80cb405042ab044b96d10fd Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Thu, 20 Mar 2025 11:07:47 +0530
Subject: [PATCH 29/30] Address review

---
 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index e12b58bd4e420..4b745f9e7f1b2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -135,6 +135,7 @@ static bool runUniformIntrinsicCombine(Function &F, const UniformityInfo *UI) {
   }
   return IsChanged;
 }
+
 class AMDGPUUniformIntrinsicCombineLegacy : public FunctionPass {
 public:
   static char ID;

>From f5b900d7e34b778b53e103e6a5ee0aefdb9f4574 Mon Sep 17 00:00:00 2001
From: Pankaj kumar divedi <Pankajkumar.divedi at amd.com>
Date: Tue, 25 Mar 2025 17:24:12 +0530
Subject: [PATCH 30/30] Extend intrinsic types

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  |  38 ++--
 .../amdgpu-simplify-uniform-waterfall.ll      | 102 +++++++++
 .../amdgpu-uniform-intrinsic-combine.ll       | 211 ++++++++++++++++++
 3 files changed, 335 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 4b745f9e7f1b2..578bb5de79e01 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -110,24 +110,30 @@ static bool runUniformIntrinsicCombine(Function &F, const UniformityInfo *UI) {
       Intrinsic::amdgcn_readlane, Intrinsic::amdgcn_ballot};
 
   bool IsChanged = false;
-
+  // TODO: Vector types can also be optimized, provided a generic way to
+  // query getDeclarationIfExists().
+  SmallVector<Type *, 7> Tys = {
+      Type::getInt16Ty(Ctx),  // i16
+      Type::getInt32Ty(Ctx),  // i32
+      Type::getInt64Ty(Ctx),  // i64
+      Type::getHalfTy(Ctx),   // Float16
+      Type::getFloatTy(Ctx),  // float
+      Type::getDoubleTy(Ctx), // double
+      Type::getBFloatTy(Ctx)  // bfloat16
+  };
   // Iterate over each intrinsic in the list and process its uses within F.
   for (Intrinsic::ID IID : Intrinsics) {
-    // Determine the correct return type for the intrinsic.
-    // Most intrinsics return i32, but amdgcn_ballot returns i64.
-    llvm::Type *IntrinsicTy = (IID == Intrinsic::amdgcn_ballot)
-                                  ? llvm::Type::getInt64Ty(Ctx)
-                                  : llvm::Type::getInt32Ty(Ctx);
-
-    // Check if the intrinsic is declared in the module with the expected type.
-    if (Function *Intr =
-            Intrinsic::getDeclarationIfExists(M, IID, {IntrinsicTy})) {
-      // Iterate over all users of the intrinsic.
-      for (User *U : Intr->users()) {
-        // Ensure the user is an intrinsic call within function F.
-        if (auto *II = dyn_cast<IntrinsicInst>(U)) {
-          if (II->getFunction() == &F) {
-            IsChanged |= optimizeUniformIntrinsic(*II, UI);
+    for (Type *Ty : Tys) {
+      // Check if the intrinsic is declared in the module with the expected
+      // type.
+      if (Function *Intr = Intrinsic::getDeclarationIfExists(M, IID, {Ty})) {
+        // Iterate over all users of the intrinsic.
+        for (User *U : Intr->users()) {
+          // Ensure the user is an intrinsic call within function F.
+          if (auto *II = dyn_cast<IntrinsicInst>(U)) {
+            if (II->getFunction() == &F) {
+              IsChanged |= optimizeUniformIntrinsic(*II, UI);
+            }
           }
         }
       }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
index a72d18015d5fa..f9c520b20acc7 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -349,6 +349,107 @@ exit:
   ret void
 }
 
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    [[BALLOT_PEEL:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[BALLOT_PEEL]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK:       [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[EXIT]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT:    br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %not_done = xor i1 %done, true
+  %ballot = tail call i32 @llvm.amdgcn.ballot.i32(i1 %not_done)
+  %is_done = icmp eq i32 %ballot, 0 ; in this case is_done = !not_done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:  [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT:    br label %[[WHILE:.*]]
+; CURRENT-CHECK:       [[WHILE]]:
+; CURRENT-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT:    [[IS_DONE_NOT:%.*]] = icmp eq i32 [[BALLOT]], 0
+; CURRENT-CHECK-NEXT:    br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CURRENT-CHECK:       [[EXIT]]:
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:  [[ENTRY:.*]]:
+; PASS-CHECK-NEXT:    br label %[[WHILE:.*]]
+; PASS-CHECK:       [[WHILE]]:
+; PASS-CHECK-NEXT:    [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK:       [[IF]]:
+; PASS-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT:    br label %[[WHILE]]
+; PASS-CHECK:       [[EXIT]]:
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:  [[ENTRY:.*:]]
+; DCE-CHECK-NEXT:    store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT:    ret void
+;
+entry:
+  br label %while
+
+while:
+  %done = phi i1 [ 0, %entry ], [ 1, %if ]
+  %ballot = tail call i32 @llvm.amdgcn.ballot.i32(i1 %done)
+  %is_done = icmp ne i32 0, %ballot ; in this case is_done = done
+  br i1 %is_done, label %exit, label %if
+
+if:
+  store i32 5, ptr addrspace(1) %out
+  br label %while
+
+exit:
+  ret void
+}
+
 declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 !6 = !{i64 690}
 !7 = distinct !{!7, !8}
@@ -357,4 +458,5 @@ declare i64 @llvm.amdgcn.ballot.i64(i1) #1
 ; CURRENT-CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
 ; CURRENT-CHECK: [[META1]] = !{!"llvm.loop.peeled.count", i32 1}
 ; CURRENT-CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CURRENT-CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
 ;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
index 09971e55786e1..1da806461c8d9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -586,3 +586,214 @@ define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
   ret void
 }
 
+define amdgpu_kernel void @ballot_i32(i32 %v, ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; CURRENT-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 1)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[C:%.*]] = trunc i32 [[V]] to i1
+; CURRENT-CHECK-NEXT:    [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; CURRENT-CHECK-NEXT:    [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[BALLOT]], 0
+; CURRENT-CHECK-NEXT:    store i1 [[BALLOT_NE_ZERO]], ptr addrspace(1) [[OUT]], align 1
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT:    store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; DCE-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[C:%.*]] = trunc i32 [[V]] to i1
+; DCE-CHECK-NEXT:    store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; DCE-CHECK-NEXT:    ret void
+;
+  %c = trunc i32 %v to i1
+  %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
+  %ballot_ne_zero = icmp ne i32 %ballot, 0
+  store i1 %ballot_ne_zero, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @ballot_i64(i32 %v, ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; CURRENT-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 1)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT:    [[C:%.*]] = trunc i32 [[V]] to i1
+; CURRENT-CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; CURRENT-CHECK-NEXT:    [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[TMP1]], 0
+; CURRENT-CHECK-NEXT:    store i1 [[BALLOT_NE_ZERO]], ptr addrspace(1) [[OUT]], align 1
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT:    store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; DCE-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[C:%.*]] = trunc i32 [[V]] to i1
+; DCE-CHECK-NEXT:    store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; DCE-CHECK-NEXT:    ret void
+;
+  %c = trunc i32 %v to i1
+  %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
+  %ballot_ne_zero = icmp ne i64 %ballot, 0
+  store i1 %ballot_ne_zero, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @test_readlane_i16(i16 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; CURRENT-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
+; CURRENT-CHECK-NEXT:    [[READLANE:%.*]] = tail call i16 @llvm.amdgcn.readlane.i16(i16 [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; PASS-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; DCE-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %readlane = call i16 @llvm.amdgcn.readlane.i16(i16 %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(i16 %readlane)
+  ret void
+}
+
+define amdgpu_kernel void @test_readlane_i64(i64 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; CURRENT-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT:    [[READLANE:%.*]] = tail call i64 @llvm.amdgcn.readlane.i64(i64 [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; PASS-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; DCE-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(i64 %readlane)
+  ret void
+}
+
+define amdgpu_kernel void @test_readlane_bf16(bfloat %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; CURRENT-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT:    [[READLANE:%.*]] = tail call bfloat @llvm.amdgcn.readlane.bf16(bfloat [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; PASS-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; DCE-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %readlane = call bfloat @llvm.amdgcn.readlane.bf16(bfloat %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(bfloat %readlane)
+  ret void
+}
+
+define amdgpu_kernel void @test_readlane_f16(half %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; CURRENT-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT:    [[READLANE:%.*]] = tail call half @llvm.amdgcn.readlane.f16(half [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; PASS-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; DCE-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %readlane = call half @llvm.amdgcn.readlane.f16(half %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(half %readlane)
+  ret void
+}
+
+define amdgpu_kernel void @test_readlane_f32(float %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; CURRENT-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT:    [[READLANE:%.*]] = tail call float @llvm.amdgcn.readlane.f32(float [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; PASS-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; DCE-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %readlane = call float @llvm.amdgcn.readlane.f32(float %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(float %readlane)
+  ret void
+}
+
+define amdgpu_kernel void @test_readlane_f64(double %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; CURRENT-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT:    [[READLANE:%.*]] = tail call double @llvm.amdgcn.readlane.f64(double [[SRC0]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; PASS-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; DCE-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %readlane = call double @llvm.amdgcn.readlane.f64(double %src0, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(double %readlane)
+  ret void
+}
+; All such cases can be optimized, given a generic way to query getDeclarationIfExists()
+define void @test_readlane_v8i16(ptr addrspace(1) %out, <8 x i16> %src, i32 %src1) {
+; CURRENT-CHECK-LABEL: define void @test_readlane_v8i16(
+; CURRENT-CHECK-SAME: ptr addrspace(1) readnone captures(none) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT:    [[X:%.*]] = tail call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT:    tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT:    ret void
+;
+; PASS-CHECK-LABEL: define void @test_readlane_v8i16(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT:    [[X:%.*]] = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; PASS-CHECK-NEXT:    call void asm sideeffect "
+; PASS-CHECK-NEXT:    ret void
+;
+; DCE-CHECK-LABEL: define void @test_readlane_v8i16(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT:    [[X:%.*]] = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; DCE-CHECK-NEXT:    call void asm sideeffect "
+; DCE-CHECK-NEXT:    ret void
+;
+  %x = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> %src, i32 %src1)
+  call void asm sideeffect "; use $0", "s"(<8 x i16> %x)
+  ret void
+}



More information about the llvm-commits mailing list