[llvm] [WIP][AMDGPU] combine uniform AMDGPU lane Intrinsics (PR #116953)

Pankaj Dwivedi via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 22 07:09:30 PST 2024


https://github.com/PankajDwivedi-25 updated https://github.com/llvm/llvm-project/pull/116953

From 1f450cf4d41138575c67ad396f9d9ff33f98c7b6 Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Thu, 21 Nov 2024 12:35:56 +0530
Subject: [PATCH 1/3] [WIP][AMDGPU] combine uniform AMDGPU lane Intrinsics

---
 llvm/lib/Target/AMDGPU/AMDGPU.h               |  11 +
 llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def |   1 +
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 183 +++++++++++++++
 llvm/lib/Target/AMDGPU/CMakeLists.txt         |   1 +
 .../amdgpu-uniform-intrinsic-combine.ll       | 221 ++++++++++++++++++
 5 files changed, 417 insertions(+)
 create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
 create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
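
For orientation, this is the core rewrite the pass performs, reconstructed
from the tests below (value names are illustrative, not taken from the
patch): when the operand of one of these lane intrinsics is uniform, every
use of the call is replaced by the operand itself, and the now-dead call is
left behind for a later cleanup pass to remove.

  ; Before: %src0 is a kernel argument and therefore uniform.
  %v = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %src0)
  store i32 %v, ptr addrspace(1) %out

  ; After amdgpu-uniform-intrinsic-combine: uses of %v now read %src0
  ; directly; the call itself survives until something like DCE drops it.
  %v = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %src0)
  store i32 %src0, ptr addrspace(1) %out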

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 95d0ad0f9dc96a..33572bdb35eacc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -453,6 +453,17 @@ void initializeAMDGPUSetWavePriorityPass(PassRegistry &);
 void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &);
 extern char &GCNRewritePartialRegUsesID;
 
+void initializeAMDGPUUniformIntrinsicCombinePass(PassRegistry &);
+extern char &AMDGPUUniformIntrinsicCombineID;
+FunctionPass *createAMDGPUUniformIntrinsicCombinePass();
+
+struct AMDGPUUniformIntrinsicCombinePass
+    : public PassInfoMixin<AMDGPUUniformIntrinsicCombinePass> {
+  const AMDGPUTargetMachine &TM;
+  AMDGPUUniformIntrinsicCombinePass(const AMDGPUTargetMachine &TM_) : TM(TM_) {}
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
 namespace AMDGPU {
 enum TargetIndex {
   TI_CONSTDATA_START,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 174a90f0aa419d..bb0b7d97950132 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -65,6 +65,7 @@ FUNCTION_PASS("amdgpu-unify-divergent-exit-nodes",
               AMDGPUUnifyDivergentExitNodesPass())
 FUNCTION_PASS("amdgpu-usenative", AMDGPUUseNativeCallsPass())
 FUNCTION_PASS("si-annotate-control-flow", SIAnnotateControlFlowPass(*static_cast<const GCNTargetMachine *>(this)))
+FUNCTION_PASS("amdgpu-uniform-intrinsic-combine", AMDGPUUniformIntrinsicCombinePass(*this))
 #undef FUNCTION_PASS
 
 #ifndef FUNCTION_ANALYSIS
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
new file mode 100644
index 00000000000000..1288b70697e639
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -0,0 +1,183 @@
+//===-- AMDGPUUniformIntrinsicCombine.cpp
+//-----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass combines uniform intrinsic instructions.
+/// Unifrom Intrinsic combine uses pattern match to identify and optimize
+/// redundent intrinsic instruction.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "GCNSubtarget.h"
+#include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/UniformityAnalysis.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+
+#define DEBUG_TYPE "amdgpu-uniform-intrinsic-combine"
+
+using namespace llvm;
+using namespace llvm::AMDGPU;
+using namespace llvm::PatternMatch;
+
+namespace {
+
+class AMDGPUUniformIntrinsicCombine : public FunctionPass {
+public:
+  static char ID;
+  AMDGPUUniformIntrinsicCombine() : FunctionPass(ID) {}
+
+  bool runOnFunction(Function &F) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.addRequired<UniformityInfoWrapperPass>();
+    AU.addRequired<TargetPassConfig>();
+  }
+};
+
+class AMDGPUUniformIntrinsicCombineImpl
+    : public InstVisitor<AMDGPUUniformIntrinsicCombineImpl> {
+private:
+  const UniformityInfo *UI;
+
+  void optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
+
+public:
+  AMDGPUUniformIntrinsicCombineImpl() = delete;
+
+  AMDGPUUniformIntrinsicCombineImpl(const UniformityInfo *UI) : UI(UI) {}
+
+  bool run(Function &F);
+};
+
+} // namespace
+
+char AMDGPUUniformIntrinsicCombine::ID = 0;
+
+char &llvm::AMDGPUUniformIntrinsicCombineID = AMDGPUUniformIntrinsicCombine::ID;
+
+bool AMDGPUUniformIntrinsicCombine::runOnFunction(Function &F) {
+  if (skipFunction(F)) {
+    return false;
+  }
+
+  const UniformityInfo *UI =
+      &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
+
+  return AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
+}
+
+PreservedAnalyses
+AMDGPUUniformIntrinsicCombinePass::run(Function &F,
+                                       FunctionAnalysisManager &AM) {
+
+  const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
+
+  // @todo check if it is required that this method must return bool, if so
+  // figure out what can be returned.
+  bool IsChanged = AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
+
+  if (!IsChanged) {
+    return PreservedAnalyses::all();
+  }
+
+  PreservedAnalyses PA;
+  PA.preserve<DominatorTreeAnalysis>();
+  return PA;
+}
+
+bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
+
+  // @todo check if it is required that this method must return bool, if so
+  // figure out what can be returned.
+  const bool IsChanged{false};
+
+  // Iterate over every instruction in the function, looking for intrinsic
+  // calls that may be optimized.
+  for (BasicBlock &BB : F) {
+    for (Instruction &I : BB) {
+      if (auto *Call = dyn_cast<CallInst>(&I)) {
+        if (auto *Intrinsic = dyn_cast<IntrinsicInst>(Call)) {
+          optimizeUniformIntrinsicInst(*Intrinsic);
+        }
+      }
+    }
+  }
+
+  return IsChanged;
+}
+
+void AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
+    IntrinsicInst &II) const {
+  llvm::Intrinsic::ID IID = II.getIntrinsicID();
+
+  switch (IID) {
+  case Intrinsic::amdgcn_permlane64: {
+    Value *Src = II.getOperand(0);
+    if (UI->isUniform(Src)) {
+      return II.replaceAllUsesWith(Src);
+    }
+    break;
+  }
+  case Intrinsic::amdgcn_readfirstlane:
+  case Intrinsic::amdgcn_readlane: {
+    Value *Srcv = II.getOperand(0);
+    if (UI->isUniform(Srcv)) {
+      return II.replaceAllUsesWith(Srcv);
+    }
+
+    // The rest of these may not be safe if the exec may not be the same between
+    // the def and use.
+    Value *Src = II.getArgOperand(0);
+    Instruction *SrcInst = dyn_cast<Instruction>(Src);
+    if (SrcInst && SrcInst->getParent() != II.getParent())
+      break;
+
+    // readfirstlane (readfirstlane x) -> readfirstlane x
+    // readlane (readfirstlane x), y -> readfirstlane x
+    if (match(Src,
+              PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readfirstlane>())) {
+      return II.replaceAllUsesWith(Src);
+    }
+
+    if (IID == Intrinsic::amdgcn_readfirstlane) {
+      // readfirstlane (readlane x, y) -> readlane x, y
+      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>())) {
+        return II.replaceAllUsesWith(Src);
+      }
+    } else {
+      // readlane (readlane x, y), y -> readlane x, y
+      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>(
+                         PatternMatch::m_Value(),
+                         PatternMatch::m_Specific(II.getArgOperand(1))))) {
+        return II.replaceAllUsesWith(Src);
+      }
+    }
+    break;
+  }
+  }
+}
+
+INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,
+                      "AMDGPU Uniform Intrinsic Combine", false, false)
+INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,
+                    "AMDGPU Uniform Intrinsic Combine", false, false)
+
+FunctionPass *llvm::createAMDGPUUniformIntrinsicCombinePass() {
+  return new AMDGPUUniformIntrinsicCombine();
+}
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index fed29c3e14aae2..13e0fc61a82443 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -61,6 +61,7 @@ add_llvm_target(AMDGPUCodeGen
   AMDGPUHSAMetadataStreamer.cpp
   AMDGPUInsertDelayAlu.cpp
   AMDGPUInstCombineIntrinsic.cpp
+  AMDGPUUniformIntrinsicCombine.cpp
   AMDGPUInstrInfo.cpp
   AMDGPUInstructionSelector.cpp
   AMDGPUISelDAGToDAG.cpp
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
new file mode 100644
index 00000000000000..6f5279bb717c77
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -0,0 +1,221 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes="instcombine,amdgpu-uniform-intrinsic-combine" -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+
+define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_constant(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; GFX-NEXT:    store i32 77, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.permlane64(i32 77)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_undef(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_sgpr(ptr addrspace(1) %out, i32 %src) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_sgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_vgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @permlane64_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @permlane64_vgpr_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid2 = add i32 %tid, 1
+  %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_constant(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_undef(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_sgpr(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_sgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0]], i32 [[SRC1]])
+; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readlane_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_vgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readlane_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readlane_vgpr_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT:    [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; GFX-NEXT:    [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+  %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+  %tidx2 = add i32 %tidx, 1
+  %tidy2 = add i32 %tidy, 2
+  %v = call i32 @llvm.amdgcn.readlane(i32 %tidx2, i32 %tidy2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    store i32 7, ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_undef(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_sgpr(ptr addrspace(1) %out, i32 %src0) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_sgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0]])
+; GFX-NEXT:    store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; GFX-NEXT:    ret void
+;
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
+  store i32 %v, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_vgpr(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+define amdgpu_kernel void @readfirstlane_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: define amdgpu_kernel void @readfirstlane_vgpr_expression(
+; GFX-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; GFX-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT:    [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX-NEXT:    [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; GFX-NEXT:    [[TMP1:%.*]] = sext i32 [[TID2]] to i64
+; GFX-NEXT:    [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; GFX-NEXT:    store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT:    ret void
+;
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %tid2 = add i32 %tid, 1
+  %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid2)
+  %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
+  store i32 %v, i32 addrspace(1)* %out_ptr
+  ret void
+}
+
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10: {{.*}}
+; GFX11: {{.*}}
+; GFX12: {{.*}}

From 4d52200a7b5915b4026cc4b63d29107a5c77d754 Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Fri, 22 Nov 2024 19:42:42 +0530
Subject: [PATCH 2/3] refactored and updated intrinsics handling

---
 .../AMDGPU/AMDGPUUniformIntrinsicCombine.cpp  | 71 +++++++------------
 1 file changed, 27 insertions(+), 44 deletions(-)
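
The key change below is that the per-pattern rewrites are collapsed into a
single uniformity query on the call's first operand use: if
UniformityAnalysis proves the operand uniform, the call folds. A minimal
sketch of the chained case this covers (value names hypothetical): because
readfirstlane writes one value to every lane of its result, the inner
call's result is itself uniform, so the outer call folds to it.

  %inner = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %div)   ; uniform result
  %outer = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %inner)
  ; isUniform() holds for the outer operand, so uses of %outer are
  ; replaced by %inner:  readfirstlane (readfirstlane x) -> readfirstlane x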

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 1288b70697e639..00ea2505862316 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -1,5 +1,4 @@
-//===-- AMDGPUUniformIntrinsicCombine.cpp
-//-----------------------------------------===//
+//===-- AMDGPUUniformIntrinsicCombine.cpp ---------------------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -9,13 +8,16 @@
 //
 /// \file
 /// This pass combines uniform intrinsic instructions.
-/// Unifrom Intrinsic combine uses pattern match to identify and optimize
-/// redundent intrinsic instruction.
+/// Uniform Intrinsic Combine uses pattern matching to identify and
+/// optimize redundant intrinsic instructions.
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
 #include "GCNSubtarget.h"
 #include "llvm/Analysis/DomTreeUpdater.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/UniformityAnalysis.h"
 #include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/IRBuilder.h"
@@ -42,7 +44,7 @@ class AMDGPUUniformIntrinsicCombine : public FunctionPass {
   bool runOnFunction(Function &F) override;
 
   void getAnalysisUsage(AnalysisUsage &AU) const override {
-    AU.addPreserved<DominatorTreeWrapperPass>();
+    AU.setPreservesCFG();
     AU.addRequired<UniformityInfoWrapperPass>();
     AU.addRequired<TargetPassConfig>();
   }
@@ -53,7 +55,7 @@ class AMDGPUUniformIntrinsicCombineImpl
 private:
   const UniformityInfo *UI;
 
-  void optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
+  bool optimizeUniformIntrinsicInst(IntrinsicInst &II) const;
 
 public:
   AMDGPUUniformIntrinsicCombineImpl() = delete;
@@ -86,8 +88,6 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
   const auto *UI = &AM.getResult<UniformityInfoAnalysis>(F);
 
-  // @todo check if it is required that this method must return bool, if so
-  // figure out what can be returned.
   bool IsChanged = AMDGPUUniformIntrinsicCombineImpl(UI).run(F);
 
   if (!IsChanged) {
@@ -96,14 +96,16 @@ AMDGPUUniformIntrinsicCombinePass::run(Function &F,
 
   PreservedAnalyses PA;
   PA.preserve<DominatorTreeAnalysis>();
+  PA.preserve<LoopAnalysis>();
+  PA.preserve<ScalarEvolutionAnalysis>();
+  PA.preserve<UniformityInfoAnalysis>();
+  PA.preserve<TargetLibraryAnalysis>();
   return PA;
 }
 
 bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
 
-  // @todo check if it is required that this method must return bool, if so
-  // figure out what can be returned.
-  const bool IsChanged{false};
+  bool IsChanged{false};
 
   // Iterate over every instruction in the function, looking for intrinsic
   // calls that may be optimized.
@@ -111,7 +113,7 @@ bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
     for (Instruction &I : BB) {
       if (auto *Call = dyn_cast<CallInst>(&I)) {
         if (auto *Intrinsic = dyn_cast<IntrinsicInst>(Call)) {
-          optimizeUniformIntrinsicInst(*Intrinsic);
+          IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
         }
       }
     }
@@ -120,55 +122,36 @@ bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   return IsChanged;
 }
 
-void AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
+bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
     IntrinsicInst &II) const {
   llvm::Intrinsic::ID IID = II.getIntrinsicID();
 
   switch (IID) {
-  case Intrinsic::amdgcn_permlane64: {
-    Value *Src = II.getOperand(0);
-    if (UI->isUniform(Src)) {
-      return II.replaceAllUsesWith(Src);
-    }
-    break;
-  }
+  case Intrinsic::amdgcn_permlane64:
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
-    Value *Srcv = II.getOperand(0);
-    if (UI->isUniform(Srcv)) {
-      return II.replaceAllUsesWith(Srcv);
-    }
-
-    // The rest of these may not be safe if the exec may not be the same between
-    // the def and use.
     Value *Src = II.getArgOperand(0);
+    // The part below may not be safe if EXEC is not the same between the
+    // def and the use. Is this still required?
     Instruction *SrcInst = dyn_cast<Instruction>(Src);
     if (SrcInst && SrcInst->getParent() != II.getParent())
       break;
 
     // readfirstlane (readfirstlane x) -> readfirstlane x
+    // readfirstlane (readlane x, y) -> readlane x, y
     // readlane (readfirstlane x), y -> readfirstlane x
-    if (match(Src,
-              PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readfirstlane>())) {
-      return II.replaceAllUsesWith(Src);
-    }
-
-    if (IID == Intrinsic::amdgcn_readfirstlane) {
-      // readfirstlane (readlane x, y) -> readlane x, y
-      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>())) {
-        return II.replaceAllUsesWith(Src);
-      }
-    } else {
-      // readlane (readlane x, y), y -> readlane x, y
-      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>(
-                         PatternMatch::m_Value(),
-                         PatternMatch::m_Specific(II.getArgOperand(1))))) {
-        return II.replaceAllUsesWith(Src);
-      }
+    // readlane (readlane x, y), z -> readlane x, y
+    // All of these cases fold the same way: the inner intrinsic writes a
+    // single value to every lane of its result, so the result is uniform
+    // and the outer call can simply be replaced by its operand.
+    if (UI->isUniform(II.getOperandUse(0))) {
+      II.replaceAllUsesWith(Src);
+      return true;
     }
     break;
   }
   }
+  return false;
 }
 
 INITIALIZE_PASS_BEGIN(AMDGPUUniformIntrinsicCombine, DEBUG_TYPE,

From 60b0b140998da703a431135d7ba95833a10daffc Mon Sep 17 00:00:00 2001
From: PankajDwivedi-25 <pankajkumar.divedi at amd.com>
Date: Fri, 22 Nov 2024 20:39:15 +0530
Subject: [PATCH 3/3] removed redundant casting

---
 .../Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp   | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
index 00ea2505862316..b7321f85157507 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUUniformIntrinsicCombine.cpp
@@ -111,10 +111,8 @@ bool AMDGPUUniformIntrinsicCombineImpl::run(Function &F) {
   // calls that may be optimized.
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
-      if (auto *Call = dyn_cast<CallInst>(&I)) {
-        if (auto *Intrinsic = dyn_cast<IntrinsicInst>(Call)) {
-          IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
-        }
+      if (auto *Intrinsic = dyn_cast<IntrinsicInst>(&I)) {
+        IsChanged |= optimizeUniformIntrinsicInst(*Intrinsic);
       }
     }
   }
@@ -131,11 +129,6 @@ bool AMDGPUUniformIntrinsicCombineImpl::optimizeUniformIntrinsicInst(
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
     Value *Src = II.getArgOperand(0);
-    // The part below may not be safe if EXEC is not the same between the
-    // def and the use. Is this still required?
-    Instruction *SrcInst = dyn_cast<Instruction>(Src);
-    if (SrcInst && SrcInst->getParent() != II.getParent())
-      break;
 
     // readfirstlane (readfirstlane x) -> readfirstlane x
     // readfirstlane (readlane x, y) -> readlane x, y
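
The guard removed above had bailed out whenever the operand was defined in
a different block than the call, to be conservative about EXEC changing
between def and use. Under the patch-2 rewrite that caution appears
unnecessary: the fold only fires when the operand is uniform, meaning every
lane carries the same value, so the set of active lanes at the call site
cannot change the result. A hypothetical case (not from the patch) that the
old guard blocked and the uniformity check now handles:

  entry:
    %sum = add i32 %x, 1      ; uniform: %x is a kernel argument
    br i1 %c, label %bb, label %done
  bb:
    ; def (%sum) and use sit in different blocks, so patch 2 bailed out
    ; here; with the guard gone, %v folds to %sum because %sum is uniform.
    %v = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %sum)
    store i32 %v, ptr addrspace(1) %out
    br label %done
  done:
    ret void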



More information about the llvm-commits mailing list