[llvm] [LLVM][AMDGPU] AMDGPUInstCombineIntrinsic for *lane intrinsics (PR #99878)
Acim Maravic via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 22 07:22:37 PDT 2024
https://github.com/Acim-Maravic created https://github.com/llvm/llvm-project/pull/99878
For AMDGCN it would be nice to have UniformityAnalysis in InstCombine pass to allow folding of more lane* intrinsics and other ones in the future. Is this acceptable for other targets?
>From 5fa8e34456217ff2f979a49b7c382779cbcb2a77 Mon Sep 17 00:00:00 2001
From: Acim Maravic <Acim.Maravic at amd.com>
Date: Mon, 22 Jul 2024 14:52:37 +0200
Subject: [PATCH 1/2] [NFC][LLVM][AMDGPU] AMDGPUInstCombineIntrinsic for *lane
intrinsics precommit
---
.../AMDGPU/amdgcn-*lane-intrinsic-combine.ll | 204 ++++++++++++++++++
1 file changed, 204 insertions(+)
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll
new file mode 100644
index 0000000000000..780d7cea8eb4a
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=instcombine -S < %s | FileCheck %s --check-prefixes=GFX,GFX10
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -passes=instcombine -S < %s | FileCheck %s --check-prefixes=GFX,GFX11
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -passes=instcombine -S < %s | FileCheck %s --check-prefixes=GFX,GFX12
+
+define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: @permlane64_constant(
+; GFX-NEXT: store i32 77, ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 77)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: @permlane64_undef(
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 undef)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_sgpr(ptr addrspace(1) %out, i32 %src) {
+; GFX-LABEL: @permlane64_sgpr(
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[SRC:%.*]])
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: @permlane64_vgpr(
+; GFX-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; GFX-NEXT: [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT:%.*]], i64 [[TMP1]]
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: @permlane64_vgpr_expression(
+; GFX-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; GFX-NEXT: [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT:%.*]], i64 [[TMP1]]
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: @readlane_constant(
+; GFX-NEXT: store i32 7, ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: @readlane_undef(
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 undef, i32 undef)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_sgpr(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; GFX-LABEL: @readlane_sgpr(
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0:%.*]], i32 [[SRC1:%.*]])
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: @readlane_vgpr(
+; GFX-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; GFX-NEXT: [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT:%.*]], i64 [[TMP1]]
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readlane_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: @readlane_vgpr_expression(
+; GFX-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; GFX-NEXT: [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; GFX-NEXT: [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; GFX-NEXT: [[TMP1:%.*]] = sext i32 [[TIDX]] to i64
+; GFX-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT:%.*]], i64 [[TMP1]]
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %tidx2 = add i32 %tidx, 1
+ %tidy2 = add i32 %tidy, 2
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx2, i32 %tidy2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
+; GFX-LABEL: @readfirstlane_constant(
+; GFX-NEXT: store i32 7, ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
+; GFX-LABEL: @readfirstlane_undef(
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 undef)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_sgpr(ptr addrspace(1) %out, i32 %src0) {
+; GFX-LABEL: @readfirstlane_sgpr(
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0:%.*]])
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_vgpr(i32 addrspace(1)* %out) {
+; GFX-LABEL: @readfirstlane_vgpr(
+; GFX-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; GFX-NEXT: [[TMP1:%.*]] = sext i32 [[TID]] to i64
+; GFX-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT:%.*]], i64 [[TMP1]]
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_vgpr_expression(i32 addrspace(1)* %out) {
+; GFX-LABEL: @readfirstlane_vgpr_expression(
+; GFX-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; GFX-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; GFX-NEXT: [[TMP1:%.*]] = sext i32 [[TID2]] to i64
+; GFX-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT:%.*]], i64 [[TMP1]]
+; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; GFX-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX10: {{.*}}
+; GFX11: {{.*}}
+; GFX12: {{.*}}
>From d14cd90ef07e108456d8c145cf2c5a2131b73614 Mon Sep 17 00:00:00 2001
From: Acim Maravic <Acim.Maravic at amd.com>
Date: Mon, 22 Jul 2024 15:15:17 +0200
Subject: [PATCH 2/2] [LLVM][AMDGPU] AMDGPUInstCombineIntrinsic for *lane
intrinsics
This patch adds UniformityAnalysis to the InstCombine pass, thereby
enabling the use of UniformityAnalysis instead of just checking
for trivially uniform constants.
---
.../Transforms/InstCombine/InstCombiner.h | 8 +++++--
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 17 ++++++++-------
.../InstCombine/InstCombineInternal.h | 5 +++--
.../InstCombine/InstructionCombining.cpp | 13 ++++++++----
llvm/test/Other/new-pm-defaults.ll | 2 ++
llvm/test/Other/new-pm-lto-defaults.ll | 2 ++
.../Other/new-pm-thinlto-postlink-defaults.ll | 2 ++
.../new-pm-thinlto-postlink-pgo-defaults.ll | 2 ++
...-pm-thinlto-postlink-samplepgo-defaults.ll | 2 ++
.../Other/new-pm-thinlto-prelink-defaults.ll | 2 ++
.../new-pm-thinlto-prelink-pgo-defaults.ll | 4 ++++
...w-pm-thinlto-prelink-samplepgo-defaults.ll | 2 ++
.../AMDGPU/amdgcn-*lane-intrinsic-combine.ll | 6 ++----
.../InstCombine/AMDGPU/amdgcn-intrinsics.ll | 21 +++++++------------
14 files changed, 55 insertions(+), 33 deletions(-)
diff --git a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
index ebcbd5d9e8880..1b949729bf90e 100644
--- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
+++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h
@@ -21,6 +21,7 @@
#include "llvm/Analysis/DomConditionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
@@ -79,6 +80,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
BranchProbabilityInfo *BPI;
ProfileSummaryInfo *PSI;
DomConditionCache DC;
+ UniformityInfo &UI;
// Optional analyses. When non-null, these can both be used to do better
// combining and will be updated to reflect any changes.
@@ -98,12 +100,13 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
DominatorTree &DT, OptimizationRemarkEmitter &ORE,
BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI,
- ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
+ ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI,
+ UniformityInfo &UI)
: TTI(TTI), Builder(Builder), Worklist(Worklist),
MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
SQ(DL, &TLI, &DT, &AC, nullptr, /*UseInstrInfo*/ true,
/*CanUseUndef*/ true, &DC),
- ORE(ORE), BFI(BFI), BPI(BPI), PSI(PSI), LI(LI) {}
+ ORE(ORE), BFI(BFI), BPI(BPI), PSI(PSI), UI(UI), LI(LI) {}
virtual ~InstCombiner() = default;
@@ -345,6 +348,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner {
}
BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }
+ UniformityInfo &getUniformityInfo() const { return UI; }
LoopInfo *getLoopInfo() const { return LI; }
// Call target specific combiners
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 9197404309663..5da4ba62a08a7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1059,17 +1059,20 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
return IC.replaceOperand(II, 0, UndefValue::get(VDstIn->getType()));
}
- case Intrinsic::amdgcn_permlane64:
- // A constant value is trivially uniform.
- if (Constant *C = dyn_cast<Constant>(II.getArgOperand(0))) {
- return IC.replaceInstUsesWith(II, C);
+ case Intrinsic::amdgcn_permlane64: {
+ UniformityInfo &UI = IC.getUniformityInfo();
+ Value *Src = II.getOperand(0);
+ if (UI.isUniform(Src)) {
+ return IC.replaceInstUsesWith(II, Src);
}
break;
+ }
case Intrinsic::amdgcn_readfirstlane:
case Intrinsic::amdgcn_readlane: {
- // A constant value is trivially uniform.
- if (Constant *C = dyn_cast<Constant>(II.getArgOperand(0))) {
- return IC.replaceInstUsesWith(II, C);
+ UniformityInfo &UI = IC.getUniformityInfo();
+ Value *Src = II.getOperand(0);
+ if (UI.isUniform(Src)) {
+ return IC.replaceInstUsesWith(II, Src);
}
// The rest of these may not be safe if the exec may not be the same between
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 64fbcc80e0edf..7926783827330 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -68,9 +68,10 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
DominatorTree &DT, OptimizationRemarkEmitter &ORE,
BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI,
- ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI)
+ ProfileSummaryInfo *PSI, const DataLayout &DL, LoopInfo *LI,
+ UniformityInfo &UI)
: InstCombiner(Worklist, Builder, MinimizeSize, AA, AC, TLI, TTI, DT, ORE,
- BFI, BPI, PSI, DL, LI) {}
+ BFI, BPI, PSI, DL, LI, UI) {}
virtual ~InstCombinerImpl() = default;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 0d8e7e92c5c8e..e402906b6c064 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -55,6 +55,7 @@
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
@@ -5380,7 +5381,7 @@ static bool combineInstructionsOverFunction(
AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, LoopInfo *LI,
- const InstCombineOptions &Opts) {
+ UniformityInfo &UI, const InstCombineOptions &Opts) {
auto &DL = F.getDataLayout();
/// Builder - This is an IRBuilder that automatically inserts new
@@ -5418,7 +5419,7 @@ static bool combineInstructionsOverFunction(
<< F.getName() << "\n");
InstCombinerImpl IC(Worklist, Builder, F.hasMinSize(), AA, AC, TLI, TTI, DT,
- ORE, BFI, BPI, PSI, DL, LI);
+ ORE, BFI, BPI, PSI, DL, LI, UI);
IC.MaxArraySizeForCombine = MaxArraySize;
bool MadeChangeInThisIteration = IC.prepareWorklist(F, RPOT);
MadeChangeInThisIteration |= IC.run();
@@ -5466,6 +5467,7 @@ PreservedAnalyses InstCombinePass::run(Function &F,
auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
auto &TTI = AM.getResult<TargetIRAnalysis>(F);
+ auto &UI = AM.getResult<UniformityInfoAnalysis>(F);
// TODO: Only use LoopInfo when the option is set. This requires that the
// callers in the pass pipeline explicitly set the option.
@@ -5482,7 +5484,7 @@ PreservedAnalyses InstCombinePass::run(Function &F,
auto *BPI = AM.getCachedResult<BranchProbabilityAnalysis>(F);
if (!combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
- BFI, BPI, PSI, LI, Options))
+ BFI, BPI, PSI, LI, UI, Options))
// No changes, all analyses are preserved.
return PreservedAnalyses::all();
@@ -5505,6 +5507,7 @@ void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<BasicAAWrapperPass>();
AU.addPreserved<GlobalsAAWrapperPass>();
AU.addRequired<ProfileSummaryInfoWrapperPass>();
+ AU.addRequired<UniformityInfoWrapperPass>();
LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
}
@@ -5519,6 +5522,7 @@ bool InstructionCombiningPass::runOnFunction(Function &F) {
auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
+ auto &UI = getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
// Optional analyses.
auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
@@ -5535,7 +5539,7 @@ bool InstructionCombiningPass::runOnFunction(Function &F) {
BPI = &WrapperPass->getBPI();
return combineInstructionsOverFunction(F, Worklist, AA, AC, TLI, TTI, DT, ORE,
- BFI, BPI, PSI, LI,
+ BFI, BPI, PSI, LI, UI,
InstCombineOptions());
}
@@ -5556,6 +5560,7 @@ INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
"Combine redundant instructions", false, false)
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 588337c15625e..5c9001355ba0c 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -119,6 +119,8 @@
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: AAManager
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll
index d451d2897f673..99014bd48f167 100644
--- a/llvm/test/Other/new-pm-lto-defaults.ll
+++ b/llvm/test/Other/new-pm-lto-defaults.ll
@@ -67,6 +67,8 @@
; CHECK-O23SZ-NEXT: Running pass: ConstantMergePass
; CHECK-O23SZ-NEXT: Running pass: DeadArgumentEliminationPass
; CHECK-O23SZ-NEXT: Running pass: InstCombinePass
+; CHECK-O23SZ-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O23SZ-NEXT: Running analysis: CycleAnalysis
; CHECK-O23SZ-NEXT: Running pass: AggressiveInstCombinePass
; CHECK-EP-Peephole-NEXT: Running pass: NoOpFunctionPass
; CHECK-O23SZ-NEXT: Running pass: ModuleInlinerWrapperPass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index 064362eabbf83..d08865291e381 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -55,6 +55,8 @@
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: AAManager
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index 19a44867e434a..920708564a101 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -40,6 +40,8 @@
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: AAManager
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index e5aebc4850e6d..617873f1a5d83 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -48,6 +48,8 @@
; CHECK-O-NEXT: Running analysis: TargetLibraryAnalysis
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: AAManager on foo
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
index 42ef49f8f7c7e..6218c793c782a 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-defaults.ll
@@ -87,6 +87,8 @@
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: AAManager
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
index e74f88c1a3bf9..79ff01d9df9b5 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-pgo-defaults.ll
@@ -51,6 +51,8 @@
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: AAManager
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
@@ -121,6 +123,8 @@
; CHECK-O23SZ-NEXT: Invalidating analysis: LazyValueAnalysis
; CHECK-O-NEXT: Running pass: SimplifyCFGPass
; CHECK-O-NEXT: Running pass: InstCombinePass
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis
+; CHECK-O-NEXT: Running analysis: CycleAnalysis
; CHECK-O-NEXT: Running analysis: BlockFrequencyAnalysis on foo
; CHECK-O-NEXT: Running analysis: BranchProbabilityAnalysis on foo
; CHECK-O-NEXT: Running analysis: LoopAnalysis on foo
diff --git a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
index 0bb26330d000a..d8dd7b6a50468 100644
--- a/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-prelink-samplepgo-defaults.ll
@@ -54,6 +54,8 @@
; CHECK-O-NEXT: Running pass: PromotePass
; CHECK-O-NEXT: Running pass: InstCombinePass
; CHECK-O-NEXT: Running analysis: OptimizationRemarkEmitterAnalysis on foo
+; CHECK-O-NEXT: Running analysis: UniformityInfoAnalysis on foo
+; CHECK-O-NEXT: Running analysis: CycleAnalysis on foo
; CHECK-O-NEXT: Running analysis: AAManager on foo
; CHECK-O-NEXT: Running analysis: BasicAA
; CHECK-O-NEXT: Running analysis: ScopedNoAliasAA
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll
index 780d7cea8eb4a..f33356ab88be0 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-*lane-intrinsic-combine.ll
@@ -86,8 +86,7 @@ define amdgpu_kernel void @readlane_undef(ptr addrspace(1) %out) {
define amdgpu_kernel void @readlane_sgpr(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
; GFX-LABEL: @readlane_sgpr(
-; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[SRC0:%.*]], i32 [[SRC1:%.*]])
-; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: store i32 [[SRC0:%.*]], ptr addrspace(1) [[OUT:%.*]], align 4
; GFX-NEXT: ret void
;
%v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
@@ -156,8 +155,7 @@ define amdgpu_kernel void @readfirstlane_undef(ptr addrspace(1) %out) {
define amdgpu_kernel void @readfirstlane_sgpr(ptr addrspace(1) %out, i32 %src0) {
; GFX-LABEL: @readfirstlane_sgpr(
-; GFX-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[SRC0:%.*]])
-; GFX-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT:%.*]], align 4
+; GFX-NEXT: store i32 [[SRC0:%.*]], ptr addrspace(1) [[OUT:%.*]], align 4
; GFX-NEXT: ret void
;
%v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
index 9cb79b2644865..0bf03c978e1f7 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/amdgcn-intrinsics.ll
@@ -2768,8 +2768,7 @@ declare i32 @llvm.amdgcn.readfirstlane(i32)
define amdgpu_kernel void @readfirstlane_constant(i32 %arg) {
; CHECK-LABEL: @readfirstlane_constant(
-; CHECK-NEXT: [[VAR:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
-; CHECK-NEXT: store volatile i32 [[VAR]], ptr undef, align 4
+; CHECK-NEXT: store volatile i32 [[ARG:%.*]], ptr undef, align 4
; CHECK-NEXT: store volatile i32 0, ptr undef, align 4
; CHECK-NEXT: store volatile i32 123, ptr undef, align 4
; CHECK-NEXT: store volatile i32 ptrtoint (ptr @gv to i32), ptr undef, align 4
@@ -2816,8 +2815,7 @@ define i32 @readfirstlane_readfirstlane_different_block(i32 %arg) {
; CHECK-NEXT: [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[READ1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[READ0]])
-; CHECK-NEXT: ret i32 [[READ1]]
+; CHECK-NEXT: ret i32 [[READ0]]
;
bb0:
%read0 = call i32 @llvm.amdgcn.readfirstlane(i32 %arg)
@@ -2834,8 +2832,7 @@ define i32 @readfirstlane_readlane_different_block(i32 %arg) {
; CHECK-NEXT: [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 0)
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[READ1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[READ0]])
-; CHECK-NEXT: ret i32 [[READ1]]
+; CHECK-NEXT: ret i32 [[READ0]]
;
bb0:
%read0 = call i32 @llvm.amdgcn.readlane(i32 %arg, i32 0)
@@ -2854,8 +2851,7 @@ declare i32 @llvm.amdgcn.readlane(i32, i32)
define amdgpu_kernel void @readlane_constant(i32 %arg, i32 %lane) {
; CHECK-LABEL: @readlane_constant(
-; CHECK-NEXT: [[VAR:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 7)
-; CHECK-NEXT: store volatile i32 [[VAR]], ptr undef, align 4
+; CHECK-NEXT: store volatile i32 [[ARG:%.*]], ptr undef, align 4
; CHECK-NEXT: store volatile i32 0, ptr undef, align 4
; CHECK-NEXT: store volatile i32 123, ptr undef, align 4
; CHECK-NEXT: store volatile i32 ptrtoint (ptr @gv to i32), ptr undef, align 4
@@ -2888,8 +2884,7 @@ define i32 @readlane_idempotent(i32 %arg, i32 %lane) {
define i32 @readlane_idempotent_different_lanes(i32 %arg, i32 %lane0, i32 %lane1) {
; CHECK-LABEL: @readlane_idempotent_different_lanes(
; CHECK-NEXT: [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 [[LANE0:%.*]])
-; CHECK-NEXT: [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[READ0]], i32 [[LANE1:%.*]])
-; CHECK-NEXT: ret i32 [[READ1]]
+; CHECK-NEXT: ret i32 [[READ0]]
;
%read0 = call i32 @llvm.amdgcn.readlane(i32 %arg, i32 %lane0)
%read1 = call i32 @llvm.amdgcn.readlane(i32 %read0, i32 %lane1)
@@ -2912,8 +2907,7 @@ define i32 @readlane_idempotent_different_block(i32 %arg, i32 %lane) {
; CHECK-NEXT: [[READ0:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[ARG:%.*]], i32 [[LANE:%.*]])
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[READ0]], i32 [[LANE]])
-; CHECK-NEXT: ret i32 [[READ1]]
+; CHECK-NEXT: ret i32 [[READ0]]
;
bb0:
%read0 = call i32 @llvm.amdgcn.readlane(i32 %arg, i32 %lane)
@@ -2931,8 +2925,7 @@ define i32 @readlane_readfirstlane_different_block(i32 %arg) {
; CHECK-NEXT: [[READ0:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[ARG:%.*]])
; CHECK-NEXT: br label [[BB1:%.*]]
; CHECK: bb1:
-; CHECK-NEXT: [[READ1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[READ0]], i32 0)
-; CHECK-NEXT: ret i32 [[READ1]]
+; CHECK-NEXT: ret i32 [[READ0]]
;
bb0:
%read0 = call i32 @llvm.amdgcn.readfirstlane(i32 %arg)
More information about the llvm-commits
mailing list