[llvm] [AMDGPU] Add shuffle optimizer pass (PR #155824)
Aleksandar Spasojevic via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 28 05:10:27 PDT 2025
https://github.com/aleksandar-amd created https://github.com/llvm/llvm-project/pull/155824
Introduces an AMDGPUShuffleOptimizer pass that detects constant shuffle patterns and replaces the generic shuffle intrinsics with hardware-specific instructions (DPP, PERMLANE*), falling back to DS_BPERMUTE_B32 for unmatched patterns.
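
For a concrete picture (illustrative only, not part of the patch): a
CUDA-style __shfl_xor(val, 1) reaches this pass as

  %r = call i32 @llvm.amdgcn.generic.shuffle.xor.i32(i32 %val, i32 1, i32 32)

and is rewritten to a single DPP quad-perm via llvm.amdgcn.update.dpp
(ctrl 0xB1 = quad_perm:[1,0,3,2], full row/bank masks, bound_ctrl off):

  %r = call i32 @llvm.amdgcn.update.dpp.i32(i32 poison, i32 %val, i32 177,
                                            i32 15, i32 15, i1 false)

Non-constant or unmatched inputs instead go through ds_bpermute_b32; the new
test exercises both paths with llc -mtriple=amdgcn -mcpu=gfx1010.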
From fc257d478d1be8033163def58cab5a516d58f2a6 Mon Sep 17 00:00:00 2001
From: Aleksandar Spasojevic <aleksandar.spasojevic at amd.com>
Date: Thu, 28 Aug 2025 13:32:33 +0200
Subject: [PATCH] [AMDGPU] Add shuffle optimizer pass
Introduces an AMDGPUShuffleOptimizer pass that detects constant shuffle
patterns and replaces generic shuffle intrinsics with hardware-specific
instructions.
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 32 +-
llvm/lib/Target/AMDGPU/AMDGPU.h | 10 +
.../Target/AMDGPU/AMDGPUShuffleOptimizer.cpp | 475 ++++++++++++++++++
.../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 3 +
llvm/lib/Target/AMDGPU/CMakeLists.txt | 1 +
llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll | 4 +-
llvm/test/CodeGen/AMDGPU/llc-pipeline.ll | 4 +
.../CodeGen/AMDGPU/shuffle-optimizer-basic.ll | 92 ++++
8 files changed, 615 insertions(+), 6 deletions(-)
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUShuffleOptimizer.cpp
create mode 100644 llvm/test/CodeGen/AMDGPU/shuffle-optimizer-basic.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index a4d4adae580d0..f92559b4ed68d 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3095,10 +3095,6 @@ def int_amdgcn_wmma_f32_16x16x16_bf8_bf8 : AMDGPUWmmaIntrinsic<llvm_anyint_ty, l
// A and B are <16 x iu4>.
def int_amdgcn_wmma_i32_16x16x32_iu4 : AMDGPUWmmaIntrinsicIU<llvm_anyint_ty, llvm_anyint_ty>;
-def int_amdgcn_swmmac_f32_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
-def int_amdgcn_swmmac_f32_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
-def int_amdgcn_swmmac_f16_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
-def int_amdgcn_swmmac_bf16_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_i32_16x16x32_iu8 : AMDGPUSWmmacIntrinsicIUIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_i32_16x16x32_iu4 : AMDGPUSWmmacIntrinsicIUIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_i32_16x16x64_iu4 : AMDGPUSWmmacIntrinsicIUIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
@@ -3665,6 +3661,30 @@ def int_amdgcn_permlane32_swap :
[IntrNoMem, IntrConvergent, IntrWillReturn,
ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, IntrNoCallback, IntrNoFree]>;
+//===----------------------------------------------------------------------===//
+// Generic shuffle intrinsics (for CUDA/HIP and SPIR-V compatibility)
+//===----------------------------------------------------------------------===//
+
+def int_amdgcn_generic_shuffle :
+ Intrinsic<[llvm_any_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
+
+def int_amdgcn_generic_shuffle_up :
+ Intrinsic<[llvm_any_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
+
+def int_amdgcn_generic_shuffle_down :
+ Intrinsic<[llvm_any_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
+
+def int_amdgcn_generic_shuffle_xor :
+ Intrinsic<[llvm_any_ty],
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
+
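+// All four take (%value, %lane_or_delta_or_mask, %segment_width); the pass in
+// AMDGPUShuffleOptimizer.cpp pattern-matches constant operands onto
+// DPP/PERMLANE and otherwise lowers to ds_bpermute. Illustrative use,
+// mirroring the CUDA __shfl_* family:
+//   %r = call i32 @llvm.amdgcn.generic.shuffle.xor.i32(i32 %v, i32 1, i32 32)
+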
// llvm.amdgcn.ashr_pk_i8_i32 int vdst, int src0, int src1 int src2
def int_amdgcn_ashr_pk_i8_i32 : ClangBuiltin<"__builtin_amdgcn_ashr_pk_i8_i32">,
DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
@@ -4061,6 +4081,10 @@ class AMDGPUSWmmacIntrinsicABIdx<LLVMType A, LLVMType B, LLVMType CD, LLVMType I
>;
defset list<Intrinsic> AMDGPUSWMMACIntrinsicsGFX1250 = {
+def int_amdgcn_swmmac_f32_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
+def int_amdgcn_swmmac_f32_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
+def int_amdgcn_swmmac_f16_16x16x32_f16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
+def int_amdgcn_swmmac_bf16_16x16x32_bf16 : AMDGPUSWmmacIntrinsicIdx<llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f32_16x16x64_f16 : AMDGPUSWmmacIntrinsicABIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f32_16x16x64_bf16 : AMDGPUSWmmacIntrinsicABIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
def int_amdgcn_swmmac_f16_16x16x64_f16 : AMDGPUSWmmacIntrinsicABIdx<llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyfloat_ty, llvm_anyint_ty>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 0059a862ba9b2..f5b10969dc347 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -58,6 +58,16 @@ ModulePass *createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *);
FunctionPass *createAMDGPUCodeGenPreparePass();
FunctionPass *createAMDGPULateCodeGenPrepareLegacyPass();
FunctionPass *createAMDGPUReserveWWMRegsPass();
+FunctionPass *createAMDGPUShuffleOptimizerPass();
+void initializeAMDGPUShuffleOptimizerPass(PassRegistry &);
+
+struct AMDGPUShuffleOptimizerPass : PassInfoMixin<AMDGPUShuffleOptimizerPass> {
+ AMDGPUShuffleOptimizerPass(TargetMachine &TM) : TM(TM) {}
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+ TargetMachine &TM;
+};
FunctionPass *createAMDGPURewriteOutArgumentsPass();
ModulePass *
createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM = nullptr);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUShuffleOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUShuffleOptimizer.cpp
new file mode 100644
index 0000000000000..350aacab1c017
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUShuffleOptimizer.cpp
@@ -0,0 +1,475 @@
+//===-- AMDGPUShuffleOptimizer.cpp - Optimize shuffle patterns -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass optimizes generic shuffle intrinsics by detecting constant
+// patterns and replacing them with efficient hardware-specific instructions
+// (DPP, PERMLANE*, etc.) when beneficial, falling back to DS_BPERMUTE_B32
+// for unmatched patterns.
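+//
+// With constant operands, the analyze* helpers below map, for example,
+// shuffle_up/down within 16-lane rows onto DPP row shifts, xor-by-1 within
+// 32 lanes onto a quad_perm swap, and xor-by-32 on wave64 (GFX11+) onto
+// V_PERMLANE64_B32; everything else lowers to DS_BPERMUTE_B32.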
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "GCNSubtarget.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-shuffle-optimizer"
+
+static cl::opt<bool>
+ EnableShuffleOptimization("amdgpu-enable-shuffle-optimization",
+ cl::desc("Enable AMDGPU shuffle optimization"),
+ cl::init(true), cl::Hidden);
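+
+// For triage, the rewrite can be switched off with
+// -amdgpu-enable-shuffle-optimization=false (hidden llc option).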
+
+namespace {
+
+// Represents a detected shuffle pattern that can be optimized
+struct ShufflePattern {
+ enum PatternKind {
+ DPP_QUAD_PERM, // DPP quad permutation
+ DPP_ROW_SHL, // DPP row shift left
+ DPP_ROW_SHR, // DPP row shift right
+ DPP_WAVE_SHL, // DPP wave shift left
+ DPP_WAVE_SHR, // DPP wave shift right
+ PERMLANE16, // V_PERMLANE16_B32
+ PERMLANEX16, // V_PERMLANEX16_B32
+ PERMLANE64, // V_PERMLANE64_B32
+ DS_BPERMUTE, // Fallback to DS_BPERMUTE_B32
+ UNSUPPORTED // Cannot be optimized
+ };
+
+ PatternKind Kind;
+ uint32_t DPPCtrl = 0; // DPP control value
+ uint32_t RowMask = 0xf; // DPP row mask
+ uint32_t BankMask = 0xf; // DPP bank mask
+ bool BoundCtrl = false; // DPP bound control
+
+ ShufflePattern() : Kind(UNSUPPORTED) {}
+};
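+
+// DPP control encodings below follow SIDefines.h (DppCtrl): quad_perm
+// patterns occupy 0x00-0xFF, row_shl:N is 0x100+N, row_shr:N is 0x110+N, and
+// the wave-wide shifts wave_shl1/wave_shr1 are 0x130/0x138.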
+
+class AMDGPUShuffleOptimizer : public FunctionPass {
+public:
+ static char ID;
+
+ AMDGPUShuffleOptimizer() : FunctionPass(ID) {}
+
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetPassConfig>();
+ AU.setPreservesCFG();
+ }
+
+ static bool runShuffleOptimizer(Function &F, const GCNSubtarget &ST);
+
+private:
+ const GCNSubtarget *ST = nullptr;
+
+ bool optimizeShuffleIntrinsic(CallInst *CI);
+ ShufflePattern analyzeShufflePattern(CallInst *CI);
+ ShufflePattern analyzeShuffleIdx(int Width, int Offset);
+ ShufflePattern analyzeShuffleUp(int Width, int Delta);
+ ShufflePattern analyzeShuffleDown(int Width, int Delta);
+ ShufflePattern analyzeShuffleXor(int Width, int Mask);
+
+ bool tryOptimizeToDPP(CallInst *CI, const ShufflePattern &Pattern);
+ bool tryOptimizeToPermlane(CallInst *CI, const ShufflePattern &Pattern);
+ bool fallbackToBpermute(CallInst *CI);
+
+ Value *createDPPIntrinsic(IRBuilder<> &Builder, Value *OldVal, Value *SrcVal,
+ const ShufflePattern &Pattern);
+ Value *createPermlaneIntrinsic(IRBuilder<> &Builder, Value *Val,
+ const ShufflePattern &Pattern);
+ Value *createBpermuteIntrinsic(IRBuilder<> &Builder, Value *Val,
+ Value *Index);
+
+ bool processShuffleIntrinsics(Function &F);
+};
+
+char AMDGPUShuffleOptimizer::ID = 0;
+
+bool AMDGPUShuffleOptimizer::runOnFunction(Function &F) {
+ if (!EnableShuffleOptimization)
+ return false;
+
+ auto &TPC = getAnalysis<TargetPassConfig>();
+ const TargetMachine &TM = TPC.getTM<TargetMachine>();
+ ST = &TM.getSubtarget<GCNSubtarget>(F);
+
+ return processShuffleIntrinsics(F);
+}
+
+bool AMDGPUShuffleOptimizer::optimizeShuffleIntrinsic(CallInst *CI) {
+ ShufflePattern Pattern = analyzeShufflePattern(CI);
+
+ if (Pattern.Kind == ShufflePattern::UNSUPPORTED)
+ return fallbackToBpermute(CI);
+
+ switch (Pattern.Kind) {
+ case ShufflePattern::DPP_QUAD_PERM:
+ case ShufflePattern::DPP_ROW_SHL:
+ case ShufflePattern::DPP_ROW_SHR:
+ case ShufflePattern::DPP_WAVE_SHL:
+ case ShufflePattern::DPP_WAVE_SHR:
+ return tryOptimizeToDPP(CI, Pattern);
+
+ case ShufflePattern::PERMLANE16:
+ case ShufflePattern::PERMLANEX16:
+ case ShufflePattern::PERMLANE64:
+ return tryOptimizeToPermlane(CI, Pattern);
+
+ case ShufflePattern::DS_BPERMUTE:
+ return fallbackToBpermute(CI);
+
+ default:
+ return false;
+ }
+}
+
+ShufflePattern AMDGPUShuffleOptimizer::analyzeShufflePattern(CallInst *CI) {
+ auto *II = cast<IntrinsicInst>(CI);
+
+ // Get width parameter (must be constant)
+ auto *WidthConst = dyn_cast<ConstantInt>(CI->getArgOperand(2));
+ if (!WidthConst)
+ return ShufflePattern();
+
+ int Width = WidthConst->getSExtValue();
+
+ // Get offset/delta/mask parameter (must be constant for pattern optimization)
+ auto *ParamConst = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ if (!ParamConst)
+ return ShufflePattern();
+
+ int Param = ParamConst->getSExtValue();
+
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::amdgcn_generic_shuffle:
+ return analyzeShuffleIdx(Width, Param);
+ case Intrinsic::amdgcn_generic_shuffle_up:
+ return analyzeShuffleUp(Width, Param);
+ case Intrinsic::amdgcn_generic_shuffle_down:
+ return analyzeShuffleDown(Width, Param);
+ case Intrinsic::amdgcn_generic_shuffle_xor:
+ return analyzeShuffleXor(Width, Param);
+ default:
+ return ShufflePattern();
+ }
+}
+
+ShufflePattern AMDGPUShuffleOptimizer::analyzeShuffleIdx(int Width,
+ int Offset) {
+ // For idx shuffle, all lanes read from the same offset
+ if (Offset == 0) {
+ // Broadcast from lane 0 - use DPP if supported, otherwise bpermute
+ if (ST->hasDPP() && Width == 16) {
+ // Use DPP quad permutation to broadcast lane 0 within each group of 4
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DPP_QUAD_PERM;
+ Pattern.DPPCtrl = 0x00; // [0,0,0,0] quad perm - broadcast lane 0
+ return Pattern;
+ } else {
+ // Fall back to bpermute for other cases
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DS_BPERMUTE;
+ return Pattern;
+ }
+ }
+
+ // For other constant broadcasts, fall back to bpermute
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DS_BPERMUTE;
+ return Pattern;
+}
+
+ShufflePattern AMDGPUShuffleOptimizer::analyzeShuffleUp(int Width, int Delta) {
+ if (Width == 32 && Delta == 1) {
+ // Simple wave shift up by 1 - can use DPP
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DPP_WAVE_SHL;
+    Pattern.DPPCtrl = 0x130; // WAVE_SHL1 (row_shl would stop at row bounds)
+ return Pattern;
+ }
+
+  if (Width == 16 && Delta >= 1 && Delta <= 15) {
+ // Row shift within 16 lanes - can use DPP row shift
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DPP_ROW_SHL;
+ Pattern.DPPCtrl = 0x100 + Delta; // ROW_SHL0 + delta
+ return Pattern;
+ }
+
+ // Check for permlane patterns
+ if (Width == 16) {
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::PERMLANE16;
+ return Pattern;
+ }
+
+ // Fall back to bpermute
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DS_BPERMUTE;
+ return Pattern;
+}
+
+ShufflePattern AMDGPUShuffleOptimizer::analyzeShuffleDown(int Width,
+ int Delta) {
+ if (Width == 32 && Delta == 1) {
+ // Simple wave shift down by 1 - can use DPP
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DPP_WAVE_SHR;
+    Pattern.DPPCtrl = 0x138; // WAVE_SHR1 (row_shr would stop at row bounds)
+ return Pattern;
+ }
+
+  if (Width == 16 && Delta >= 1 && Delta <= 15) {
+ // Row shift within 16 lanes - can use DPP row shift
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DPP_ROW_SHR;
+ Pattern.DPPCtrl = 0x110 + Delta; // ROW_SHR0 + delta
+ return Pattern;
+ }
+
+ // Check for permlane patterns
+ if (Width == 16) {
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::PERMLANE16;
+ return Pattern;
+ }
+
+ // Fall back to bpermute
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DS_BPERMUTE;
+ return Pattern;
+}
+
+ShufflePattern AMDGPUShuffleOptimizer::analyzeShuffleXor(int Width, int Mask) {
+ // XOR with mask 1 within quads - can use DPP quad permutation
+ if (Width == 32 && Mask == 1) {
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DPP_QUAD_PERM;
+    Pattern.DPPCtrl = 0xB1; // quad_perm:[1,0,3,2] - swap adjacent lanes
+ return Pattern;
+ }
+
+ // XOR with mask 16 - cross-row exchange within 32 lanes
+ if (Width == 32 && Mask == 16) {
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::PERMLANE16;
+ return Pattern;
+ }
+
+ // XOR with mask 32 - cross-half exchange for Wave64 on GFX11+
+ if (Width == 64 && Mask == 32) {
+ if (ST->getGeneration() >= AMDGPUSubtarget::GFX11) {
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::PERMLANE64;
+ return Pattern;
+ } else {
+ // GFX10 doesn't have PERMLANE64, fall back to bpermute
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DS_BPERMUTE;
+ return Pattern;
+ }
+ }
+
+ // Fall back to bpermute for other patterns
+ ShufflePattern Pattern;
+ Pattern.Kind = ShufflePattern::DS_BPERMUTE;
+ return Pattern;
+}
+
+bool AMDGPUShuffleOptimizer::tryOptimizeToDPP(CallInst *CI,
+ const ShufflePattern &Pattern) {
+ IRBuilder<> Builder(CI);
+ Value *SrcVal = CI->getArgOperand(0);
+ Value *OldVal = PoisonValue::get(SrcVal->getType());
+
+ Value *DPPResult = createDPPIntrinsic(Builder, OldVal, SrcVal, Pattern);
+
+ CI->replaceAllUsesWith(DPPResult);
+ CI->eraseFromParent();
+
+ return true;
+}
+
+bool AMDGPUShuffleOptimizer::tryOptimizeToPermlane(
+ CallInst *CI, const ShufflePattern &Pattern) {
+ IRBuilder<> Builder(CI);
+ Value *Val = CI->getArgOperand(0);
+
+ Value *PermlaneResult = createPermlaneIntrinsic(Builder, Val, Pattern);
+
+ CI->replaceAllUsesWith(PermlaneResult);
+ CI->eraseFromParent();
+
+ return true;
+}
+
+bool AMDGPUShuffleOptimizer::fallbackToBpermute(CallInst *CI) {
+ IRBuilder<> Builder(CI);
+ Value *Val = CI->getArgOperand(0);
+
+  // Compute the lane ID: mbcnt counts set bits of the mask below the current
+  // lane, so an all-ones mask yields the lane index.
+ Value *LaneId =
+ Builder.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_lo, {},
+ {Builder.getInt32(-1), Builder.getInt32(0)});
+ if (ST->isWave64()) {
+ Value *LaneIdHi = Builder.CreateIntrinsic(Intrinsic::amdgcn_mbcnt_hi, {},
+ {Builder.getInt32(-1), LaneId});
+ LaneId = LaneIdHi;
+ }
+
+ // Calculate target lane based on shuffle type
+ Value *TargetLane = nullptr;
+ auto *II = cast<IntrinsicInst>(CI);
+ Value *Param = CI->getArgOperand(1);
+
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::amdgcn_generic_shuffle:
+ TargetLane = Param;
+ break;
+ case Intrinsic::amdgcn_generic_shuffle_up:
+ TargetLane = Builder.CreateSub(LaneId, Param);
+ break;
+ case Intrinsic::amdgcn_generic_shuffle_down:
+ TargetLane = Builder.CreateAdd(LaneId, Param);
+ break;
+ case Intrinsic::amdgcn_generic_shuffle_xor:
+ TargetLane = Builder.CreateXor(LaneId, Param);
+ break;
+ default:
+ return false;
+ }
+
+  // ds_bpermute selects source lanes by byte address, so scale the lane
+  // index by 4 (one dword per lane).
+ Value *ByteIndex = Builder.CreateShl(TargetLane, 2);
+
+ Value *BpermuteResult = createBpermuteIntrinsic(Builder, Val, ByteIndex);
+
+ CI->replaceAllUsesWith(BpermuteResult);
+ CI->eraseFromParent();
+
+ return true;
+}
+
+Value *
+AMDGPUShuffleOptimizer::createDPPIntrinsic(IRBuilder<> &Builder, Value *OldVal,
+ Value *SrcVal,
+ const ShufflePattern &Pattern) {
+ return Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_update_dpp, {SrcVal->getType()},
+ {OldVal, SrcVal, Builder.getInt32(Pattern.DPPCtrl),
+ Builder.getInt32(Pattern.RowMask), Builder.getInt32(Pattern.BankMask),
+ Builder.getInt1(Pattern.BoundCtrl)});
+}
+
+Value *AMDGPUShuffleOptimizer::createPermlaneIntrinsic(
+ IRBuilder<> &Builder, Value *Val, const ShufflePattern &Pattern) {
+ switch (Pattern.Kind) {
+ case ShufflePattern::PERMLANE16:
+ return Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_permlane16, {Val->getType()},
+ {PoisonValue::get(Val->getType()), Val, Builder.getInt32(0),
+ Builder.getInt32(0), Builder.getInt1(false), Builder.getInt1(false)});
+
+ case ShufflePattern::PERMLANEX16:
+ return Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_permlanex16, {Val->getType()},
+ {PoisonValue::get(Val->getType()), Val, Builder.getInt32(0),
+ Builder.getInt32(0), Builder.getInt1(false), Builder.getInt1(false)});
+
+ case ShufflePattern::PERMLANE64:
+ return Builder.CreateIntrinsic(Intrinsic::amdgcn_permlane64,
+ {Val->getType()}, {Val});
+ default:
+ llvm_unreachable("Invalid permlane pattern");
+ }
+}
+
+Value *AMDGPUShuffleOptimizer::createBpermuteIntrinsic(IRBuilder<> &Builder,
+ Value *Val,
+ Value *Index) {
+  // ds_bpermute moves 32-bit lanes, so bitcast the value to i32 first. (This
+  // assumes a 32-bit payload; other widths would need separate legalization.)
+  Type *OrigType = Val->getType();
+ Value *I32Val = Val;
+
+ if (OrigType != Builder.getInt32Ty())
+ I32Val = Builder.CreateBitCast(Val, Builder.getInt32Ty());
+
+ Value *Result = Builder.CreateIntrinsic(Intrinsic::amdgcn_ds_bpermute, {},
+ {Index, I32Val});
+
+ if (OrigType != Builder.getInt32Ty())
+ Result = Builder.CreateBitCast(Result, OrigType);
+
+ return Result;
+}
+
+} // end anonymous namespace
+
+INITIALIZE_PASS_BEGIN(AMDGPUShuffleOptimizer, DEBUG_TYPE,
+ "AMDGPU Shuffle Optimizer", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(AMDGPUShuffleOptimizer, DEBUG_TYPE,
+ "AMDGPU Shuffle Optimizer", false, false)
+
+FunctionPass *llvm::createAMDGPUShuffleOptimizerPass() {
+ return new AMDGPUShuffleOptimizer();
+}
+
+bool AMDGPUShuffleOptimizer::processShuffleIntrinsics(Function &F) {
+ bool Changed = false;
+
+  for (auto &BB : F)
+    for (auto &I : llvm::make_early_inc_range(BB))
+      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+        switch (II->getIntrinsicID()) {
+        case Intrinsic::amdgcn_generic_shuffle:
+        case Intrinsic::amdgcn_generic_shuffle_up:
+        case Intrinsic::amdgcn_generic_shuffle_down:
+        case Intrinsic::amdgcn_generic_shuffle_xor:
+          Changed |= optimizeShuffleIntrinsic(II);
+          break;
+        default:
+          break;
+        }
+      }
+
+ return Changed;
+}
+
+bool AMDGPUShuffleOptimizer::runShuffleOptimizer(Function &F,
+ const GCNSubtarget &ST) {
+ if (!EnableShuffleOptimization)
+ return false;
+
+ AMDGPUShuffleOptimizer TempOptimizer;
+ TempOptimizer.ST = &ST;
+ return TempOptimizer.processShuffleIntrinsics(F);
+}
+
+PreservedAnalyses AMDGPUShuffleOptimizerPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
+ bool Changed = AMDGPUShuffleOptimizer::runShuffleOptimizer(F, ST);
+ return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index e969f9ec88899..7cc172485b9e2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -573,6 +573,7 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeAMDGPURegBankCombinerPass(*PR);
initializeAMDGPUPromoteAllocaPass(*PR);
initializeAMDGPUCodeGenPreparePass(*PR);
+ initializeAMDGPUShuffleOptimizerPass(*PR);
initializeAMDGPULateCodeGenPrepareLegacyPass(*PR);
initializeAMDGPURemoveIncompatibleFunctionsLegacyPass(*PR);
initializeAMDGPULowerModuleLDSLegacyPass(*PR);
@@ -1366,6 +1367,7 @@ void AMDGPUPassConfig::addIRPasses() {
if (TM.getTargetTriple().isAMDGCN()) {
// TODO: May want to move later or split into an early and late one.
addPass(createAMDGPUCodeGenPreparePass());
+ addPass(createAMDGPUShuffleOptimizerPass());
}
// Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
@@ -2104,6 +2106,7 @@ void AMDGPUCodeGenPassBuilder::addIRPasses(AddIRPass &addPass) const {
// TODO: May want to move later or split into an early and late one.
addPass(AMDGPUCodeGenPreparePass(TM));
+ addPass(AMDGPUShuffleOptimizerPass(TM));
// Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
// have expanded.
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index dc9dd220130ea..74b5e1236d406 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -105,6 +105,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPURewriteAGPRCopyMFMA.cpp
AMDGPURewriteOutArguments.cpp
AMDGPURewriteUndefForPHI.cpp
+ AMDGPUShuffleOptimizer.cpp
AMDGPUSelectionDAGInfo.cpp
AMDGPUSetWavePriority.cpp
AMDGPUSplitModule.cpp
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
index ceed41f3ed7c5..f189db6901950 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline-npm.ll
@@ -10,9 +10,9 @@
; GCN-O0: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(atomic-expand,verify,gc-lowering,lower-constant-intrinsics,unreachableblockelim,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(lower-switch,lower-invoke,unreachableblockelim,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa,require<uniformity>,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,localstackalloc))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,phi-node-elimination,two-address-instruction,regallocfast,si-fix-vgpr-copies,remove-redundant-debug-values,fixup-statepoint-caller-saved,prolog-epilog,post-ra-pseudos,si-post-ra-bundler,fentry-insert,xray-instrumentation,patchable-function,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O2: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,early-cse<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,AMDGPUShuffleOptimizerPass,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,early-cse<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
-; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
+; GCN-O3: require<MachineModuleAnalysis>,require<profile-summary>,require<collector-metadata>,pre-isel-intrinsic-lowering,function(expand-large-div-rem,expand-fp),amdgpu-remove-incompatible-functions,amdgpu-printf-runtime-binding,amdgpu-lower-ctor-dtor,function(amdgpu-image-intrinsic-opt),expand-variadics,amdgpu-always-inline,always-inline,amdgpu-export-kernel-runtime-handles,amdgpu-sw-lower-lds,amdgpu-lower-module-lds,function(amdgpu-atomic-optimizer,atomic-expand,amdgpu-promote-alloca,separate-const-offset-from-gep<>,slsr,gvn<>,nary-reassociate,early-cse<>,amdgpu-codegenprepare,AMDGPUShuffleOptimizerPass,loop-mssa(licm<allowspeculation>),verify,loop-mssa(canon-freeze,loop-reduce),mergeicmps,expand-memcmp,gc-lowering,lower-constant-intrinsics,unreachableblockelim,consthoist,replace-with-veclib,partially-inline-libcalls,ee-instrument<post-inline>,scalarize-masked-mem-intrin,expand-reductions,gvn<>),amdgpu-preload-kernel-arguments,function(amdgpu-lower-kernel-arguments),amdgpu-lower-buffer-fat-pointers,cgscc(function(codegenprepare,load-store-vectorizer,lower-switch,lower-invoke,unreachableblockelim,flatten-cfg,sink,amdgpu-late-codegenprepare,amdgpu-unify-divergent-exit-nodes,fix-irreducible,unify-loop-exits,StructurizeCFGPass,amdgpu-annotate-uniform,si-annotate-control-flow,amdgpu-rewrite-undef-for-phi,lcssa)),amdgpu-perf-hint,cgscc(function(require<uniformity>,objc-arc-contract,callbr-prepare,safe-stack,stack-protector,verify)),cgscc(function(machine-function(amdgpu-isel,si-fix-sgpr-copies,si-i1-copies,finalize-isel,early-tailduplication,opt-phis,stack-coloring,localstackalloc,dead-mi-elimination,early-machinelicm,machine-cse,machine-sink,peephole-opt,dead-mi-elimination,si-fold-operands,gcn-dpp-combine,si-load-store-opt,si-peephole-sdwa,early-machinelicm,machine-cse,si-fold-operands,dead-mi-elimination,si-shrink-instructions))),require<reg-usage>,cgscc(function(machine-function(reg-usage-propagation,amdgpu-prepare-agpr-alloc,detect-dead-lanes,dead-mi-elimination,init-undef,process-imp-defs,unreachable-mbb-elimination,require<live-vars>,si-opt-vgpr-liverange,require<machine-loops>,phi-node-elimination,si-lower-control-flow,two-address-instruction,register-coalescer,rename-independent-subregs,amdgpu-rewrite-partial-reg-uses,machine-scheduler,amdgpu-pre-ra-optimizations,si-wqm,si-optimize-exec-masking-pre-ra,si-form-memory-clauses,amdgpu-pre-ra-long-branch-reg,greedy<sgpr>,virt-reg-rewriter<no-clear-vregs>,stack-slot-coloring,si-lower-sgpr-spills,si-pre-allocate-wwm-regs,greedy<wwm>,si-lower-wwm-copies,virt-reg-rewriter<no-clear-vregs>,amdgpu-reserve-wwm-regs,greedy<vgpr>,amdgpu-nsa-reassign,virt-reg-rewriter,amdgpu-mark-last-scratch-load,machine-cp,machinelicm,si-fix-vgpr-copies,si-optimize-exec-masking,remove-redundant-debug-values,fixup-statepoint-caller-saved,postra-machine-sink,shrink-wrap,prolog-epilog,branch-folder,tailduplication,machine-latecleanup,machine-cp,post-ra-pseudos,si-shrink-instructions,si-post-ra-bundler,postmisched,block-placement,fentry-insert,xray-instrumentation,patchable-function,gcn-create-vopd,si-memory-legalizer,si-insert-waitcnts,si-late-branch-lowering,si-pre-emit-peephole,post-RA-hazard-rec,amdgpu-wait-sgpr-hazards,amdgpu-insert-delay-alu,branch-relaxation,reg-usage-collector,remove-loads-into-fake-uses,live-debug-values,machine-sanmd,stack-frame-layout,verify),free-machine-function))
define void @empty() {
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 3e17be6b34a57..c22729eaa2b10 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -199,6 +199,7 @@
; GCN-O1-NEXT: Cycle Info Analysis
; GCN-O1-NEXT: Uniformity Analysis
; GCN-O1-NEXT: AMDGPU IR optimizations
+; GCN-O1-NEXT: AMDGPU Shuffle Optimizer
; GCN-O1-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O1-NEXT: Canonicalize natural loops
; GCN-O1-NEXT: Scalar Evolution Analysis
@@ -497,6 +498,7 @@
; GCN-O1-OPTS-NEXT: Cycle Info Analysis
; GCN-O1-OPTS-NEXT: Uniformity Analysis
; GCN-O1-OPTS-NEXT: AMDGPU IR optimizations
+; GCN-O1-OPTS-NEXT: AMDGPU Shuffle Optimizer
; GCN-O1-OPTS-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O1-OPTS-NEXT: Canonicalize natural loops
; GCN-O1-OPTS-NEXT: Scalar Evolution Analysis
@@ -806,6 +808,7 @@
; GCN-O2-NEXT: Cycle Info Analysis
; GCN-O2-NEXT: Uniformity Analysis
; GCN-O2-NEXT: AMDGPU IR optimizations
+; GCN-O2-NEXT: AMDGPU Shuffle Optimizer
; GCN-O2-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O2-NEXT: Function Alias Analysis Results
; GCN-O2-NEXT: Memory SSA
@@ -1131,6 +1134,7 @@
; GCN-O3-NEXT: Cycle Info Analysis
; GCN-O3-NEXT: Uniformity Analysis
; GCN-O3-NEXT: AMDGPU IR optimizations
+; GCN-O3-NEXT: AMDGPU Shuffle Optimizer
; GCN-O3-NEXT: Basic Alias Analysis (stateless AA impl)
; GCN-O3-NEXT: Function Alias Analysis Results
; GCN-O3-NEXT: Memory SSA
diff --git a/llvm/test/CodeGen/AMDGPU/shuffle-optimizer-basic.ll b/llvm/test/CodeGen/AMDGPU/shuffle-optimizer-basic.ll
new file mode 100644
index 0000000000000..8427dc3e5ac10
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/shuffle-optimizer-basic.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck %s
+
+declare i32 @llvm.amdgcn.generic.shuffle.i32(i32, i32, i32)
+declare i32 @llvm.amdgcn.generic.shuffle.up.i32(i32, i32, i32)
+declare i32 @llvm.amdgcn.generic.shuffle.down.i32(i32, i32, i32)
+declare i32 @llvm.amdgcn.generic.shuffle.xor.i32(i32, i32, i32)
+
+define amdgpu_kernel void @test_shuffle_up_dpp(ptr addrspace(1) %out, i32 %val) {
+; CHECK-LABEL: test_shuffle_up_dpp:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_dword s2, s[4:5], 0x2c
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: v_mov_b32_dpp v0, v0 row_shl:1 row_mask:0xf bank_mask:0xf
+; CHECK-NEXT: global_store_dword v1, v0, s[0:1]
+; CHECK-NEXT: s_endpgm
+  %result = call i32 @llvm.amdgcn.generic.shuffle.up.i32(i32 %val, i32 1, i32 16)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @test_shuffle_down_dpp(ptr addrspace(1) %out, i32 %val) {
+; CHECK-LABEL: test_shuffle_down_dpp:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_dword s2, s[4:5], 0x2c
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: v_mov_b32_dpp v0, v0 row_shr:2 row_mask:0xf bank_mask:0xf
+; CHECK-NEXT: global_store_dword v1, v0, s[0:1]
+; CHECK-NEXT: s_endpgm
+  %result = call i32 @llvm.amdgcn.generic.shuffle.down.i32(i32 %val, i32 2, i32 16)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @test_shuffle_xor_dpp(ptr addrspace(1) %out, i32 %val) {
+; CHECK-LABEL: test_shuffle_xor_dpp:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_dword s2, s[4:5], 0x2c
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: v_mov_b32_dpp v0, v0 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
+; CHECK-NEXT: global_store_dword v1, v0, s[0:1]
+; CHECK-NEXT: s_endpgm
+  %result = call i32 @llvm.amdgcn.generic.shuffle.xor.i32(i32 %val, i32 1, i32 32)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @test_shuffle_broadcast(ptr addrspace(1) %out, i32 %val) {
+; CHECK-LABEL: test_shuffle_broadcast:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dword s0, s[4:5], 0x2c
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v1, s0
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; CHECK-NEXT: ds_bpermute_b32 v1, v0, v1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_dword v0, v1, s[0:1]
+; CHECK-NEXT: s_endpgm
+  %result = call i32 @llvm.amdgcn.generic.shuffle.i32(i32 %val, i32 0, i32 64)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @test_shuffle_permlane16(ptr addrspace(1) %out, i32 %val) {
+; CHECK-LABEL: test_shuffle_permlane16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_dword s2, s[4:5], 0x2c
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: v_permlane16_b32 v0, v0, 0, 0
+; CHECK-NEXT: global_store_dword v1, v0, s[0:1]
+; CHECK-NEXT: s_endpgm
+  %result = call i32 @llvm.amdgcn.generic.shuffle.xor.i32(i32 %val, i32 16, i32 32)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}