[llvm] [AMDGPU] Account for existing SDWA selections (PR #123221)
Frederik Harwath via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 26 03:17:45 PST 2025
https://github.com/frederik-h updated https://github.com/llvm/llvm-project/pull/123221
>From b29c0f218db0170f0848741a89b408bca25156c1 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Fri, 10 Jan 2025 09:59:00 -0800
Subject: [PATCH 01/33] [AMDGPU] Account for existing SDWA selections
Change-Id: I3e1cf6042f069e8dffe9dd5b4654288111f7b1bf
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 135 ++++++++++++++++++++--
1 file changed, 123 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 467f042892ceb..f515ba1aac5d0 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -85,6 +85,8 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
}
};
+using namespace AMDGPU::SDWA;
+
class SDWAOperand {
private:
MachineOperand *Target; // Operand that would be used in converted instruction
@@ -102,12 +104,55 @@ class SDWAOperand {
virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
- virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;
+ virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) = 0;
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
+ /// Fold a \p FoldedOp SDWA selection into an \p ExistingOp existing SDWA
+ /// selection. If the selections are compatible, \p return true and store the
+ /// SDWA selection in
+ /// \p NewOp .
+ /// For example, if we have existing BYTE_0 Sel and are attempting to fold
+ /// WORD_1 Sel: BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+ bool combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp, SdwaSel &NewOp) {
+ if (ExistingOp == SdwaSel::DWORD) {
+ NewOp = FoldedOp;
+ return true;
+ }
+ if (FoldedOp == SdwaSel::DWORD) {
+ NewOp = ExistingOp;
+ return true;
+ }
+
+ if (FoldedOp != SdwaSel::WORD_0 && FoldedOp != SdwaSel::WORD_1 &&
+ FoldedOp != ExistingOp)
+ return false;
+
+ if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
+ ExistingOp == SdwaSel::BYTE_3)
+ return false;
+
+ if (ExistingOp == FoldedOp) {
+ NewOp = ExistingOp;
+ return true;
+ }
+
+ if (FoldedOp == SdwaSel::WORD_0) {
+ NewOp = ExistingOp;
+ return true;
+ }
+
+ if (FoldedOp == SdwaSel::WORD_1) {
+ NewOp = (SdwaSel)((unsigned)ExistingOp + 2);
+ return true;
+ }
+
+ return false;
+ }
+
MachineRegisterInfo *getMRI() const {
return &getParentInst()->getParent()->getParent()->getRegInfo();
}
@@ -118,8 +163,6 @@ class SDWAOperand {
#endif
};
-using namespace AMDGPU::SDWA;
-
class SDWASrcOperand : public SDWAOperand {
private:
SdwaSel SrcSel;
@@ -137,7 +180,8 @@ class SDWASrcOperand : public SDWAOperand {
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -166,7 +210,8 @@ class SDWADstOperand : public SDWAOperand {
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -186,7 +231,8 @@ class SDWADstPreserveOperand : public SDWADstOperand {
: SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
Preserve(PreserveOp) {}
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -375,7 +421,8 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
+bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine) {
switch (MI.getOpcode()) {
case AMDGPU::V_CVT_F32_FP8_sdwa:
case AMDGPU::V_CVT_F32_BF8_sdwa:
@@ -451,7 +498,16 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- SrcSel->setImm(getSrcSel());
+ if (Combine) {
+ SdwaSel NewOp;
+ bool CanCombine =
+ combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel(), NewOp);
+ if (!CanCombine)
+ return false;
+ SrcSel->setImm(NewOp);
+ } else {
+ SrcSel->setImm(getSrcSel());
+ }
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -479,7 +535,8 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
+bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine) {
// Replace vdst operand in MI with target operand. Set dst_sel and dst_unused
if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
@@ -498,7 +555,16 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- DstSel->setImm(getDstSel());
+ if (Combine) {
+ SdwaSel NewOp;
+ bool CanCombine =
+ combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel(), NewOp);
+ if (!CanCombine)
+ return false;
+ DstSel->setImm(NewOp);
+ } else {
+ DstSel->setImm(getDstSel());
+ }
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
DstUnused->setImm(getDstUnused());
@@ -510,7 +576,8 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
}
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
- const SIInstrInfo *TII) {
+ const SIInstrInfo *TII,
+ bool Combine) {
// MI should be moved right before v_or_b32.
// For this we should clear all kill flags on uses of MI src-operands or else
// we can encounter problem with use of killed operand.
@@ -535,7 +602,7 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
MI.getNumOperands() - 1);
// Convert MI as any other SDWADstOperand and remove v_or_b32
- return SDWADstOperand::convertToSDWA(MI, TII);
+ return SDWADstOperand::convertToSDWA(MI, TII, Combine);
}
std::optional<int64_t>
@@ -1029,6 +1096,50 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// Convert to sdwa
int SDWAOpcode;
unsigned Opcode = MI.getOpcode();
+
+ // If the MI is already SDWA, preserve any existing opsel
+ if (TII->isSDWA(Opcode)) {
+ auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ MI.getParent()->insert(MI.getIterator(), SDWAInst);
+
+ // Apply all sdwa operand patterns.
+ bool Converted = false;
+ for (auto &Operand : SDWAOperands) {
+ LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
+ // There should be no intersection between SDWA operands and potential MIs
+ // e.g.:
+ // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
+ // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
+ // v_add_u32 v3, v4, v2
+ //
+ // In that example it is possible that we would fold 2nd instruction into
+ // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
+ // (that was already destroyed). So if SDWAOperand is also a potential MI
+ // then do not apply it.
+ if (PotentialMatches.count(Operand->getParentInst()) == 0)
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
+ }
+
+ if (Converted) {
+ ConvertedInstructions.push_back(SDWAInst);
+ for (MachineOperand &MO : SDWAInst->uses()) {
+ if (!MO.isReg())
+ continue;
+
+ MRI->clearKillFlags(MO.getReg());
+ }
+ } else {
+ SDWAInst->eraseFromParent();
+ return false;
+ }
+
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ ++NumSDWAInstructionsPeepholed;
+
+ MI.eraseFromParent();
+ return true;
+ }
+
if (TII->isSDWA(Opcode)) {
SDWAOpcode = Opcode;
} else {
>From 8d16c1cdde49f8f1e5073693c9820404d7afbc29 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 14 Jan 2025 11:20:53 -0500
Subject: [PATCH 02/33] [AMDGPU] Correct transformation and simplify
combineSdwaSel
- Remove redundant "if".
- Replace arithmetic on SdwaSel type
The case distinction seems clearer and removes a mishandled case:
Since (SdwaSel)((unsigned)WORD_0 + 2) == DWORD,
the existing code led to the transformation:
WORD_0 Sel (WORD_1 Sel (%X)) -> DWORD Sel (%X)
The correct transformation should be:
WORD_0 Sel (WORD_1 Sel (%X)) -> WORD_1 Sel (%X)
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index f515ba1aac5d0..4cbc6de30b4f1 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -122,15 +122,12 @@ class SDWAOperand {
NewOp = FoldedOp;
return true;
}
+
if (FoldedOp == SdwaSel::DWORD) {
NewOp = ExistingOp;
return true;
}
- if (FoldedOp != SdwaSel::WORD_0 && FoldedOp != SdwaSel::WORD_1 &&
- FoldedOp != ExistingOp)
- return false;
-
if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
ExistingOp == SdwaSel::BYTE_3)
return false;
@@ -146,9 +143,15 @@ class SDWAOperand {
}
if (FoldedOp == SdwaSel::WORD_1) {
- NewOp = (SdwaSel)((unsigned)ExistingOp + 2);
+ if (ExistingOp == SdwaSel::BYTE_0)
+ NewOp = SdwaSel::BYTE_2;
+ else if (ExistingOp == SdwaSel::BYTE_1)
+ NewOp = SdwaSel::BYTE_3;
+ else if (ExistingOp == SdwaSel::WORD_0)
+ NewOp = SdwaSel::WORD_1;
+
return true;
- }
+ }
return false;
}
>From 20e23b697e789e07f642c2f3be297f5107d32eed Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 04:17:32 -0500
Subject: [PATCH 03/33] [AMDGPU] Change formatting of combineSdwaSel
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 4cbc6de30b4f1..8b9c7b9607dfd 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -116,7 +116,8 @@ class SDWAOperand {
/// SDWA selection in
/// \p NewOp .
/// For example, if we have existing BYTE_0 Sel and are attempting to fold
- /// WORD_1 Sel: BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+ /// WORD_1 Sel:
+ /// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
bool combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp, SdwaSel &NewOp) {
if (ExistingOp == SdwaSel::DWORD) {
NewOp = FoldedOp;
@@ -151,7 +152,7 @@ class SDWAOperand {
NewOp = SdwaSel::WORD_1;
return true;
- }
+ }
return false;
}
>From 663b94c8fceb7554be7935e168a4e660f6f82e44 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 07:30:08 -0500
Subject: [PATCH 04/33] [AMDGPU] Remove dead branch from
SIPeepholeSDWA::convertToSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 8b9c7b9607dfd..14c5cb730f3ee 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1098,7 +1098,6 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
// Convert to sdwa
- int SDWAOpcode;
unsigned Opcode = MI.getOpcode();
// If the MI is already SDWA, preserve any existing opsel
@@ -1144,13 +1143,10 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
return true;
}
- if (TII->isSDWA(Opcode)) {
- SDWAOpcode = Opcode;
- } else {
- SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
- if (SDWAOpcode == -1)
- SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
- }
+ assert(!TII->isSDWA(Opcode));
+ int SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
+ if (SDWAOpcode == -1)
+ SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
assert(SDWAOpcode != -1);
const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);
>From c2dfca063d1e06beadbd9205461b4f46c74c7dfe Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 07:58:03 -0500
Subject: [PATCH 05/33] [AMDGPU] Extract SDWA instruction creation from
convertToSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 108 ++++++++++++----------
1 file changed, 58 insertions(+), 50 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 14c5cb730f3ee..37aea94a3c5a8 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -62,6 +62,7 @@ class SIPeepholeSDWA {
std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
void pseudoOpConvertToVOP2(MachineInstr &MI,
const GCNSubtarget &ST) const;
+ MachineInstr *createSDWAVersion(MachineInstr &MI);
bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;
@@ -1092,58 +1093,10 @@ bool isConvertibleToSDWA(MachineInstr &MI,
}
} // namespace
-bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
- const SDWAOperandsVector &SDWAOperands) {
-
- LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
-
- // Convert to sdwa
+MachineInstr* SIPeepholeSDWA::createSDWAVersion(MachineInstr &MI) {
unsigned Opcode = MI.getOpcode();
-
- // If the MI is already SDWA, preserve any existing opsel
- if (TII->isSDWA(Opcode)) {
- auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
- MI.getParent()->insert(MI.getIterator(), SDWAInst);
-
- // Apply all sdwa operand patterns.
- bool Converted = false;
- for (auto &Operand : SDWAOperands) {
- LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
- // There should be no intersection between SDWA operands and potential MIs
- // e.g.:
- // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
- // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
- // v_add_u32 v3, v4, v2
- //
- // In that example it is possible that we would fold 2nd instruction into
- // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
- // (that was already destroyed). So if SDWAOperand is also a potential MI
- // then do not apply it.
- if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
- }
-
- if (Converted) {
- ConvertedInstructions.push_back(SDWAInst);
- for (MachineOperand &MO : SDWAInst->uses()) {
- if (!MO.isReg())
- continue;
-
- MRI->clearKillFlags(MO.getReg());
- }
- } else {
- SDWAInst->eraseFromParent();
- return false;
- }
-
- LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
- ++NumSDWAInstructionsPeepholed;
-
- MI.eraseFromParent();
- return true;
- }
-
assert(!TII->isSDWA(Opcode));
+
int SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
if (SDWAOpcode == -1)
SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
@@ -1280,6 +1233,61 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
}
+ return SDWAInst.getInstr();
+}
+
+bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
+ const SDWAOperandsVector &SDWAOperands) {
+ LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
+
+ // Convert to sdwa
+ unsigned Opcode = MI.getOpcode();
+
+ // If the MI is already SDWA, preserve any existing opsel
+ if (TII->isSDWA(Opcode)) {
+ auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ MI.getParent()->insert(MI.getIterator(), SDWAInst);
+
+ // Apply all sdwa operand patterns.
+ bool Converted = false;
+ for (auto &Operand : SDWAOperands) {
+ LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
+ // There should be no intersection between SDWA operands and potential MIs
+ // e.g.:
+ // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
+ // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
+ // v_add_u32 v3, v4, v2
+ //
+ // In that example it is possible that we would fold 2nd instruction into
+ // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
+ // (that was already destroyed). So if SDWAOperand is also a potential MI
+ // then do not apply it.
+ if (PotentialMatches.count(Operand->getParentInst()) == 0)
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
+ }
+
+ if (Converted) {
+ ConvertedInstructions.push_back(SDWAInst);
+ for (MachineOperand &MO : SDWAInst->uses()) {
+ if (!MO.isReg())
+ continue;
+
+ MRI->clearKillFlags(MO.getReg());
+ }
+ } else {
+ SDWAInst->eraseFromParent();
+ return false;
+ }
+
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ ++NumSDWAInstructionsPeepholed;
+
+ MI.eraseFromParent();
+ return true;
+ }
+
+ MachineInstr *SDWAInst{createSDWAVersion(MI)};
+
// Apply all sdwa operand patterns.
bool Converted = false;
for (auto &Operand : SDWAOperands) {
>From 38bd038049bd6cf6f69161903af0340ee9297ad9 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 08:18:56 -0500
Subject: [PATCH 06/33] [AMDGPU] Unify loops in SIPeepholeSDWA::convertToSDWA
There are two loops that invoke the conversion on the operands
of the input instruction, one for the case where the instruction
is already an SDWA instruction and one for the case where it isn't.
The loops are almost the same.
Fuse those loops into a single loop.
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 59 +++++------------------
1 file changed, 13 insertions(+), 46 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 37aea94a3c5a8..bbbff2083745d 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1240,54 +1240,21 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
const SDWAOperandsVector &SDWAOperands) {
LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
- // Convert to sdwa
- unsigned Opcode = MI.getOpcode();
-
- // If the MI is already SDWA, preserve any existing opsel
- if (TII->isSDWA(Opcode)) {
- auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ MachineInstr *SDWAInst;
+ bool CombineSelections;
+ if (TII->isSDWA(MI.getOpcode())) {
+ // No conversion necessary, since MI is an SDWA instruction. But
+ // tell convertToSDWA below to combine selections of this instruction
+ // and its SDWA operands.
+ SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
-
- // Apply all sdwa operand patterns.
- bool Converted = false;
- for (auto &Operand : SDWAOperands) {
- LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
- // There should be no intersection between SDWA operands and potential MIs
- // e.g.:
- // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
- // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
- // v_add_u32 v3, v4, v2
- //
- // In that example it is possible that we would fold 2nd instruction into
- // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
- // (that was already destroyed). So if SDWAOperand is also a potential MI
- // then do not apply it.
- if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
- }
-
- if (Converted) {
- ConvertedInstructions.push_back(SDWAInst);
- for (MachineOperand &MO : SDWAInst->uses()) {
- if (!MO.isReg())
- continue;
-
- MRI->clearKillFlags(MO.getReg());
- }
- } else {
- SDWAInst->eraseFromParent();
- return false;
- }
-
- LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
- ++NumSDWAInstructionsPeepholed;
-
- MI.eraseFromParent();
- return true;
+ CombineSelections = true;
+ } else {
+ // Convert to sdwa
+ SDWAInst = createSDWAVersion(MI);
+ CombineSelections = false;
}
- MachineInstr *SDWAInst{createSDWAVersion(MI)};
-
// Apply all sdwa operand patterns.
bool Converted = false;
for (auto &Operand : SDWAOperands) {
@@ -1303,7 +1270,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// was already destroyed). So if SDWAOperand is also a potential MI then do
// not apply it.
if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII);
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII, CombineSelections);
}
if (Converted) {
>From e5923ac0328d1f62729778cffd5c4e70d72ac758 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 08:28:45 -0500
Subject: [PATCH 07/33] [AMDGPU] Invert if statement in
SIPeepholeSDWA::convertToSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index bbbff2083745d..944d85f72bf6b 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1273,19 +1273,18 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
Converted |= Operand->convertToSDWA(*SDWAInst, TII, CombineSelections);
}
- if (Converted) {
- ConvertedInstructions.push_back(SDWAInst);
- for (MachineOperand &MO : SDWAInst->uses()) {
- if (!MO.isReg())
- continue;
-
- MRI->clearKillFlags(MO.getReg());
- }
- } else {
+ if (!Converted) {
SDWAInst->eraseFromParent();
return false;
}
+ ConvertedInstructions.push_back(SDWAInst);
+ for (MachineOperand &MO : SDWAInst->uses()) {
+ if (!MO.isReg())
+ continue;
+
+ MRI->clearKillFlags(MO.getReg());
+ }
LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
++NumSDWAInstructionsPeepholed;
>From 7034d2dc78a7884e52462ac5fd6d338d3817a72e Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 08:39:37 -0500
Subject: [PATCH 08/33] [AMDGPU] Rename "Combine" to "CombineSelections" in
SIPeepholeSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 944d85f72bf6b..f018be15155f7 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -106,7 +106,7 @@ class SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) = 0;
+ bool CombineSelections = false) = 0;
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
@@ -186,7 +186,7 @@ class SDWASrcOperand : public SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) override;
+ bool CombineSelections = false) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -216,7 +216,7 @@ class SDWADstOperand : public SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) override;
+ bool CombineSelections = false) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -237,7 +237,7 @@ class SDWADstPreserveOperand : public SDWADstOperand {
Preserve(PreserveOp) {}
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) override;
+ bool CombineSelections = false) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -427,7 +427,7 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
}
bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine) {
+ bool CombineSelections) {
switch (MI.getOpcode()) {
case AMDGPU::V_CVT_F32_FP8_sdwa:
case AMDGPU::V_CVT_F32_BF8_sdwa:
@@ -503,7 +503,7 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- if (Combine) {
+ if (CombineSelections) {
SdwaSel NewOp;
bool CanCombine =
combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel(), NewOp);
@@ -541,7 +541,7 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
}
bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine) {
+ bool CombineSelections) {
// Replace vdst operand in MI with target operand. Set dst_sel and dst_unused
if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
@@ -560,7 +560,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- if (Combine) {
+ if (CombineSelections) {
SdwaSel NewOp;
bool CanCombine =
combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel(), NewOp);
@@ -582,7 +582,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
const SIInstrInfo *TII,
- bool Combine) {
+ bool CombineSelections) {
// MI should be moved right before v_or_b32.
// For this we should clear all kill flags on uses of MI src-operands or else
// we can encounter problem with use of killed operand.
@@ -607,7 +607,7 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
MI.getNumOperands() - 1);
// Convert MI as any other SDWADstOperand and remove v_or_b32
- return SDWADstOperand::convertToSDWA(MI, TII, Combine);
+ return SDWADstOperand::convertToSDWA(MI, TII, CombineSelections);
}
std::optional<int64_t>
>From bbe9ab85865e2ff2c91c6e402d219bd9d533af38 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 09:19:43 -0500
Subject: [PATCH 09/33] [AMDGPU] Change combineSdwaSel to use optional return
type
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 68 +++++++++--------------
1 file changed, 27 insertions(+), 41 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index f018be15155f7..ae8c614ddb3fd 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -113,49 +113,37 @@ class SDWAOperand {
MachineInstr *getParentInst() const { return Target->getParent(); }
/// Fold a \p FoldedOp SDWA selection into an \p ExistingOp existing SDWA
- /// selection. If the selections are compatible, \p return true and store the
- /// SDWA selection in
- /// \p NewOp .
- /// For example, if we have existing BYTE_0 Sel and are attempting to fold
- /// WORD_1 Sel:
+ /// selection. If the selections are compatible, return the combined
+ /// selection, otherwise return a nullopt. For example, if we have existing
+ /// BYTE_0 Sel and are attempting to fold WORD_1 Sel:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
- bool combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp, SdwaSel &NewOp) {
- if (ExistingOp == SdwaSel::DWORD) {
- NewOp = FoldedOp;
- return true;
- }
+ std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp) {
+ if (ExistingOp == SdwaSel::DWORD)
+ return FoldedOp;
- if (FoldedOp == SdwaSel::DWORD) {
- NewOp = ExistingOp;
- return true;
- }
+ if (FoldedOp == SdwaSel::DWORD)
+ return ExistingOp;
if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
ExistingOp == SdwaSel::BYTE_3)
- return false;
+ return {};
- if (ExistingOp == FoldedOp) {
- NewOp = ExistingOp;
- return true;
- }
+ if (ExistingOp == FoldedOp)
+ return ExistingOp;
- if (FoldedOp == SdwaSel::WORD_0) {
- NewOp = ExistingOp;
- return true;
- }
+ if (FoldedOp == SdwaSel::WORD_0)
+ return ExistingOp;
if (FoldedOp == SdwaSel::WORD_1) {
if (ExistingOp == SdwaSel::BYTE_0)
- NewOp = SdwaSel::BYTE_2;
- else if (ExistingOp == SdwaSel::BYTE_1)
- NewOp = SdwaSel::BYTE_3;
- else if (ExistingOp == SdwaSel::WORD_0)
- NewOp = SdwaSel::WORD_1;
-
- return true;
+ return SdwaSel::BYTE_2;
+ if (ExistingOp == SdwaSel::BYTE_1)
+ return SdwaSel::BYTE_3;
+ if (ExistingOp == SdwaSel::WORD_0)
+ return SdwaSel::WORD_1;
}
- return false;
+ return {};
}
MachineRegisterInfo *getMRI() const {
@@ -504,12 +492,11 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
if (CombineSelections) {
- SdwaSel NewOp;
- bool CanCombine =
- combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel(), NewOp);
- if (!CanCombine)
+ std::optional<SdwaSel> NewOp =
+ combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel());
+ if (!NewOp.has_value())
return false;
- SrcSel->setImm(NewOp);
+ SrcSel->setImm(NewOp.value());
} else {
SrcSel->setImm(getSrcSel());
}
@@ -561,12 +548,11 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
if (CombineSelections) {
- SdwaSel NewOp;
- bool CanCombine =
- combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel(), NewOp);
- if (!CanCombine)
+ std::optional<SdwaSel> NewOp =
+ combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
+ if (!NewOp.has_value())
return false;
- DstSel->setImm(NewOp);
+ DstSel->setImm(NewOp.value());
} else {
DstSel->setImm(getDstSel());
}
>From 245c93bce25df06a000d7cc2e8d97ae828b55791 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 11:15:12 -0500
Subject: [PATCH 10/33] [AMDGPU] Add regression test for invalid SDWA selection
handling
---
.../sdwa-peephole-instr-combine-sel.mir | 124 ++++++++++++++++++
1 file changed, 124 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
new file mode 100644
index 0000000000000..43708e9513c68
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
@@ -0,0 +1,124 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=NOHAZARD %s
+
+---
+name: sdwa_opsel_hazard
+body: |
+ ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
+ ; NOHAZARD: bb.0:
+ ; NOHAZARD-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ ; NOHAZARD-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.7
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.1:
+ ; NOHAZARD-NEXT: successors: %bb.2(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
+ ; NOHAZARD-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; NOHAZARD-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.2:
+ ; NOHAZARD-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.3
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.3:
+ ; NOHAZARD-NEXT: successors: %bb.4(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.4:
+ ; NOHAZARD-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.5
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.5:
+ ; NOHAZARD-NEXT: successors: %bb.6(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.6:
+ ; NOHAZARD-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.9
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.7:
+ ; NOHAZARD-NEXT: successors: %bb.8(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.8:
+ ; NOHAZARD-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.1
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.9:
+ ; NOHAZARD-NEXT: successors: %bb.10(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.10:
+ ; NOHAZARD-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:sreg_64_xexec_xnull = IMPLICIT_DEF
+ %2:vgpr_32 = IMPLICIT_DEF
+ %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed %1, %2, 0, 0, implicit $exec
+ %4:sreg_32 = SI_IF undef %0, %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.7
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+
+ %5:vgpr_32 = V_AND_B32_e64 undef %6, 255, implicit $exec
+ %7:vgpr_32 = V_LSHLREV_B32_e64 2, killed undef %5, implicit $exec
+
+ bb.2:
+ successors: %bb.3(0x40000000), %bb.4(0x40000000)
+
+ %8:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.3:
+ successors: %bb.4(0x80000000)
+
+ bb.4:
+ successors: %bb.5(0x40000000), %bb.6(0x40000000)
+
+ %10:sreg_32 = SI_IF killed undef %8, %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.5
+
+ bb.5:
+ successors: %bb.6(0x80000000)
+
+ bb.6:
+ successors: %bb.9(0x40000000), %bb.10(0x40000000)
+
+ %11:sreg_32 = SI_IF undef %0, %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.9
+
+ bb.7:
+ successors: %bb.8(0x80000000)
+
+ bb.8:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+
+ %6:vgpr_32 = V_LSHRREV_B32_e64 16, undef %3, implicit $exec
+ %9:sreg_32 = SI_IF killed undef %4, %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.9:
+ successors: %bb.10(0x80000000)
+
+ bb.10:
+ S_ENDPGM 0
+
+...
+
>From 5b51aebbc41ab6354cc57bfe8e7fdd7ca8d83e2d Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 12:09:42 -0500
Subject: [PATCH 11/33] [AMDGPU] clang-format changes to SIPeepholeSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index ae8c614ddb3fd..713ef162f8dee 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -549,7 +549,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
assert(DstSel);
if (CombineSelections) {
std::optional<SdwaSel> NewOp =
- combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
+ combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
if (!NewOp.has_value())
return false;
DstSel->setImm(NewOp.value());
@@ -1079,7 +1079,7 @@ bool isConvertibleToSDWA(MachineInstr &MI,
}
} // namespace
-MachineInstr* SIPeepholeSDWA::createSDWAVersion(MachineInstr &MI) {
+MachineInstr *SIPeepholeSDWA::createSDWAVersion(MachineInstr &MI) {
unsigned Opcode = MI.getOpcode();
assert(!TII->isSDWA(Opcode));
>From b05facb3c0a7ca35aff90a0dd5a4cc22c6c0cfb9 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 23 Jan 2025 09:05:54 -0500
Subject: [PATCH 12/33] [AMDGPU] SIPeepholeSDWA: Reenable existing SDWA
instruction handling
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 7 +-
.../test/CodeGen/AMDGPU/GlobalISel/saddsat.ll | 15 +-
.../test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll | 15 +-
.../test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll | 26 ++--
.../test/CodeGen/AMDGPU/GlobalISel/usubsat.ll | 26 ++--
.../buffer-fat-pointer-atomicrmw-fadd.ll | 28 +---
.../CodeGen/AMDGPU/flat-atomicrmw-fadd.ll | 48 ++-----
.../CodeGen/AMDGPU/flat-atomicrmw-fsub.ll | 32 ++---
.../CodeGen/AMDGPU/global-atomicrmw-fadd.ll | 56 ++------
.../CodeGen/AMDGPU/global-atomicrmw-fsub.ll | 32 ++---
llvm/test/CodeGen/AMDGPU/idot4u.ll | 22 ++-
.../CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 16 +--
.../CodeGen/AMDGPU/local-atomicrmw-fsub.ll | 16 +--
llvm/test/CodeGen/AMDGPU/permute_i8.ll | 3 +-
.../AMDGPU/sdwa-peephole-instr-combine-sel.ll | 16 +--
.../sdwa-peephole-instr-combine-sel.mir | 136 +++++++++++++-----
.../AMDGPU/sdwa-peephole-instr-gfx10.mir | 3 +-
.../CodeGen/AMDGPU/sdwa-peephole-instr.mir | 7 +-
llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir | 15 +-
llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll | 6 +-
20 files changed, 221 insertions(+), 304 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index ba3822d9b5775..713ef162f8dee 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1020,11 +1020,8 @@ bool isConvertibleToSDWA(MachineInstr &MI,
const SIInstrInfo* TII) {
// Check if this is already an SDWA instruction
unsigned Opc = MI.getOpcode();
- if (TII->isSDWA(Opc)) {
- // FIXME: Reenable after fixing selection handling.
- // Cf. llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
- return false;
- }
+ if (TII->isSDWA(Opc))
+ return true;
// Check if this instruction has opcode that supports SDWA
if (AMDGPU::getSDWAOp(Opc) == -1)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
index 08184e700c1a4..4bfd29430ff1e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -280,9 +280,8 @@ define i16 @v_saddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX8-NEXT: v_min_i16_e32 v1, v2, v1
; GFX8-NEXT: v_add_u16_e32 v1, v3, v1
; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -300,8 +299,7 @@ define i16 @v_saddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_add_i16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -441,8 +439,7 @@ define amdgpu_ps i16 @s_saddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_add_i16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -612,11 +609,9 @@ define i32 @v_saddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
index 94f943af2532a..5673a6c6e869d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -281,9 +281,8 @@ define i16 @v_ssubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX8-NEXT: v_min_i16_e32 v1, v1, v4
; GFX8-NEXT: v_sub_u16_e32 v1, v3, v1
; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -301,8 +300,7 @@ define i16 @v_ssubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -442,8 +440,7 @@ define amdgpu_ps i16 @s_ssubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_sub_i16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -613,11 +610,9 @@ define i32 @v_ssubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
index 3d7fec9a5986c..788692c94b0cf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
@@ -224,8 +224,7 @@ define i16 @v_uaddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_add_u16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -330,8 +329,7 @@ define amdgpu_ps i16 @s_uaddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_add_u16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -453,11 +451,9 @@ define i32 @v_uaddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -622,20 +618,18 @@ define amdgpu_ps i32 @s_uaddsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
; GFX8-NEXT: s_lshl_b32 s0, s3, 8
; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_lshl_b32 s1, s7, 8
; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_add_u16_e64 v2, s0, v2 clamp
-; GFX8-NEXT: s_lshl_b32 s1, s7, 8
-; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: s_lshl_b32 s0, s4, 8
; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_add_u16_e64 v3, s0, v3 clamp
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
index 5a8b5fcc93f61..4faa7edadf07a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
@@ -218,8 +218,7 @@ define i16 @v_usubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_sub_u16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -322,8 +321,7 @@ define amdgpu_ps i16 @s_usubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_sub_u16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -441,11 +439,9 @@ define i32 @v_usubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -606,20 +602,18 @@ define amdgpu_ps i32 @s_usubsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
; GFX8-NEXT: s_lshl_b32 s0, s3, 8
; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_lshl_b32 s1, s7, 8
; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_sub_u16_e64 v2, s0, v2 clamp
-; GFX8-NEXT: s_lshl_b32 s1, s7, 8
-; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: s_lshl_b32 s0, s4, 8
; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_sub_u16_e64 v3, s0, v3 clamp
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
index a969e3d4f4f79..e8f1619c5d418 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
@@ -6398,10 +6398,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v1, v5, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
@@ -6627,10 +6625,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v1, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
@@ -7048,9 +7044,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v8
-; GFX8-NEXT: v_add_f16_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_add_f16_sdwa v4, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v6, v8, v5
; GFX8-NEXT: v_or_b32_e32 v7, v6, v4
; GFX8-NEXT: v_mov_b32_e32 v6, v7
@@ -7396,10 +7390,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v1, v5, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
@@ -7658,10 +7650,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v1, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
@@ -7925,10 +7915,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v1, v5, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
@@ -8187,10 +8175,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v1, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
index 72f883928cffb..ff48a3fc98018 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
@@ -14349,10 +14349,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_memo
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -14541,10 +14539,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_fi
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -14747,10 +14743,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__offset12b_neg__amdgpu_no_fi
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -14930,10 +14924,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory(p
; GFX8-NEXT: .LBB59_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15115,10 +15107,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine_g
; GFX8-NEXT: .LBB60_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15318,10 +15308,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__offset12b_neg__amdgpu_no_fine_g
; GFX8-NEXT: .LBB61_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15514,10 +15502,8 @@ define <2 x half> @flat_system_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_f
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15704,10 +15690,8 @@ define void @flat_system_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine_
; GFX8-NEXT: .LBB63_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15894,10 +15878,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__amdgpu_no_remote_memory(ptr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -16077,10 +16059,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__amdgpu_no_remote_memory(ptr %pt
; GFX8-NEXT: .LBB65_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -16264,10 +16244,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_memo
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -16447,10 +16425,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory__
; GFX8-NEXT: .LBB67_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
index 9c2a76380d83d..14f75814128f1 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
@@ -12094,10 +12094,8 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16(ptr %ptr, <2 x half> %val) #
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12318,10 +12316,8 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16__offset12b_pos(ptr %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12560,10 +12556,8 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16__offset12b_neg(ptr %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12772,10 +12766,8 @@ define void @flat_agent_atomic_fsub_noret_v2f16(ptr %ptr, <2 x half> %val) #0 {
; GFX8-NEXT: .LBB45_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12986,10 +12978,8 @@ define void @flat_agent_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x ha
; GFX8-NEXT: .LBB46_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -13221,10 +13211,8 @@ define void @flat_agent_atomic_fsub_noret_v2f16__offset12b_neg(ptr %ptr, <2 x ha
; GFX8-NEXT: .LBB47_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -13449,10 +13437,8 @@ define <2 x half> @flat_system_atomic_fsub_ret_v2f16__offset12b_pos(ptr %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -13668,10 +13654,8 @@ define void @flat_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x h
; GFX8-NEXT: .LBB49_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index 2be6bf302d35f..ec4ea232e661c 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -15403,10 +15403,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_me
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -15637,10 +15635,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -15871,10 +15867,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__offset12b_neg__amdgpu_no_
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16089,10 +16083,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory
; GFX8-NEXT: .LBB67_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16301,10 +16293,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine
; GFX8-NEXT: .LBB68_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16514,10 +16504,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__offset12b_neg__amdgpu_no_fine
; GFX8-NEXT: .LBB69_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16756,10 +16744,8 @@ define <2 x half> @global_system_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16975,10 +16961,8 @@ define void @global_system_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fin
; GFX8-NEXT: .LBB71_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17218,10 +17202,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__amdgpu_no_remote_memory(p
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17458,10 +17440,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__amdgpu_no_remote_memory(ptr a
; GFX8-NEXT: .LBB73_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17686,10 +17666,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_me
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17900,10 +17878,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory
; GFX8-NEXT: .LBB75_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -18142,10 +18118,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__maybe_remote(ptr addrspac
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -18382,10 +18356,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__maybe_remote(ptr addrspace(1)
; GFX8-NEXT: .LBB77_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
index 24791b60bfc6d..3dbf6477a7cb8 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
@@ -12433,10 +12433,8 @@ define <2 x half> @global_agent_atomic_fsub_ret_v2f16(ptr addrspace(1) %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12713,10 +12711,8 @@ define <2 x half> @global_agent_atomic_fsub_ret_v2f16__offset12b_pos(ptr addrspa
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12993,10 +12989,8 @@ define <2 x half> @global_agent_atomic_fsub_ret_v2f16__offset12b_neg(ptr addrspa
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -13266,10 +13260,8 @@ define void @global_agent_atomic_fsub_noret_v2f16(ptr addrspace(1) %ptr, <2 x ha
; GFX8-NEXT: .LBB45_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -13533,10 +13525,8 @@ define void @global_agent_atomic_fsub_noret_v2f16__offset12b_pos(ptr addrspace(1
; GFX8-NEXT: .LBB46_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -13801,10 +13791,8 @@ define void @global_agent_atomic_fsub_noret_v2f16__offset12b_neg(ptr addrspace(1
; GFX8-NEXT: .LBB47_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -14089,10 +14077,8 @@ define <2 x half> @global_system_atomic_fsub_ret_v2f16__offset12b_pos(ptr addrsp
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -14363,10 +14349,8 @@ define void @global_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr addrspace(
; GFX8-NEXT: .LBB49_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll
index 10fac09ef4ec0..8f82348d350e0 100644
--- a/llvm/test/CodeGen/AMDGPU/idot4u.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll
@@ -2518,17 +2518,16 @@ define amdgpu_kernel void @udot4_acc8_vecMul(ptr addrspace(1) %src1,
; GFX9-NODL-NEXT: s_waitcnt vmcnt(1)
; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v8, v4, v5
-; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v7, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
-; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v9, 8, v6
-; GFX9-NODL-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NODL-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v7, v4, v5
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v8, 8, v6
+; GFX9-NODL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
; GFX9-NODL-NEXT: s_waitcnt vmcnt(0)
; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v6
; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
-; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v9
+; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v8
; GFX9-NODL-NEXT: global_store_byte v0, v1, s[6:7]
; GFX9-NODL-NEXT: s_endpgm
;
@@ -2547,17 +2546,16 @@ define amdgpu_kernel void @udot4_acc8_vecMul(ptr addrspace(1) %src1,
; GFX9-DL-NEXT: s_waitcnt vmcnt(1)
; GFX9-DL-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v8, v4, v5
-; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v7, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v9, 8, v6
-; GFX9-DL-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v7, v4, v5
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v8, 8, v6
+; GFX9-DL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v6
; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v9
+; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v8
; GFX9-DL-NEXT: global_store_byte v0, v1, s[6:7]
; GFX9-DL-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index e4602f20f8a37..23b57a7efa586 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -5034,10 +5034,8 @@ define <2 x half> @local_atomic_fadd_ret_v2f16(ptr addrspace(3) %ptr, <2 x half>
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_add_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5259,10 +5257,8 @@ define <2 x half> @local_atomic_fadd_ret_v2f16__offset(ptr addrspace(3) %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_add_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5478,10 +5474,8 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5694,10 +5688,8 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
index 967e972e53e29..1b08b64b046b4 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
@@ -5532,10 +5532,8 @@ define <2 x half> @local_atomic_fsub_ret_v2f16(ptr addrspace(3) %ptr, <2 x half>
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_sub_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5789,10 +5787,8 @@ define <2 x half> @local_atomic_fsub_ret_v2f16__offset(ptr addrspace(3) %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_sub_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -6037,10 +6033,8 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -6282,10 +6276,8 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 8c3758daacb9c..312dfa3717c77 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -592,8 +592,7 @@ define hidden void @addUsesOr(ptr addrspace(1) %in0, ptr addrspace(1) %in1, i8 %
; GFX9-NEXT: v_add_u16_sdwa v0, v4, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
; GFX9-NEXT: v_add_u16_sdwa v1, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u16_sdwa v1, v4, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX9-NEXT: v_add_u16_sdwa v0, v4, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:BYTE_1
; GFX9-NEXT: global_store_dword v[5:6], v0, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
index 6eae905278f3e..8f984bfd4d7f7 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
@@ -32,17 +32,15 @@ define amdgpu_kernel void @widget(ptr addrspace(1) %arg, i1 %arg1, ptr addrspace
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: ds_write_b32 v1, v1
; CHECK-NEXT: .LBB0_2: ; %bb20
-; CHECK-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CHECK-NEXT: s_mov_b32 s0, exec_lo
-; CHECK-NEXT: v_cmpx_ne_u16_e32 0, v0
-; CHECK-NEXT: s_xor_b32 s0, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execz .LBB0_4
-; CHECK-NEXT: ; %bb.3: ; %bb11
-; CHECK-NEXT: v_mov_b32_e32 v1, 2
-; CHECK-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: v_cmp_ne_u16_sdwa s0, v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+; CHECK-NEXT: s_and_saveexec_b32 s1, s0
+; CHECK-NEXT: s_xor_b32 s1, exec_lo, s1
+; CHECK-NEXT: ; %bb.3: ; %bb11
+; CHECK-NEXT: v_mov_b32_e32 v2, 2
+; CHECK-NEXT: v_lshlrev_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; CHECK-NEXT: ds_write_b32 v0, v1 offset:84
-; CHECK-NEXT: .LBB0_4: ; %bb14
+; CHECK-NEXT: ; %bb.4: ; %bb14
; CHECK-NEXT: s_endpgm
bb:
%call = tail call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
index cc2c8b3940d78..43708e9513c68 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
@@ -1,56 +1,124 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=CHECK %s
-
-# Currently the conversions in si-peephole-sdwa are disabled on preexisting sdwa instructions.
-# If they are reenabled, the code matches this pattern instead of the corresponding pattern
-# for V_LSHLREV_B32_sdwa further below:
-# [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, %{{[0-9]+}}, 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 5, implicit $exec
-
-# TODO Implement a fix for the incorrect sdwa selection
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=NOHAZARD %s
---
name: sdwa_opsel_hazard
body: |
- ; CHECK-LABEL: name: sdwa_opsel_hazard
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
- ; CHECK-NEXT: S_BRANCH %bb.2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 undef %5, 255, implicit $exec
- ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
- ; CHECK-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_]], 0, undef %5, 0, 6, 0, 6, 0, implicit $exec
- ; CHECK-NEXT: S_ENDPGM 0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
- ; CHECK-NEXT: S_BRANCH %bb.1
+ ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
+ ; NOHAZARD: bb.0:
+ ; NOHAZARD-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ ; NOHAZARD-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.7
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.1:
+ ; NOHAZARD-NEXT: successors: %bb.2(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
+ ; NOHAZARD-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; NOHAZARD-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.2:
+ ; NOHAZARD-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.3
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.3:
+ ; NOHAZARD-NEXT: successors: %bb.4(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.4:
+ ; NOHAZARD-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.5
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.5:
+ ; NOHAZARD-NEXT: successors: %bb.6(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.6:
+ ; NOHAZARD-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.9
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.7:
+ ; NOHAZARD-NEXT: successors: %bb.8(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.8:
+ ; NOHAZARD-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.1
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.9:
+ ; NOHAZARD-NEXT: successors: %bb.10(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.10:
+ ; NOHAZARD-NEXT: S_ENDPGM 0
bb.0:
- successors: %bb.2(0x40000000)
+ successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+
%0:sreg_32 = IMPLICIT_DEF
%1:sreg_64_xexec_xnull = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed %1, %2, 0, 0, implicit $exec
- S_BRANCH %bb.2
+ %4:sreg_32 = SI_IF undef %0, %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.7
bb.1:
+ successors: %bb.2(0x80000000)
+
%5:vgpr_32 = V_AND_B32_e64 undef %6, 255, implicit $exec
%7:vgpr_32 = V_LSHLREV_B32_e64 2, killed undef %5, implicit $exec
- S_ENDPGM 0
bb.2:
- successors: %bb.1(0x40000000)
+ successors: %bb.3(0x40000000), %bb.4(0x40000000)
- %6:vgpr_32 = V_LSHRREV_B32_e64 16, undef %3, implicit $exec
+ %8:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.3:
+ successors: %bb.4(0x80000000)
+
+ bb.4:
+ successors: %bb.5(0x40000000), %bb.6(0x40000000)
+
+ %10:sreg_32 = SI_IF killed undef %8, %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.5
+ bb.5:
+ successors: %bb.6(0x80000000)
+
+ bb.6:
+ successors: %bb.9(0x40000000), %bb.10(0x40000000)
+
+ %11:sreg_32 = SI_IF undef %0, %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.9
+
+ bb.7:
+ successors: %bb.8(0x80000000)
+
+ bb.8:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+
+ %6:vgpr_32 = V_LSHRREV_B32_e64 16, undef %3, implicit $exec
+ %9:sreg_32 = SI_IF killed undef %4, %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
S_BRANCH %bb.1
+ bb.9:
+ successors: %bb.10(0x80000000)
+
+ bb.10:
+ S_ENDPGM 0
+
...
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
index aaa32d871148b..62538120f8451 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
@@ -138,8 +138,7 @@ body: |
---
# GCN-LABEL: {{^}}name: vop2_instructions
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_LSHLREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
index c027600a8af67..e2854df2468b3 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -147,15 +147,14 @@ body: |
---
# GCN-LABEL: {{^}}name: vop2_instructions
-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_LSHLREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $mode, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_LSHLREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
index 467bc77c18577..ffbd2d092b5d8 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
@@ -37,10 +37,9 @@ body: |
; SDWA-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, [[FLAT_LOAD_DWORD1]], implicit $exec
; SDWA-NEXT: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[FLAT_LOAD_DWORD]], 8, 8, implicit $exec
; SDWA-NEXT: [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 24, [[FLAT_LOAD_DWORD1]], implicit $exec
- ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 0, 4, 5, implicit $mode, implicit $exec
; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 5, 0, 1, 3, implicit $mode, implicit $exec
- ; SDWA-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F16_sdwa]], [[V_MUL_F32_sdwa]], implicit $exec
- ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_OR_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+ ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 2, 4, 5, implicit $mode, implicit $exec, implicit [[V_MUL_F32_sdwa]](tied-def 0)
+ ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_ADD_F16_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
; SDWA-NEXT: $sgpr30_sgpr31 = COPY [[COPY]]
; SDWA-NEXT: S_SETPC_B64_return $sgpr30_sgpr31
%2 = COPY $sgpr30_sgpr31
@@ -146,7 +145,7 @@ body: |
; SDWA-NEXT: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 8, [[FLAT_LOAD_DWORD]], implicit $exec
; SDWA-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 65535
; SDWA-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[FLAT_LOAD_DWORD]], killed [[S_MOV_B32_]], implicit $exec
- ; SDWA-NEXT: [[V_MOV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_MOV_B32_sdwa 0, [[FLAT_LOAD_DWORD1]], 0, 5, 2, 4, implicit $exec, implicit [[V_AND_B32_e64_]](tied-def 0)
+ ; SDWA-NEXT: [[V_MOV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_MOV_B32_sdwa 0, [[FLAT_LOAD_DWORD1]], 0, 5, 2, 4, implicit $exec, implicit [[FLAT_LOAD_DWORD]](tied-def 0)
; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_MOV_B32_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
; SDWA-NEXT: S_ENDPGM 0
%2 = COPY $sgpr30_sgpr31
@@ -181,17 +180,15 @@ body: |
; SDWA-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, [[FLAT_LOAD_DWORD1]], implicit $exec
; SDWA-NEXT: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[FLAT_LOAD_DWORD]], 8, 8, implicit $exec
; SDWA-NEXT: [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 24, [[FLAT_LOAD_DWORD1]], implicit $exec
- ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 0, 4, 5, implicit $mode, implicit $exec
; SDWA-NEXT: {{ $}}
; SDWA-NEXT: bb.1:
; SDWA-NEXT: successors: %bb.2(0x80000000)
; SDWA-NEXT: {{ $}}
- ; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 6, 0, 1, 3, implicit $mode, implicit $exec
- ; SDWA-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 16, [[V_MUL_F32_sdwa]], implicit $exec
+ ; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 5, 0, 1, 3, implicit $mode, implicit $exec
; SDWA-NEXT: {{ $}}
; SDWA-NEXT: bb.2:
- ; SDWA-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F16_sdwa]], [[V_LSHLREV_B32_e64_]], implicit $exec
- ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_OR_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+ ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 2, 4, 5, implicit $mode, implicit $exec, implicit [[V_MUL_F32_sdwa]](tied-def 0)
+ ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_ADD_F16_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
; SDWA-NEXT: $sgpr30_sgpr31 = COPY [[COPY]]
; SDWA-NEXT: S_SETPC_B64_return $sgpr30_sgpr31
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
index 934d9efba4656..2d84e87722951 100644
--- a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
@@ -1230,8 +1230,7 @@ define i16 @basic_smax_smin_vec_input(<2 x i16> %src) {
; GISEL-GFX9-NEXT: v_pk_min_i16 v0, v1, v0
; GISEL-GFX9-NEXT: v_pk_max_i16 v0, 0, v0
; GISEL-GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GISEL-GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GISEL-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1346,8 +1345,7 @@ define i16 @basic_smax_smin_vec_input_rev(<2 x i16> %src) {
; GISEL-GFX9-NEXT: v_mov_b32_e32 v1, 0xff00ff
; GISEL-GFX9-NEXT: v_pk_min_i16 v0, v1, v0
; GISEL-GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GISEL-GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GISEL-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
>From c3868a5c9c544fd8c5ee9e18d9f842be661771e5 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 03:37:31 -0500
Subject: [PATCH 13/33] [AMDGPU] SIPeepholeSDWA: Stop using CombineSelections
in convertToSDWA
The flag is not necessary since the relevant instructions
can be detected by looking at the SrcSel->getImm().
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 55 +++++++++++------------
1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 713ef162f8dee..de7ef15764466 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -105,8 +105,7 @@ class SDWAOperand {
virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
- virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) = 0;
+ virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
@@ -170,11 +169,10 @@ class SDWASrcOperand : public SDWAOperand {
: SDWAOperand(TargetOp, ReplacedOp),
SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}
- MachineInstr *potentialToConvert(const SIInstrInfo *TII,
- const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) override;
+ MachineInstr *
+ potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -200,11 +198,10 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
- MachineInstr *potentialToConvert(const SIInstrInfo *TII,
- const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) override;
+ MachineInstr *
+ potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -224,8 +221,7 @@ class SDWADstPreserveOperand : public SDWADstOperand {
: SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
Preserve(PreserveOp) {}
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -414,8 +410,7 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections) {
+bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
switch (MI.getOpcode()) {
case AMDGPU::V_CVT_F32_FP8_sdwa:
case AMDGPU::V_CVT_F32_BF8_sdwa:
@@ -491,14 +486,21 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- if (CombineSelections) {
+ if (SrcSel->getImm() == AMDGPU::SDWA::DWORD) {
+ // An SDWA instruction with a trivial src_sel, i.e.
+ // it has either not been adjusted before or it has
+ // just been created at the call site of this function.
+ // Use the operand's src_sel.
+ SrcSel->setImm(getSrcSel());
+ }
+ else {
+ // A preexisting SDWA instruction with a non-trivial src_sel.
+ // Combine with the operand src_sel.
std::optional<SdwaSel> NewOp =
combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel());
if (!NewOp.has_value())
return false;
SrcSel->setImm(NewOp.value());
- } else {
- SrcSel->setImm(getSrcSel());
}
SrcMods->setImm(getSrcMods(TII, Src));
}
@@ -527,8 +529,7 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections) {
+bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
// Replace vdst operand in MI with target operand. Set dst_sel and dst_unused
if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
@@ -547,7 +548,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- if (CombineSelections) {
+ if (DstSel->getImm() != AMDGPU::SDWA::DWORD) {
std::optional<SdwaSel> NewOp =
combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
if (!NewOp.has_value())
@@ -567,8 +568,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
}
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
- const SIInstrInfo *TII,
- bool CombineSelections) {
+ const SIInstrInfo *TII) {
// MI should be moved right before v_or_b32.
// For this we should clear all kill flags on uses of MI src-operands or else
// we can encounter problem with use of killed operand.
@@ -593,7 +593,7 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
MI.getNumOperands() - 1);
// Convert MI as any other SDWADstOperand and remove v_or_b32
- return SDWADstOperand::convertToSDWA(MI, TII, CombineSelections);
+ return SDWADstOperand::convertToSDWA(MI, TII);
}
std::optional<int64_t>
@@ -1227,18 +1227,15 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
MachineInstr *SDWAInst;
- bool CombineSelections;
if (TII->isSDWA(MI.getOpcode())) {
// No conversion necessary, since MI is an SDWA instruction. But
// tell convertToSDWA below to combine selections of this instruction
// and its SDWA operands.
SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
- CombineSelections = true;
} else {
// Convert to sdwa
SDWAInst = createSDWAVersion(MI);
- CombineSelections = false;
}
// Apply all sdwa operand patterns.
@@ -1256,7 +1253,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// was already destroyed). So if SDWAOperand is also a potential MI then do
// not apply it.
if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII, CombineSelections);
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII);
}
if (!Converted) {
>From c58493c38f2afa9959c4fe52752abe999b1c0ee7 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 04:45:18 -0500
Subject: [PATCH 14/33] [AMDGPU] SIPeepholeSDWA.cpp: Simplify combineSdwaSel
uses
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 38 ++++++++---------------
1 file changed, 13 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index de7ef15764466..cfcb170db5af0 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -486,22 +486,12 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- if (SrcSel->getImm() == AMDGPU::SDWA::DWORD) {
- // An SDWA instruction with a trivial src_sel, i.e.
- // it has either not been adjusted before or it has
- // just been created at the call site of this function.
- // Use the operand's src_sel.
- SrcSel->setImm(getSrcSel());
- }
- else {
- // A preexisting SDWA instruction with a non-trivial src_sel.
- // Combine with the operand src_sel.
- std::optional<SdwaSel> NewOp =
- combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel());
- if (!NewOp.has_value())
- return false;
- SrcSel->setImm(NewOp.value());
- }
+ SdwaSel ExistingSel = static_cast<SdwaSel>(SrcSel->getImm());
+ std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getSrcSel());
+ if (!NewSel.has_value())
+ return false;
+ SrcSel->setImm(NewSel.value());
+
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -548,15 +538,13 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- if (DstSel->getImm() != AMDGPU::SDWA::DWORD) {
- std::optional<SdwaSel> NewOp =
- combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
- if (!NewOp.has_value())
- return false;
- DstSel->setImm(NewOp.value());
- } else {
- DstSel->setImm(getDstSel());
- }
+
+ SdwaSel ExistingSel = static_cast<SdwaSel>(DstSel->getImm());
+ std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getDstSel());
+ if (!NewSel.has_value())
+ return false;
+ DstSel->setImm(NewSel.value());
+
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
DstUnused->setImm(getDstUnused());
>From 324267700bc0c98dfe6d505a88fa9e4de9807dce Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 07:11:39 -0500
Subject: [PATCH 15/33] [AMDGPU] SIPeepholeSDWA: Change arg names and comments
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 44 +++++++++++------------
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index cfcb170db5af0..19d628acf30fe 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -111,34 +111,36 @@ class SDWAOperand {
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
- /// Fold a \p FoldedOp SDWA selection into an \p ExistingOp existing SDWA
- /// selection. If the selections are compatible, return the combined
- /// selection, otherwise return a nullopt. For example, if we have existing
- /// BYTE_0 Sel and are attempting to fold WORD_1 Sel:
+ /// Combine an SDWA instruction's existing SDWA selection \p
+ /// ExistingSel with the SDWA selection \p OpSel of its operand. If
+ /// the selections are compatible, return the combined selection,
+ /// otherwise return a nullopt. For example, if we have ExistingSel
+ /// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
- std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp) {
- if (ExistingOp == SdwaSel::DWORD)
- return FoldedOp;
+ std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingSel,
+ SdwaSel OperandSel) {
+ if (ExistingSel == SdwaSel::DWORD)
+ return OperandSel;
- if (FoldedOp == SdwaSel::DWORD)
- return ExistingOp;
+ if (OperandSel == SdwaSel::DWORD)
+ return ExistingSel;
- if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
- ExistingOp == SdwaSel::BYTE_3)
+ if (ExistingSel == SdwaSel::WORD_1 || ExistingSel == SdwaSel::BYTE_2 ||
+ ExistingSel == SdwaSel::BYTE_3)
return {};
- if (ExistingOp == FoldedOp)
- return ExistingOp;
+ if (ExistingSel == OperandSel)
+ return ExistingSel;
- if (FoldedOp == SdwaSel::WORD_0)
- return ExistingOp;
+ if (OperandSel == SdwaSel::WORD_0)
+ return ExistingSel;
- if (FoldedOp == SdwaSel::WORD_1) {
- if (ExistingOp == SdwaSel::BYTE_0)
+ if (OperandSel == SdwaSel::WORD_1) {
+ if (ExistingSel == SdwaSel::BYTE_0)
return SdwaSel::BYTE_2;
- if (ExistingOp == SdwaSel::BYTE_1)
+ if (ExistingSel == SdwaSel::BYTE_1)
return SdwaSel::BYTE_3;
- if (ExistingOp == SdwaSel::WORD_0)
+ if (ExistingSel == SdwaSel::WORD_0)
return SdwaSel::WORD_1;
}
@@ -1216,13 +1218,9 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
MachineInstr *SDWAInst;
if (TII->isSDWA(MI.getOpcode())) {
- // No conversion necessary, since MI is an SDWA instruction. But
- // tell convertToSDWA below to combine selections of this instruction
- // and its SDWA operands.
SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
} else {
- // Convert to sdwa
SDWAInst = createSDWAVersion(MI);
}
>From b5aa73d3bd87fad5f340ac4857b0716876334f4c Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 08:13:50 -0500
Subject: [PATCH 16/33] [AMDGPU] Use default check prefix in
sdwa-peephole-instr-combine-sel.mir
---
.../sdwa-peephole-instr-combine-sel.mir | 120 +++++++++---------
1 file changed, 60 insertions(+), 60 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
index 43708e9513c68..acad03d6d8960 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
@@ -1,68 +1,68 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=NOHAZARD %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck %s
---
name: sdwa_opsel_hazard
body: |
- ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
- ; NOHAZARD: bb.0:
- ; NOHAZARD-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
- ; NOHAZARD-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
- ; NOHAZARD-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
- ; NOHAZARD-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; NOHAZARD-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
- ; NOHAZARD-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.7
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.1:
- ; NOHAZARD-NEXT: successors: %bb.2(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
- ; NOHAZARD-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
- ; NOHAZARD-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
- ; NOHAZARD-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.2:
- ; NOHAZARD-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.3
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.3:
- ; NOHAZARD-NEXT: successors: %bb.4(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.4:
- ; NOHAZARD-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.5
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.5:
- ; NOHAZARD-NEXT: successors: %bb.6(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.6:
- ; NOHAZARD-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.9
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.7:
- ; NOHAZARD-NEXT: successors: %bb.8(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.8:
- ; NOHAZARD-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
- ; NOHAZARD-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.1
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.9:
- ; NOHAZARD-NEXT: successors: %bb.10(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.10:
- ; NOHAZARD-NEXT: S_ENDPGM 0
+ ; CHECK-LABEL: name: sdwa_opsel_hazard
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
+ ; CHECK-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; CHECK-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6:
+ ; CHECK-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+ ; CHECK-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9:
+ ; CHECK-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10:
+ ; CHECK-NEXT: S_ENDPGM 0
bb.0:
successors: %bb.7(0x40000000), %bb.8(0x40000000)
liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
>From ed16fbd1ae4daa3d9d347e2b49f738fcb977873e Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 30 Jan 2025 04:18:31 -0500
Subject: [PATCH 17/33] Revert unintended reformatting
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 19d628acf30fe..39fe65e7c3754 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -171,9 +171,9 @@ class SDWASrcOperand : public SDWAOperand {
: SDWAOperand(TargetOp, ReplacedOp),
SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}
- MachineInstr *
- potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
+ MachineInstr *potentialToConvert(const SIInstrInfo *TII,
+ const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getSrcSel() const { return SrcSel; }
@@ -200,9 +200,9 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
- MachineInstr *
- potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
+ MachineInstr *potentialToConvert(const SIInstrInfo *TII,
+ const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getDstSel() const { return DstSel; }
>From 258fb148de458910f1b9218d62ff2fb92c45204f Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 02:54:42 -0500
Subject: [PATCH 18/33] [AMDGPU] SIPeepholeSDWA: Verify compatibility of
selections earlier
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 168 +++++++++++++---------
1 file changed, 100 insertions(+), 68 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 39fe65e7c3754..37b12b2a5afe7 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -88,6 +88,43 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
using namespace AMDGPU::SDWA;
+/// Check that the SDWA selections \p ExistingSel and \p OperandSel
+/// are suitable for being combined by combineSdwaSel.
+bool compatibleSelections(SdwaSel ExistingSel, SdwaSel OperandSel) {
+ return ExistingSel == SdwaSel::DWORD || OperandSel == ExistingSel ||
+ (ExistingSel != SdwaSel::WORD_1 && ExistingSel != SdwaSel::BYTE_2 &&
+ ExistingSel != SdwaSel::BYTE_3 &&
+ (OperandSel == SdwaSel::WORD_0 || OperandSel == SdwaSel::WORD_1));
+}
+
+/// Combine an SDWA instruction's existing SDWA selection \p
+/// ExistingSel with the SDWA selection \p OpSel of its operand. If
+/// the selections are compatible, return the combined selection,
+/// otherwise return a nullopt. For example, if we have ExistingSel
+/// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
+/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+SdwaSel combineSdwaSel(SdwaSel ExistingSel, SdwaSel OperandSel) {
+ assert(compatibleSelections(ExistingSel, OperandSel));
+
+ if (ExistingSel == SdwaSel::DWORD)
+ return OperandSel;
+
+ if (OperandSel == SdwaSel::DWORD || ExistingSel == OperandSel ||
+ OperandSel == SdwaSel::WORD_0)
+ return ExistingSel;
+
+ if (OperandSel == SdwaSel::WORD_1) {
+ if (ExistingSel == SdwaSel::BYTE_0)
+ return SdwaSel::BYTE_2;
+ if (ExistingSel == SdwaSel::BYTE_1)
+ return SdwaSel::BYTE_3;
+ if (ExistingSel == SdwaSel::WORD_0)
+ return SdwaSel::WORD_1;
+ }
+
+ llvm_unreachable("Unexpected selections");
+}
+
class SDWAOperand {
private:
MachineOperand *Target; // Operand that would be used in converted instruction
@@ -111,42 +148,6 @@ class SDWAOperand {
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
- /// Combine an SDWA instruction's existing SDWA selection \p
- /// ExistingSel with the SDWA selection \p OpSel of its operand. If
- /// the selections are compatible, return the combined selection,
- /// otherwise return a nullopt. For example, if we have ExistingSel
- /// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
- /// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
- std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingSel,
- SdwaSel OperandSel) {
- if (ExistingSel == SdwaSel::DWORD)
- return OperandSel;
-
- if (OperandSel == SdwaSel::DWORD)
- return ExistingSel;
-
- if (ExistingSel == SdwaSel::WORD_1 || ExistingSel == SdwaSel::BYTE_2 ||
- ExistingSel == SdwaSel::BYTE_3)
- return {};
-
- if (ExistingSel == OperandSel)
- return ExistingSel;
-
- if (OperandSel == SdwaSel::WORD_0)
- return ExistingSel;
-
- if (OperandSel == SdwaSel::WORD_1) {
- if (ExistingSel == SdwaSel::BYTE_0)
- return SdwaSel::BYTE_2;
- if (ExistingSel == SdwaSel::BYTE_1)
- return SdwaSel::BYTE_3;
- if (ExistingSel == SdwaSel::WORD_0)
- return SdwaSel::WORD_1;
- }
-
- return {};
- }
-
MachineRegisterInfo *getMRI() const {
return &getParentInst()->getParent()->getParent()->getRegInfo();
}
@@ -164,12 +165,34 @@ class SDWASrcOperand : public SDWAOperand {
bool Neg;
bool Sext;
-public:
+protected:
SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
bool Sext_ = false)
- : SDWAOperand(TargetOp, ReplacedOp),
- SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}
+ : SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
+ Neg(Neg_), Sext(Sext_) {}
+public:
+ /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
+ /// if \p SrcSel_ and the src_sel0 and src_sel1 operands of \p MI are
+ /// compatible.
+ static std::unique_ptr<SDWAOperand>
+ create(const SIInstrInfo *TII, const MachineInstr &MI,
+ MachineOperand *TargetOp, MachineOperand *ReplacedOp,
+ SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
+ bool Sext_ = false) {
+ if (TII->isSDWA(MI.getOpcode())) {
+ for (auto SelOpName :
+ {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel}) {
+ const MachineOperand *NamedOp = TII->getNamedOperand(MI, SelOpName);
+ if (NamedOp && !compatibleSelections(
+ static_cast<SdwaSel>(NamedOp->getImm()), SrcSel_))
+ return std::unique_ptr<SDWAOperand>(nullptr);
+ }
+ }
+
+ return std::unique_ptr<SDWAOperand>(new SDWASrcOperand(
+ TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ };
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
@@ -194,11 +217,29 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel;
DstUnused DstUn;
-public:
-
+protected:
SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
- : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
+ : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
+
+public:
+ /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
+ /// if \p SrcSel_ and the dst_sel operand of \p MI are
+ /// compatible.
+ static std::unique_ptr<SDWAOperand>
+ create(const SIInstrInfo *TII, const MachineInstr &MI,
+ MachineOperand *TargetOp, MachineOperand *ReplacedOp, SdwaSel DstSel_,
+ DstUnused DstUn_) {
+ if (TII->isSDWA(MI.getOpcode())) {
+ SdwaSel InstSel = static_cast<SdwaSel>(
+ TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel)->getImm());
+ if (!compatibleSelections(InstSel, DstSel_))
+ return nullptr;
+ }
+
+ return std::unique_ptr<SDWAOperand>(
+ new SDWADstOperand(TargetOp, ReplacedOp, DstSel_, DstUn_));
+ };
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
@@ -489,11 +530,7 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
SdwaSel ExistingSel = static_cast<SdwaSel>(SrcSel->getImm());
- std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getSrcSel());
- if (!NewSel.has_value())
- return false;
- SrcSel->setImm(NewSel.value());
-
+ SrcSel->setImm(combineSdwaSel(ExistingSel, getSrcSel()));
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -542,10 +579,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
assert(DstSel);
SdwaSel ExistingSel = static_cast<SdwaSel>(DstSel->getImm());
- std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getDstSel());
- if (!NewSel.has_value())
- return false;
- DstSel->setImm(NewSel.value());
+ DstSel->setImm(combineSdwaSel(ExistingSel, getDstSel()));
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
@@ -648,13 +682,13 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
Opcode == AMDGPU::V_LSHLREV_B32_e64) {
- return std::make_unique<SDWADstOperand>(
- Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
+ return SDWADstOperand::create(TII, MI, Dst, Src1,
+ *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
}
- return std::make_unique<SDWASrcOperand>(
- Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
- Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
- Opcode != AMDGPU::V_LSHRREV_B32_e64);
+ return SDWASrcOperand::create(TII, MI, Src1, Dst,
+ *Imm == 16 ? WORD_1 : BYTE_3, false, false,
+ Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
+ Opcode != AMDGPU::V_LSHRREV_B32_e64);
break;
}
@@ -686,11 +720,10 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
Opcode == AMDGPU::V_LSHLREV_B16_e64)
- return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
- return std::make_unique<SDWASrcOperand>(
- Src1, Dst, BYTE_1, false, false,
- Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
- Opcode != AMDGPU::V_LSHRREV_B16_e64);
+ return SDWADstOperand::create(TII, MI, Dst, Src1, BYTE_1, UNUSED_PAD);
+ return SDWASrcOperand::create(TII, MI, Src1, Dst, BYTE_1, false, false,
+ Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
+ Opcode != AMDGPU::V_LSHRREV_B16_e64);
break;
}
@@ -746,8 +779,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
Dst->getReg().isPhysical())
break;
- return std::make_unique<SDWASrcOperand>(
- Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64);
+ return SDWASrcOperand::create(TII, MI, Src0, Dst, SrcSel, false, false,
+ Opcode != AMDGPU::V_BFE_U32_e64);
}
case AMDGPU::V_AND_B32_e32:
@@ -774,9 +807,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (!ValSrc->isReg() || ValSrc->getReg().isPhysical() ||
Dst->getReg().isPhysical())
break;
-
- return std::make_unique<SDWASrcOperand>(
- ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
+ return SDWASrcOperand::create(TII, MI, ValSrc, Dst,
+ *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
}
case AMDGPU::V_OR_B32_e32:
@@ -912,7 +944,7 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
}
}
- return std::unique_ptr<SDWAOperand>(nullptr);
+ return nullptr;
}
#if !defined(NDEBUG)
>From ac0a1339eaf37623639a47ea56db349dff9e94b3 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 03:05:27 -0500
Subject: [PATCH 19/33] [AMDGPU] SIPeepholeSDWA: Adjust comments and variable
names
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 35 +++++++++++------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 37b12b2a5afe7..ef2857009ff00 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -88,37 +88,36 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
using namespace AMDGPU::SDWA;
-/// Check that the SDWA selections \p ExistingSel and \p OperandSel
+/// Check that the SDWA selections \p Sel and \p OperandSel
/// are suitable for being combined by combineSdwaSel.
-bool compatibleSelections(SdwaSel ExistingSel, SdwaSel OperandSel) {
- return ExistingSel == SdwaSel::DWORD || OperandSel == ExistingSel ||
- (ExistingSel != SdwaSel::WORD_1 && ExistingSel != SdwaSel::BYTE_2 &&
- ExistingSel != SdwaSel::BYTE_3 &&
+bool compatibleSelections(SdwaSel Sel, SdwaSel OperandSel) {
+ return Sel == SdwaSel::DWORD || OperandSel == Sel ||
+ (Sel != SdwaSel::WORD_1 && Sel != SdwaSel::BYTE_2 &&
+ Sel != SdwaSel::BYTE_3 &&
(OperandSel == SdwaSel::WORD_0 || OperandSel == SdwaSel::WORD_1));
}
-/// Combine an SDWA instruction's existing SDWA selection \p
-/// ExistingSel with the SDWA selection \p OpSel of its operand. If
-/// the selections are compatible, return the combined selection,
-/// otherwise return a nullopt. For example, if we have ExistingSel
-/// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
+/// Combine an SDWA instruction's existing SDWA selection \p Sel with
+/// the SDWA selection \p OpSel of its operand which must be
+/// compatible.
+/// For example, if we have Sel = BYTE_0 Sel and OperandSel = WORD_1:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
-SdwaSel combineSdwaSel(SdwaSel ExistingSel, SdwaSel OperandSel) {
- assert(compatibleSelections(ExistingSel, OperandSel));
+SdwaSel combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
+ assert(compatibleSelections(Sel, OperandSel));
- if (ExistingSel == SdwaSel::DWORD)
+ if (Sel == SdwaSel::DWORD)
return OperandSel;
- if (OperandSel == SdwaSel::DWORD || ExistingSel == OperandSel ||
+ if (OperandSel == SdwaSel::DWORD || Sel == OperandSel ||
OperandSel == SdwaSel::WORD_0)
- return ExistingSel;
+ return Sel;
if (OperandSel == SdwaSel::WORD_1) {
- if (ExistingSel == SdwaSel::BYTE_0)
+ if (Sel == SdwaSel::BYTE_0)
return SdwaSel::BYTE_2;
- if (ExistingSel == SdwaSel::BYTE_1)
+ if (Sel == SdwaSel::BYTE_1)
return SdwaSel::BYTE_3;
- if (ExistingSel == SdwaSel::WORD_0)
+ if (Sel == SdwaSel::WORD_0)
return SdwaSel::WORD_1;
}
>From a9e38fa87d8eacaedd5111ccb3630e9423191291 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 03:10:55 -0500
Subject: [PATCH 20/33] [AMDGPU] SIPeepholeSDWA: Add comment answering a review
question
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index ef2857009ff00..0bbbcf2605eff 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1249,6 +1249,9 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
MachineInstr *SDWAInst;
if (TII->isSDWA(MI.getOpcode())) {
+ // Clone the instruction to allow revoking changes
+ // made to MI during the processing of the operands
+ // if the conversion fails.
SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
} else {
>From db7f6743d8722a59355790bf6216a41ce4cdbac4 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 03:20:20 -0500
Subject: [PATCH 21/33] clang-format changes
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 0bbbcf2605eff..67e5bc8d3d504 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -170,6 +170,7 @@ class SDWASrcOperand : public SDWAOperand {
bool Sext_ = false)
: SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
Neg(Neg_), Sext(Sext_) {}
+
public:
/// Create an SDWASrcOperand as an operand for \p MI from the given arguments
/// if \p SrcSel_ and the src_sel0 and src_sel1 operands of \p MI are
@@ -189,8 +190,8 @@ class SDWASrcOperand : public SDWAOperand {
}
}
- return std::unique_ptr<SDWAOperand>(new SDWASrcOperand(
- TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ return std::unique_ptr<SDWAOperand>(
+ new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
};
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
>From ac80b8615f654d3ac2d1783339ce84c98cbcf9d6 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 12 Feb 2025 02:47:51 -0500
Subject: [PATCH 22/33] Use consistent/more specific return type for
SDWA{Src,Dst}Operand factory
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 67e5bc8d3d504..f0a0b6e656312 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -175,7 +175,7 @@ class SDWASrcOperand : public SDWAOperand {
/// Create an SDWASrcOperand as an operand for \p MI from the given arguments
/// if \p SrcSel_ and the src_sel0 and src_sel1 operands of \p MI are
/// compatible.
- static std::unique_ptr<SDWAOperand>
+ static std::unique_ptr<SDWASrcOperand>
create(const SIInstrInfo *TII, const MachineInstr &MI,
MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
@@ -186,12 +186,11 @@ class SDWASrcOperand : public SDWAOperand {
const MachineOperand *NamedOp = TII->getNamedOperand(MI, SelOpName);
if (NamedOp && !compatibleSelections(
static_cast<SdwaSel>(NamedOp->getImm()), SrcSel_))
- return std::unique_ptr<SDWAOperand>(nullptr);
+ return nullptr;
}
}
- return std::unique_ptr<SDWAOperand>(
- new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ return std::unique_ptr<SDWASrcOperand>(new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
};
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
@@ -223,10 +222,10 @@ class SDWADstOperand : public SDWAOperand {
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
public:
- /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
+ /// Create an SDWADstOperand as an operand for \p MI from the given arguments
/// if \p SrcSel_ and the dst_sel operand of \p MI are
/// compatible.
- static std::unique_ptr<SDWAOperand>
+ static std::unique_ptr<SDWADstOperand>
create(const SIInstrInfo *TII, const MachineInstr &MI,
MachineOperand *TargetOp, MachineOperand *ReplacedOp, SdwaSel DstSel_,
DstUnused DstUn_) {
@@ -237,7 +236,7 @@ class SDWADstOperand : public SDWAOperand {
return nullptr;
}
- return std::unique_ptr<SDWAOperand>(
+ return std::unique_ptr<SDWADstOperand>(
new SDWADstOperand(TargetOp, ReplacedOp, DstSel_, DstUn_));
};
>From bbe87ff3046041913e93c0aa1c6b0da71cae9fbb Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 12 Feb 2025 02:50:25 -0500
Subject: [PATCH 23/33] fixup! Use consistent/more specific return type for
SDWA{Src,Dst}Operand factory
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index f0a0b6e656312..74dfd8a80b2be 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -190,7 +190,8 @@ class SDWASrcOperand : public SDWAOperand {
}
}
- return std::unique_ptr<SDWASrcOperand>(new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ return std::unique_ptr<SDWASrcOperand>(
+ new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
};
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
>From 179007c2f8b29859a9f6b751d54befbb9e144e53 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 12 Feb 2025 03:07:35 -0500
Subject: [PATCH 24/33] Merge "compatibleSelections" function back into
"combineSdwaSel"
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 47 ++++++++++++-----------
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 74dfd8a80b2be..96494d6d8e38e 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -88,28 +88,27 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
using namespace AMDGPU::SDWA;
-/// Check that the SDWA selections \p Sel and \p OperandSel
-/// are suitable for being combined by combineSdwaSel.
-bool compatibleSelections(SdwaSel Sel, SdwaSel OperandSel) {
- return Sel == SdwaSel::DWORD || OperandSel == Sel ||
- (Sel != SdwaSel::WORD_1 && Sel != SdwaSel::BYTE_2 &&
- Sel != SdwaSel::BYTE_3 &&
- (OperandSel == SdwaSel::WORD_0 || OperandSel == SdwaSel::WORD_1));
-}
-
/// Combine an SDWA instruction's existing SDWA selection \p Sel with
-/// the SDWA selection \p OpSel of its operand which must be
-/// compatible.
+/// the SDWA selection \p OperandSel of its operand. If the selections
+/// are compatible, return the combined selection, otherwise return
+/// std::nullopt.
/// For example, if we have Sel = BYTE_0 Sel and OperandSel = WORD_1:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
-SdwaSel combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
- assert(compatibleSelections(Sel, OperandSel));
-
+std::optional<SdwaSel> combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
if (Sel == SdwaSel::DWORD)
return OperandSel;
- if (OperandSel == SdwaSel::DWORD || Sel == OperandSel ||
- OperandSel == SdwaSel::WORD_0)
+ if (OperandSel == SdwaSel::DWORD)
+ return Sel;
+
+ if (Sel == SdwaSel::WORD_1 || Sel == SdwaSel::BYTE_2 ||
+ Sel == SdwaSel::BYTE_3)
+ return {};
+
+ if (Sel == OperandSel)
+ return Sel;
+
+ if (OperandSel == SdwaSel::WORD_0)
return Sel;
if (OperandSel == SdwaSel::WORD_1) {
@@ -121,7 +120,7 @@ SdwaSel combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
return SdwaSel::WORD_1;
}
- llvm_unreachable("Unexpected selections");
+ return {};
}
class SDWAOperand {
@@ -183,9 +182,11 @@ class SDWASrcOperand : public SDWAOperand {
if (TII->isSDWA(MI.getOpcode())) {
for (auto SelOpName :
{AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel}) {
- const MachineOperand *NamedOp = TII->getNamedOperand(MI, SelOpName);
- if (NamedOp && !compatibleSelections(
- static_cast<SdwaSel>(NamedOp->getImm()), SrcSel_))
+ const MachineOperand *Op = TII->getNamedOperand(MI, SelOpName);
+ if (!Op)
+ break;
+ auto Sel = static_cast<SdwaSel>(Op->getImm());
+ if (!combineSdwaSel(Sel, SrcSel_).has_value())
return nullptr;
}
}
@@ -233,7 +234,7 @@ class SDWADstOperand : public SDWAOperand {
if (TII->isSDWA(MI.getOpcode())) {
SdwaSel InstSel = static_cast<SdwaSel>(
TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel)->getImm());
- if (!compatibleSelections(InstSel, DstSel_))
+ if (!combineSdwaSel(InstSel, DstSel_).has_value())
return nullptr;
}
@@ -530,7 +531,7 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
SdwaSel ExistingSel = static_cast<SdwaSel>(SrcSel->getImm());
- SrcSel->setImm(combineSdwaSel(ExistingSel, getSrcSel()));
+ SrcSel->setImm(combineSdwaSel(ExistingSel, getSrcSel()).value());
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -579,7 +580,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
assert(DstSel);
SdwaSel ExistingSel = static_cast<SdwaSel>(DstSel->getImm());
- DstSel->setImm(combineSdwaSel(ExistingSel, getDstSel()));
+ DstSel->setImm(combineSdwaSel(ExistingSel, getDstSel()).value());
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
>From a5a45aae0f85a455b34098c24d3f24e820005947 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 13 Feb 2025 10:42:07 -0500
Subject: [PATCH 25/33] Add comprehensive test for source selection
combinations
---
.../sdwa-peephole-instr-combine-sel2.mir | 796 ++++++++++++++++++
1 file changed, 796 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
new file mode 100644
index 0000000000000..c4e0450077a97
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
@@ -0,0 +1,796 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -simplify-mir -run-pass=si-peephole-sdwa -o - %s | FileCheck %s
+
+# Test the combination of SDWA selections in si-peephole-sdwa. In each
+# example, the SDWA source selection specified on the last instruction
+# must be combined with the source selection that the pass determines
+# for its operand, i.e. for the second instruction. In the cases where
+# combining is not possible, no conversion should occur, i.e. the last
+# instruction in the output MIR should still use the result of the
+# second instruction with its original source selection.
+
+---
+name: op_select_byte0_instr_select_dword
+body: |
+
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte0_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_0_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_0_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 0, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_0_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_0_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 0, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_0_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_0_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 0, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_0_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_0_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 0, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_0_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_0_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 0, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_0_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_0_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 255, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 255, %3, implicit $exec /* Select Byte_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 0, 0, implicit $exec
+
+ S_ENDPGM 0
+
+...
+
+---
+name: op_select_word_0_instr_select_dword
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_0_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_0_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_0_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_0_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_AND_B32_e64_]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_0_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_0_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_0_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 65535, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_AND_B32_e64 65535, %3, implicit $exec /* Select Word_0 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 0, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_dword
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B16_e32_]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B16_e32_]], 0, 1, 0, 6, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B16_e32_]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B16_e32_]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_1_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_1_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B16_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e32 8, [[V_LSHRREV_B32_sdwa]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_LSHRREV_B16_e32_]], 0, 1, 0, 6, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_LSHRREV_B16_e32 8, %3, implicit $exec /* Select BYTE_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 0, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_dword
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_2_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_2_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 0, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_dword
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_byte_3_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_byte_3_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 0, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_dword
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_word_1_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_word_1_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 0, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_dword
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_dword
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 6, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 6, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_word_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_word_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 5, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_word_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_word_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 4, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 4, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_byte_3
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_byte_3
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 3, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_byte_2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_byte_2
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 2, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_byte_1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_byte_1
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 1, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 1, implicit $exec
+
+ S_ENDPGM 0
+...
+
+---
+name: op_select_dword_instr_select_byte_0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: op_select_dword_instr_select_byte_0
+ ; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ ; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 0, 32, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ %3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
+ %8:vgpr_32 = V_BFE_I32_e64 %3, 0, 32, implicit $exec /* Select DWORD */
+ %9:vgpr_32 = V_LSHRREV_B32_sdwa 0, %3, 0, %8, 0, 1, 0, 6, 0, implicit $exec
+
+ S_ENDPGM 0
+...
>From 12903695e09e14684e89a48ca1886aaa448eee22 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 13 Feb 2025 10:56:25 -0500
Subject: [PATCH 26/33] Revert introduction of SDWA{Dst,Src}Operand::create
The place where this was used is not the right place
for performing the combineSdwaSel check.
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 80 ++++++-----------------
1 file changed, 19 insertions(+), 61 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 96494d6d8e38e..23e289fda684f 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -163,38 +163,13 @@ class SDWASrcOperand : public SDWAOperand {
bool Neg;
bool Sext;
-protected:
+public:
SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
bool Sext_ = false)
: SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
Neg(Neg_), Sext(Sext_) {}
-public:
- /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
- /// if \p SrcSel_ and the src_sel0 and src_sel1 operands of \p MI are
- /// compatible.
- static std::unique_ptr<SDWASrcOperand>
- create(const SIInstrInfo *TII, const MachineInstr &MI,
- MachineOperand *TargetOp, MachineOperand *ReplacedOp,
- SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
- bool Sext_ = false) {
- if (TII->isSDWA(MI.getOpcode())) {
- for (auto SelOpName :
- {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel}) {
- const MachineOperand *Op = TII->getNamedOperand(MI, SelOpName);
- if (!Op)
- break;
- auto Sel = static_cast<SdwaSel>(Op->getImm());
- if (!combineSdwaSel(Sel, SrcSel_).has_value())
- return nullptr;
- }
- }
-
- return std::unique_ptr<SDWASrcOperand>(
- new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
- };
-
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
@@ -218,30 +193,11 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel;
DstUnused DstUn;
-protected:
+public:
SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
-public:
- /// Create an SDWADstOperand as an operand for \p MI from the given arguments
- /// if \p SrcSel_ and the dst_sel operand of \p MI are
- /// compatible.
- static std::unique_ptr<SDWADstOperand>
- create(const SIInstrInfo *TII, const MachineInstr &MI,
- MachineOperand *TargetOp, MachineOperand *ReplacedOp, SdwaSel DstSel_,
- DstUnused DstUn_) {
- if (TII->isSDWA(MI.getOpcode())) {
- SdwaSel InstSel = static_cast<SdwaSel>(
- TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel)->getImm());
- if (!combineSdwaSel(InstSel, DstSel_).has_value())
- return nullptr;
- }
-
- return std::unique_ptr<SDWADstOperand>(
- new SDWADstOperand(TargetOp, ReplacedOp, DstSel_, DstUn_));
- };
-
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
@@ -683,13 +639,13 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
Opcode == AMDGPU::V_LSHLREV_B32_e64) {
- return SDWADstOperand::create(TII, MI, Dst, Src1,
- *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
+ return std::make_unique<SDWADstOperand>(
+ Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
}
- return SDWASrcOperand::create(TII, MI, Src1, Dst,
- *Imm == 16 ? WORD_1 : BYTE_3, false, false,
- Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
- Opcode != AMDGPU::V_LSHRREV_B32_e64);
+ return std::make_unique<SDWASrcOperand>(
+ Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
+ Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
+ Opcode != AMDGPU::V_LSHRREV_B32_e64);
break;
}
@@ -721,10 +677,11 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
Opcode == AMDGPU::V_LSHLREV_B16_e64)
- return SDWADstOperand::create(TII, MI, Dst, Src1, BYTE_1, UNUSED_PAD);
- return SDWASrcOperand::create(TII, MI, Src1, Dst, BYTE_1, false, false,
- Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
- Opcode != AMDGPU::V_LSHRREV_B16_e64);
+ return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
+ return std::make_unique<SDWASrcOperand>(
+ Src1, Dst, BYTE_1, false, false,
+ Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
+ Opcode != AMDGPU::V_LSHRREV_B16_e64);
break;
}
@@ -780,8 +737,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
Dst->getReg().isPhysical())
break;
- return SDWASrcOperand::create(TII, MI, Src0, Dst, SrcSel, false, false,
- Opcode != AMDGPU::V_BFE_U32_e64);
+ return std::make_unique<SDWASrcOperand>(
+ Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64);
}
case AMDGPU::V_AND_B32_e32:
@@ -808,8 +765,9 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (!ValSrc->isReg() || ValSrc->getReg().isPhysical() ||
Dst->getReg().isPhysical())
break;
- return SDWASrcOperand::create(TII, MI, ValSrc, Dst,
- *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
+
+ return std::make_unique<SDWASrcOperand>(
+ ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
}
case AMDGPU::V_OR_B32_e32:
@@ -945,7 +903,7 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
}
}
- return nullptr;
+ return std::unique_ptr<SDWAOperand>(nullptr);
}
#if !defined(NDEBUG)
>From 84889b5c407bb7b73577d9cc66043672492fbfcd Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Fri, 14 Feb 2025 09:02:20 -0500
Subject: [PATCH 27/33] Fix combineSdwaSel handling of Sel == OperandSel case
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 6 ++----
.../CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir | 6 +++---
2 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 23e289fda684f..b09de1b48eedd 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -98,16 +98,14 @@ std::optional<SdwaSel> combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
if (Sel == SdwaSel::DWORD)
return OperandSel;
- if (OperandSel == SdwaSel::DWORD)
+ if (Sel == OperandSel ||
+ OperandSel == SdwaSel::DWORD)
return Sel;
if (Sel == SdwaSel::WORD_1 || Sel == SdwaSel::BYTE_2 ||
Sel == SdwaSel::BYTE_3)
return {};
- if (Sel == OperandSel)
- return Sel;
-
if (OperandSel == SdwaSel::WORD_0)
return Sel;
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
index c4e0450077a97..b3db94e539e7f 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
@@ -418,7 +418,7 @@ body: |
; CHECK-LABEL: name: op_select_byte_2_instr_select_byte_2
; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 8, implicit $exec
- ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 2, implicit $exec
; CHECK-NEXT: S_ENDPGM 0
%3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
%8:vgpr_32 = V_BFE_I32_e64 %3, 16, 8, implicit $exec /* Select BYTE_2 */
@@ -514,7 +514,7 @@ body: |
; CHECK-LABEL: name: op_select_byte_3_instr_select_byte_3
; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 24, 8, implicit $exec
- ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 3, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 3, implicit $exec
; CHECK-NEXT: S_ENDPGM 0
%3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
%8:vgpr_32 = V_BFE_I32_e64 %3, 24, 8, implicit $exec /* Select BYTE_3 */
@@ -594,7 +594,7 @@ body: |
; CHECK-LABEL: name: op_select_word_1_instr_select_word_1
; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
; CHECK-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[V_LSHRREV_B32_sdwa]], 16, 16, implicit $exec
- ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 0, [[V_BFE_I32_e64_]], 0, 1, 0, 6, 5, implicit $exec
+ ; CHECK-NEXT: [[V_LSHRREV_B32_sdwa1:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, [[V_LSHRREV_B32_sdwa]], 1, [[V_LSHRREV_B32_sdwa]], 0, 1, 0, 6, 5, implicit $exec
; CHECK-NEXT: S_ENDPGM 0
%3:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
%8:vgpr_32 = V_BFE_I32_e64 %3, 16, 16, implicit $exec /* Select WORD_1 */
>From 96e055b777b99edafa39aaa3c9b6c9ec7be0faec Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Fri, 14 Feb 2025 09:05:11 -0500
Subject: [PATCH 28/33] Add new early check for combinable selections
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 59 +++++++++++++++++++++--
1 file changed, 54 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index b09de1b48eedd..b8c6ed542c08f 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -140,6 +140,11 @@ class SDWAOperand {
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;
+ /// Returns true iff the SDWA selection of this SDWAOperand can be combined
+ /// with the SDWA selections of its uses in \p MI.
+ virtual bool canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) = 0;
+
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
@@ -172,6 +177,8 @@ class SDWASrcOperand : public SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -200,6 +207,8 @@ class SDWADstOperand : public SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -220,6 +229,8 @@ class SDWADstPreserveOperand : public SDWADstOperand {
Preserve(PreserveOp) {}
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -383,7 +394,8 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
for (MachineInstr &UseMI : getMRI()->use_nodbg_instructions(Reg->getReg()))
// Check that all instructions that use Reg can be converted
- if (!isConvertibleToSDWA(UseMI, ST, TII))
+ if (!isConvertibleToSDWA(UseMI, ST, TII) ||
+ !canCombineSelections(UseMI, TII))
return nullptr;
// Now that it's guaranteed all uses are legal, iterate over the uses again
@@ -406,7 +418,7 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
return nullptr;
return PotentialMO->getParent();
-}
+ }
bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
switch (MI.getOpcode()) {
@@ -492,6 +504,25 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
return true;
}
+bool SDWASrcOperand::canCombineSelections(const MachineInstr &MI, const SIInstrInfo *TII) {
+ if (!TII->isSDWA(MI.getOpcode()))
+ return true;
+
+ auto canCombineSel = [&](auto SrcOpName, auto SrcSelOpName)
+ {
+ const MachineOperand *Src = TII->getNamedOperand(MI, SrcOpName);
+ const MachineOperand *SrcSel = TII->getNamedOperand(MI, SrcSelOpName);
+ return !Src || !isSameReg(*Src, *getReplacedOperand()) ||
+ combineSdwaSel(static_cast<SdwaSel>(SrcSel->getImm()), getSrcSel())
+ .has_value();
+ };
+
+ using namespace AMDGPU;
+
+ return canCombineSel(OpName::src0, OpName::src0_sel) &&
+ canCombineSel(OpName::src1, OpName::src1_sel);
+}
+
MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches) {
@@ -546,6 +577,17 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
return true;
}
+bool SDWADstOperand::canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) {
+ if (!TII->isSDWA(MI.getOpcode()))
+ return true;
+
+ SdwaSel InstSel = static_cast<SdwaSel>(
+ TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel)->getImm());
+
+ return combineSdwaSel(InstSel, getDstSel()).has_value();
+}
+
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
const SIInstrInfo *TII) {
// MI should be moved right before v_or_b32.
@@ -575,6 +617,11 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
return SDWADstOperand::convertToSDWA(MI, TII);
}
+bool SDWADstPreserveOperand::canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) {
+ return SDWADstOperand::canCombineSelections(MI, TII);
+}
+
std::optional<int64_t>
SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
if (Op.isImm()) {
@@ -1328,10 +1375,12 @@ bool SIPeepholeSDWA::run(MachineFunction &MF) {
for (const auto &OperandPair : SDWAOperands) {
const auto &Operand = OperandPair.second;
- MachineInstr *PotentialMI = Operand->potentialToConvert(TII, ST, &PotentialMatches);
- if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST, TII)) {
+ MachineInstr *PotentialMI =
+ Operand->potentialToConvert(TII, ST, &PotentialMatches);
+
+ if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST, TII) &&
+ Operand->canCombineSelections(*PotentialMI, TII))
PotentialMatches[PotentialMI].push_back(Operand.get());
- }
}
for (auto &PotentialPair : PotentialMatches) {
>From 0724d769104319b1ee50f0307292f1b62d8d7a0e Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Fri, 14 Feb 2025 09:06:24 -0500
Subject: [PATCH 29/33] clang-format changes
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index b8c6ed542c08f..a8b09c4d3b220 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -98,8 +98,7 @@ std::optional<SdwaSel> combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
if (Sel == SdwaSel::DWORD)
return OperandSel;
- if (Sel == OperandSel ||
- OperandSel == SdwaSel::DWORD)
+ if (Sel == OperandSel || OperandSel == SdwaSel::DWORD)
return Sel;
if (Sel == SdwaSel::WORD_1 || Sel == SdwaSel::BYTE_2 ||
@@ -418,7 +417,7 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
return nullptr;
return PotentialMO->getParent();
- }
+}
bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
switch (MI.getOpcode()) {
@@ -504,12 +503,12 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
return true;
}
-bool SDWASrcOperand::canCombineSelections(const MachineInstr &MI, const SIInstrInfo *TII) {
+bool SDWASrcOperand::canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) {
if (!TII->isSDWA(MI.getOpcode()))
return true;
- auto canCombineSel = [&](auto SrcOpName, auto SrcSelOpName)
- {
+ auto canCombineSel = [&](auto SrcOpName, auto SrcSelOpName) {
const MachineOperand *Src = TII->getNamedOperand(MI, SrcOpName);
const MachineOperand *SrcSel = TII->getNamedOperand(MI, SrcSelOpName);
return !Src || !isSameReg(*Src, *getReplacedOperand()) ||
>From d2943abef9f012c29fa096b4745a27471d2992ac Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 18 Feb 2025 05:05:18 -0500
Subject: [PATCH 30/33] Move combineSdwaSel from anon namespace and make
'static'
As per LLVM coding standards.
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 64 +++++++++++------------
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index a8b09c4d3b220..3014e66ad247a 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -88,38 +88,6 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
using namespace AMDGPU::SDWA;
-/// Combine an SDWA instruction's existing SDWA selection \p Sel with
-/// the SDWA selection \p OperandSel of its operand. If the selections
-/// are compatible, return the combined selection, otherwise return a
-/// nullopt.
-/// For example, if we have Sel = BYTE_0 Sel and OperandSel = WORD_1:
-/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
-std::optional<SdwaSel> combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
- if (Sel == SdwaSel::DWORD)
- return OperandSel;
-
- if (Sel == OperandSel || OperandSel == SdwaSel::DWORD)
- return Sel;
-
- if (Sel == SdwaSel::WORD_1 || Sel == SdwaSel::BYTE_2 ||
- Sel == SdwaSel::BYTE_3)
- return {};
-
- if (OperandSel == SdwaSel::WORD_0)
- return Sel;
-
- if (OperandSel == SdwaSel::WORD_1) {
- if (Sel == SdwaSel::BYTE_0)
- return SdwaSel::BYTE_2;
- if (Sel == SdwaSel::BYTE_1)
- return SdwaSel::BYTE_3;
- if (Sel == SdwaSel::WORD_0)
- return SdwaSel::WORD_1;
- }
-
- return {};
-}
-
class SDWAOperand {
private:
MachineOperand *Target; // Operand that would be used in converted instruction
@@ -357,6 +325,38 @@ static MachineOperand *findSingleRegDef(const MachineOperand *Reg,
return nullptr;
}
+/// Combine an SDWA instruction's existing SDWA selection \p Sel with
+/// the SDWA selection \p OperandSel of its operand. If the selections
+/// are compatible, return the combined selection, otherwise return a
+/// nullopt.
+/// For example, if we have Sel = BYTE_0 Sel and OperandSel = WORD_1:
+/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+static std::optional<SdwaSel> combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
+ if (Sel == SdwaSel::DWORD)
+ return OperandSel;
+
+ if (Sel == OperandSel || OperandSel == SdwaSel::DWORD)
+ return Sel;
+
+ if (Sel == SdwaSel::WORD_1 || Sel == SdwaSel::BYTE_2 ||
+ Sel == SdwaSel::BYTE_3)
+ return {};
+
+ if (OperandSel == SdwaSel::WORD_0)
+ return Sel;
+
+ if (OperandSel == SdwaSel::WORD_1) {
+ if (Sel == SdwaSel::BYTE_0)
+ return SdwaSel::BYTE_2;
+ if (Sel == SdwaSel::BYTE_1)
+ return SdwaSel::BYTE_3;
+ if (Sel == SdwaSel::WORD_0)
+ return SdwaSel::WORD_1;
+ }
+
+ return {};
+}
+
uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
const MachineOperand *SrcOp) const {
uint64_t Mods = 0;
>From c6d9f874357005178783808ee326483f97162563 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Mon, 24 Feb 2025 02:33:41 -0500
Subject: [PATCH 31/33] Move all uses of "canCombineSelections" into
"potentialToConvert"
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 3014e66ad247a..5caac7b8dc727 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -93,6 +93,11 @@ class SDWAOperand {
MachineOperand *Target; // Operand that would be used in converted instruction
MachineOperand *Replaced; // Operand that would be replace by Target
+ /// Returns true iff the SDWA selection of this SDWAOperand can be combined
+ /// with the SDWA selections of its uses in \p MI.
+ virtual bool canCombineSelections(const MachineInstr &MI,
+ const SIInstrInfo *TII) = 0;
+
public:
SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
: Target(TargetOp), Replaced(ReplacedOp) {
@@ -107,11 +112,6 @@ class SDWAOperand {
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;
- /// Returns true iff the SDWA selection of this SDWAOperand can be combined
- /// with the SDWA selections of its uses in \p MI.
- virtual bool canCombineSelections(const MachineInstr &MI,
- const SIInstrInfo *TII) = 0;
-
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
@@ -416,7 +416,9 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
if (!PotentialMO)
return nullptr;
- return PotentialMO->getParent();
+ auto parent = PotentialMO->getParent();
+
+ return canCombineSelections(*parent, TII) ? parent : nullptr;
}
bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
@@ -540,7 +542,8 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
return nullptr;
}
- return PotentialMO->getParent();
+ auto parent = PotentialMO->getParent();
+ return canCombineSelections(*parent, TII) ? parent : nullptr;
}
bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
@@ -1377,8 +1380,7 @@ bool SIPeepholeSDWA::run(MachineFunction &MF) {
MachineInstr *PotentialMI =
Operand->potentialToConvert(TII, ST, &PotentialMatches);
- if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST, TII) &&
- Operand->canCombineSelections(*PotentialMI, TII))
+ if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST, TII))
PotentialMatches[PotentialMI].push_back(Operand.get());
}
>From 9d41c6c1c59c404a823257f04f61709abd75861f Mon Sep 17 00:00:00 2001
From: Frederik Harwath <frederik at harwath.name>
Date: Wed, 26 Feb 2025 12:16:58 +0100
Subject: [PATCH 32/33] Update llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 5caac7b8dc727..ee9d689424e55 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -514,8 +514,7 @@ bool SDWASrcOperand::canCombineSelections(const MachineInstr &MI,
const MachineOperand *Src = TII->getNamedOperand(MI, SrcOpName);
const MachineOperand *SrcSel = TII->getNamedOperand(MI, SrcSelOpName);
return !Src || !isSameReg(*Src, *getReplacedOperand()) ||
- combineSdwaSel(static_cast<SdwaSel>(SrcSel->getImm()), getSrcSel())
- .has_value();
+ combineSdwaSel(static_cast<SdwaSel>(SrcSel->getImm()), getSrcSel()).has_value();
};
using namespace AMDGPU;
>From 7bc33f4dfc812a2b543e81b9d7b6125374b71ebe Mon Sep 17 00:00:00 2001
From: Frederik Harwath <frederik at harwath.name>
Date: Wed, 26 Feb 2025 12:17:20 +0100
Subject: [PATCH 33/33] Update
llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
index b3db94e539e7f..08fbfd26cedb7 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel2.mir
@@ -11,8 +11,8 @@
---
name: op_select_byte0_instr_select_dword
+tracksRegLiveness: true
body: |
-
bb.0:
; CHECK-LABEL: name: op_select_byte0_instr_select_dword
; CHECK: [[V_LSHRREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_sdwa 0, $vgpr0, 0, $vgpr0, 0, 1, 0, 5, 0, implicit $exec
More information about the llvm-commits
mailing list