[llvm] [AMDGPU] Account for existing SDWA selections (PR #123221)
Frederik Harwath via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 11 23:49:37 PST 2025
https://github.com/frederik-h updated https://github.com/llvm/llvm-project/pull/123221
>From b29c0f218db0170f0848741a89b408bca25156c1 Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Fri, 10 Jan 2025 09:59:00 -0800
Subject: [PATCH 01/22] [AMDGPU] Account for existing SDWA selections
Change-Id: I3e1cf6042f069e8dffe9dd5b4654288111f7b1bf
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 135 ++++++++++++++++++++--
1 file changed, 123 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 467f042892ceb..f515ba1aac5d0 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -85,6 +85,8 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
}
};
+using namespace AMDGPU::SDWA;
+
class SDWAOperand {
private:
MachineOperand *Target; // Operand that would be used in converted instruction
@@ -102,12 +104,55 @@ class SDWAOperand {
virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
- virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;
+ virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) = 0;
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
+ /// Fold a \p FoldedOp SDWA selection into an \p ExistingOp existing SDWA
+ /// selection. If the selections are compatible, \p return true and store the
+ /// SDWA selection in
+ /// \p NewOp .
+ /// For example, if we have existing BYTE_0 Sel and are attempting to fold
+ /// WORD_1 Sel: BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+ bool combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp, SdwaSel &NewOp) {
+ if (ExistingOp == SdwaSel::DWORD) {
+ NewOp = FoldedOp;
+ return true;
+ }
+ if (FoldedOp == SdwaSel::DWORD) {
+ NewOp = ExistingOp;
+ return true;
+ }
+
+ if (FoldedOp != SdwaSel::WORD_0 && FoldedOp != SdwaSel::WORD_1 &&
+ FoldedOp != ExistingOp)
+ return false;
+
+ if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
+ ExistingOp == SdwaSel::BYTE_3)
+ return false;
+
+ if (ExistingOp == FoldedOp) {
+ NewOp = ExistingOp;
+ return true;
+ }
+
+ if (FoldedOp == SdwaSel::WORD_0) {
+ NewOp = ExistingOp;
+ return true;
+ }
+
+ if (FoldedOp == SdwaSel::WORD_1) {
+ NewOp = (SdwaSel)((unsigned)ExistingOp + 2);
+ return true;
+ }
+
+ return false;
+ }
+
MachineRegisterInfo *getMRI() const {
return &getParentInst()->getParent()->getParent()->getRegInfo();
}
@@ -118,8 +163,6 @@ class SDWAOperand {
#endif
};
-using namespace AMDGPU::SDWA;
-
class SDWASrcOperand : public SDWAOperand {
private:
SdwaSel SrcSel;
@@ -137,7 +180,8 @@ class SDWASrcOperand : public SDWAOperand {
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -166,7 +210,8 @@ class SDWADstOperand : public SDWAOperand {
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -186,7 +231,8 @@ class SDWADstPreserveOperand : public SDWADstOperand {
: SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
Preserve(PreserveOp) {}
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine = false) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -375,7 +421,8 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
+bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine) {
switch (MI.getOpcode()) {
case AMDGPU::V_CVT_F32_FP8_sdwa:
case AMDGPU::V_CVT_F32_BF8_sdwa:
@@ -451,7 +498,16 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- SrcSel->setImm(getSrcSel());
+ if (Combine) {
+ SdwaSel NewOp;
+ bool CanCombine =
+ combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel(), NewOp);
+ if (!CanCombine)
+ return false;
+ SrcSel->setImm(NewOp);
+ } else {
+ SrcSel->setImm(getSrcSel());
+ }
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -479,7 +535,8 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
+bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
+ bool Combine) {
// Replace vdst operand in MI with target operand. Set dst_sel and dst_unused
if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
@@ -498,7 +555,16 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- DstSel->setImm(getDstSel());
+ if (Combine) {
+ SdwaSel NewOp;
+ bool CanCombine =
+ combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel(), NewOp);
+ if (!CanCombine)
+ return false;
+ DstSel->setImm(NewOp);
+ } else {
+ DstSel->setImm(getDstSel());
+ }
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
DstUnused->setImm(getDstUnused());
@@ -510,7 +576,8 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
}
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
- const SIInstrInfo *TII) {
+ const SIInstrInfo *TII,
+ bool Combine) {
// MI should be moved right before v_or_b32.
// For this we should clear all kill flags on uses of MI src-operands or else
// we can encounter problem with use of killed operand.
@@ -535,7 +602,7 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
MI.getNumOperands() - 1);
// Convert MI as any other SDWADstOperand and remove v_or_b32
- return SDWADstOperand::convertToSDWA(MI, TII);
+ return SDWADstOperand::convertToSDWA(MI, TII, Combine);
}
std::optional<int64_t>
@@ -1029,6 +1096,50 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// Convert to sdwa
int SDWAOpcode;
unsigned Opcode = MI.getOpcode();
+
+ // If the MI is already SDWA, preserve any existing opsel
+ if (TII->isSDWA(Opcode)) {
+ auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ MI.getParent()->insert(MI.getIterator(), SDWAInst);
+
+ // Apply all sdwa operand patterns.
+ bool Converted = false;
+ for (auto &Operand : SDWAOperands) {
+ LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
+ // There should be no intersection between SDWA operands and potential MIs
+ // e.g.:
+ // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
+ // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
+ // v_add_u32 v3, v4, v2
+ //
+ // In that example it is possible that we would fold 2nd instruction into
+ // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
+ // (that was already destroyed). So if SDWAOperand is also a potential MI
+ // then do not apply it.
+ if (PotentialMatches.count(Operand->getParentInst()) == 0)
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
+ }
+
+ if (Converted) {
+ ConvertedInstructions.push_back(SDWAInst);
+ for (MachineOperand &MO : SDWAInst->uses()) {
+ if (!MO.isReg())
+ continue;
+
+ MRI->clearKillFlags(MO.getReg());
+ }
+ } else {
+ SDWAInst->eraseFromParent();
+ return false;
+ }
+
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ ++NumSDWAInstructionsPeepholed;
+
+ MI.eraseFromParent();
+ return true;
+ }
+
if (TII->isSDWA(Opcode)) {
SDWAOpcode = Opcode;
} else {
>From 8d16c1cdde49f8f1e5073693c9820404d7afbc29 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 14 Jan 2025 11:20:53 -0500
Subject: [PATCH 02/22] [AMDGPU] Correct transformation and simplify
combineSdwaSel
- Remove redundant "if".
- Replace arithmetic on the SdwaSel type with an explicit case distinction.
The explicit case distinction seems clearer and removes a mishandled case:
Since (SdwaSel)((unsigned)WORD_0 + 2) == DWORD,
the existing code led to the transformation:
WORD_0 Sel (WORD_1 Sel (%X)) -> DWORD Sel (%X)
The correct transformation should be:
WORD_0 Sel (WORD_1 Sel (%X)) -> WORD_1 Sel (%X)
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index f515ba1aac5d0..4cbc6de30b4f1 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -122,15 +122,12 @@ class SDWAOperand {
NewOp = FoldedOp;
return true;
}
+
if (FoldedOp == SdwaSel::DWORD) {
NewOp = ExistingOp;
return true;
}
- if (FoldedOp != SdwaSel::WORD_0 && FoldedOp != SdwaSel::WORD_1 &&
- FoldedOp != ExistingOp)
- return false;
-
if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
ExistingOp == SdwaSel::BYTE_3)
return false;
@@ -146,9 +143,15 @@ class SDWAOperand {
}
if (FoldedOp == SdwaSel::WORD_1) {
- NewOp = (SdwaSel)((unsigned)ExistingOp + 2);
+ if (ExistingOp == SdwaSel::BYTE_0)
+ NewOp = SdwaSel::BYTE_2;
+ else if (ExistingOp == SdwaSel::BYTE_1)
+ NewOp = SdwaSel::BYTE_3;
+ else if (ExistingOp == SdwaSel::WORD_0)
+ NewOp = SdwaSel::WORD_1;
+
return true;
- }
+ }
return false;
}
>From 20e23b697e789e07f642c2f3be297f5107d32eed Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 04:17:32 -0500
Subject: [PATCH 03/22] [AMDGPU] Change formatting of combineSdwaSel
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 4cbc6de30b4f1..8b9c7b9607dfd 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -116,7 +116,8 @@ class SDWAOperand {
/// SDWA selection in
/// \p NewOp .
/// For example, if we have existing BYTE_0 Sel and are attempting to fold
- /// WORD_1 Sel: BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+ /// WORD_1 Sel:
+ /// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
bool combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp, SdwaSel &NewOp) {
if (ExistingOp == SdwaSel::DWORD) {
NewOp = FoldedOp;
@@ -151,7 +152,7 @@ class SDWAOperand {
NewOp = SdwaSel::WORD_1;
return true;
- }
+ }
return false;
}
>From 663b94c8fceb7554be7935e168a4e660f6f82e44 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 07:30:08 -0500
Subject: [PATCH 04/22] [AMDGPU] Remove dead branch from
SIPeepholeSDWA::convertToSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 8b9c7b9607dfd..14c5cb730f3ee 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1098,7 +1098,6 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
// Convert to sdwa
- int SDWAOpcode;
unsigned Opcode = MI.getOpcode();
// If the MI is already SDWA, preserve any existing opsel
@@ -1144,13 +1143,10 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
return true;
}
- if (TII->isSDWA(Opcode)) {
- SDWAOpcode = Opcode;
- } else {
- SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
- if (SDWAOpcode == -1)
- SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
- }
+ assert(!TII->isSDWA(Opcode));
+ int SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
+ if (SDWAOpcode == -1)
+ SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
assert(SDWAOpcode != -1);
const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);
>From c2dfca063d1e06beadbd9205461b4f46c74c7dfe Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 07:58:03 -0500
Subject: [PATCH 05/22] [AMDGPU] Extract SDWA instruction creation from
convertToSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 108 ++++++++++++----------
1 file changed, 58 insertions(+), 50 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 14c5cb730f3ee..37aea94a3c5a8 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -62,6 +62,7 @@ class SIPeepholeSDWA {
std::unique_ptr<SDWAOperand> matchSDWAOperand(MachineInstr &MI);
void pseudoOpConvertToVOP2(MachineInstr &MI,
const GCNSubtarget &ST) const;
+ MachineInstr *createSDWAVersion(MachineInstr &MI);
bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
void legalizeScalarOperands(MachineInstr &MI, const GCNSubtarget &ST) const;
@@ -1092,58 +1093,10 @@ bool isConvertibleToSDWA(MachineInstr &MI,
}
} // namespace
-bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
- const SDWAOperandsVector &SDWAOperands) {
-
- LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
-
- // Convert to sdwa
+MachineInstr* SIPeepholeSDWA::createSDWAVersion(MachineInstr &MI) {
unsigned Opcode = MI.getOpcode();
-
- // If the MI is already SDWA, preserve any existing opsel
- if (TII->isSDWA(Opcode)) {
- auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
- MI.getParent()->insert(MI.getIterator(), SDWAInst);
-
- // Apply all sdwa operand patterns.
- bool Converted = false;
- for (auto &Operand : SDWAOperands) {
- LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
- // There should be no intersection between SDWA operands and potential MIs
- // e.g.:
- // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
- // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
- // v_add_u32 v3, v4, v2
- //
- // In that example it is possible that we would fold 2nd instruction into
- // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
- // (that was already destroyed). So if SDWAOperand is also a potential MI
- // then do not apply it.
- if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
- }
-
- if (Converted) {
- ConvertedInstructions.push_back(SDWAInst);
- for (MachineOperand &MO : SDWAInst->uses()) {
- if (!MO.isReg())
- continue;
-
- MRI->clearKillFlags(MO.getReg());
- }
- } else {
- SDWAInst->eraseFromParent();
- return false;
- }
-
- LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
- ++NumSDWAInstructionsPeepholed;
-
- MI.eraseFromParent();
- return true;
- }
-
assert(!TII->isSDWA(Opcode));
+
int SDWAOpcode = AMDGPU::getSDWAOp(Opcode);
if (SDWAOpcode == -1)
SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(Opcode));
@@ -1280,6 +1233,61 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
SDWAInst->tieOperands(PreserveDstIdx, SDWAInst->getNumOperands() - 1);
}
+ return SDWAInst.getInstr();
+}
+
+bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
+ const SDWAOperandsVector &SDWAOperands) {
+ LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
+
+ // Convert to sdwa
+ unsigned Opcode = MI.getOpcode();
+
+ // If the MI is already SDWA, preserve any existing opsel
+ if (TII->isSDWA(Opcode)) {
+ auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ MI.getParent()->insert(MI.getIterator(), SDWAInst);
+
+ // Apply all sdwa operand patterns.
+ bool Converted = false;
+ for (auto &Operand : SDWAOperands) {
+ LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
+ // There should be no intersection between SDWA operands and potential MIs
+ // e.g.:
+ // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
+ // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
+ // v_add_u32 v3, v4, v2
+ //
+ // In that example it is possible that we would fold 2nd instruction into
+ // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
+ // (that was already destroyed). So if SDWAOperand is also a potential MI
+ // then do not apply it.
+ if (PotentialMatches.count(Operand->getParentInst()) == 0)
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
+ }
+
+ if (Converted) {
+ ConvertedInstructions.push_back(SDWAInst);
+ for (MachineOperand &MO : SDWAInst->uses()) {
+ if (!MO.isReg())
+ continue;
+
+ MRI->clearKillFlags(MO.getReg());
+ }
+ } else {
+ SDWAInst->eraseFromParent();
+ return false;
+ }
+
+ LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
+ ++NumSDWAInstructionsPeepholed;
+
+ MI.eraseFromParent();
+ return true;
+ }
+
+ MachineInstr *SDWAInst{createSDWAVersion(MI)};
+
// Apply all sdwa operand patterns.
bool Converted = false;
for (auto &Operand : SDWAOperands) {
>From 38bd038049bd6cf6f69161903af0340ee9297ad9 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 08:18:56 -0500
Subject: [PATCH 06/22] [AMDGPU] Unify loops in SIPeepholeSDWA::convertToSDWA
There are two loops that invoke the conversion on the operands
of the input instruction, one for the case where the instruction
is already an SDWA instruction and one for the case where it isn't.
The loops are almost the same.
Fuse those loops into a single loop.
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 59 +++++------------------
1 file changed, 13 insertions(+), 46 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 37aea94a3c5a8..bbbff2083745d 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1240,54 +1240,21 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
const SDWAOperandsVector &SDWAOperands) {
LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
- // Convert to sdwa
- unsigned Opcode = MI.getOpcode();
-
- // If the MI is already SDWA, preserve any existing opsel
- if (TII->isSDWA(Opcode)) {
- auto SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
+ MachineInstr *SDWAInst;
+ bool CombineSelections;
+ if (TII->isSDWA(MI.getOpcode())) {
+ // No conversion necessary, since MI is an SDWA instruction. But
+ // tell convertToSDWA below to combine selections of this instruction
+ // and its SDWA operands.
+ SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
-
- // Apply all sdwa operand patterns.
- bool Converted = false;
- for (auto &Operand : SDWAOperands) {
- LLVM_DEBUG(dbgs() << *SDWAInst << "\nOperand: " << *Operand);
- // There should be no intersection between SDWA operands and potential MIs
- // e.g.:
- // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
- // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
- // v_add_u32 v3, v4, v2
- //
- // In that example it is possible that we would fold 2nd instruction into
- // 3rd (v_add_u32_sdwa) and then try to fold 1st instruction into 2nd
- // (that was already destroyed). So if SDWAOperand is also a potential MI
- // then do not apply it.
- if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII, true);
- }
-
- if (Converted) {
- ConvertedInstructions.push_back(SDWAInst);
- for (MachineOperand &MO : SDWAInst->uses()) {
- if (!MO.isReg())
- continue;
-
- MRI->clearKillFlags(MO.getReg());
- }
- } else {
- SDWAInst->eraseFromParent();
- return false;
- }
-
- LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
- ++NumSDWAInstructionsPeepholed;
-
- MI.eraseFromParent();
- return true;
+ CombineSelections = true;
+ } else {
+ // Convert to sdwa
+ SDWAInst = createSDWAVersion(MI);
+ CombineSelections = false;
}
- MachineInstr *SDWAInst{createSDWAVersion(MI)};
-
// Apply all sdwa operand patterns.
bool Converted = false;
for (auto &Operand : SDWAOperands) {
@@ -1303,7 +1270,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// was already destroyed). So if SDWAOperand is also a potential MI then do
// not apply it.
if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII);
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII, CombineSelections);
}
if (Converted) {
>From e5923ac0328d1f62729778cffd5c4e70d72ac758 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 08:28:45 -0500
Subject: [PATCH 07/22] [AMDGPU] Invert if statement in
SIPeepholeSDWA::convertToSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 17 ++++++++---------
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index bbbff2083745d..944d85f72bf6b 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1273,19 +1273,18 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
Converted |= Operand->convertToSDWA(*SDWAInst, TII, CombineSelections);
}
- if (Converted) {
- ConvertedInstructions.push_back(SDWAInst);
- for (MachineOperand &MO : SDWAInst->uses()) {
- if (!MO.isReg())
- continue;
-
- MRI->clearKillFlags(MO.getReg());
- }
- } else {
+ if (!Converted) {
SDWAInst->eraseFromParent();
return false;
}
+ ConvertedInstructions.push_back(SDWAInst);
+ for (MachineOperand &MO : SDWAInst->uses()) {
+ if (!MO.isReg())
+ continue;
+
+ MRI->clearKillFlags(MO.getReg());
+ }
LLVM_DEBUG(dbgs() << "\nInto:" << *SDWAInst << '\n');
++NumSDWAInstructionsPeepholed;
>From 7034d2dc78a7884e52462ac5fd6d338d3817a72e Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 08:39:37 -0500
Subject: [PATCH 08/22] [AMDGPU] Rename "Combine" to "CombineSelections" in
SIPeepholeSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 944d85f72bf6b..f018be15155f7 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -106,7 +106,7 @@ class SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) = 0;
+ bool CombineSelections = false) = 0;
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
@@ -186,7 +186,7 @@ class SDWASrcOperand : public SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) override;
+ bool CombineSelections = false) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -216,7 +216,7 @@ class SDWADstOperand : public SDWAOperand {
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) override;
+ bool CombineSelections = false) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -237,7 +237,7 @@ class SDWADstPreserveOperand : public SDWADstOperand {
Preserve(PreserveOp) {}
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine = false) override;
+ bool CombineSelections = false) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -427,7 +427,7 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
}
bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine) {
+ bool CombineSelections) {
switch (MI.getOpcode()) {
case AMDGPU::V_CVT_F32_FP8_sdwa:
case AMDGPU::V_CVT_F32_BF8_sdwa:
@@ -503,7 +503,7 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- if (Combine) {
+ if (CombineSelections) {
SdwaSel NewOp;
bool CanCombine =
combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel(), NewOp);
@@ -541,7 +541,7 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
}
bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool Combine) {
+ bool CombineSelections) {
// Replace vdst operand in MI with target operand. Set dst_sel and dst_unused
if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
@@ -560,7 +560,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- if (Combine) {
+ if (CombineSelections) {
SdwaSel NewOp;
bool CanCombine =
combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel(), NewOp);
@@ -582,7 +582,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
const SIInstrInfo *TII,
- bool Combine) {
+ bool CombineSelections) {
// MI should be moved right before v_or_b32.
// For this we should clear all kill flags on uses of MI src-operands or else
// we can encounter problem with use of killed operand.
@@ -607,7 +607,7 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
MI.getNumOperands() - 1);
// Convert MI as any other SDWADstOperand and remove v_or_b32
- return SDWADstOperand::convertToSDWA(MI, TII, Combine);
+ return SDWADstOperand::convertToSDWA(MI, TII, CombineSelections);
}
std::optional<int64_t>
>From bbe9ab85865e2ff2c91c6e402d219bd9d533af38 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 09:19:43 -0500
Subject: [PATCH 09/22] [AMDGPU] Change combineSdwaSel to use optional return
type
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 68 +++++++++--------------
1 file changed, 27 insertions(+), 41 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index f018be15155f7..ae8c614ddb3fd 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -113,49 +113,37 @@ class SDWAOperand {
MachineInstr *getParentInst() const { return Target->getParent(); }
/// Fold a \p FoldedOp SDWA selection into an \p ExistingOp existing SDWA
- /// selection. If the selections are compatible, \p return true and store the
- /// SDWA selection in
- /// \p NewOp .
- /// For example, if we have existing BYTE_0 Sel and are attempting to fold
- /// WORD_1 Sel:
+ /// selection. If the selections are compatible, return the combined
+ /// selection, otherwise return a nullopt. For example, if we have existing
+ /// BYTE_0 Sel and are attempting to fold WORD_1 Sel:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
- bool combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp, SdwaSel &NewOp) {
- if (ExistingOp == SdwaSel::DWORD) {
- NewOp = FoldedOp;
- return true;
- }
+ std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp) {
+ if (ExistingOp == SdwaSel::DWORD)
+ return FoldedOp;
- if (FoldedOp == SdwaSel::DWORD) {
- NewOp = ExistingOp;
- return true;
- }
+ if (FoldedOp == SdwaSel::DWORD)
+ return ExistingOp;
if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
ExistingOp == SdwaSel::BYTE_3)
- return false;
+ return {};
- if (ExistingOp == FoldedOp) {
- NewOp = ExistingOp;
- return true;
- }
+ if (ExistingOp == FoldedOp)
+ return ExistingOp;
- if (FoldedOp == SdwaSel::WORD_0) {
- NewOp = ExistingOp;
- return true;
- }
+ if (FoldedOp == SdwaSel::WORD_0)
+ return ExistingOp;
if (FoldedOp == SdwaSel::WORD_1) {
if (ExistingOp == SdwaSel::BYTE_0)
- NewOp = SdwaSel::BYTE_2;
- else if (ExistingOp == SdwaSel::BYTE_1)
- NewOp = SdwaSel::BYTE_3;
- else if (ExistingOp == SdwaSel::WORD_0)
- NewOp = SdwaSel::WORD_1;
-
- return true;
+ return SdwaSel::BYTE_2;
+ if (ExistingOp == SdwaSel::BYTE_1)
+ return SdwaSel::BYTE_3;
+ if (ExistingOp == SdwaSel::WORD_0)
+ return SdwaSel::WORD_1;
}
- return false;
+ return {};
}
MachineRegisterInfo *getMRI() const {
@@ -504,12 +492,11 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
if (CombineSelections) {
- SdwaSel NewOp;
- bool CanCombine =
- combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel(), NewOp);
- if (!CanCombine)
+ std::optional<SdwaSel> NewOp =
+ combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel());
+ if (!NewOp.has_value())
return false;
- SrcSel->setImm(NewOp);
+ SrcSel->setImm(NewOp.value());
} else {
SrcSel->setImm(getSrcSel());
}
@@ -561,12 +548,11 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
if (CombineSelections) {
- SdwaSel NewOp;
- bool CanCombine =
- combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel(), NewOp);
- if (!CanCombine)
+ std::optional<SdwaSel> NewOp =
+ combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
+ if (!NewOp.has_value())
return false;
- DstSel->setImm(NewOp);
+ DstSel->setImm(NewOp.value());
} else {
DstSel->setImm(getDstSel());
}
>From 245c93bce25df06a000d7cc2e8d97ae828b55791 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 11:15:12 -0500
Subject: [PATCH 10/22] [AMDGPU] Add regression test for invalid SDWA selection
handling
---
.../sdwa-peephole-instr-combine-sel.mir | 124 ++++++++++++++++++
1 file changed, 124 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
new file mode 100644
index 0000000000000..43708e9513c68
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
@@ -0,0 +1,124 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=NOHAZARD %s
+
+---
+name: sdwa_opsel_hazard
+body: |
+ ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
+ ; NOHAZARD: bb.0:
+ ; NOHAZARD-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ ; NOHAZARD-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.7
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.1:
+ ; NOHAZARD-NEXT: successors: %bb.2(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
+ ; NOHAZARD-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; NOHAZARD-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.2:
+ ; NOHAZARD-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.3
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.3:
+ ; NOHAZARD-NEXT: successors: %bb.4(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.4:
+ ; NOHAZARD-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.5
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.5:
+ ; NOHAZARD-NEXT: successors: %bb.6(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.6:
+ ; NOHAZARD-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.9
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.7:
+ ; NOHAZARD-NEXT: successors: %bb.8(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.8:
+ ; NOHAZARD-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.1
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.9:
+ ; NOHAZARD-NEXT: successors: %bb.10(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.10:
+ ; NOHAZARD-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+
+ %0:sreg_32 = IMPLICIT_DEF
+ %1:sreg_64_xexec_xnull = IMPLICIT_DEF
+ %2:vgpr_32 = IMPLICIT_DEF
+ %3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed %1, %2, 0, 0, implicit $exec
+ %4:sreg_32 = SI_IF undef %0, %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.7
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+
+ %5:vgpr_32 = V_AND_B32_e64 undef %6, 255, implicit $exec
+ %7:vgpr_32 = V_LSHLREV_B32_e64 2, killed undef %5, implicit $exec
+
+ bb.2:
+ successors: %bb.3(0x40000000), %bb.4(0x40000000)
+
+ %8:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.3:
+ successors: %bb.4(0x80000000)
+
+ bb.4:
+ successors: %bb.5(0x40000000), %bb.6(0x40000000)
+
+ %10:sreg_32 = SI_IF killed undef %8, %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.5
+
+ bb.5:
+ successors: %bb.6(0x80000000)
+
+ bb.6:
+ successors: %bb.9(0x40000000), %bb.10(0x40000000)
+
+ %11:sreg_32 = SI_IF undef %0, %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.9
+
+ bb.7:
+ successors: %bb.8(0x80000000)
+
+ bb.8:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+
+ %6:vgpr_32 = V_LSHRREV_B32_e64 16, undef %3, implicit $exec
+ %9:sreg_32 = SI_IF killed undef %4, %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.9:
+ successors: %bb.10(0x80000000)
+
+ bb.10:
+ S_ENDPGM 0
+
+...
+
>From 5b51aebbc41ab6354cc57bfe8e7fdd7ca8d83e2d Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 16 Jan 2025 12:09:42 -0500
Subject: [PATCH 11/22] [AMDGPU] clang-format changes to SIPeepholeSDWA
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index ae8c614ddb3fd..713ef162f8dee 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -549,7 +549,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
assert(DstSel);
if (CombineSelections) {
std::optional<SdwaSel> NewOp =
- combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
+ combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
if (!NewOp.has_value())
return false;
DstSel->setImm(NewOp.value());
@@ -1079,7 +1079,7 @@ bool isConvertibleToSDWA(MachineInstr &MI,
}
} // namespace
-MachineInstr* SIPeepholeSDWA::createSDWAVersion(MachineInstr &MI) {
+MachineInstr *SIPeepholeSDWA::createSDWAVersion(MachineInstr &MI) {
unsigned Opcode = MI.getOpcode();
assert(!TII->isSDWA(Opcode));
>From b05facb3c0a7ca35aff90a0dd5a4cc22c6c0cfb9 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 23 Jan 2025 09:05:54 -0500
Subject: [PATCH 12/22] [AMDGPU] SIPeepholeSDWA: Reenable existing SDWA
instruction handling
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 7 +-
.../test/CodeGen/AMDGPU/GlobalISel/saddsat.ll | 15 +-
.../test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll | 15 +-
.../test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll | 26 ++--
.../test/CodeGen/AMDGPU/GlobalISel/usubsat.ll | 26 ++--
.../buffer-fat-pointer-atomicrmw-fadd.ll | 28 +---
.../CodeGen/AMDGPU/flat-atomicrmw-fadd.ll | 48 ++-----
.../CodeGen/AMDGPU/flat-atomicrmw-fsub.ll | 32 ++---
.../CodeGen/AMDGPU/global-atomicrmw-fadd.ll | 56 ++------
.../CodeGen/AMDGPU/global-atomicrmw-fsub.ll | 32 ++---
llvm/test/CodeGen/AMDGPU/idot4u.ll | 22 ++-
.../CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 16 +--
.../CodeGen/AMDGPU/local-atomicrmw-fsub.ll | 16 +--
llvm/test/CodeGen/AMDGPU/permute_i8.ll | 3 +-
.../AMDGPU/sdwa-peephole-instr-combine-sel.ll | 16 +--
.../sdwa-peephole-instr-combine-sel.mir | 136 +++++++++++++-----
.../AMDGPU/sdwa-peephole-instr-gfx10.mir | 3 +-
.../CodeGen/AMDGPU/sdwa-peephole-instr.mir | 7 +-
llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir | 15 +-
llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll | 6 +-
20 files changed, 221 insertions(+), 304 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index ba3822d9b5775..713ef162f8dee 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1020,11 +1020,8 @@ bool isConvertibleToSDWA(MachineInstr &MI,
const SIInstrInfo* TII) {
// Check if this is already an SDWA instruction
unsigned Opc = MI.getOpcode();
- if (TII->isSDWA(Opc)) {
- // FIXME: Reenable after fixing selection handling.
- // Cf. llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
- return false;
- }
+ if (TII->isSDWA(Opc))
+ return true;
// Check if this instruction has opcode that supports SDWA
if (AMDGPU::getSDWAOp(Opc) == -1)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
index 08184e700c1a4..4bfd29430ff1e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -280,9 +280,8 @@ define i16 @v_saddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX8-NEXT: v_min_i16_e32 v1, v2, v1
; GFX8-NEXT: v_add_u16_e32 v1, v3, v1
; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -300,8 +299,7 @@ define i16 @v_saddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_add_i16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -441,8 +439,7 @@ define amdgpu_ps i16 @s_saddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_add_i16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -612,11 +609,9 @@ define i32 @v_saddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
index 94f943af2532a..5673a6c6e869d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -281,9 +281,8 @@ define i16 @v_ssubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX8-NEXT: v_min_i16_e32 v1, v1, v4
; GFX8-NEXT: v_sub_u16_e32 v1, v3, v1
; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -301,8 +300,7 @@ define i16 @v_ssubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -442,8 +440,7 @@ define amdgpu_ps i16 @s_ssubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_sub_i16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -613,11 +610,9 @@ define i32 @v_ssubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
index 3d7fec9a5986c..788692c94b0cf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
@@ -224,8 +224,7 @@ define i16 @v_uaddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_add_u16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -330,8 +329,7 @@ define amdgpu_ps i16 @s_uaddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_add_u16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -453,11 +451,9 @@ define i32 @v_uaddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -622,20 +618,18 @@ define amdgpu_ps i32 @s_uaddsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
; GFX8-NEXT: s_lshl_b32 s0, s3, 8
; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_lshl_b32 s1, s7, 8
; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_add_u16_e64 v2, s0, v2 clamp
-; GFX8-NEXT: s_lshl_b32 s1, s7, 8
-; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: s_lshl_b32 s0, s4, 8
; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_add_u16_e64 v3, s0, v3 clamp
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
index 5a8b5fcc93f61..4faa7edadf07a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
@@ -218,8 +218,7 @@ define i16 @v_usubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
; GFX9-NEXT: v_pk_sub_u16 v0, v0, v1 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -322,8 +321,7 @@ define amdgpu_ps i16 @s_usubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
; GFX9-NEXT: v_pk_sub_u16 v0, s0, v0 clamp
; GFX9-NEXT: v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
; GFX9-NEXT: ; return to shader part epilog
@@ -441,11 +439,9 @@ define i32 @v_usubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -606,20 +602,18 @@ define amdgpu_ps i32 @s_usubsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
; GFX8-NEXT: s_lshl_b32 s0, s3, 8
; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_lshl_b32 s1, s7, 8
; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_sub_u16_e64 v2, s0, v2 clamp
-; GFX8-NEXT: s_lshl_b32 s1, s7, 8
-; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: s_lshl_b32 s0, s4, 8
; GFX8-NEXT: v_mov_b32_e32 v3, s1
-; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX8-NEXT: v_sub_u16_e64 v3, s0, v3 clamp
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
; GFX8-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
index a969e3d4f4f79..e8f1619c5d418 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointer-atomicrmw-fadd.ll
@@ -6398,10 +6398,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v1, v5, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
@@ -6627,10 +6625,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_fin
; GFX8-NEXT: .LBB20_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v1, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
@@ -7048,9 +7044,7 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__waterfall
; GFX8-NEXT: ; =>This Loop Header: Depth=1
; GFX8-NEXT: ; Child Loop BB21_4 Depth 2
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v8
-; GFX8-NEXT: v_add_f16_sdwa v4, v4, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_add_f16_sdwa v4, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v6, v8, v5
; GFX8-NEXT: v_or_b32_e32 v7, v6, v4
; GFX8-NEXT: v_mov_b32_e32 v6, v7
@@ -7396,10 +7390,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset(ptr addrsp
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v1, v5, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
@@ -7658,10 +7650,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset(ptr addrspace(
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v1, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
@@ -7925,10 +7915,8 @@ define <2 x half> @buffer_fat_ptr_agent_atomic_fadd_ret_v2f16__offset__amdgpu_no
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v5, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v5
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v5, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v1, v5, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v4, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, v4
; GFX8-NEXT: v_mov_b32_e32 v1, v5
@@ -8187,10 +8175,8 @@ define void @buffer_fat_ptr_agent_atomic_fadd_noret_v2f16__offset__amdgpu_no_rem
; GFX8-NEXT: .LBB25_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v1, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
; GFX8-NEXT: v_mov_b32_e32 v5, v2
; GFX8-NEXT: v_mov_b32_e32 v4, v1
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
index 72f883928cffb..ff48a3fc98018 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fadd.ll
@@ -14349,10 +14349,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_memo
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -14541,10 +14539,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_fi
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -14747,10 +14743,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__offset12b_neg__amdgpu_no_fi
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -14930,10 +14924,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory(p
; GFX8-NEXT: .LBB59_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15115,10 +15107,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine_g
; GFX8-NEXT: .LBB60_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15318,10 +15308,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__offset12b_neg__amdgpu_no_fine_g
; GFX8-NEXT: .LBB61_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15514,10 +15502,8 @@ define <2 x half> @flat_system_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_f
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15704,10 +15690,8 @@ define void @flat_system_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine_
; GFX8-NEXT: .LBB63_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -15894,10 +15878,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__amdgpu_no_remote_memory(ptr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -16077,10 +16059,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__amdgpu_no_remote_memory(ptr %pt
; GFX8-NEXT: .LBB65_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -16264,10 +16244,8 @@ define <2 x half> @flat_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_memo
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -16447,10 +16425,8 @@ define void @flat_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory__
; GFX8-NEXT: .LBB67_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
index 9c2a76380d83d..14f75814128f1 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-atomicrmw-fsub.ll
@@ -12094,10 +12094,8 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16(ptr %ptr, <2 x half> %val) #
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12318,10 +12316,8 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16__offset12b_pos(ptr %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12560,10 +12556,8 @@ define <2 x half> @flat_agent_atomic_fsub_ret_v2f16__offset12b_neg(ptr %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12772,10 +12766,8 @@ define void @flat_agent_atomic_fsub_noret_v2f16(ptr %ptr, <2 x half> %val) #0 {
; GFX8-NEXT: .LBB45_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -12986,10 +12978,8 @@ define void @flat_agent_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x ha
; GFX8-NEXT: .LBB46_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -13221,10 +13211,8 @@ define void @flat_agent_atomic_fsub_noret_v2f16__offset12b_neg(ptr %ptr, <2 x ha
; GFX8-NEXT: .LBB47_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -13449,10 +13437,8 @@ define <2 x half> @flat_system_atomic_fsub_ret_v2f16__offset12b_pos(ptr %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -13668,10 +13654,8 @@ define void @flat_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr %ptr, <2 x h
; GFX8-NEXT: .LBB49_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
index 2be6bf302d35f..ec4ea232e661c 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fadd.ll
@@ -15403,10 +15403,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_me
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -15637,10 +15635,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no_
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -15871,10 +15867,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__offset12b_neg__amdgpu_no_
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16089,10 +16083,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory
; GFX8-NEXT: .LBB67_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16301,10 +16293,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fine
; GFX8-NEXT: .LBB68_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16514,10 +16504,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__offset12b_neg__amdgpu_no_fine
; GFX8-NEXT: .LBB69_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16756,10 +16744,8 @@ define <2 x half> @global_system_atomic_fadd_ret_v2f16__offset12b_pos__amdgpu_no
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -16975,10 +16961,8 @@ define void @global_system_atomic_fadd_noret_v2f16__offset12b_pos__amdgpu_no_fin
; GFX8-NEXT: .LBB71_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17218,10 +17202,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__amdgpu_no_remote_memory(p
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17458,10 +17440,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__amdgpu_no_remote_memory(ptr a
; GFX8-NEXT: .LBB73_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17686,10 +17666,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__amdgpu_no_fine_grained_me
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -17900,10 +17878,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__amdgpu_no_fine_grained_memory
; GFX8-NEXT: .LBB75_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -18142,10 +18118,8 @@ define <2 x half> @global_agent_atomic_fadd_ret_v2f16__maybe_remote(ptr addrspac
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -18382,10 +18356,8 @@ define void @global_agent_atomic_fadd_noret_v2f16__maybe_remote(ptr addrspace(1)
; GFX8-NEXT: .LBB77_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
index 24791b60bfc6d..3dbf6477a7cb8 100644
--- a/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-atomicrmw-fsub.ll
@@ -12433,10 +12433,8 @@ define <2 x half> @global_agent_atomic_fsub_ret_v2f16(ptr addrspace(1) %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v3
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12713,10 +12711,8 @@ define <2 x half> @global_agent_atomic_fsub_ret_v2f16__offset12b_pos(ptr addrspa
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -12993,10 +12989,8 @@ define <2 x half> @global_agent_atomic_fsub_ret_v2f16__offset12b_neg(ptr addrspa
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -13266,10 +13260,8 @@ define void @global_agent_atomic_fsub_noret_v2f16(ptr addrspace(1) %ptr, <2 x ha
; GFX8-NEXT: .LBB45_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -13533,10 +13525,8 @@ define void @global_agent_atomic_fsub_noret_v2f16__offset12b_pos(ptr addrspace(1
; GFX8-NEXT: .LBB46_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -13801,10 +13791,8 @@ define void @global_agent_atomic_fsub_noret_v2f16__offset12b_neg(ptr addrspace(1
; GFX8-NEXT: .LBB47_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -14089,10 +14077,8 @@ define <2 x half> @global_system_atomic_fsub_ret_v2f16__offset12b_pos(ptr addrsp
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v1, v0
-; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v1
-; GFX8-NEXT: v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v0, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v1, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v0, v5, v0
; GFX8-NEXT: flat_atomic_cmpswap v0, v[3:4], v[0:1] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
@@ -14363,10 +14349,8 @@ define void @global_system_atomic_fsub_noret_v2f16__offset12b_pos(ptr addrspace(
; GFX8-NEXT: .LBB49_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v4
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v5, v4, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
; GFX8-NEXT: flat_atomic_cmpswap v3, v[0:1], v[3:4] glc
; GFX8-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll
index 10fac09ef4ec0..8f82348d350e0 100644
--- a/llvm/test/CodeGen/AMDGPU/idot4u.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll
@@ -2518,17 +2518,16 @@ define amdgpu_kernel void @udot4_acc8_vecMul(ptr addrspace(1) %src1,
; GFX9-NODL-NEXT: s_waitcnt vmcnt(1)
; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v8, v4, v5
-; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v7, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
-; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v9, 8, v6
-; GFX9-NODL-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NODL-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX9-NODL-NEXT: v_mul_lo_u16_e32 v7, v4, v5
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v8, 8, v6
+; GFX9-NODL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
; GFX9-NODL-NEXT: s_waitcnt vmcnt(0)
; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v6
; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
-; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v9
+; GFX9-NODL-NEXT: v_add_u16_e32 v1, v1, v8
; GFX9-NODL-NEXT: global_store_byte v0, v1, s[6:7]
; GFX9-NODL-NEXT: s_endpgm
;
@@ -2547,17 +2546,16 @@ define amdgpu_kernel void @udot4_acc8_vecMul(ptr addrspace(1) %src1,
; GFX9-DL-NEXT: s_waitcnt vmcnt(1)
; GFX9-DL-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
-; GFX9-DL-NEXT: v_mul_lo_u16_e32 v8, v4, v5
-; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v7, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
-; GFX9-DL-NEXT: v_lshrrev_b32_e32 v9, 8, v6
-; GFX9-DL-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-DL-NEXT: v_or_b32_e32 v6, v7, v6
+; GFX9-DL-NEXT: v_mul_lo_u16_e32 v7, v4, v5
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v8, 8, v6
+; GFX9-DL-NEXT: v_or_b32_sdwa v6, v7, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-DL-NEXT: v_mul_lo_u16_sdwa v6, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v6
; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v6
; GFX9-DL-NEXT: v_mad_legacy_u16 v1, v4, v5, v1
-; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v9
+; GFX9-DL-NEXT: v_add_u16_e32 v1, v1, v8
; GFX9-DL-NEXT: global_store_byte v0, v1, s[6:7]
; GFX9-DL-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index e4602f20f8a37..23b57a7efa586 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -5034,10 +5034,8 @@ define <2 x half> @local_atomic_fadd_ret_v2f16(ptr addrspace(3) %ptr, <2 x half>
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_add_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5259,10 +5257,8 @@ define <2 x half> @local_atomic_fadd_ret_v2f16__offset(ptr addrspace(3) %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_add_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5478,10 +5474,8 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5694,10 +5688,8 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_add_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
index 967e972e53e29..1b08b64b046b4 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
@@ -5532,10 +5532,8 @@ define <2 x half> @local_atomic_fsub_ret_v2f16(ptr addrspace(3) %ptr, <2 x half>
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_sub_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -5789,10 +5787,8 @@ define <2 x half> @local_atomic_fsub_ret_v2f16__offset(ptr addrspace(3) %ptr, <2
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, v2
-; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v3
-; GFX8-NEXT: v_sub_f16_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -6037,10 +6033,8 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
@@ -6282,10 +6276,8 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_sub_f16_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 8c3758daacb9c..312dfa3717c77 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -592,8 +592,7 @@ define hidden void @addUsesOr(ptr addrspace(1) %in0, ptr addrspace(1) %in1, i8 %
; GFX9-NEXT: v_add_u16_sdwa v0, v4, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3
; GFX9-NEXT: v_add_u16_sdwa v1, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u16_sdwa v1, v4, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
-; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX9-NEXT: v_add_u16_sdwa v0, v4, v7 dst_sel:BYTE_1 dst_unused:UNUSED_PRESERVE src0_sel:DWORD src1_sel:BYTE_1
; GFX9-NEXT: global_store_dword v[5:6], v0, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
index 6eae905278f3e..8f984bfd4d7f7 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.ll
@@ -32,17 +32,15 @@ define amdgpu_kernel void @widget(ptr addrspace(1) %arg, i1 %arg1, ptr addrspace
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: ds_write_b32 v1, v1
; CHECK-NEXT: .LBB0_2: ; %bb20
-; CHECK-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CHECK-NEXT: s_mov_b32 s0, exec_lo
-; CHECK-NEXT: v_cmpx_ne_u16_e32 0, v0
-; CHECK-NEXT: s_xor_b32 s0, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execz .LBB0_4
-; CHECK-NEXT: ; %bb.3: ; %bb11
-; CHECK-NEXT: v_mov_b32_e32 v1, 2
-; CHECK-NEXT: v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: v_cmp_ne_u16_sdwa s0, v0, v1 src0_sel:WORD_1 src1_sel:DWORD
+; CHECK-NEXT: s_and_saveexec_b32 s1, s0
+; CHECK-NEXT: s_xor_b32 s1, exec_lo, s1
+; CHECK-NEXT: ; %bb.3: ; %bb11
+; CHECK-NEXT: v_mov_b32_e32 v2, 2
+; CHECK-NEXT: v_lshlrev_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; CHECK-NEXT: ds_write_b32 v0, v1 offset:84
-; CHECK-NEXT: .LBB0_4: ; %bb14
+; CHECK-NEXT: ; %bb.4: ; %bb14
; CHECK-NEXT: s_endpgm
bb:
%call = tail call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
index cc2c8b3940d78..43708e9513c68 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
@@ -1,56 +1,124 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=CHECK %s
-
-# Currently the conversions in si-peephole-sdwa are disabled on preexisting sdwa instructions.
-# If they are reenabled, the code matches this pattern instead of the corresponding pattern
-# for V_LSHLREV_B32_sdwa further below:
-# [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, %{{[0-9]+}}, 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 5, implicit $exec
-
-# TODO Implement a fix for the incorrect sdwa selection
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=NOHAZARD %s
---
name: sdwa_opsel_hazard
body: |
- ; CHECK-LABEL: name: sdwa_opsel_hazard
- ; CHECK: bb.0:
- ; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
- ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
- ; CHECK-NEXT: S_BRANCH %bb.2
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.1:
- ; CHECK-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 undef %5, 255, implicit $exec
- ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
- ; CHECK-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_]], 0, undef %5, 0, 6, 0, 6, 0, implicit $exec
- ; CHECK-NEXT: S_ENDPGM 0
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: successors: %bb.1(0x80000000)
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
- ; CHECK-NEXT: S_BRANCH %bb.1
+ ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
+ ; NOHAZARD: bb.0:
+ ; NOHAZARD-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ ; NOHAZARD-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; NOHAZARD-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.7
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.1:
+ ; NOHAZARD-NEXT: successors: %bb.2(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
+ ; NOHAZARD-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
+ ; NOHAZARD-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; NOHAZARD-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.2:
+ ; NOHAZARD-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.3
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.3:
+ ; NOHAZARD-NEXT: successors: %bb.4(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.4:
+ ; NOHAZARD-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.5
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.5:
+ ; NOHAZARD-NEXT: successors: %bb.6(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.6:
+ ; NOHAZARD-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.9
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.7:
+ ; NOHAZARD-NEXT: successors: %bb.8(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.8:
+ ; NOHAZARD-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+ ; NOHAZARD-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; NOHAZARD-NEXT: S_BRANCH %bb.1
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.9:
+ ; NOHAZARD-NEXT: successors: %bb.10(0x80000000)
+ ; NOHAZARD-NEXT: {{ $}}
+ ; NOHAZARD-NEXT: bb.10:
+ ; NOHAZARD-NEXT: S_ENDPGM 0
bb.0:
- successors: %bb.2(0x40000000)
+ successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+
%0:sreg_32 = IMPLICIT_DEF
%1:sreg_64_xexec_xnull = IMPLICIT_DEF
%2:vgpr_32 = IMPLICIT_DEF
%3:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed %1, %2, 0, 0, implicit $exec
- S_BRANCH %bb.2
+ %4:sreg_32 = SI_IF undef %0, %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.7
bb.1:
+ successors: %bb.2(0x80000000)
+
%5:vgpr_32 = V_AND_B32_e64 undef %6, 255, implicit $exec
%7:vgpr_32 = V_LSHLREV_B32_e64 2, killed undef %5, implicit $exec
- S_ENDPGM 0
bb.2:
- successors: %bb.1(0x40000000)
+ successors: %bb.3(0x40000000), %bb.4(0x40000000)
- %6:vgpr_32 = V_LSHRREV_B32_e64 16, undef %3, implicit $exec
+ %8:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.3:
+ successors: %bb.4(0x80000000)
+
+ bb.4:
+ successors: %bb.5(0x40000000), %bb.6(0x40000000)
+
+ %10:sreg_32 = SI_IF killed undef %8, %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.5
+ bb.5:
+ successors: %bb.6(0x80000000)
+
+ bb.6:
+ successors: %bb.9(0x40000000), %bb.10(0x40000000)
+
+ %11:sreg_32 = SI_IF undef %0, %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ S_BRANCH %bb.9
+
+ bb.7:
+ successors: %bb.8(0x80000000)
+
+ bb.8:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+
+ %6:vgpr_32 = V_LSHRREV_B32_e64 16, undef %3, implicit $exec
+ %9:sreg_32 = SI_IF killed undef %4, %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
S_BRANCH %bb.1
+ bb.9:
+ successors: %bb.10(0x80000000)
+
+ bb.10:
+ S_ENDPGM 0
+
...
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
index aaa32d871148b..62538120f8451 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
@@ -138,8 +138,7 @@ body: |
---
# GCN-LABEL: {{^}}name: vop2_instructions
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_LSHLREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
index c027600a8af67..e2854df2468b3 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -147,15 +147,14 @@ body: |
---
# GCN-LABEL: {{^}}name: vop2_instructions
-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_LSHLREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $mode, implicit $exec
# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_LSHLREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
index 467bc77c18577..ffbd2d092b5d8 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
@@ -37,10 +37,9 @@ body: |
; SDWA-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, [[FLAT_LOAD_DWORD1]], implicit $exec
; SDWA-NEXT: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[FLAT_LOAD_DWORD]], 8, 8, implicit $exec
; SDWA-NEXT: [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 24, [[FLAT_LOAD_DWORD1]], implicit $exec
- ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 0, 4, 5, implicit $mode, implicit $exec
; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 5, 0, 1, 3, implicit $mode, implicit $exec
- ; SDWA-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F16_sdwa]], [[V_MUL_F32_sdwa]], implicit $exec
- ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_OR_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+ ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 2, 4, 5, implicit $mode, implicit $exec, implicit [[V_MUL_F32_sdwa]](tied-def 0)
+ ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_ADD_F16_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
; SDWA-NEXT: $sgpr30_sgpr31 = COPY [[COPY]]
; SDWA-NEXT: S_SETPC_B64_return $sgpr30_sgpr31
%2 = COPY $sgpr30_sgpr31
@@ -146,7 +145,7 @@ body: |
; SDWA-NEXT: [[V_LSHRREV_B16_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B16_e64 8, [[FLAT_LOAD_DWORD]], implicit $exec
; SDWA-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 65535
; SDWA-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[FLAT_LOAD_DWORD]], killed [[S_MOV_B32_]], implicit $exec
- ; SDWA-NEXT: [[V_MOV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_MOV_B32_sdwa 0, [[FLAT_LOAD_DWORD1]], 0, 5, 2, 4, implicit $exec, implicit [[V_AND_B32_e64_]](tied-def 0)
+ ; SDWA-NEXT: [[V_MOV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_MOV_B32_sdwa 0, [[FLAT_LOAD_DWORD1]], 0, 5, 2, 4, implicit $exec, implicit [[FLAT_LOAD_DWORD]](tied-def 0)
; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_MOV_B32_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
; SDWA-NEXT: S_ENDPGM 0
%2 = COPY $sgpr30_sgpr31
@@ -181,17 +180,15 @@ body: |
; SDWA-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, [[FLAT_LOAD_DWORD1]], implicit $exec
; SDWA-NEXT: [[V_BFE_U32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_U32_e64 [[FLAT_LOAD_DWORD]], 8, 8, implicit $exec
; SDWA-NEXT: [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 24, [[FLAT_LOAD_DWORD1]], implicit $exec
- ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 0, 4, 5, implicit $mode, implicit $exec
; SDWA-NEXT: {{ $}}
; SDWA-NEXT: bb.1:
; SDWA-NEXT: successors: %bb.2(0x80000000)
; SDWA-NEXT: {{ $}}
- ; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 6, 0, 1, 3, implicit $mode, implicit $exec
- ; SDWA-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 16, [[V_MUL_F32_sdwa]], implicit $exec
+ ; SDWA-NEXT: [[V_MUL_F32_sdwa:%[0-9]+]]:vgpr_32 = V_MUL_F32_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 5, 0, 1, 3, implicit $mode, implicit $exec
; SDWA-NEXT: {{ $}}
; SDWA-NEXT: bb.2:
- ; SDWA-NEXT: [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 [[V_ADD_F16_sdwa]], [[V_LSHLREV_B32_e64_]], implicit $exec
- ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_OR_B32_e64_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+ ; SDWA-NEXT: [[V_ADD_F16_sdwa:%[0-9]+]]:vgpr_32 = V_ADD_F16_sdwa 0, [[FLAT_LOAD_DWORD]], 0, [[FLAT_LOAD_DWORD1]], 0, 0, 1, 2, 4, 5, implicit $mode, implicit $exec, implicit [[V_MUL_F32_sdwa]](tied-def 0)
+ ; SDWA-NEXT: FLAT_STORE_DWORD [[COPY2]], [[V_ADD_F16_sdwa]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
; SDWA-NEXT: $sgpr30_sgpr31 = COPY [[COPY]]
; SDWA-NEXT: S_SETPC_B64_return $sgpr30_sgpr31
bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
index 934d9efba4656..2d84e87722951 100644
--- a/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_sat_pk_u8_i16.ll
@@ -1230,8 +1230,7 @@ define i16 @basic_smax_smin_vec_input(<2 x i16> %src) {
; GISEL-GFX9-NEXT: v_pk_min_i16 v0, v1, v0
; GISEL-GFX9-NEXT: v_pk_max_i16 v0, 0, v0
; GISEL-GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GISEL-GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GISEL-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1346,8 +1345,7 @@ define i16 @basic_smax_smin_vec_input_rev(<2 x i16> %src) {
; GISEL-GFX9-NEXT: v_mov_b32_e32 v1, 0xff00ff
; GISEL-GFX9-NEXT: v_pk_min_i16 v0, v1, v0
; GISEL-GFX9-NEXT: v_mov_b32_e32 v1, 0xff
-; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
-; GISEL-GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GISEL-GFX9-NEXT: v_and_b32_sdwa v1, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GISEL-GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GISEL-GFX9-NEXT: s_setpc_b64 s[30:31]
;
>From c3868a5c9c544fd8c5ee9e18d9f842be661771e5 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 03:37:31 -0500
Subject: [PATCH 13/22] [AMDGPU] SIPeepholeSDWA: Stop using CombineSelections
in convertToSDWA
The flag is not necessary since the relevant instructions
can be detected by inspecting SrcSel->getImm().
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 55 +++++++++++------------
1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 713ef162f8dee..de7ef15764466 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -105,8 +105,7 @@ class SDWAOperand {
virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
SDWAOperandsMap *PotentialMatches = nullptr) = 0;
- virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) = 0;
+ virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;
MachineOperand *getTargetOperand() const { return Target; }
MachineOperand *getReplacedOperand() const { return Replaced; }
@@ -170,11 +169,10 @@ class SDWASrcOperand : public SDWAOperand {
: SDWAOperand(TargetOp, ReplacedOp),
SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}
- MachineInstr *potentialToConvert(const SIInstrInfo *TII,
- const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) override;
+ MachineInstr *
+ potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getSrcSel() const { return SrcSel; }
bool getAbs() const { return Abs; }
@@ -200,11 +198,10 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
- MachineInstr *potentialToConvert(const SIInstrInfo *TII,
- const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) override;
+ MachineInstr *
+ potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getDstSel() const { return DstSel; }
DstUnused getDstUnused() const { return DstUn; }
@@ -224,8 +221,7 @@ class SDWADstPreserveOperand : public SDWADstOperand {
: SDWADstOperand(TargetOp, ReplacedOp, DstSel_, UNUSED_PRESERVE),
Preserve(PreserveOp) {}
- bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections = false) override;
+ bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
MachineOperand *getPreservedOperand() const { return Preserve; }
@@ -414,8 +410,7 @@ MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections) {
+bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
switch (MI.getOpcode()) {
case AMDGPU::V_CVT_F32_FP8_sdwa:
case AMDGPU::V_CVT_F32_BF8_sdwa:
@@ -491,14 +486,21 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- if (CombineSelections) {
+ if (SrcSel->getImm() == AMDGPU::SDWA::DWORD) {
+ // An SDWA instruction with a trivial src_sel, i.e.
+ // it has either not been adjusted before or it has
+ // just been created at the call site of this function.
+ // Use the operand's src_sel.
+ SrcSel->setImm(getSrcSel());
+ }
+ else {
+ // A preexisting SDWA instruction with a non-trivial src_sel.
+ // Combine with the operand src_sel.
std::optional<SdwaSel> NewOp =
combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel());
if (!NewOp.has_value())
return false;
SrcSel->setImm(NewOp.value());
- } else {
- SrcSel->setImm(getSrcSel());
}
SrcMods->setImm(getSrcMods(TII, Src));
}
@@ -527,8 +529,7 @@ MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII,
return PotentialMO->getParent();
}
-bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
- bool CombineSelections) {
+bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
// Replace vdst operand in MI with target operand. Set dst_sel and dst_unused
if ((MI.getOpcode() == AMDGPU::V_FMAC_F16_sdwa ||
@@ -547,7 +548,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- if (CombineSelections) {
+ if (DstSel->getImm() != AMDGPU::SDWA::DWORD) {
std::optional<SdwaSel> NewOp =
combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
if (!NewOp.has_value())
@@ -567,8 +568,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII,
}
bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
- const SIInstrInfo *TII,
- bool CombineSelections) {
+ const SIInstrInfo *TII) {
// MI should be moved right before v_or_b32.
// For this we should clear all kill flags on uses of MI src-operands or else
// we can encounter problem with use of killed operand.
@@ -593,7 +593,7 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
MI.getNumOperands() - 1);
// Convert MI as any other SDWADstOperand and remove v_or_b32
- return SDWADstOperand::convertToSDWA(MI, TII, CombineSelections);
+ return SDWADstOperand::convertToSDWA(MI, TII);
}
std::optional<int64_t>
@@ -1227,18 +1227,15 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
LLVM_DEBUG(dbgs() << "Convert instruction:" << MI);
MachineInstr *SDWAInst;
- bool CombineSelections;
if (TII->isSDWA(MI.getOpcode())) {
// No conversion necessary, since MI is an SDWA instruction. But
// tell convertToSDWA below to combine selections of this instruction
// and its SDWA operands.
SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
- CombineSelections = true;
} else {
// Convert to sdwa
SDWAInst = createSDWAVersion(MI);
- CombineSelections = false;
}
// Apply all sdwa operand patterns.
@@ -1256,7 +1253,7 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
// was already destroyed). So if SDWAOperand is also a potential MI then do
// not apply it.
if (PotentialMatches.count(Operand->getParentInst()) == 0)
- Converted |= Operand->convertToSDWA(*SDWAInst, TII, CombineSelections);
+ Converted |= Operand->convertToSDWA(*SDWAInst, TII);
}
if (!Converted) {
>From c58493c38f2afa9959c4fe52752abe999b1c0ee7 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 04:45:18 -0500
Subject: [PATCH 14/22] [AMDGPU] SIPeepholeSDWA.cpp: Simplify combineSdwaSel
uses
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 38 ++++++++---------------
1 file changed, 13 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index de7ef15764466..cfcb170db5af0 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -486,22 +486,12 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
}
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
- if (SrcSel->getImm() == AMDGPU::SDWA::DWORD) {
- // An SDWA instruction with a trivial src_sel, i.e.
- // it has either not been adjusted before or it has
- // just been created at the call site of this function.
- // Use the operand's src_sel.
- SrcSel->setImm(getSrcSel());
- }
- else {
- // A preexisting SDWA instruction with a non-trivial src_sel.
- // Combine with the operand src_sel.
- std::optional<SdwaSel> NewOp =
- combineSdwaSel((SdwaSel)SrcSel->getImm(), getSrcSel());
- if (!NewOp.has_value())
- return false;
- SrcSel->setImm(NewOp.value());
- }
+ SdwaSel ExistingSel = static_cast<SdwaSel>(SrcSel->getImm());
+ std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getSrcSel());
+ if (!NewSel.has_value())
+ return false;
+ SrcSel->setImm(NewSel.value());
+
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -548,15 +538,13 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Operand, *getTargetOperand());
MachineOperand *DstSel= TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
assert(DstSel);
- if (DstSel->getImm() != AMDGPU::SDWA::DWORD) {
- std::optional<SdwaSel> NewOp =
- combineSdwaSel((SdwaSel)DstSel->getImm(), getDstSel());
- if (!NewOp.has_value())
- return false;
- DstSel->setImm(NewOp.value());
- } else {
- DstSel->setImm(getDstSel());
- }
+
+ SdwaSel ExistingSel = static_cast<SdwaSel>(DstSel->getImm());
+ std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getDstSel());
+ if (!NewSel.has_value())
+ return false;
+ DstSel->setImm(NewSel.value());
+
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
DstUnused->setImm(getDstUnused());
>From 324267700bc0c98dfe6d505a88fa9e4de9807dce Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 07:11:39 -0500
Subject: [PATCH 15/22] [AMDGPU] SIPeepholeSDWA: Change arg names and comments
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 44 +++++++++++------------
1 file changed, 21 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index cfcb170db5af0..19d628acf30fe 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -111,34 +111,36 @@ class SDWAOperand {
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
- /// Fold a \p FoldedOp SDWA selection into an \p ExistingOp existing SDWA
- /// selection. If the selections are compatible, return the combined
- /// selection, otherwise return a nullopt. For example, if we have existing
- /// BYTE_0 Sel and are attempting to fold WORD_1 Sel:
+ /// Combine an SDWA instruction's existing SDWA selection \p
+ /// ExistingSel with the SDWA selection \p OpSel of its operand. If
+ /// the selections are compatible, return the combined selection,
+ /// otherwise return a nullopt. For example, if we have ExistingSel
+ /// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
- std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingOp, SdwaSel FoldedOp) {
- if (ExistingOp == SdwaSel::DWORD)
- return FoldedOp;
+ std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingSel,
+ SdwaSel OperandSel) {
+ if (ExistingSel == SdwaSel::DWORD)
+ return OperandSel;
- if (FoldedOp == SdwaSel::DWORD)
- return ExistingOp;
+ if (OperandSel == SdwaSel::DWORD)
+ return ExistingSel;
- if (ExistingOp == SdwaSel::WORD_1 || ExistingOp == SdwaSel::BYTE_2 ||
- ExistingOp == SdwaSel::BYTE_3)
+ if (ExistingSel == SdwaSel::WORD_1 || ExistingSel == SdwaSel::BYTE_2 ||
+ ExistingSel == SdwaSel::BYTE_3)
return {};
- if (ExistingOp == FoldedOp)
- return ExistingOp;
+ if (ExistingSel == OperandSel)
+ return ExistingSel;
- if (FoldedOp == SdwaSel::WORD_0)
- return ExistingOp;
+ if (OperandSel == SdwaSel::WORD_0)
+ return ExistingSel;
- if (FoldedOp == SdwaSel::WORD_1) {
- if (ExistingOp == SdwaSel::BYTE_0)
+ if (OperandSel == SdwaSel::WORD_1) {
+ if (ExistingSel == SdwaSel::BYTE_0)
return SdwaSel::BYTE_2;
- if (ExistingOp == SdwaSel::BYTE_1)
+ if (ExistingSel == SdwaSel::BYTE_1)
return SdwaSel::BYTE_3;
- if (ExistingOp == SdwaSel::WORD_0)
+ if (ExistingSel == SdwaSel::WORD_0)
return SdwaSel::WORD_1;
}
@@ -1216,13 +1218,9 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
MachineInstr *SDWAInst;
if (TII->isSDWA(MI.getOpcode())) {
- // No conversion necessary, since MI is an SDWA instruction. But
- // tell convertToSDWA below to combine selections of this instruction
- // and its SDWA operands.
SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
} else {
- // Convert to sdwa
SDWAInst = createSDWAVersion(MI);
}
>From b5aa73d3bd87fad5f340ac4857b0716876334f4c Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 29 Jan 2025 08:13:50 -0500
Subject: [PATCH 16/22] [AMDGPU] Use default check prefix in
sdwa-peephole-instr-combine-sel.mir
---
.../sdwa-peephole-instr-combine-sel.mir | 120 +++++++++---------
1 file changed, 60 insertions(+), 60 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
index 43708e9513c68..acad03d6d8960 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-combine-sel.mir
@@ -1,68 +1,68 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck -check-prefix=NOHAZARD %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -run-pass=si-peephole-sdwa -o - %s | FileCheck %s
---
name: sdwa_opsel_hazard
body: |
- ; NOHAZARD-LABEL: name: sdwa_opsel_hazard
- ; NOHAZARD: bb.0:
- ; NOHAZARD-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
- ; NOHAZARD-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
- ; NOHAZARD-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
- ; NOHAZARD-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
- ; NOHAZARD-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
- ; NOHAZARD-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.7
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.1:
- ; NOHAZARD-NEXT: successors: %bb.2(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
- ; NOHAZARD-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
- ; NOHAZARD-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
- ; NOHAZARD-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.2:
- ; NOHAZARD-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.3
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.3:
- ; NOHAZARD-NEXT: successors: %bb.4(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.4:
- ; NOHAZARD-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.5
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.5:
- ; NOHAZARD-NEXT: successors: %bb.6(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.6:
- ; NOHAZARD-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.9
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.7:
- ; NOHAZARD-NEXT: successors: %bb.8(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.8:
- ; NOHAZARD-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
- ; NOHAZARD-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
- ; NOHAZARD-NEXT: S_BRANCH %bb.1
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.9:
- ; NOHAZARD-NEXT: successors: %bb.10(0x80000000)
- ; NOHAZARD-NEXT: {{ $}}
- ; NOHAZARD-NEXT: bb.10:
- ; NOHAZARD-NEXT: S_ENDPGM 0
+ ; CHECK-LABEL: name: sdwa_opsel_hazard
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.7(0x40000000), %bb.8(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:sreg_64_xexec_xnull = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF2:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR killed [[DEF1]], [[DEF2]], 0, 0, implicit $exec
+ ; CHECK-NEXT: [[SI_IF:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.8, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 255, implicit $exec
+ ; CHECK-NEXT: [[V_AND_B32_sdwa:%[0-9]+]]:vgpr_32 = V_AND_B32_sdwa 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, [[V_MOV_B32_e32_]], 0, 6, 0, 5, 6, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; CHECK-NEXT: [[V_LSHLREV_B32_sdwa:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_sdwa 0, [[V_MOV_B32_e32_1]], 0, undef [[GLOBAL_LOAD_DWORD_SADDR]], 0, 6, 0, 6, 2, implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32 = SI_IF killed undef %9, %bb.4, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[SI_IF2:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF1]], %bb.6, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6:
+ ; CHECK-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[SI_IF3:%[0-9]+]]:sreg_32 = SI_IF undef [[DEF]], %bb.10, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7:
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, undef [[GLOBAL_LOAD_DWORD_SADDR]], implicit $exec
+ ; CHECK-NEXT: [[SI_IF4:%[0-9]+]]:sreg_32 = SI_IF killed undef [[SI_IF]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9:
+ ; CHECK-NEXT: successors: %bb.10(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10:
+ ; CHECK-NEXT: S_ENDPGM 0
bb.0:
successors: %bb.7(0x40000000), %bb.8(0x40000000)
liveins: $vgpr0, $sgpr4_sgpr5, $sgpr6
>From ed16fbd1ae4daa3d9d347e2b49f738fcb977873e Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Thu, 30 Jan 2025 04:18:31 -0500
Subject: [PATCH 17/22] Revert unintended reformatting
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 19d628acf30fe..39fe65e7c3754 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -171,9 +171,9 @@ class SDWASrcOperand : public SDWAOperand {
: SDWAOperand(TargetOp, ReplacedOp),
SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}
- MachineInstr *
- potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
+ MachineInstr *potentialToConvert(const SIInstrInfo *TII,
+ const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getSrcSel() const { return SrcSel; }
@@ -200,9 +200,9 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
- MachineInstr *
- potentialToConvert(const SIInstrInfo *TII, const GCNSubtarget &ST,
- SDWAOperandsMap *PotentialMatches = nullptr) override;
+ MachineInstr *potentialToConvert(const SIInstrInfo *TII,
+ const GCNSubtarget &ST,
+ SDWAOperandsMap *PotentialMatches = nullptr) override;
bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;
SdwaSel getDstSel() const { return DstSel; }
>From 258fb148de458910f1b9218d62ff2fb92c45204f Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 02:54:42 -0500
Subject: [PATCH 18/22] [AMDGPU] SIPeepholeSDWA: Verify compatibility of
selections earlier
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 168 +++++++++++++---------
1 file changed, 100 insertions(+), 68 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 39fe65e7c3754..37b12b2a5afe7 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -88,6 +88,43 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
using namespace AMDGPU::SDWA;
+/// Check that the SDWA selections \p ExistingSel and \p OperandSel
+/// are suitable for being combined by combineSdwaSel.
+bool compatibleSelections(SdwaSel ExistingSel, SdwaSel OperandSel) {
+ return ExistingSel == SdwaSel::DWORD || OperandSel == ExistingSel ||
+ (ExistingSel != SdwaSel::WORD_1 && ExistingSel != SdwaSel::BYTE_2 &&
+ ExistingSel != SdwaSel::BYTE_3 &&
+ (OperandSel == SdwaSel::WORD_0 || OperandSel == SdwaSel::WORD_1));
+}
+
+/// Combine an SDWA instruction's existing SDWA selection \p
+/// ExistingSel with the SDWA selection \p OpSel of its operand. If
+/// the selections are compatible, return the combined selection,
+/// otherwise return a nullopt. For example, if we have ExistingSel
+/// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
+/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
+SdwaSel combineSdwaSel(SdwaSel ExistingSel, SdwaSel OperandSel) {
+ assert(compatibleSelections(ExistingSel, OperandSel));
+
+ if (ExistingSel == SdwaSel::DWORD)
+ return OperandSel;
+
+ if (OperandSel == SdwaSel::DWORD || ExistingSel == OperandSel ||
+ OperandSel == SdwaSel::WORD_0)
+ return ExistingSel;
+
+ if (OperandSel == SdwaSel::WORD_1) {
+ if (ExistingSel == SdwaSel::BYTE_0)
+ return SdwaSel::BYTE_2;
+ if (ExistingSel == SdwaSel::BYTE_1)
+ return SdwaSel::BYTE_3;
+ if (ExistingSel == SdwaSel::WORD_0)
+ return SdwaSel::WORD_1;
+ }
+
+ llvm_unreachable("Unexpected selections");
+}
+
class SDWAOperand {
private:
MachineOperand *Target; // Operand that would be used in converted instruction
@@ -111,42 +148,6 @@ class SDWAOperand {
MachineOperand *getReplacedOperand() const { return Replaced; }
MachineInstr *getParentInst() const { return Target->getParent(); }
- /// Combine an SDWA instruction's existing SDWA selection \p
- /// ExistingSel with the SDWA selection \p OpSel of its operand. If
- /// the selections are compatible, return the combined selection,
- /// otherwise return a nullopt. For example, if we have ExistingSel
- /// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
- /// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
- std::optional<SdwaSel> combineSdwaSel(SdwaSel ExistingSel,
- SdwaSel OperandSel) {
- if (ExistingSel == SdwaSel::DWORD)
- return OperandSel;
-
- if (OperandSel == SdwaSel::DWORD)
- return ExistingSel;
-
- if (ExistingSel == SdwaSel::WORD_1 || ExistingSel == SdwaSel::BYTE_2 ||
- ExistingSel == SdwaSel::BYTE_3)
- return {};
-
- if (ExistingSel == OperandSel)
- return ExistingSel;
-
- if (OperandSel == SdwaSel::WORD_0)
- return ExistingSel;
-
- if (OperandSel == SdwaSel::WORD_1) {
- if (ExistingSel == SdwaSel::BYTE_0)
- return SdwaSel::BYTE_2;
- if (ExistingSel == SdwaSel::BYTE_1)
- return SdwaSel::BYTE_3;
- if (ExistingSel == SdwaSel::WORD_0)
- return SdwaSel::WORD_1;
- }
-
- return {};
- }
-
MachineRegisterInfo *getMRI() const {
return &getParentInst()->getParent()->getParent()->getRegInfo();
}
@@ -164,12 +165,34 @@ class SDWASrcOperand : public SDWAOperand {
bool Neg;
bool Sext;
-public:
+protected:
SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
bool Sext_ = false)
- : SDWAOperand(TargetOp, ReplacedOp),
- SrcSel(SrcSel_), Abs(Abs_), Neg(Neg_), Sext(Sext_) {}
+ : SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
+ Neg(Neg_), Sext(Sext_) {}
+public:
+ /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
+ /// if \p SrcSel_ and the src_sel0 and src_sel1 operands of \p MI are
+ /// compatible.
+ static std::unique_ptr<SDWAOperand>
+ create(const SIInstrInfo *TII, const MachineInstr &MI,
+ MachineOperand *TargetOp, MachineOperand *ReplacedOp,
+ SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
+ bool Sext_ = false) {
+ if (TII->isSDWA(MI.getOpcode())) {
+ for (auto SelOpName :
+ {AMDGPU::OpName::src0_sel, AMDGPU::OpName::src1_sel}) {
+ const MachineOperand *NamedOp = TII->getNamedOperand(MI, SelOpName);
+ if (NamedOp && !compatibleSelections(
+ static_cast<SdwaSel>(NamedOp->getImm()), SrcSel_))
+ return std::unique_ptr<SDWAOperand>(nullptr);
+ }
+ }
+
+ return std::unique_ptr<SDWAOperand>(new SDWASrcOperand(
+ TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ };
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
@@ -194,11 +217,29 @@ class SDWADstOperand : public SDWAOperand {
SdwaSel DstSel;
DstUnused DstUn;
-public:
-
+protected:
SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
- : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
+ : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
+
+public:
+ /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
+ /// if \p SrcSel_ and the dst_sel operand of \p MI are
+ /// compatible.
+ static std::unique_ptr<SDWAOperand>
+ create(const SIInstrInfo *TII, const MachineInstr &MI,
+ MachineOperand *TargetOp, MachineOperand *ReplacedOp, SdwaSel DstSel_,
+ DstUnused DstUn_) {
+ if (TII->isSDWA(MI.getOpcode())) {
+ SdwaSel InstSel = static_cast<SdwaSel>(
+ TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel)->getImm());
+ if (!compatibleSelections(InstSel, DstSel_))
+ return nullptr;
+ }
+
+ return std::unique_ptr<SDWAOperand>(
+ new SDWADstOperand(TargetOp, ReplacedOp, DstSel_, DstUn_));
+ };
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
const GCNSubtarget &ST,
@@ -489,11 +530,7 @@ bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
copyRegOperand(*Src, *getTargetOperand());
if (!IsPreserveSrc) {
SdwaSel ExistingSel = static_cast<SdwaSel>(SrcSel->getImm());
- std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getSrcSel());
- if (!NewSel.has_value())
- return false;
- SrcSel->setImm(NewSel.value());
-
+ SrcSel->setImm(combineSdwaSel(ExistingSel, getSrcSel()));
SrcMods->setImm(getSrcMods(TII, Src));
}
getTargetOperand()->setIsKill(false);
@@ -542,10 +579,7 @@ bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
assert(DstSel);
SdwaSel ExistingSel = static_cast<SdwaSel>(DstSel->getImm());
- std::optional<SdwaSel> NewSel = combineSdwaSel(ExistingSel, getDstSel());
- if (!NewSel.has_value())
- return false;
- DstSel->setImm(NewSel.value());
+ DstSel->setImm(combineSdwaSel(ExistingSel, getDstSel()));
MachineOperand *DstUnused= TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
assert(DstUnused);
@@ -648,13 +682,13 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
Opcode == AMDGPU::V_LSHLREV_B32_e64) {
- return std::make_unique<SDWADstOperand>(
- Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
+ return SDWADstOperand::create(TII, MI, Dst, Src1,
+ *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
}
- return std::make_unique<SDWASrcOperand>(
- Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
- Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
- Opcode != AMDGPU::V_LSHRREV_B32_e64);
+ return SDWASrcOperand::create(TII, MI, Src1, Dst,
+ *Imm == 16 ? WORD_1 : BYTE_3, false, false,
+ Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
+ Opcode != AMDGPU::V_LSHRREV_B32_e64);
break;
}
@@ -686,11 +720,10 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
Opcode == AMDGPU::V_LSHLREV_B16_e64)
- return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
- return std::make_unique<SDWASrcOperand>(
- Src1, Dst, BYTE_1, false, false,
- Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
- Opcode != AMDGPU::V_LSHRREV_B16_e64);
+ return SDWADstOperand::create(TII, MI, Dst, Src1, BYTE_1, UNUSED_PAD);
+ return SDWASrcOperand::create(TII, MI, Src1, Dst, BYTE_1, false, false,
+ Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
+ Opcode != AMDGPU::V_LSHRREV_B16_e64);
break;
}
@@ -746,8 +779,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
Dst->getReg().isPhysical())
break;
- return std::make_unique<SDWASrcOperand>(
- Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64);
+ return SDWASrcOperand::create(TII, MI, Src0, Dst, SrcSel, false, false,
+ Opcode != AMDGPU::V_BFE_U32_e64);
}
case AMDGPU::V_AND_B32_e32:
@@ -774,9 +807,8 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
if (!ValSrc->isReg() || ValSrc->getReg().isPhysical() ||
Dst->getReg().isPhysical())
break;
-
- return std::make_unique<SDWASrcOperand>(
- ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
+ return SDWASrcOperand::create(TII, MI, ValSrc, Dst,
+ *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
}
case AMDGPU::V_OR_B32_e32:
@@ -912,7 +944,7 @@ SIPeepholeSDWA::matchSDWAOperand(MachineInstr &MI) {
}
}
- return std::unique_ptr<SDWAOperand>(nullptr);
+ return nullptr;
}
#if !defined(NDEBUG)
>From ac0a1339eaf37623639a47ea56db349dff9e94b3 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 03:05:27 -0500
Subject: [PATCH 19/22] [AMDGPU] SIPeepholeSDWA: Adjust comments and variable
names
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 35 +++++++++++------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 37b12b2a5afe7..ef2857009ff00 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -88,37 +88,36 @@ class SIPeepholeSDWALegacy : public MachineFunctionPass {
using namespace AMDGPU::SDWA;
-/// Check that the SDWA selections \p ExistingSel and \p OperandSel
+/// Check that the SDWA selections \p Sel and \p OperandSel
/// are suitable for being combined by combineSdwaSel.
-bool compatibleSelections(SdwaSel ExistingSel, SdwaSel OperandSel) {
- return ExistingSel == SdwaSel::DWORD || OperandSel == ExistingSel ||
- (ExistingSel != SdwaSel::WORD_1 && ExistingSel != SdwaSel::BYTE_2 &&
- ExistingSel != SdwaSel::BYTE_3 &&
+bool compatibleSelections(SdwaSel Sel, SdwaSel OperandSel) {
+ return Sel == SdwaSel::DWORD || OperandSel == Sel ||
+ (Sel != SdwaSel::WORD_1 && Sel != SdwaSel::BYTE_2 &&
+ Sel != SdwaSel::BYTE_3 &&
(OperandSel == SdwaSel::WORD_0 || OperandSel == SdwaSel::WORD_1));
}
-/// Combine an SDWA instruction's existing SDWA selection \p
-/// ExistingSel with the SDWA selection \p OpSel of its operand. If
-/// the selections are compatible, return the combined selection,
-/// otherwise return a nullopt. For example, if we have ExistingSel
-/// = BYTE_0 Sel and FoldedSel WORD_1 Sel:
+/// Combine an SDWA instruction's existing SDWA selection \p Sel with
+/// the SDWA selection \p OperandSel of its operand, which must be
+/// compatible.
+/// For example, if we have Sel = BYTE_0 and OperandSel = WORD_1:
/// BYTE_0 Sel (WORD_1 Sel (%X)) -> BYTE_2 Sel (%X)
-SdwaSel combineSdwaSel(SdwaSel ExistingSel, SdwaSel OperandSel) {
- assert(compatibleSelections(ExistingSel, OperandSel));
+SdwaSel combineSdwaSel(SdwaSel Sel, SdwaSel OperandSel) {
+ assert(compatibleSelections(Sel, OperandSel));
- if (ExistingSel == SdwaSel::DWORD)
+ if (Sel == SdwaSel::DWORD)
return OperandSel;
- if (OperandSel == SdwaSel::DWORD || ExistingSel == OperandSel ||
+ if (OperandSel == SdwaSel::DWORD || Sel == OperandSel ||
OperandSel == SdwaSel::WORD_0)
- return ExistingSel;
+ return Sel;
if (OperandSel == SdwaSel::WORD_1) {
- if (ExistingSel == SdwaSel::BYTE_0)
+ if (Sel == SdwaSel::BYTE_0)
return SdwaSel::BYTE_2;
- if (ExistingSel == SdwaSel::BYTE_1)
+ if (Sel == SdwaSel::BYTE_1)
return SdwaSel::BYTE_3;
- if (ExistingSel == SdwaSel::WORD_0)
+ if (Sel == SdwaSel::WORD_0)
return SdwaSel::WORD_1;
}
>From a9e38fa87d8eacaedd5111ccb3630e9423191291 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 03:10:55 -0500
Subject: [PATCH 20/22] [AMDGPU] SIPeepholeSDWA: Add comment answering a review
question
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index ef2857009ff00..0bbbcf2605eff 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -1249,6 +1249,9 @@ bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
MachineInstr *SDWAInst;
if (TII->isSDWA(MI.getOpcode())) {
+ // Clone the instruction to allow revoking changes
+ // made to MI during the processing of the operands
+ // if the conversion fails.
SDWAInst = MI.getParent()->getParent()->CloneMachineInstr(&MI);
MI.getParent()->insert(MI.getIterator(), SDWAInst);
} else {
>From db7f6743d8722a59355790bf6216a41ce4cdbac4 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Tue, 11 Feb 2025 03:20:20 -0500
Subject: [PATCH 21/22] clang-format changes
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 0bbbcf2605eff..67e5bc8d3d504 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -170,6 +170,7 @@ class SDWASrcOperand : public SDWAOperand {
bool Sext_ = false)
: SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
Neg(Neg_), Sext(Sext_) {}
+
public:
/// Create an SDWASrcOperand as an operand for \p MI from the given arguments
/// if \p SrcSel_ and the src_sel0 and src_sel1 operands of \p MI are
@@ -189,8 +190,8 @@ class SDWASrcOperand : public SDWAOperand {
}
}
- return std::unique_ptr<SDWAOperand>(new SDWASrcOperand(
- TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ return std::unique_ptr<SDWAOperand>(
+ new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
};
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
>From ac80b8615f654d3ac2d1783339ce84c98cbcf9d6 Mon Sep 17 00:00:00 2001
From: Frederik Harwath <fharwath at amd.com>
Date: Wed, 12 Feb 2025 02:47:51 -0500
Subject: [PATCH 22/22] Use consistent/more specific return type for
SDWA{Src,Dst}Operand factory
---
llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 67e5bc8d3d504..f0a0b6e656312 100644
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -175,7 +175,7 @@ class SDWASrcOperand : public SDWAOperand {
/// Create an SDWASrcOperand as an operand for \p MI from the given arguments
/// if \p SrcSel_ and the src0_sel and src1_sel operands of \p MI are
/// compatible.
- static std::unique_ptr<SDWAOperand>
+ static std::unique_ptr<SDWASrcOperand>
create(const SIInstrInfo *TII, const MachineInstr &MI,
MachineOperand *TargetOp, MachineOperand *ReplacedOp,
SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
@@ -186,12 +186,11 @@ class SDWASrcOperand : public SDWAOperand {
const MachineOperand *NamedOp = TII->getNamedOperand(MI, SelOpName);
if (NamedOp && !compatibleSelections(
static_cast<SdwaSel>(NamedOp->getImm()), SrcSel_))
- return std::unique_ptr<SDWAOperand>(nullptr);
+ return nullptr;
}
}
- return std::unique_ptr<SDWAOperand>(
- new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
+ return std::unique_ptr<SDWASrcOperand>(new SDWASrcOperand(TargetOp, ReplacedOp, SrcSel_, Abs_, Neg_, Sext_));
};
MachineInstr *potentialToConvert(const SIInstrInfo *TII,
@@ -223,10 +222,10 @@ class SDWADstOperand : public SDWAOperand {
: SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}
public:
- /// Create an SDWASrcOperand as an operand for \p MI from the given arguments
+ /// Create an SDWADstOperand as an operand for \p MI from the given arguments
/// if \p DstSel_ and the dst_sel operand of \p MI are
/// compatible.
- static std::unique_ptr<SDWAOperand>
+ static std::unique_ptr<SDWADstOperand>
create(const SIInstrInfo *TII, const MachineInstr &MI,
MachineOperand *TargetOp, MachineOperand *ReplacedOp, SdwaSel DstSel_,
DstUnused DstUn_) {
@@ -237,7 +236,7 @@ class SDWADstOperand : public SDWAOperand {
return nullptr;
}
- return std::unique_ptr<SDWAOperand>(
+ return std::unique_ptr<SDWADstOperand>(
new SDWADstOperand(TargetOp, ReplacedOp, DstSel_, DstUn_));
};
More information about the llvm-commits
mailing list