[llvm] [AMDGPU][CodeGen] support v_mov_b16 and v_swap_b16 in true16 format (PR #102198)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 6 11:47:00 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Brox Chen (broxigarchen)
Changes:
Support v_swap_b16 in true16 format and update operand folding for v_mov_b16.
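In short, with real true16 instructions the shrink pass can now collapse the classic three-mov swap idiom on 16-bit register halves into a single instruction. A minimal sketch with hypothetical registers (not taken from this patch's tests):

```
; t = x; x = y; y = t  -- three-mov swap idiom on 16-bit halves
v_mov_b16_e32 v2.l, v0.l    ; t = x
v_mov_b16_e32 v0.l, v1.l    ; x = y
v_mov_b16_e32 v1.l, v2.l    ; y = t
; after SIShrinkInstructions (when t is otherwise dead):
v_swap_b16 v0.l, v1.l       ; x <-> y in one instruction
```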
---
Full diff: https://github.com/llvm/llvm-project/pull/102198.diff
8 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (+9-1)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+5-1)
- (modified) llvm/lib/Target/AMDGPU/SIInstructions.td (-14)
- (modified) llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp (+40-18)
- (modified) llvm/lib/Target/AMDGPU/VOP1Instructions.td (+1-1)
- (modified) llvm/test/CodeGen/AMDGPU/bf16.ll (+8-20)
- (modified) llvm/test/CodeGen/AMDGPU/fadd.f16.ll (+4-12)
- (added) llvm/test/CodeGen/AMDGPU/v_swap_b16.ll (+113)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index 32ecf350db59c..a2352fd892c82 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1460,7 +1460,15 @@ bool SIFoldOperands::tryFoldFoldableCopy(
return false;
}
- MachineOperand &OpToFold = MI.getOperand(1);
+ MachineOperand *OpToFoldPtr;
+ if (MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e64) {
+ // Folding when any src_modifiers are non-zero is unsupported
+ if (TII->hasAnyModifiersSet(MI))
+ return false;
+ OpToFoldPtr = &MI.getOperand(2);
+ } else
+ OpToFoldPtr = &MI.getOperand(1);
+ MachineOperand &OpToFold = *OpToFoldPtr;
bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
// FIXME: We could also be folding things like TargetIndexes.
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 463737f645d45..438d380d85135 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3369,6 +3369,8 @@ void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) {
switch (MI.getOpcode()) {
+ case AMDGPU::V_MOV_B16_t16_e32:
+ case AMDGPU::V_MOV_B16_t16_e64:
case AMDGPU::V_MOV_B32_e32:
case AMDGPU::V_MOV_B32_e64:
case AMDGPU::V_MOV_B64_PSEUDO:
@@ -5635,7 +5637,9 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
unsigned RCID = get(MI.getOpcode()).operands()[OpIdx].RegClass;
const TargetRegisterClass *RC = RI.getRegClass(RCID);
unsigned Size = RI.getRegSizeInBits(*RC);
- unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32;
+ unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
+ : Size == 16 ? AMDGPU::V_MOV_B16_t16_e64
+ : AMDGPU::V_MOV_B32_e32;
if (MO.isReg())
Opcode = AMDGPU::COPY;
else if (RI.isSGPRClass(RC))
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index c41850ab55f75..2fcdcbd6b5ba1 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2192,20 +2192,6 @@ foreach pred = [NotHasTrue16BitInsts, UseFakeTrue16Insts] in {
}
}
-let True16Predicate = UseRealTrue16Insts in {
- def : GCNPat <
- (VGPRImm<(i16 imm)>:$imm),
- (V_MOV_B16_t16_e64 0, imm:$imm, 0)
- >;
-
- foreach vt = [f16, bf16] in {
- def : GCNPat <
- (VGPRImm<(vt fpimm)>:$imm),
- (V_MOV_B16_t16_e64 0, $imm, 0)
- >;
- }
-}
-
// V_MOV_B64_PSEUDO and S_MOV_B64_IMM_PSEUDO can be used with any 64-bit
// immediate and will be expanded as needed, but we will only use these patterns
// for values which can be encoded.
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 79bcf5e8cd30d..9369c8685f1fb 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -657,6 +657,7 @@ void SIShrinkInstructions::dropInstructionKeepingImpDefs(
// although requirements match the pass placement and it reduces code size too.
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
+ MovT.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
MovT.getOpcode() == AMDGPU::COPY);
Register T = MovT.getOperand(0).getReg();
@@ -668,7 +669,12 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
Register X = Xop.getReg();
unsigned Xsub = Xop.getSubReg();
- unsigned Size = TII->getOpSize(MovT, 0) / 4;
+ unsigned Size = TII->getOpSize(MovT, 0);
+
+ // We can't match v_swap_b16 pre-RA, because VGPR_16_Lo128 registers
+ // are not allocatable.
+ if (Size == 2 && X.isVirtual())
+ return nullptr;
if (!TRI->isVGPR(*MRI, X))
return nullptr;
@@ -684,6 +690,7 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
KilledT = MovY->killsRegister(T, TRI);
if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
+ MovY->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
MovY->getOpcode() != AMDGPU::COPY) ||
!MovY->getOperand(1).isReg() ||
MovY->getOperand(1).getReg() != T ||
@@ -714,6 +721,7 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
}
if (MovX ||
(I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
+ I->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
I->getOpcode() != AMDGPU::COPY) ||
I->getOperand(0).getReg() != X ||
I->getOperand(0).getSubReg() != Xsub) {
@@ -721,7 +729,7 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
break;
}
- if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
+ if (Size > 4 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
continue;
MovX = &*I;
@@ -730,23 +738,36 @@ MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
if (!MovX)
continue;
- LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);
+ LLVM_DEBUG(dbgs() << "Matched v_swap:\n" << MovT << *MovX << *MovY);
- for (unsigned I = 0; I < Size; ++I) {
- TargetInstrInfo::RegSubRegPair X1, Y1;
- X1 = getSubRegForIndex(X, Xsub, I);
- Y1 = getSubRegForIndex(Y, Ysub, I);
- MachineBasicBlock &MBB = *MovT.getParent();
+ MachineBasicBlock &MBB = *MovT.getParent();
+ SmallVector<MachineInstr*, 4> Swaps;
+ if (Size == 2) {
auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
- TII->get(AMDGPU::V_SWAP_B32))
- .addDef(X1.Reg, 0, X1.SubReg)
- .addDef(Y1.Reg, 0, Y1.SubReg)
- .addReg(Y1.Reg, 0, Y1.SubReg)
- .addReg(X1.Reg, 0, X1.SubReg).getInstr();
- if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
- // Drop implicit EXEC.
- MIB->removeOperand(MIB->getNumExplicitOperands());
- MIB->copyImplicitOps(*MBB.getParent(), *MovX);
+ TII->get(AMDGPU::V_SWAP_B16))
+ .addDef(X).addDef(Y)
+ .addReg(Y).addReg(X).getInstr();
+ Swaps.push_back(MIB);
+ } else {
+ assert(Size > 0 && Size % 4 == 0);
+ for (unsigned I = 0; I < Size / 4; ++I) {
+ TargetInstrInfo::RegSubRegPair X1, Y1;
+ X1 = getSubRegForIndex(X, Xsub, I);
+ Y1 = getSubRegForIndex(Y, Ysub, I);
+ auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
+ TII->get(AMDGPU::V_SWAP_B32))
+ .addDef(X1.Reg, 0, X1.SubReg)
+ .addDef(Y1.Reg, 0, Y1.SubReg)
+ .addReg(Y1.Reg, 0, Y1.SubReg)
+ .addReg(X1.Reg, 0, X1.SubReg).getInstr();
+ Swaps.push_back(MIB);
+ }
+ }
+ // Drop implicit EXEC.
+ if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
+ for (MachineInstr *Swap : Swaps) {
+ Swap->removeOperand(Swap->getNumExplicitOperands());
+ Swap->copyImplicitOps(*MBB.getParent(), *MovX);
}
}
MovX->eraseFromParent();
@@ -833,6 +854,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
}
if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
+ MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
MI.getOpcode() == AMDGPU::COPY)) {
if (auto *NextMI = matchSwap(MI)) {
Next = NextMI->getIterator();
@@ -1023,7 +1045,7 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
MachineFunctionProperties::Property::NoVRegs))
continue;
- if (ST->hasTrue16BitInsts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
+ if (ST->useRealTrue16Insts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
!shouldShrinkTrue16(MI))
continue;
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 0a2e338b34787..02a3960512fcf 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -751,7 +751,7 @@ let SubtargetPredicate = isGFX11Plus in {
let IsInvalidSingleUseConsumer = 1;
let IsInvalidSingleUseProducer = 1;
}
- defm V_MOV_B16_t16 : VOP1Inst<"v_mov_b16_t16", VOPProfile_True16<VOP_I16_I16>>;
+ defm V_MOV_B16 : VOP1Inst_t16<"v_mov_b16_t16", VOP_I16_I16>;
defm V_NOT_B16 : VOP1Inst_t16<"v_not_b16", VOP_I16_I16>;
defm V_CVT_I32_I16 : VOP1Inst_t16<"v_cvt_i32_i16", VOP_I32_I16>;
defm V_CVT_U32_U16 : VOP1Inst_t16<"v_cvt_u32_u16", VOP_I32_I16>;
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index d732da1a67bc1..970bb08e1838b 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -2131,26 +2131,14 @@ define void @test_store_fpimm(ptr addrspace(1) %ptr0, ptr addrspace(1) %ptr1) {
; GFX10-NEXT: global_store_short v[2:3], v5, off
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
-; GFX11TRUE16-LABEL: test_store_fpimm:
-; GFX11TRUE16: ; %bb.0:
-; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, 0x3f80
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.h, 0x4228
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.l
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v4.h
-; GFX11TRUE16-NEXT: global_store_b16 v[0:1], v5, off
-; GFX11TRUE16-NEXT: global_store_b16 v[2:3], v4, off
-; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11FAKE16-LABEL: test_store_fpimm:
-; GFX11FAKE16: ; %bb.0:
-; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT: v_mov_b32_e32 v4, 0x3f80
-; GFX11FAKE16-NEXT: v_mov_b32_e32 v5, 0x4228
-; GFX11FAKE16-NEXT: global_store_b16 v[0:1], v4, off
-; GFX11FAKE16-NEXT: global_store_b16 v[2:3], v5, off
-; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX11-LABEL: test_store_fpimm:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v4, 0x3f80
+; GFX11-NEXT: v_mov_b32_e32 v5, 0x4228
+; GFX11-NEXT: global_store_b16 v[0:1], v4, off
+; GFX11-NEXT: global_store_b16 v[2:3], v5, off
+; GFX11-NEXT: s_setpc_b64 s[30:31]
store bfloat 1.0, ptr addrspace(1) %ptr0
store bfloat 42.0, ptr addrspace(1) %ptr1
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/fadd.f16.ll b/llvm/test/CodeGen/AMDGPU/fadd.f16.ll
index 7352fcdd071d5..9fe7544003568 100644
--- a/llvm/test/CodeGen/AMDGPU/fadd.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fadd.f16.ll
@@ -246,9 +246,7 @@ define amdgpu_kernel void @fadd_f16_imm_a(
; GFX11-SDAG-NEXT: s_mov_b32 s3, s7
; GFX11-SDAG-NEXT: buffer_load_u16 v0, off, s[0:3], 0
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.h, 0x3c00
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_f16_e32 v0.l, v0.l, v0.h
+; GFX11-SDAG-NEXT: v_add_f16_e32 v0.l, 1.0, v0.l
; GFX11-SDAG-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-NEXT: s_nop 0
; GFX11-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -264,9 +262,7 @@ define amdgpu_kernel void @fadd_f16_imm_a(
; GFX11-GISEL-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX11-GISEL-NEXT: buffer_load_u16 v0, off, s[4:7], 0
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-GISEL-NEXT: v_mov_b16_e32 v0.h, 0x3c00
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_add_f16_e32 v0.l, v0.l, v0.h
+; GFX11-GISEL-NEXT: v_add_f16_e32 v0.l, 1.0, v0.l
; GFX11-GISEL-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-NEXT: s_nop 0
; GFX11-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -390,9 +386,7 @@ define amdgpu_kernel void @fadd_f16_imm_b(
; GFX11-SDAG-NEXT: s_mov_b32 s3, s7
; GFX11-SDAG-NEXT: buffer_load_u16 v0, off, s[0:3], 0
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX11-SDAG-NEXT: v_mov_b16_e32 v0.h, 0x4000
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_f16_e32 v0.l, v0.l, v0.h
+; GFX11-SDAG-NEXT: v_add_f16_e32 v0.l, 2.0, v0.l
; GFX11-SDAG-NEXT: buffer_store_b16 v0, off, s[4:7], 0
; GFX11-SDAG-NEXT: s_nop 0
; GFX11-SDAG-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -408,9 +402,7 @@ define amdgpu_kernel void @fadd_f16_imm_b(
; GFX11-GISEL-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX11-GISEL-NEXT: buffer_load_u16 v0, off, s[4:7], 0
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-GISEL-NEXT: v_mov_b16_e32 v0.h, 0x4000
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_add_f16_e32 v0.l, v0.l, v0.h
+; GFX11-GISEL-NEXT: v_add_f16_e32 v0.l, 2.0, v0.l
; GFX11-GISEL-NEXT: buffer_store_b16 v0, off, s[0:3], 0
; GFX11-GISEL-NEXT: s_nop 0
; GFX11-GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/v_swap_b16.ll b/llvm/test/CodeGen/AMDGPU/v_swap_b16.ll
new file mode 100644
index 0000000000000..fc53f5d556322
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_swap_b16.ll
@@ -0,0 +1,113 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
+
+define half @swap(half %a, half %b, i32 %i) {
+; GFX11-TRUE16-LABEL: swap:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: .LBB0_1: ; %loop
+; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v2, -1, v2
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX11-TRUE16-NEXT: v_swap_b16 v0.l, v0.h
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-TRUE16-NEXT: ; %bb.2: ; %ret
+; GFX11-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: swap:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: .LBB0_1: ; %loop
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_add_nc_u32 v2, -1, v2
+; GFX11-FAKE16-NEXT: v_swap_b32 v1, v0
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %ret
+; GFX11-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-TRUE16-LABEL: swap:
+; GFX12-TRUE16: ; %bb.0: ; %entry
+; GFX12-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_expcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.h, v0.l
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v0.l, v1.l
+; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX12-TRUE16-NEXT: .LBB0_1: ; %loop
+; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-TRUE16-NEXT: v_add_nc_u32_e32 v2, -1, v2
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.l, v0.l
+; GFX12-TRUE16-NEXT: v_swap_b16 v0.l, v0.h
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-TRUE16-NEXT: ; %bb.2: ; %ret
+; GFX12-TRUE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-FAKE16-LABEL: swap:
+; GFX12-FAKE16: ; %bb.0: ; %entry
+; GFX12-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_expcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_samplecnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_bvhcnt 0x0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX12-FAKE16-NEXT: .LBB0_1: ; %loop
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-FAKE16-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_add_nc_u32 v2, -1, v2
+; GFX12-FAKE16-NEXT: v_swap_b32 v1, v0
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %ret
+; GFX12-FAKE16-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v1
+; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
+entry:
+ br label %loop
+
+loop:
+ %x = phi half [%a, %entry], [%y, %loop]
+ %y = phi half [%b, %entry], [%x, %loop]
+ %i2 = phi i32 [%i, %entry], [%i3, %loop]
+
+ %i3 = sub i32 %i2, 1
+
+ %cmp = icmp eq i32 %i3, 0
+ br i1 %cmp, label %ret, label %loop
+
+ret:
+ ret half %x
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11: {{.*}}
+; GFX12: {{.*}}
``````````
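A note on the SIFoldOperands change above: V_MOV_B16_t16_e64 is a VOP3 encoding, so its source value is not at operand index 1 as with v_mov_b32. Going by the removed SIInstructions.td pattern (`V_MOV_B16_t16_e64 0, imm:$imm, 0`), the layout is roughly as sketched below; treat this as an illustration, not verbatim MIR from the patch:

```
; Sketch of the VOP3 true16 operand layout the fold relies on:
;   vdst = V_MOV_B16_t16_e64 src0_modifiers, src0, op_sel
$vgpr0_lo16 = V_MOV_B16_t16_e64 0, 1234, 0, implicit $exec
; src0 sits at operand index 2, hence OpToFold = MI.getOperand(2),
; and the fold bails out if any source modifiers are set.
```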
https://github.com/llvm/llvm-project/pull/102198