[llvm] [DAGCombiner][AMDGPU] Track signedness in ByteProviders (PR #65995)
Jeffrey Byrnes via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 11 15:20:46 PDT 2023
https://github.com/jrbyrnes updated https://github.com/llvm/llvm-project/pull/65995:
From 3b1345fb52d46c638a33745ce62b2f15098f307a Mon Sep 17 00:00:00 2001
From: Jeffrey Byrnes <Jeffrey.Byrnes at amd.com>
Date: Mon, 11 Sep 2023 11:31:44 -0700
Subject: [PATCH] [DAGCombiner][AMDGPU] Track signedness in ByteProviders
Change-Id: I3d35503bb6d6adfaf68b5742219b46202d7a6193
---
llvm/include/llvm/CodeGen/ByteProvider.h | 12 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 86 +++++----
llvm/test/CodeGen/AMDGPU/idot4s.ll | 221 ++++++++++++++++++++--
llvm/test/CodeGen/AMDGPU/idot4u.ll | 76 ++++++--
4 files changed, 330 insertions(+), 65 deletions(-)
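
At a glance, the patch makes each ByteProvider remember whether its byte was reached through a sign-extending operation, and performAddCombine then requires every multiply in a candidate dot-product chain to agree on that signedness before choosing between amdgcn_sdot4 and amdgcn_udot4. As a rough, self-contained sketch of the propagation pattern only (toy Node/Opcode types standing in for SDValue and the ISD opcodes; this is not the code changed below):

// Toy model of the signedness tracking added in this patch: a byte source is
// found by walking through extends/shifts, OR-ing in "signed" whenever a
// sign-extending node is crossed. Node and Opcode are hypothetical stand-ins.
#include <cstdint>
#include <iostream>
#include <memory>
#include <optional>

enum class Opcode { SignExtend, ZeroExtend, Truncate, Load, SExtLoad };

struct Node {
  Opcode Op;
  std::shared_ptr<Node> Operand; // a single operand is enough for the sketch
};

struct ByteProvider {
  std::shared_ptr<Node> Src;
  int64_t DestOffset = 0;
  int64_t SrcOffset = 0;
  bool IsSigned = false; // new in the patch: signedness of the byte's source
};

std::optional<ByteProvider> calculateSrcByte(const std::shared_ptr<Node> &Op,
                                             int64_t DestByte, int64_t SrcIndex,
                                             bool IsSigned = false,
                                             unsigned Depth = 0) {
  if (!Op || Depth >= 6)
    return std::nullopt;
  switch (Op->Op) {
  case Opcode::Truncate:
    return calculateSrcByte(Op->Operand, DestByte, SrcIndex, IsSigned, Depth + 1);
  case Opcode::SignExtend:
  case Opcode::ZeroExtend:
    // Crossing a sign extension makes whatever byte we eventually find signed.
    IsSigned |= Op->Op == Opcode::SignExtend;
    return calculateSrcByte(Op->Operand, DestByte, SrcIndex, IsSigned, Depth + 1);
  default:
    // Leaves: sign-extending loads also mark the byte as signed.
    IsSigned |= Op->Op == Opcode::SExtLoad;
    return ByteProvider{Op, DestByte, SrcIndex, IsSigned};
  }
}

int main() {
  auto Ld = std::make_shared<Node>(Node{Opcode::Load, nullptr});
  auto Sext = std::make_shared<Node>(Node{Opcode::SignExtend, Ld});
  auto BP = calculateSrcByte(Sext, /*DestByte=*/0, /*SrcIndex=*/0);
  std::cout << "IsSigned = " << (BP && BP->IsSigned) << "\n"; // prints 1
  return 0;
}
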
diff --git a/llvm/include/llvm/CodeGen/ByteProvider.h b/llvm/include/llvm/CodeGen/ByteProvider.h
index 3187b4e68c56f3a..2a103effadc82d0 100644
--- a/llvm/include/llvm/CodeGen/ByteProvider.h
+++ b/llvm/include/llvm/CodeGen/ByteProvider.h
@@ -32,6 +32,11 @@ template <typename ISelOp> class ByteProvider {
ByteProvider(std::optional<ISelOp> Src, int64_t DestOffset, int64_t SrcOffset)
: Src(Src), DestOffset(DestOffset), SrcOffset(SrcOffset) {}
+ ByteProvider(std::optional<ISelOp> Src, int64_t DestOffset, int64_t SrcOffset,
+ bool IsSigned)
+ : Src(Src), DestOffset(DestOffset), SrcOffset(SrcOffset),
+ IsSigned(IsSigned) {}
+
// TODO -- use constraint in c++20
// Does this type correspond with an operation in selection DAG
template <typename T> class is_op {
@@ -61,13 +66,16 @@ template <typename ISelOp> class ByteProvider {
// DestOffset
int64_t SrcOffset = 0;
+ // Whether or not Src should be treated as signed
+ bool IsSigned = false;
+
ByteProvider() = default;
static ByteProvider getSrc(std::optional<ISelOp> Val, int64_t ByteOffset,
- int64_t VectorOffset) {
+ int64_t VectorOffset, bool IsSigned = false) {
static_assert(is_op<ISelOp>().value,
"ByteProviders must contain an operation in selection DAG.");
- return ByteProvider(Val, ByteOffset, VectorOffset);
+ return ByteProvider(Val, ByteOffset, VectorOffset, IsSigned);
}
static ByteProvider getConstantZero() {
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 85c9ed489e926ce..31fde136627dc6c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10513,7 +10513,7 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
// performed.
static const std::optional<ByteProvider<SDValue>>
calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
- unsigned Depth = 0) {
+ bool IsSigned = false, unsigned Depth = 0) {
// We may need to recursively traverse a series of SRLs
if (Depth >= 6)
return std::nullopt;
@@ -10524,12 +10524,15 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
switch (Op->getOpcode()) {
case ISD::TRUNCATE: {
- return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, Depth + 1);
+ return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, IsSigned,
+ Depth + 1);
}
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::SIGN_EXTEND_INREG: {
+ IsSigned |= Op->getOpcode() == ISD::SIGN_EXTEND ||
+ Op->getOpcode() == ISD::SIGN_EXTEND_INREG;
SDValue NarrowOp = Op->getOperand(0);
auto NarrowVT = NarrowOp.getValueType();
if (Op->getOpcode() == ISD::SIGN_EXTEND_INREG) {
@@ -10542,7 +10545,8 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
if (SrcIndex >= NarrowByteWidth)
return std::nullopt;
- return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, Depth + 1);
+ return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, IsSigned,
+ Depth + 1);
}
case ISD::SRA:
@@ -10558,11 +10562,15 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
SrcIndex += BitShift / 8;
- return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, Depth + 1);
+ return calculateSrcByte(Op->getOperand(0), DestByte, SrcIndex, IsSigned,
+ Depth + 1);
}
default: {
- return ByteProvider<SDValue>::getSrc(Op, DestByte, SrcIndex);
+ if (auto L = dyn_cast<LoadSDNode>(Op))
+ IsSigned |= L->getExtensionType() == ISD::SEXTLOAD;
+
+ return ByteProvider<SDValue>::getSrc(Op, DestByte, SrcIndex, IsSigned);
}
}
llvm_unreachable("fully handled switch");
@@ -10576,7 +10584,7 @@ calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
// performed. \p StartingIndex is the originally requested byte of the Or
static const std::optional<ByteProvider<SDValue>>
calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
- unsigned StartingIndex = 0) {
+ unsigned StartingIndex = 0, bool IsSigned = false) {
// Finding Src tree of RHS of or typically requires at least 1 additional
// depth
if (Depth > 6)
@@ -10591,11 +10599,11 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
switch (Op.getOpcode()) {
case ISD::OR: {
auto RHS = calculateByteProvider(Op.getOperand(1), Index, Depth + 1,
- StartingIndex);
+ StartingIndex, IsSigned);
if (!RHS)
return std::nullopt;
auto LHS = calculateByteProvider(Op.getOperand(0), Index, Depth + 1,
- StartingIndex);
+ StartingIndex, IsSigned);
if (!LHS)
return std::nullopt;
// A well formed Or will have two ByteProviders for each byte, one of which
@@ -10626,7 +10634,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
return ByteProvider<SDValue>::getConstantZero();
}
- return calculateSrcByte(Op->getOperand(0), StartingIndex, Index);
+ return calculateSrcByte(Op->getOperand(0), StartingIndex, Index, IsSigned);
}
case ISD::SRA:
@@ -10651,7 +10659,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
// the SRL is Index + ByteShift
return BytesProvided - ByteShift > Index
? calculateSrcByte(Op->getOperand(0), StartingIndex,
- Index + ByteShift)
+ Index + ByteShift, IsSigned)
: ByteProvider<SDValue>::getConstantZero();
}
@@ -10672,7 +10680,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
return Index < ByteShift
? ByteProvider<SDValue>::getConstantZero()
: calculateByteProvider(Op.getOperand(0), Index - ByteShift,
- Depth + 1, StartingIndex);
+ Depth + 1, StartingIndex, IsSigned);
}
case ISD::ANY_EXTEND:
case ISD::SIGN_EXTEND:
@@ -10691,13 +10699,17 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
if (NarrowBitWidth % 8 != 0)
return std::nullopt;
uint64_t NarrowByteWidth = NarrowBitWidth / 8;
+ IsSigned |= Op->getOpcode() == ISD::SIGN_EXTEND ||
+ Op->getOpcode() == ISD::SIGN_EXTEND_INREG ||
+ Op->getOpcode() == ISD::AssertSext;
if (Index >= NarrowByteWidth)
return Op.getOpcode() == ISD::ZERO_EXTEND
? std::optional<ByteProvider<SDValue>>(
ByteProvider<SDValue>::getConstantZero())
: std::nullopt;
- return calculateByteProvider(NarrowOp, Index, Depth + 1, StartingIndex);
+ return calculateByteProvider(NarrowOp, Index, Depth + 1, StartingIndex,
+ IsSigned);
}
case ISD::TRUNCATE: {
@@ -10705,7 +10717,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
if (NarrowByteWidth >= Index) {
return calculateByteProvider(Op.getOperand(0), Index, Depth + 1,
- StartingIndex);
+ StartingIndex, IsSigned);
}
return std::nullopt;
@@ -10713,13 +10725,14 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
case ISD::CopyFromReg: {
if (BitWidth / 8 > Index)
- return calculateSrcByte(Op, StartingIndex, Index);
+ return calculateSrcByte(Op, StartingIndex, Index, IsSigned);
return std::nullopt;
}
case ISD::LOAD: {
auto L = cast<LoadSDNode>(Op.getNode());
+ IsSigned |= L->getExtensionType() == ISD::SEXTLOAD;
unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
if (NarrowBitWidth % 8 != 0)
return std::nullopt;
@@ -10736,7 +10749,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
}
if (NarrowByteWidth > Index) {
- return calculateSrcByte(Op, StartingIndex, Index);
+ return calculateSrcByte(Op, StartingIndex, Index, IsSigned);
}
return std::nullopt;
@@ -10744,7 +10757,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
case ISD::BSWAP:
return calculateByteProvider(Op->getOperand(0), BitWidth / 8 - Index - 1,
- Depth + 1, StartingIndex);
+ Depth + 1, StartingIndex, IsSigned);
case ISD::EXTRACT_VECTOR_ELT: {
auto IdxOp = dyn_cast<ConstantSDNode>(Op->getOperand(1));
@@ -10759,7 +10772,7 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
}
return calculateSrcByte(ScalarSize == 32 ? Op : Op.getOperand(0),
- StartingIndex, Index);
+ StartingIndex, Index, IsSigned);
}
case AMDGPUISD::PERM: {
@@ -10775,9 +10788,10 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
auto NextOp = Op.getOperand(IdxMask > 0x03 ? 0 : 1);
auto NextIndex = IdxMask > 0x03 ? IdxMask % 4 : IdxMask;
- return IdxMask != 0x0c ? calculateSrcByte(NextOp, StartingIndex, NextIndex)
- : ByteProvider<SDValue>(
- ByteProvider<SDValue>::getConstantZero());
+ return IdxMask != 0x0c
+ ? calculateSrcByte(NextOp, StartingIndex, NextIndex, IsSigned)
+ : ByteProvider<SDValue>(
+ ByteProvider<SDValue>::getConstantZero());
}
default: {
@@ -12587,11 +12601,7 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
auto MulIdx = isMul(LHS) ? 0 : 1;
auto MulOpcode = TempNode.getOperand(MulIdx).getOpcode();
- bool IsSigned =
- MulOpcode == AMDGPUISD::MUL_I24 ||
- (MulOpcode == ISD::MUL &&
- TempNode->getOperand(MulIdx)->getFlags().hasNoSignedWrap() &&
- !TempNode->getOperand(MulIdx)->getFlags().hasNoUnsignedWrap());
+ std::optional<bool> IsSigned;
SmallVector<std::pair<SDValue, unsigned>, 4> Src0s;
SmallVector<std::pair<SDValue, unsigned>, 4> Src1s;
SmallVector<SDValue, 4> Src2s;
@@ -12607,15 +12617,17 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
(MulOpcode == ISD::MUL &&
TempNode->getOperand(MulIdx)->getFlags().hasNoSignedWrap() &&
!TempNode->getOperand(MulIdx)->getFlags().hasNoUnsignedWrap());
- if (IterIsSigned != IsSigned) {
- break;
- }
auto Src0 = handleMulOperand(TempNode->getOperand(MulIdx)->getOperand(0));
if (!Src0)
break;
auto Src1 = handleMulOperand(TempNode->getOperand(MulIdx)->getOperand(1));
if (!Src1)
break;
+ IterIsSigned |= Src0->IsSigned || Src1->IsSigned;
+ if (!IsSigned)
+ IsSigned = IterIsSigned;
+ if (IterIsSigned != *IsSigned)
+ break;
placeSources(*Src0, *Src1, Src0s, Src1s, I);
auto AddIdx = 1 - MulIdx;
// Allow the special case where add (add (mul24, 0), mul24) became ->
@@ -12630,6 +12642,15 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
handleMulOperand(TempNode->getOperand(AddIdx)->getOperand(1));
if (!Src1)
break;
+ auto IterIsSigned =
+ MulOpcode == AMDGPUISD::MUL_I24 ||
+ (MulOpcode == ISD::MUL &&
+ TempNode->getOperand(MulIdx)->getFlags().hasNoSignedWrap() &&
+ !TempNode->getOperand(MulIdx)->getFlags().hasNoUnsignedWrap());
+ IterIsSigned |= Src0->IsSigned || Src1->IsSigned;
+ assert(IsSigned);
+ if (IterIsSigned != *IsSigned)
+ break;
placeSources(*Src0, *Src1, Src0s, Src1s, I + 1);
Src2s.push_back(DAG.getConstant(0, SL, MVT::i32));
ChainLength = I + 2;
@@ -12695,18 +12716,19 @@ SDValue SITargetLowering::performAddCombine(SDNode *N,
Src1 = resolveSources(DAG, SL, Src1s, false, true);
}
+ assert(IsSigned);
SDValue Src2 =
- DAG.getExtOrTrunc(IsSigned, Src2s[ChainLength - 1], SL, MVT::i32);
+ DAG.getExtOrTrunc(*IsSigned, Src2s[ChainLength - 1], SL, MVT::i32);
- SDValue IID = DAG.getTargetConstant(IsSigned ? Intrinsic::amdgcn_sdot4
- : Intrinsic::amdgcn_udot4,
+ SDValue IID = DAG.getTargetConstant(*IsSigned ? Intrinsic::amdgcn_sdot4
+ : Intrinsic::amdgcn_udot4,
SL, MVT::i64);
assert(!VT.isVector());
auto Dot = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SL, MVT::i32, IID, Src0,
Src1, Src2, DAG.getTargetConstant(0, SL, MVT::i1));
- return DAG.getExtOrTrunc(IsSigned, Dot, SL, VT);
+ return DAG.getExtOrTrunc(*IsSigned, Dot, SL, VT);
}
if (VT != MVT::i32 || !DCI.isAfterLegalizeDAG())
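
The performAddCombine change above drops the up-front signedness guess in favor of a lazily initialized std::optional<bool>: the first multiply in the chain fixes the expected signedness and any later multiply that disagrees ends the chain. A small standalone sketch of that agreement check (plain structs in place of the real DAG walk; names here are illustrative, not the patch's):

// Sketch of the chain-agreement check: IsSigned starts unset, is fixed by the
// first multiply, and any later multiply whose operands disagree stops the
// chain instead of silently mixing sdot4/udot4 semantics.
#include <cstddef>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

struct MulOperand {
  bool IsSigned = false; // produced by calculateByteProvider in the real code
};

struct Mul {
  MulOperand Src0, Src1;
};

// Returns how many multiplies share one signedness, and what that signedness is.
std::pair<std::size_t, bool> matchChain(const std::vector<Mul> &Chain) {
  std::optional<bool> IsSigned; // unset until the first multiply is inspected
  std::size_t Length = 0;
  for (const Mul &M : Chain) {
    bool IterIsSigned = M.Src0.IsSigned || M.Src1.IsSigned;
    if (!IsSigned)
      IsSigned = IterIsSigned; // first multiply decides
    if (IterIsSigned != *IsSigned)
      break; // mixed signedness: stop extending the dot-product chain
    ++Length;
  }
  return {Length, IsSigned.value_or(false)};
}

int main() {
  std::vector<Mul> Chain = {{{true}, {false}}, {{true}, {true}}, {{false}, {false}}};
  auto [Len, Signed] = matchChain(Chain);
  std::cout << "chain length " << Len << ", signed " << Signed << "\n"; // 2, 1
  return 0;
}
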
diff --git a/llvm/test/CodeGen/AMDGPU/idot4s.ll b/llvm/test/CodeGen/AMDGPU/idot4s.ll
index 7edd24f12982ebd..e521039ce9ac838 100644
--- a/llvm/test/CodeGen/AMDGPU/idot4s.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4s.ll
@@ -143,7 +143,7 @@ define amdgpu_kernel void @idot4_acc32(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: global_load_b32 v0, v0, s[6:7]
; GFX11-DL-NEXT: s_load_b32 s2, s[0:1], 0x0
; GFX11-DL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -352,7 +352,7 @@ define amdgpu_kernel void @idot4_acc16(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: global_load_b32 v0, v0, s[6:7]
; GFX11-DL-NEXT: global_load_i16 v3, v1, s[0:1]
; GFX11-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v2, v0, v3
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v2, v0, v3 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b16 v1, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -732,7 +732,7 @@ define amdgpu_kernel void @idot4_multiuse_mul1(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-DL-NEXT: v_mad_i32_i24 v2, v2, v3, s2
; GFX11-DL-NEXT: v_mov_b32_e32 v3, 0
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, v2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, v2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v3, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -922,7 +922,7 @@ define amdgpu_kernel void @idot4_acc32_vecMul(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: global_load_b32 v0, v0, s[6:7]
; GFX11-DL-NEXT: s_load_b32 s2, s[0:1], 0x0
; GFX11-DL-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1356,7 +1356,7 @@ define amdgpu_kernel void @idot4_acc32_2ele(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_perm_b32 v0, v0, v0, 0xc0c0100
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1534,7 +1534,7 @@ define amdgpu_kernel void @idot4_acc32_3ele(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_perm_b32 v0, v0, v0, 0xc020100
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1719,7 +1719,7 @@ define amdgpu_kernel void @idot4_acc32_3ele_permuted(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_perm_b32 v0, v0, v0, 0xc020003
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1885,7 +1885,7 @@ define amdgpu_kernel void @idot4_acc32_opt(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: global_load_b32 v1, v0, s[4:5]
; GFX11-DL-NEXT: global_load_b32 v0, v0, s[6:7]
; GFX11-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, 0
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, 0 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2092,7 +2092,7 @@ define amdgpu_kernel void @idot4_acc32_3src(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_or_b32_e32 v1, v1, v2
; GFX11-DL-NEXT: v_mov_b32_e32 v2, 0
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s0
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s0 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[6:7]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2299,7 +2299,7 @@ define amdgpu_kernel void @idot4_acc32_3src_3ele(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_or_b32_e32 v1, v1, v2
; GFX11-DL-NEXT: v_mov_b32_e32 v2, 0
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s0
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s0 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[6:7]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2504,7 +2504,7 @@ define amdgpu_kernel void @idot4_bad_source(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: v_mad_i32_i24 v2, v2, s2, s3
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, v2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, v2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v3, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2695,7 +2695,7 @@ define amdgpu_kernel void @idot4_commutative(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_perm_b32 v0, v0, v0, 0xc020100
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2897,7 +2897,7 @@ define amdgpu_kernel void @idot4_acc32_3src_3ele_src0(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_or_b32_e32 v1, v1, v2
; GFX11-DL-NEXT: v_mov_b32_e32 v2, 0
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s0
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v1, s0 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v2, v0, s[6:7]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3133,7 +3133,7 @@ define amdgpu_kernel void @idot4_4src(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: v_mov_b32_e32 v1, 0
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v2, s2
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v0, v2, s2 neg_lo:[1,1,0]
; GFX11-DL-NEXT: global_store_b32 v1, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -3190,5 +3190,198 @@ entry:
ret void
}
+define amdgpu_kernel void @idot4_nonstandard_signed(ptr addrspace(1) %src1,
+; GFX7-LABEL: idot4_nonstandard_signed:
+; GFX7: ; %bb.0: ; %entry
+; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s10, 0
+; GFX7-NEXT: s_mov_b32 s11, s3
+; GFX7-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7-NEXT: s_mov_b64 s[8:9], s[4:5]
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX7-NEXT: v_mov_b32_e32 v1, 0
+; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT: s_mov_b64 s[8:9], s[6:7]
+; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
+; GFX7-NEXT: s_mov_b32 s2, -1
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_bfe_i32 v1, v2, 0, 8
+; GFX7-NEXT: v_bfe_i32 v3, v2, 8, 8
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v5, 0xff, v0
+; GFX7-NEXT: v_bfe_i32 v4, v2, 16, 8
+; GFX7-NEXT: v_bfe_u32 v6, v0, 8, 8
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX7-NEXT: v_mul_u32_u24_e32 v1, v1, v5
+; GFX7-NEXT: v_ashrrev_i32_e32 v2, 24, v2
+; GFX7-NEXT: v_bfe_u32 v7, v0, 16, 8
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX7-NEXT: v_mad_u32_u24 v1, v6, v3, v1
+; GFX7-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX7-NEXT: v_mad_u32_u24 v1, v7, v4, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX7-NEXT: v_mad_u32_u24 v0, v2, v0, v1
+; GFX7-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX7-NEXT: s_endpgm
+;
+; GFX8-LABEL: idot4_nonstandard_signed:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s4, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v3, v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s6, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: flat_load_dword v2, v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: s_waitcnt vmcnt(1)
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 8, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v3
+; GFX8-NEXT: v_bfe_i32 v7, v7, 0, 8
+; GFX8-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 8, v2
+; GFX8-NEXT: v_mul_lo_u16_sdwa v6, sext(v3), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX8-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX8-NEXT: v_and_b32_sdwa v4, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 24, v3
+; GFX8-NEXT: v_mad_u16 v6, v8, v7, v6
+; GFX8-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX8-NEXT: v_mad_u16 v4, v4, v5, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX8-NEXT: v_mad_u16 v2, v3, v2, v4
+; GFX8-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GFX8-NEXT: flat_store_dword v[0:1], v2
+; GFX8-NEXT: s_endpgm
+;
+; GFX9-NODL-LABEL: idot4_nonstandard_signed:
+; GFX9-NODL: ; %bb.0: ; %entry
+; GFX9-NODL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NODL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NODL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-NODL-NEXT: s_movk_i32 s0, 0xff
+; GFX9-NODL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NODL-NEXT: global_load_dword v1, v0, s[4:5]
+; GFX9-NODL-NEXT: global_load_dword v2, v0, s[6:7]
+; GFX9-NODL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NODL-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX9-NODL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v6, 8, v2
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-NODL-NEXT: v_mul_lo_u16_sdwa v4, sext(v1), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0
+; GFX9-NODL-NEXT: v_bfe_i32 v5, v5, 0, 8
+; GFX9-NODL-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX9-NODL-NEXT: v_and_b32_sdwa v7, v2, s0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v1, 24, v1
+; GFX9-NODL-NEXT: v_bfe_i32 v3, v3, 0, 8
+; GFX9-NODL-NEXT: v_mad_legacy_u16 v4, v6, v5, v4
+; GFX9-NODL-NEXT: v_bfe_i32 v1, v1, 0, 8
+; GFX9-NODL-NEXT: v_mad_legacy_u16 v3, v7, v3, v4
+; GFX9-NODL-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX9-NODL-NEXT: v_mad_legacy_u16 v1, v1, v2, v3
+; GFX9-NODL-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX9-NODL-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX9-NODL-NEXT: s_endpgm
+;
+; GFX9-DL-LABEL: idot4_nonstandard_signed:
+; GFX9-DL: ; %bb.0: ; %entry
+; GFX9-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-DL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX9-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-DL-NEXT: global_load_dword v1, v0, s[4:5]
+; GFX9-DL-NEXT: global_load_dword v2, v0, s[6:7]
+; GFX9-DL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DL-NEXT: v_dot4_i32_i8 v1, v1, v2, 0
+; GFX9-DL-NEXT: v_bfe_i32 v1, v1, 0, 16
+; GFX9-DL-NEXT: global_store_dword v0, v1, s[2:3]
+; GFX9-DL-NEXT: s_endpgm
+;
+; GFX10-DL-LABEL: idot4_nonstandard_signed:
+; GFX10-DL: ; %bb.0: ; %entry
+; GFX10-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX10-DL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GFX10-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-DL-NEXT: s_clause 0x1
+; GFX10-DL-NEXT: global_load_dword v1, v0, s[4:5]
+; GFX10-DL-NEXT: global_load_dword v2, v0, s[6:7]
+; GFX10-DL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-DL-NEXT: s_waitcnt vmcnt(0)
+; GFX10-DL-NEXT: v_dot4c_i32_i8_e32 v0, v1, v2
+; GFX10-DL-NEXT: v_mov_b32_e32 v1, 0
+; GFX10-DL-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX10-DL-NEXT: global_store_dword v1, v0, s[0:1]
+; GFX10-DL-NEXT: s_endpgm
+;
+; GFX11-DL-LABEL: idot4_nonstandard_signed:
+; GFX11-DL: ; %bb.0: ; %entry
+; GFX11-DL-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
+; GFX11-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-DL-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
+; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-DL-NEXT: s_clause 0x1
+; GFX11-DL-NEXT: global_load_b32 v1, v0, s[4:5]
+; GFX11-DL-NEXT: global_load_b32 v0, v0, s[6:7]
+; GFX11-DL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-DL-NEXT: v_dot4_i32_iu8 v0, v1, v0, 0 neg_lo:[1,1,0]
+; GFX11-DL-NEXT: v_mov_b32_e32 v1, 0
+; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-DL-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX11-DL-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-DL-NEXT: s_nop 0
+; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-DL-NEXT: s_endpgm
+ ptr addrspace(1) %src2,
+ ptr addrspace(1) nocapture %dst) {
+entry:
+ %idx = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr <4 x i8>, ptr addrspace(1) %src1, i32 %idx
+ %vec1 = load <4 x i8>, ptr addrspace(1) %gep1
+ %gep2 = getelementptr <4 x i8>, ptr addrspace(1) %src2, i32 %idx
+ %vec2 = load <4 x i8>, ptr addrspace(1) %gep2
+ %v1e0 = extractelement <4 x i8> %vec1, i64 0
+ %v1e0e = sext i8 %v1e0 to i16
+ %v2e0 = extractelement <4 x i8> %vec2, i64 0
+ %v2e0e = zext i8 %v2e0 to i16
+ %mul0 = mul nsw i16 %v1e0e, %v2e0e
+ %add0 = add i16 %mul0, 0
+
+ %v1e1 = extractelement <4 x i8> %vec1, i64 1
+ %v1e1e = sext i8 %v1e1 to i16
+ %v2e1 = extractelement <4 x i8> %vec2, i64 1
+ %v2e1e = zext i8 %v2e1 to i16
+ %mul1 = mul nsw i16 %v2e1e, %v1e1e
+ %add1 = add i16 %mul1, %add0
+ %v1e2 = extractelement <4 x i8> %vec1, i64 2
+ %v1e2e = sext i8 %v1e2 to i16
+ %v2e2 = extractelement <4 x i8> %vec2, i64 2
+ %v2e2e = zext i8 %v2e2 to i16
+ %mul2 = mul nsw i16 %v2e2e, %v1e2e
+ %add2 = add i16 %mul2, %add1
+ %v1e3 = extractelement <4 x i8> %vec1, i64 3
+ %v1e3e = sext i8 %v1e3 to i16
+ %v2e3 = extractelement <4 x i8> %vec2, i64 3
+ %v2e3e = zext i8 %v2e3 to i16
+ %mul3 = mul nsw i16 %v1e3e, %v2e3e
+ %add3 = add i16 %mul3, %add2
+ %res = sext i16 %add3 to i32
+ store i32 %res, ptr addrspace(1) %dst, align 4
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/idot4u.ll b/llvm/test/CodeGen/AMDGPU/idot4u.ll
index 7fce369ff9f99da..a82c5215f3b2c65 100644
--- a/llvm/test/CodeGen/AMDGPU/idot4u.ll
+++ b/llvm/test/CodeGen/AMDGPU/idot4u.ll
@@ -1606,14 +1606,27 @@ define amdgpu_kernel void @notdot4_mixedtypes(ptr addrspace(1) %src1,
; GFX9-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-DL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX9-DL-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-DL-NEXT: s_mov_b32 s0, 0xc0c0302
; GFX9-DL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DL-NEXT: global_load_dword v2, v0, s[4:5]
-; GFX9-DL-NEXT: global_load_dword v3, v0, s[6:7]
-; GFX9-DL-NEXT: global_load_ushort v4, v1, s[2:3]
+; GFX9-DL-NEXT: global_load_dword v1, v0, s[4:5]
+; GFX9-DL-NEXT: global_load_dword v2, v0, s[6:7]
+; GFX9-DL-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-DL-NEXT: global_load_ushort v3, v0, s[2:3]
+; GFX9-DL-NEXT: s_waitcnt vmcnt(2)
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v6, 8, v1
+; GFX9-DL-NEXT: s_waitcnt vmcnt(1)
+; GFX9-DL-NEXT: v_lshrrev_b32_e32 v7, 8, v2
+; GFX9-DL-NEXT: v_and_b32_e32 v6, 0xff, v6
+; GFX9-DL-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX9-DL-NEXT: v_bfe_i32 v4, v1, 0, 8
+; GFX9-DL-NEXT: v_bfe_i32 v5, v2, 0, 8
; GFX9-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DL-NEXT: v_dot4_u32_u8 v0, v2, v3, v4
-; GFX9-DL-NEXT: global_store_short v1, v0, s[2:3]
+; GFX9-DL-NEXT: v_mad_legacy_u16 v3, v6, v7, v3
+; GFX9-DL-NEXT: v_perm_b32 v2, v2, v2, s0
+; GFX9-DL-NEXT: v_mad_legacy_u16 v3, v4, v5, v3
+; GFX9-DL-NEXT: v_perm_b32 v1, v1, v1, s0
+; GFX9-DL-NEXT: v_dot4_u32_u8 v1, v1, v2, v3
+; GFX9-DL-NEXT: global_store_short v0, v1, s[2:3]
; GFX9-DL-NEXT: s_endpgm
;
; GFX10-DL-LABEL: notdot4_mixedtypes:
@@ -1622,15 +1635,28 @@ define amdgpu_kernel void @notdot4_mixedtypes(ptr addrspace(1) %src1,
; GFX10-DL-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-DL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; GFX10-DL-NEXT: v_mov_b32_e32 v1, 0
; GFX10-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-DL-NEXT: s_clause 0x1
-; GFX10-DL-NEXT: global_load_dword v2, v0, s[4:5]
-; GFX10-DL-NEXT: global_load_dword v3, v0, s[6:7]
-; GFX10-DL-NEXT: global_load_ushort v4, v1, s[2:3]
+; GFX10-DL-NEXT: global_load_dword v1, v0, s[4:5]
+; GFX10-DL-NEXT: global_load_dword v2, v0, s[6:7]
+; GFX10-DL-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-DL-NEXT: global_load_ushort v3, v0, s[2:3]
+; GFX10-DL-NEXT: s_waitcnt vmcnt(2)
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v4, 8, v1
+; GFX10-DL-NEXT: s_waitcnt vmcnt(1)
+; GFX10-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v2
+; GFX10-DL-NEXT: v_bfe_i32 v6, v1, 0, 8
+; GFX10-DL-NEXT: v_bfe_i32 v7, v2, 0, 8
+; GFX10-DL-NEXT: v_perm_b32 v2, v2, v2, 0xc0c0302
+; GFX10-DL-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX10-DL-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX10-DL-NEXT: v_perm_b32 v1, v1, v1, 0xc0c0302
; GFX10-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX10-DL-NEXT: v_dot4_u32_u8 v0, v2, v3, v4
-; GFX10-DL-NEXT: global_store_short v1, v0, s[2:3]
+; GFX10-DL-NEXT: v_mad_u16 v3, v4, v5, v3
+; GFX10-DL-NEXT: v_mad_u16 v3, v6, v7, v3
+; GFX10-DL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX10-DL-NEXT: v_dot4_u32_u8 v1, v1, v2, v3
+; GFX10-DL-NEXT: global_store_short v0, v1, s[2:3]
; GFX10-DL-NEXT: s_endpgm
;
; GFX11-DL-LABEL: notdot4_mixedtypes:
@@ -1638,15 +1664,31 @@ define amdgpu_kernel void @notdot4_mixedtypes(ptr addrspace(1) %src1,
; GFX11-DL-NEXT: s_clause 0x1
; GFX11-DL-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX11-DL-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
-; GFX11-DL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 2, v0
+; GFX11-DL-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX11-DL-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-DL-NEXT: s_clause 0x1
-; GFX11-DL-NEXT: global_load_b32 v2, v0, s[4:5]
+; GFX11-DL-NEXT: global_load_b32 v1, v0, s[4:5]
; GFX11-DL-NEXT: global_load_b32 v0, v0, s[6:7]
-; GFX11-DL-NEXT: global_load_u16 v3, v1, s[0:1]
+; GFX11-DL-NEXT: s_waitcnt vmcnt(1)
+; GFX11-DL-NEXT: v_lshrrev_b32_e32 v4, 8, v1
; GFX11-DL-NEXT: s_waitcnt vmcnt(0)
-; GFX11-DL-NEXT: v_dot4_u32_u8 v0, v2, v0, v3
-; GFX11-DL-NEXT: global_store_b16 v1, v0, s[0:1]
+; GFX11-DL-NEXT: v_lshrrev_b32_e32 v5, 8, v0
+; GFX11-DL-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-DL-NEXT: v_bfe_i32 v6, v1, 0, 8
+; GFX11-DL-NEXT: v_bfe_i32 v7, v0, 0, 8
+; GFX11-DL-NEXT: v_and_b32_e32 v4, 0xff, v4
+; GFX11-DL-NEXT: v_and_b32_e32 v5, 0xff, v5
+; GFX11-DL-NEXT: global_load_u16 v3, v2, s[0:1]
+; GFX11-DL-NEXT: v_perm_b32 v0, v0, v0, 0xc0c0302
+; GFX11-DL-NEXT: v_perm_b32 v1, v1, v1, 0xc0c0302
+; GFX11-DL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-DL-NEXT: v_mad_u16 v3, v4, v5, v3
+; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-DL-NEXT: v_mad_u16 v3, v6, v7, v3
+; GFX11-DL-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-DL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-DL-NEXT: v_dot4_u32_u8 v0, v1, v0, v3
+; GFX11-DL-NEXT: global_store_b16 v2, v0, s[0:1]
; GFX11-DL-NEXT: s_nop 0
; GFX11-DL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-DL-NEXT: s_endpgm