[llvm] [AMDGPU] Enable sinking of free vector ops that will be folded into their uses (PR #162580)
Gheorghe-Teodor Bercea via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 9 08:02:09 PST 2026
https://github.com/doru1004 updated https://github.com/llvm/llvm-project/pull/162580
>From f200f5b253deee188195fa4d2616806277d946bb Mon Sep 17 00:00:00 2001
From: Gheorghe-Teodor Bercea <dobercea at amd.com>
Date: Wed, 8 Oct 2025 17:45:43 -0500
Subject: [PATCH 1/2] Allow sinking of free vector ops
---
.../AMDGPU/AMDGPUTargetTransformInfo.cpp | 54 ++++-
llvm/test/CodeGen/AMDGPU/loop-vector-sink.ll | 194 +++++++++++++++++-
.../AMDGPU/undef-handling-crash-in-ra.ll | 2 -
3 files changed, 237 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 4f07aaef909e4..d9b0708bdc443 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1341,8 +1341,60 @@ bool GCNTTIImpl::isProfitableToSinkOperands(Instruction *I,
if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
continue;
- if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
+ if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value()))) {
Ops.push_back(&Op);
+ continue;
+ }
+
+ // Check for zero-cost InsertElement/ExtractElement instructions
+ if (Instruction *OpInst = dyn_cast<Instruction>(Op.get())) {
+ if (OpInst->getType()->isVectorTy() && OpInst->getNumOperands() > 1) {
+ Instruction *VecOpInst = dyn_cast<Instruction>(OpInst->getOperand(0));
+ if (VecOpInst && VecOpInst->hasOneUse())
+ continue;
+
+ if (getVectorInstrCost(OpInst->getOpcode(), OpInst->getType(),
+ TTI::TCK_RecipThroughput, 0,
+ OpInst->getOperand(0),
+ OpInst->getOperand(1)) == 0) {
+ Ops.push_back(&Op);
+ continue;
+ }
+ }
+ }
+
+ if (auto *Shuffle = dyn_cast<ShuffleVectorInst>(Op.get())) {
+
+ unsigned EltSize = DL.getTypeSizeInBits(
+ cast<VectorType>(Shuffle->getType())->getElementType());
+
+ // For 16-bit (or wider) element shufflevectors, these will be lowered into
+ // a series of insert / extract elements that will be coalesced away.
+
+ if (EltSize < 16 || !ST->has16BitInsts())
+ continue;
+
+ int NumSubElts, SubIndex;
+ if (Shuffle->changesLength()) {
+ if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding()) {
+ Ops.push_back(&Op);
+ continue;
+ }
+
+ if ((Shuffle->isExtractSubvectorMask(SubIndex) ||
+ Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex)) &&
+ !(SubIndex & 0x1)) {
+ Ops.push_back(&Op);
+ continue;
+ }
+ }
+
+ if (Shuffle->isReverse() || Shuffle->isZeroEltSplat() ||
+ Shuffle->isSingleSource()) {
+ Ops.push_back(&Op);
+ continue;
+ }
+ }
}
return !Ops.empty();
diff --git a/llvm/test/CodeGen/AMDGPU/loop-vector-sink.ll b/llvm/test/CodeGen/AMDGPU/loop-vector-sink.ll
index 670e2c5b2c9e0..0bc7824718e58 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-vector-sink.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-vector-sink.ll
@@ -7,12 +7,12 @@ define amdgpu_kernel void @runningSum(ptr addrspace(1) %out0, ptr addrspace(1) %
; OPT-SAME: ptr addrspace(1) [[OUT0:%.*]], ptr addrspace(1) [[OUT1:%.*]], i32 [[INPUTELEMENT1:%.*]], i32 [[INPUTITER:%.*]]) #[[ATTR0:[0-9]+]] {
; OPT-NEXT: [[PREHEADER:.*]]:
; OPT-NEXT: [[VECELEMENT1:%.*]] = insertelement <2 x i32> poison, i32 [[INPUTELEMENT1]], i64 0
-; OPT-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[VECELEMENT1]], <2 x i32> poison, <2 x i32> zeroinitializer
+; OPT-NEXT: [[TMP0:%.*]] = shufflevector <2 x i32> [[VECELEMENT1]], <2 x i32> poison, <2 x i32> zeroinitializer
; OPT-NEXT: br label %[[LOOPBODY:.*]]
; OPT: [[LOOPBODY]]:
-; OPT-NEXT: [[PREVIOUSSUM:%.*]] = phi <2 x i32> [ [[TMP1]], %[[PREHEADER]] ], [ [[RUNNINGSUM:%.*]], %[[LOOPBODY]] ]
+; OPT-NEXT: [[PREVIOUSSUM:%.*]] = phi <2 x i32> [ [[TMP0]], %[[PREHEADER]] ], [ [[RUNNINGSUM:%.*]], %[[LOOPBODY]] ]
; OPT-NEXT: [[ITERCOUNT:%.*]] = phi i32 [ [[INPUTITER]], %[[PREHEADER]] ], [ [[ITERSLEFT:%.*]], %[[LOOPBODY]] ]
-; OPT-NEXT: [[RUNNINGSUM]] = add <2 x i32> [[TMP1]], [[PREVIOUSSUM]]
+; OPT-NEXT: [[RUNNINGSUM]] = add <2 x i32> [[TMP0]], [[PREVIOUSSUM]]
; OPT-NEXT: [[ITERSLEFT]] = sub i32 [[ITERCOUNT]], 1
; OPT-NEXT: [[COND:%.*]] = icmp eq i32 [[ITERSLEFT]], 0
; OPT-NEXT: br i1 [[COND]], label %[[LOOPEXIT:.*]], label %[[LOOPBODY]]
@@ -121,14 +121,14 @@ define amdgpu_kernel void @test_shuffle_insert_subvector(ptr addrspace(1) %ptr,
; OPT-NEXT: [[ENTRY:.*:]]
; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i16> [[VEC1]], <4 x i16> [[VEC2]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
; OPT-NEXT: [[SHUFFLE2:%.*]] = shufflevector <4 x i16> [[VEC1]], <4 x i16> [[VEC2]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
-; OPT-NEXT: [[SHUFFLE3:%.*]] = shufflevector <4 x i16> [[VEC1]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; OPT-NEXT: [[SHUFFLE4:%.*]] = shufflevector <4 x i16> [[VEC2]], <4 x i16> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; OPT-NEXT: [[SHUFFLE5:%.*]] = shufflevector <4 x i16> [[SHUFFLE]], <4 x i16> [[SHUFFLE2]], <4 x i32> <i32 0, i32 2, i32 4, i32 6>
; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
; OPT: [[IF_THEN]]:
; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i16> [[SHUFFLE5]], <i16 100, i16 200, i16 300, i16 400>
+; OPT-NEXT: [[SHUFFLE3:%.*]] = shufflevector <4 x i16> [[VEC1]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; OPT-NEXT: [[OTHER_RESULT:%.*]] = mul <4 x i16> [[SHUFFLE3]], splat (i16 2)
+; OPT-NEXT: [[SHUFFLE4:%.*]] = shufflevector <4 x i16> [[VEC2]], <4 x i16> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; OPT-NEXT: [[MORE_RESULT:%.*]] = sub <4 x i16> [[SHUFFLE4]], splat (i16 5)
; OPT-NEXT: store <4 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 8
; OPT-NEXT: store <4 x i16> [[OTHER_RESULT]], ptr addrspace(1) [[PTR]], align 8
@@ -164,14 +164,14 @@ define amdgpu_kernel void @test_shuffle_extract_subvector(ptr addrspace(1) %ptr,
; OPT-LABEL: define amdgpu_kernel void @test_shuffle_extract_subvector(
; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <4 x i16> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[ENTRY:.*:]]
-; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
-; OPT-NEXT: [[SHUFFLE2:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
-; OPT-NEXT: [[SHUFFLE3:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
; OPT-NEXT: [[RESULT_VEC:%.*]] = add <2 x i16> [[SHUFFLE]], <i16 100, i16 200>
+; OPT-NEXT: [[SHUFFLE2:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
; OPT-NEXT: [[RESULT_VEC2:%.*]] = mul <2 x i16> [[SHUFFLE2]], splat (i16 3)
+; OPT-NEXT: [[SHUFFLE3:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; OPT-NEXT: [[RESULT_VEC3:%.*]] = sub <4 x i16> [[SHUFFLE3]], splat (i16 10)
; OPT-NEXT: store <2 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 4
; OPT-NEXT: store <2 x i16> [[RESULT_VEC2]], ptr addrspace(1) [[PTR]], align 4
@@ -205,12 +205,12 @@ define amdgpu_kernel void @test_shuffle_sink_operands(ptr addrspace(1) %ptr, <2
; OPT-LABEL: define amdgpu_kernel void @test_shuffle_sink_operands(
; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <2 x i16> [[INPUT_VEC:%.*]], <2 x i16> [[INPUT_VEC2:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[ENTRY:.*:]]
-; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i16> [[INPUT_VEC]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; OPT-NEXT: [[SHUFFLE2:%.*]] = shufflevector <2 x i16> [[INPUT_VEC2]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i16> [[INPUT_VEC]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i16> [[SHUFFLE]], <i16 100, i16 200, i16 300, i16 400>
+; OPT-NEXT: [[SHUFFLE2:%.*]] = shufflevector <2 x i16> [[INPUT_VEC2]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; OPT-NEXT: [[RESULT_VEC2:%.*]] = mul <4 x i16> [[SHUFFLE2]], splat (i16 5)
; OPT-NEXT: store <4 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 8
; OPT-NEXT: store <4 x i16> [[RESULT_VEC2]], ptr addrspace(1) [[PTR]], align 8
@@ -234,3 +234,177 @@ if.then:
if.end:
ret void
}
+
+; testing identity shuffle - should sink into if.then
+define amdgpu_kernel void @test_shuffle_identity(ptr addrspace(1) %ptr, <4 x i16> %input_vec, i32 %tid, i32 %cond) {
+; OPT-LABEL: define amdgpu_kernel void @test_shuffle_identity(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <4 x i16> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
+; OPT-NEXT: [[ENTRY:.*:]]
+; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
+; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i16> [[SHUFFLE]], <i16 100, i16 200, i16 300, i16 400>
+; OPT-NEXT: store <4 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 8
+; OPT-NEXT: br label %[[IF_END]]
+; OPT: [[IF_END]]:
+; OPT-NEXT: ret void
+;
+entry:
+ %shuffle = shufflevector <4 x i16> %input_vec, <4 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %cmp = icmp slt i32 %tid, %cond
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %result_vec = add <4 x i16> %shuffle, <i16 100, i16 200, i16 300, i16 400>
+ store <4 x i16> %result_vec, ptr addrspace(1) %ptr
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; testing i32 element shuffle - should sink into if.then (EltSize >= 32)
+define amdgpu_kernel void @test_shuffle_i32_elements(ptr addrspace(1) %ptr, <4 x i32> %input_vec, i32 %tid, i32 %cond) {
+; OPT-LABEL: define amdgpu_kernel void @test_shuffle_i32_elements(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <4 x i32> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
+; OPT-NEXT: [[ENTRY:.*:]]
+; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
+; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[INPUT_VEC]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i32> [[SHUFFLE]], <i32 100, i32 200, i32 300, i32 400>
+; OPT-NEXT: store <4 x i32> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 16
+; OPT-NEXT: br label %[[IF_END]]
+; OPT: [[IF_END]]:
+; OPT-NEXT: ret void
+;
+entry:
+ %shuffle = shufflevector <4 x i32> %input_vec, <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+ %cmp = icmp slt i32 %tid, %cond
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %result_vec = add <4 x i32> %shuffle, <i32 100, i32 200, i32 300, i32 400>
+ store <4 x i32> %result_vec, ptr addrspace(1) %ptr
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; testing reverse shuffle - should sink into if.then
+define amdgpu_kernel void @test_shuffle_reverse(ptr addrspace(1) %ptr, <4 x i16> %input_vec, i32 %tid, i32 %cond) {
+; OPT-LABEL: define amdgpu_kernel void @test_shuffle_reverse(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <4 x i16> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
+; OPT-NEXT: [[ENTRY:.*:]]
+; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
+; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i16> [[SHUFFLE]], <i16 100, i16 200, i16 300, i16 400>
+; OPT-NEXT: store <4 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 8
+; OPT-NEXT: br label %[[IF_END]]
+; OPT: [[IF_END]]:
+; OPT-NEXT: ret void
+;
+entry:
+ %shuffle = shufflevector <4 x i16> %input_vec, <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %cmp = icmp slt i32 %tid, %cond
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %result_vec = add <4 x i16> %shuffle, <i16 100, i16 200, i16 300, i16 400>
+ store <4 x i16> %result_vec, ptr addrspace(1) %ptr
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; testing zero element splat shuffle - should sink into if.then
+define amdgpu_kernel void @test_shuffle_zero_splat(ptr addrspace(1) %ptr, <4 x i16> %input_vec, i32 %tid, i32 %cond) {
+; OPT-LABEL: define amdgpu_kernel void @test_shuffle_zero_splat(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <4 x i16> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
+; OPT-NEXT: [[ENTRY:.*:]]
+; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
+; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i16> [[INPUT_VEC]], <4 x i16> poison, <4 x i32> zeroinitializer
+; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i16> [[SHUFFLE]], <i16 100, i16 200, i16 300, i16 400>
+; OPT-NEXT: store <4 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 8
+; OPT-NEXT: br label %[[IF_END]]
+; OPT: [[IF_END]]:
+; OPT-NEXT: ret void
+;
+entry:
+ %shuffle = shufflevector <4 x i16> %input_vec, <4 x i16> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %cmp = icmp slt i32 %tid, %cond
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %result_vec = add <4 x i16> %shuffle, <i16 100, i16 200, i16 300, i16 400>
+ store <4 x i16> %result_vec, ptr addrspace(1) %ptr
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; testing shuffle with odd SubIndex - should NOT sink (stays in entry)
+define amdgpu_kernel void @test_shuffle_odd_subindex_no_sink(ptr addrspace(1) %ptr, <8 x i16> %input_vec, i32 %tid, i32 %cond) {
+; OPT-LABEL: define amdgpu_kernel void @test_shuffle_odd_subindex_no_sink(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <8 x i16> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
+; OPT-NEXT: [[ENTRY:.*:]]
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i16> [[INPUT_VEC]], <8 x i16> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
+; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i16> [[SHUFFLE]], <i16 100, i16 200, i16 300, i16 400>
+; OPT-NEXT: store <4 x i16> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 8
+; OPT-NEXT: br label %[[IF_END]]
+; OPT: [[IF_END]]:
+; OPT-NEXT: ret void
+;
+entry:
+ %shuffle = shufflevector <8 x i16> %input_vec, <8 x i16> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ %cmp = icmp slt i32 %tid, %cond
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %result_vec = add <4 x i16> %shuffle, <i16 100, i16 200, i16 300, i16 400>
+ store <4 x i16> %result_vec, ptr addrspace(1) %ptr
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; testing shuffle with i8 elements (< 16 bits) - should NOT sink (stays in entry)
+define amdgpu_kernel void @test_shuffle_i8_no_sink(ptr addrspace(1) %ptr, <4 x i8> %input_vec, i32 %tid, i32 %cond) {
+; OPT-LABEL: define amdgpu_kernel void @test_shuffle_i8_no_sink(
+; OPT-SAME: ptr addrspace(1) [[PTR:%.*]], <4 x i8> [[INPUT_VEC:%.*]], i32 [[TID:%.*]], i32 [[COND:%.*]]) #[[ATTR0]] {
+; OPT-NEXT: [[ENTRY:.*:]]
+; OPT-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i8> [[INPUT_VEC]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; OPT-NEXT: [[CMP:%.*]] = icmp slt i32 [[TID]], [[COND]]
+; OPT-NEXT: br i1 [[CMP]], label %[[IF_THEN:.*]], label %[[IF_END:.*]]
+; OPT: [[IF_THEN]]:
+; OPT-NEXT: [[RESULT_VEC:%.*]] = add <4 x i8> [[SHUFFLE]], <i8 1, i8 2, i8 3, i8 4>
+; OPT-NEXT: store <4 x i8> [[RESULT_VEC]], ptr addrspace(1) [[PTR]], align 4
+; OPT-NEXT: br label %[[IF_END]]
+; OPT: [[IF_END]]:
+; OPT-NEXT: ret void
+;
+entry:
+ %shuffle = shufflevector <4 x i8> %input_vec, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %cmp = icmp slt i32 %tid, %cond
+ br i1 %cmp, label %if.then, label %if.end
+
+if.then:
+ %result_vec = add <4 x i8> %shuffle, <i8 1, i8 2, i8 3, i8 4>
+ store <4 x i8> %result_vec, ptr addrspace(1) %ptr
+ br label %if.end
+
+if.end:
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll b/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll
index fc32bc644ddcd..bb27275a5ae61 100644
--- a/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll
+++ b/llvm/test/CodeGen/AMDGPU/undef-handling-crash-in-ra.ll
@@ -50,7 +50,6 @@ define amdgpu_kernel void @foo(ptr addrspace(5) %ptr5, ptr %p0, double %v0, <4 x
; CHECK-NEXT: v_mov_b32_e32 v60, s66
; CHECK-NEXT: v_mov_b32_e32 v61, s67
; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[62:63]
-; CHECK-NEXT: ; kill: def $sgpr15 killed $sgpr15
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[54:55]
; CHECK-NEXT: flat_load_dwordx2 a[32:33], v[56:57]
@@ -67,7 +66,6 @@ define amdgpu_kernel void @foo(ptr addrspace(5) %ptr5, ptr %p0, double %v0, <4 x
; CHECK-NEXT: flat_store_dwordx2 v[44:45], v[58:59]
; CHECK-NEXT: flat_store_dwordx2 v[56:57], v[62:63]
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: ; kill: def $sgpr15 killed $sgpr15
; CHECK-NEXT: s_swappc_b64 s[30:31], s[54:55]
; CHECK-NEXT: flat_load_dwordx2 v[0:1], v[46:47] glc
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
>From fe2dc441f379a7f92c19d01680625892ff777000 Mon Sep 17 00:00:00 2001
From: Gheorghe-Teodor Bercea <gt.bercea at gmail.com>
Date: Fri, 6 Feb 2026 16:32:45 -0500
Subject: [PATCH 2/2] Update
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
Co-authored-by: Matt Arsenault <arsenm2 at gmail.com>
---
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index d9b0708bdc443..d4a6838ae4896 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1346,7 +1346,8 @@ bool GCNTTIImpl::isProfitableToSinkOperands(Instruction *I,
continue;
}
- // Check for zero-cost InsertElement/ExtractElement instructions
+ // Check for zero-cost multiple use InsertElement/ExtractElement
+ // instructions
if (Instruction *OpInst = dyn_cast<Instruction>(Op.get())) {
if (OpInst->getType()->isVectorTy() && OpInst->getNumOperands() > 1) {
Instruction *VecOpInst = dyn_cast<Instruction>(OpInst->getOperand(0));
@@ -1370,7 +1371,6 @@ bool GCNTTIImpl::isProfitableToSinkOperands(Instruction *I,
// For 16-bit (or wider) element shufflevectors, these will be lowered into
// a series of insert / extract elements that will be coalesced away.
-
if (EltSize < 16 || !ST->has16BitInsts())
continue;
More information about the llvm-commits
mailing list