[llvm] [DAGCombiner] Lower dynamic insertelt chain more efficiently (PR #162368)
Princeton Ferro via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 28 18:44:50 PDT 2025
https://github.com/Prince781 updated https://github.com/llvm/llvm-project/pull/162368
>From 26a55cc8d9bccea1e3f43ea28f408622e6224723 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Fri, 3 Oct 2025 20:43:38 -0700
Subject: [PATCH 1/5] [DAGCombiner] Lower dynamic insertelt chain
For an insertelt with a dynamic index, the default handling in
DAGTypeLegalizer and LegalizeDAG will reserve a stack slot for the
vector, lower the insertelt to a store, then load the modified vector
back into temporaries. The vector store and load may be legalized into a
sequence of smaller operations depending on the target. Let V = the
vector size and L = the length of a chain of insertelts with dynamic
indices. In the worst case, this chain will lower to O(VL) operations,
which can increase code size dramatically.
Instead, identify such chains, reserve one stack slot for the vector,
and lower all of the insertelts to stores at once. This requires only
O(V + L) operations. This change only affects the default lowering
behavior and still leaves targets to do their own thing.
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 66 ++++
llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll | 360 ++++++++++++++++++
llvm/test/CodeGen/PowerPC/vec_insert_elt.ll | 58 ++-
3 files changed, 449 insertions(+), 35 deletions(-)
create mode 100644 llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 310d35d9b1d1e..96e28353bde04 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -23480,6 +23480,72 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// inselt undef, InVal, EltNo --> build_vector < InVal, InVal, ... >
if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
return DAG.getSplat(VT, DL, InVal);
+
+ // Check if this operation is illegal and will be handled the default way.
+ if (TLI.getTypeAction(*DAG.getContext(), VT) ==
+ TargetLowering::TypeSplitVector ||
+ TLI.isOperationExpand(ISD::INSERT_VECTOR_ELT, VT)) {
+ // For each dynamic insertelt, the default way will save the vector to
+ // the stack, store at an offset, and load the modified vector. This can
+ // dramatically increase code size if we have a chain of insertelts on a
+ // large vector: requiring O(V*C) stores/loads where V = length of
+ // vector and C is length of chain. If each insertelt is only fed into the
+ // next, the vector is write-only across this chain, and we can just
+ // save once before the chain and load after in O(V + C) operations.
+ SmallVector<SDNode *> Seq{N};
+ unsigned NumDynamic = 1;
+ while (true) {
+ SDValue InVec = Seq.back()->getOperand(0);
+ if (InVec.getOpcode() != ISD::INSERT_VECTOR_ELT)
+ break;
+ Seq.push_back(InVec.getNode());
+ NumDynamic += !isa<ConstantSDNode>(InVec.getOperand(2));
+ }
+
+ // We will lower every insertelt in the sequence to a store. In the
+ // default handling, only dynamic insertelts in the sequence will be
+ // lowered to a store (+ vector save/load for each). Check that our
+ // approach reduces the total number of loads and stores over the default.
+ if (2 * VT.getVectorMinNumElements() + Seq.size() <
+ NumDynamic * (2 * VT.getVectorMinNumElements() + 1)) {
+ // In cases where the vector is illegal it will be broken down into
+ // parts and stored in parts - we should use the alignment for the
+ // smallest part.
+ Align SmallestAlign = DAG.getReducedAlign(VT, /*UseABI=*/false);
+ SDValue StackPtr =
+ DAG.CreateStackTemporary(VT.getStoreSize(), SmallestAlign);
+ auto &MF = DAG.getMachineFunction();
+ int FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
+ auto PtrInfo = MachinePointerInfo::getFixedStack(MF, FrameIndex);
+
+ // Save the vector to the stack
+ SDValue InVec = Seq.back()->getOperand(0);
+ SDValue Store = DAG.getStore(DAG.getEntryNode(), DL, InVec, StackPtr,
+ PtrInfo, SmallestAlign);
+
+ // Lower each dynamic insertelt to a store
+ for (SDNode *N : reverse(Seq)) {
+ SDValue Elmnt = N->getOperand(1);
+ SDValue Index = N->getOperand(2);
+
+ // Store the new element. This may be larger than the vector element
+ // type, so use a truncating store.
+ SDValue EltPtr =
+ TLI.getVectorElementPointer(DAG, StackPtr, VT, Index);
+ EVT EltVT = Elmnt.getValueType();
+ Store = DAG.getTruncStore(
+ Store, DL, Elmnt, EltPtr, MachinePointerInfo::getUnknownStack(MF),
+ EltVT,
+ commonAlignment(SmallestAlign, EltVT.getFixedSizeInBits() / 8));
+ }
+
+ // Load the saved vector from the stack
+ SDValue Load =
+ DAG.getLoad(VT, DL, Store, StackPtr, PtrInfo, SmallestAlign);
+ return Load.getValue(0);
+ }
+ }
+
return SDValue();
}
diff --git a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
new file mode 100644
index 0000000000000..eb9e1328835fc
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
@@ -0,0 +1,360 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mcpu=sm_20 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -mcpu=sm_20 | %ptxas-verify %}
+target triple = "nvptx64-nvidia-cuda"
+
+; COM: Save the vector to the stack once.
+define ptx_kernel void @lower_once(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) local_unnamed_addr {
+; CHECK-LABEL: lower_once(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot0[64];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<39>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot0;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [lower_once_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [lower_once_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [lower_once_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [lower_once_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [lower_once_param_1+48];
+; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
+; CHECK-NEXT: ld.param.b32 %rd14, [lower_once_param_2];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
+; CHECK-NEXT: add.u64 %rd17, %SP, 0;
+; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
+; CHECK-NEXT: ld.param.b32 %rd19, [lower_once_param_3];
+; CHECK-NEXT: and.b64 %rd20, %rd19, 7;
+; CHECK-NEXT: shl.b64 %rd21, %rd20, 3;
+; CHECK-NEXT: add.s64 %rd22, %rd17, %rd21;
+; CHECK-NEXT: ld.param.b32 %rd23, [lower_once_param_4];
+; CHECK-NEXT: and.b64 %rd24, %rd23, 7;
+; CHECK-NEXT: shl.b64 %rd25, %rd24, 3;
+; CHECK-NEXT: add.s64 %rd26, %rd17, %rd25;
+; CHECK-NEXT: st.b64 [%SP+56], %rd11;
+; CHECK-NEXT: st.b64 [%SP+48], %rd10;
+; CHECK-NEXT: st.b64 [%SP+40], %rd9;
+; CHECK-NEXT: st.b64 [%SP+32], %rd8;
+; CHECK-NEXT: st.b64 [%SP+24], %rd7;
+; CHECK-NEXT: st.b64 [%SP+16], %rd6;
+; CHECK-NEXT: st.b64 [%SP+8], %rd5;
+; CHECK-NEXT: st.b64 [%SP], %rd4;
+; CHECK-NEXT: st.b64 [%rd18], %rd2;
+; CHECK-NEXT: st.b64 [%rd22], %rd3;
+; CHECK-NEXT: st.b64 [%rd26], %rd12;
+; CHECK-NEXT: ld.param.b32 %rd27, [lower_once_param_5];
+; CHECK-NEXT: and.b64 %rd28, %rd27, 7;
+; CHECK-NEXT: shl.b64 %rd29, %rd28, 3;
+; CHECK-NEXT: add.s64 %rd30, %rd17, %rd29;
+; CHECK-NEXT: st.b64 [%rd30], %rd13;
+; CHECK-NEXT: ld.b64 %rd31, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd32, [%SP];
+; CHECK-NEXT: ld.b64 %rd33, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd34, [%SP+16];
+; CHECK-NEXT: ld.b64 %rd35, [%SP+40];
+; CHECK-NEXT: ld.b64 %rd36, [%SP+32];
+; CHECK-NEXT: ld.b64 %rd37, [%SP+56];
+; CHECK-NEXT: ld.b64 %rd38, [%SP+48];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd38, %rd37};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd36, %rd35};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd34, %rd33};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd32, %rd31};
+; CHECK-NEXT: ret;
+entry:
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 %idx1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx3
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+ ret void
+}
+
+; COM: Save the vector to the stack twice. Because these are in two different
+; slots, the resulting sequences may be non-overlapping even though the
+; insertelt sequences overlap.
+define ptx_kernel void @lower_twice(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) local_unnamed_addr {
+; CHECK-LABEL: lower_twice(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot1[128];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<51>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot1;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [lower_twice_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [lower_twice_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [lower_twice_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [lower_twice_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [lower_twice_param_1+48];
+; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
+; CHECK-NEXT: ld.param.b32 %rd14, [lower_twice_param_2];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
+; CHECK-NEXT: add.u64 %rd17, %SP, 0;
+; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
+; CHECK-NEXT: add.u64 %rd19, %SP, 64;
+; CHECK-NEXT: add.s64 %rd20, %rd19, %rd16;
+; CHECK-NEXT: ld.param.b32 %rd21, [lower_twice_param_3];
+; CHECK-NEXT: and.b64 %rd22, %rd21, 7;
+; CHECK-NEXT: shl.b64 %rd23, %rd22, 3;
+; CHECK-NEXT: add.s64 %rd24, %rd17, %rd23;
+; CHECK-NEXT: add.s64 %rd25, %rd19, %rd23;
+; CHECK-NEXT: st.b64 [%SP+120], %rd11;
+; CHECK-NEXT: st.b64 [%SP+112], %rd10;
+; CHECK-NEXT: st.b64 [%SP+104], %rd9;
+; CHECK-NEXT: st.b64 [%SP+96], %rd8;
+; CHECK-NEXT: st.b64 [%SP+88], %rd7;
+; CHECK-NEXT: st.b64 [%SP+80], %rd6;
+; CHECK-NEXT: st.b64 [%SP+72], %rd5;
+; CHECK-NEXT: st.b64 [%SP+64], %rd4;
+; CHECK-NEXT: st.b64 [%rd20], %rd2;
+; CHECK-NEXT: st.b64 [%rd25], %rd3;
+; CHECK-NEXT: ld.param.b32 %rd26, [lower_twice_param_4];
+; CHECK-NEXT: and.b64 %rd27, %rd26, 7;
+; CHECK-NEXT: shl.b64 %rd28, %rd27, 3;
+; CHECK-NEXT: add.s64 %rd29, %rd19, %rd28;
+; CHECK-NEXT: st.b64 [%rd29], %rd12;
+; CHECK-NEXT: add.s64 %rd30, %rd17, %rd28;
+; CHECK-NEXT: ld.b64 %rd31, [%SP+72];
+; CHECK-NEXT: ld.b64 %rd32, [%SP+64];
+; CHECK-NEXT: ld.b64 %rd33, [%SP+88];
+; CHECK-NEXT: ld.b64 %rd34, [%SP+80];
+; CHECK-NEXT: ld.b64 %rd35, [%SP+104];
+; CHECK-NEXT: ld.b64 %rd36, [%SP+96];
+; CHECK-NEXT: ld.b64 %rd37, [%SP+120];
+; CHECK-NEXT: ld.b64 %rd38, [%SP+112];
+; CHECK-NEXT: st.b64 [%SP+56], %rd11;
+; CHECK-NEXT: st.b64 [%SP+48], %rd10;
+; CHECK-NEXT: st.b64 [%SP+40], %rd9;
+; CHECK-NEXT: st.b64 [%SP+32], %rd8;
+; CHECK-NEXT: st.b64 [%SP+24], %rd7;
+; CHECK-NEXT: st.b64 [%SP+16], %rd6;
+; CHECK-NEXT: st.b64 [%SP+8], %rd5;
+; CHECK-NEXT: st.b64 [%SP], %rd4;
+; CHECK-NEXT: st.b64 [%rd18], %rd2;
+; CHECK-NEXT: st.b64 [%rd24], %rd3;
+; CHECK-NEXT: st.b64 [%rd30], %rd12;
+; CHECK-NEXT: ld.param.b32 %rd39, [lower_twice_param_5];
+; CHECK-NEXT: and.b64 %rd40, %rd39, 7;
+; CHECK-NEXT: shl.b64 %rd41, %rd40, 3;
+; CHECK-NEXT: add.s64 %rd42, %rd17, %rd41;
+; CHECK-NEXT: st.b64 [%rd42], %rd13;
+; CHECK-NEXT: ld.b64 %rd43, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd44, [%SP];
+; CHECK-NEXT: ld.b64 %rd45, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd46, [%SP+16];
+; CHECK-NEXT: ld.b64 %rd47, [%SP+40];
+; CHECK-NEXT: ld.b64 %rd48, [%SP+32];
+; CHECK-NEXT: ld.b64 %rd49, [%SP+56];
+; CHECK-NEXT: ld.b64 %rd50, [%SP+48];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd50, %rd49};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd48, %rd47};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd46, %rd45};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd44, %rd43};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1144], {%rd38, %rd37};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1128], {%rd36, %rd35};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1112], {%rd34, %rd33};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1096], {%rd32, %rd31};
+; CHECK-NEXT: ret;
+entry:
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+
+; COM: begin chain 1
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 %idx1
+
+; COM: interleave a second chain of insertelements
+ %vector.build1-2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+
+; COM: continue chain 1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx3
+
+; COM: save chain 1
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+
+; COM: save chain 2
+ %location-2 = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1096
+ store <8 x double> %vector.build1-2, ptr addrspace(3) %location-2, align 64
+ ret void
+}
+
+; COM: a chain of insertelts may include dynamic and constant indices. We only
+; reduce the total number of memory operations if there is a high ratio of
+; dynamic to constant insertelts.
+
+; COM: lower all insertelts to stores. This avoids lowering the two dynamic
+; insertelts individually and saves memory.
+define ptx_kernel void @mix_lower_all(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx2) local_unnamed_addr {
+; CHECK-LABEL: mix_lower_all(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot2[64];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<31>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot2;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [mix_lower_all_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [mix_lower_all_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [mix_lower_all_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [mix_lower_all_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [mix_lower_all_param_1+48];
+; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
+; CHECK-NEXT: ld.param.b32 %rd14, [mix_lower_all_param_2];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
+; CHECK-NEXT: add.u64 %rd17, %SP, 0;
+; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
+; CHECK-NEXT: st.b64 [%SP+56], %rd11;
+; CHECK-NEXT: st.b64 [%SP+48], %rd10;
+; CHECK-NEXT: st.b64 [%SP+40], %rd9;
+; CHECK-NEXT: st.b64 [%SP+32], %rd8;
+; CHECK-NEXT: st.b64 [%SP+24], %rd7;
+; CHECK-NEXT: st.b64 [%SP+16], %rd6;
+; CHECK-NEXT: st.b64 [%SP+8], %rd5;
+; CHECK-NEXT: st.b64 [%SP], %rd4;
+; CHECK-NEXT: st.b64 [%rd18], %rd2;
+; CHECK-NEXT: st.b64 [%SP+16], %rd12;
+; CHECK-NEXT: st.b64 [%SP+8], %rd3;
+; CHECK-NEXT: ld.param.b32 %rd19, [mix_lower_all_param_3];
+; CHECK-NEXT: and.b64 %rd20, %rd19, 7;
+; CHECK-NEXT: shl.b64 %rd21, %rd20, 3;
+; CHECK-NEXT: add.s64 %rd22, %rd17, %rd21;
+; CHECK-NEXT: st.b64 [%rd22], %rd13;
+; CHECK-NEXT: ld.b64 %rd23, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd24, [%SP];
+; CHECK-NEXT: ld.b64 %rd25, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd26, [%SP+16];
+; CHECK-NEXT: ld.b64 %rd27, [%SP+40];
+; CHECK-NEXT: ld.b64 %rd28, [%SP+32];
+; CHECK-NEXT: ld.b64 %rd29, [%SP+56];
+; CHECK-NEXT: ld.b64 %rd30, [%SP+48];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd30, %rd29};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd28, %rd27};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd26, %rd25};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd24, %rd23};
+; CHECK-NEXT: ret;
+entry:
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx2
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+ ret void
+}
+
+; COM: lower only the single dynamic insertelt. Lowering the constant
+; insertelts to stores would not reduce the total amount of loads and stores.
+define ptx_kernel void @mix_lower_some(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0) local_unnamed_addr {
+; CHECK-LABEL: mix_lower_some(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot3[64];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<25>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot3;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [mix_lower_some_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [mix_lower_some_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [mix_lower_some_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [mix_lower_some_param_1+48];
+; CHECK-NEXT: ld.param.b32 %rd10, [mix_lower_some_param_2];
+; CHECK-NEXT: and.b64 %rd11, %rd10, 7;
+; CHECK-NEXT: shl.b64 %rd12, %rd11, 3;
+; CHECK-NEXT: add.u64 %rd13, %SP, 0;
+; CHECK-NEXT: add.s64 %rd14, %rd13, %rd12;
+; CHECK-NEXT: ld.shared.b64 %rd15, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd16, [%rd1+24];
+; CHECK-NEXT: ld.shared.b64 %rd17, [%rd1+32];
+; CHECK-NEXT: ld.shared.b64 %rd18, [%rd1+40];
+; CHECK-NEXT: ld.shared.b64 %rd19, [%rd1+48];
+; CHECK-NEXT: ld.shared.b64 %rd20, [%rd1+56];
+; CHECK-NEXT: st.b64 [%SP+56], %rd9;
+; CHECK-NEXT: st.b64 [%SP+48], %rd8;
+; CHECK-NEXT: st.b64 [%SP+40], %rd7;
+; CHECK-NEXT: st.b64 [%SP+32], %rd6;
+; CHECK-NEXT: st.b64 [%SP+24], %rd5;
+; CHECK-NEXT: st.b64 [%SP+16], %rd15;
+; CHECK-NEXT: st.b64 [%SP+8], %rd3;
+; CHECK-NEXT: st.b64 [%SP], %rd2;
+; CHECK-NEXT: st.b64 [%rd14], %rd16;
+; CHECK-NEXT: ld.b64 %rd21, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd22, [%SP];
+; CHECK-NEXT: ld.b64 %rd23, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd24, [%SP+16];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd19, %rd20};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd17, %rd18};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd24, %rd23};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd22, %rd21};
+; CHECK-NEXT: ret;
+entry:
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+ %offset.4 = getelementptr double, ptr addrspace(3) %shared.mem, i32 4
+ %element.4 = load double, ptr addrspace(3) %offset.4, align 8
+ %offset.5 = getelementptr double, ptr addrspace(3) %shared.mem, i32 5
+ %element.5 = load double, ptr addrspace(3) %offset.5, align 8
+ %offset.6 = getelementptr double, ptr addrspace(3) %shared.mem, i32 6
+ %element.6 = load double, ptr addrspace(3) %offset.6, align 8
+ %offset.7 = getelementptr double, ptr addrspace(3) %shared.mem, i32 7
+ %element.7 = load double, ptr addrspace(3) %offset.7, align 8
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx0
+ %vector.build4 = insertelement <8 x double> %vector.build3, double %element.4, i32 4
+ %vector.build5 = insertelement <8 x double> %vector.build4, double %element.5, i32 5
+ %vector.build6 = insertelement <8 x double> %vector.build5, double %element.6, i32 6
+ %vector.build7 = insertelement <8 x double> %vector.build6, double %element.7, i32 7
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build7, ptr addrspace(3) %location, align 64
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
index 291a9c1f978da..b006c78604648 100644
--- a/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_insert_elt.ll
@@ -242,17 +242,14 @@ define <2 x i64> @testDoubleword(<2 x i64> %a, i64 %b, i64 %idx) {
; AIX-P8-32-LABEL: testDoubleword:
; AIX-P8-32: # %bb.0: # %entry
; AIX-P8-32-NEXT: add r6, r6, r6
-; AIX-P8-32-NEXT: addi r5, r1, -32
+; AIX-P8-32-NEXT: addi r5, r1, -16
; AIX-P8-32-NEXT: rlwinm r7, r6, 2, 28, 29
-; AIX-P8-32-NEXT: stxvw4x v2, 0, r5
+; AIX-P8-32-NEXT: stxvd2x v2, 0, r5
; AIX-P8-32-NEXT: stwx r3, r5, r7
-; AIX-P8-32-NEXT: addi r3, r1, -16
-; AIX-P8-32-NEXT: lxvw4x vs0, 0, r5
-; AIX-P8-32-NEXT: addi r5, r6, 1
-; AIX-P8-32-NEXT: rlwinm r5, r5, 2, 28, 29
-; AIX-P8-32-NEXT: stxvw4x vs0, 0, r3
-; AIX-P8-32-NEXT: stwx r4, r3, r5
-; AIX-P8-32-NEXT: lxvw4x v2, 0, r3
+; AIX-P8-32-NEXT: addi r3, r6, 1
+; AIX-P8-32-NEXT: rlwinm r3, r3, 2, 28, 29
+; AIX-P8-32-NEXT: stwx r4, r5, r3
+; AIX-P8-32-NEXT: lxvd2x v2, 0, r5
; AIX-P8-32-NEXT: blr
entry:
%vecins = insertelement <2 x i64> %a, i64 %b, i64 %idx
@@ -426,17 +423,14 @@ define <4 x float> @testFloat2(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 ze
; AIX-P8-LABEL: testFloat2:
; AIX-P8: # %bb.0: # %entry
; AIX-P8-NEXT: lwz r6, 0(r3)
-; AIX-P8-NEXT: rlwinm r4, r4, 2, 28, 29
-; AIX-P8-NEXT: addi r7, r1, -32
+; AIX-P8-NEXT: lwz r3, 1(r3)
+; AIX-P8-NEXT: addi r7, r1, -16
; AIX-P8-NEXT: stxvw4x v2, 0, r7
; AIX-P8-NEXT: rlwinm r5, r5, 2, 28, 29
+; AIX-P8-NEXT: rlwinm r4, r4, 2, 28, 29
; AIX-P8-NEXT: stwx r6, r7, r4
-; AIX-P8-NEXT: addi r4, r1, -16
-; AIX-P8-NEXT: lxvw4x vs0, 0, r7
-; AIX-P8-NEXT: lwz r3, 1(r3)
-; AIX-P8-NEXT: stxvw4x vs0, 0, r4
-; AIX-P8-NEXT: stwx r3, r4, r5
-; AIX-P8-NEXT: lxvw4x v2, 0, r4
+; AIX-P8-NEXT: stwx r3, r7, r5
+; AIX-P8-NEXT: lxvw4x v2, 0, r7
; AIX-P8-NEXT: blr
entry:
%add.ptr1 = getelementptr inbounds i8, ptr %b, i64 1
@@ -493,38 +487,32 @@ define <4 x float> @testFloat3(<4 x float> %a, ptr %b, i32 zeroext %idx1, i32 ze
;
; AIX-P8-64-LABEL: testFloat3:
; AIX-P8-64: # %bb.0: # %entry
+; AIX-P8-64-NEXT: li r7, 1
; AIX-P8-64-NEXT: lis r6, 1
-; AIX-P8-64-NEXT: rlwinm r4, r4, 2, 28, 29
-; AIX-P8-64-NEXT: addi r7, r1, -32
; AIX-P8-64-NEXT: rlwinm r5, r5, 2, 28, 29
+; AIX-P8-64-NEXT: rlwinm r4, r4, 2, 28, 29
+; AIX-P8-64-NEXT: rldic r7, r7, 36, 27
; AIX-P8-64-NEXT: lwzx r6, r3, r6
+; AIX-P8-64-NEXT: lwzx r3, r3, r7
+; AIX-P8-64-NEXT: addi r7, r1, -16
; AIX-P8-64-NEXT: stxvw4x v2, 0, r7
; AIX-P8-64-NEXT: stwx r6, r7, r4
-; AIX-P8-64-NEXT: li r4, 1
-; AIX-P8-64-NEXT: lxvw4x vs0, 0, r7
-; AIX-P8-64-NEXT: rldic r4, r4, 36, 27
-; AIX-P8-64-NEXT: lwzx r3, r3, r4
-; AIX-P8-64-NEXT: addi r4, r1, -16
-; AIX-P8-64-NEXT: stxvw4x vs0, 0, r4
-; AIX-P8-64-NEXT: stwx r3, r4, r5
-; AIX-P8-64-NEXT: lxvw4x v2, 0, r4
+; AIX-P8-64-NEXT: stwx r3, r7, r5
+; AIX-P8-64-NEXT: lxvw4x v2, 0, r7
; AIX-P8-64-NEXT: blr
;
; AIX-P8-32-LABEL: testFloat3:
; AIX-P8-32: # %bb.0: # %entry
; AIX-P8-32-NEXT: lis r6, 1
-; AIX-P8-32-NEXT: rlwinm r4, r4, 2, 28, 29
-; AIX-P8-32-NEXT: addi r7, r1, -32
; AIX-P8-32-NEXT: rlwinm r5, r5, 2, 28, 29
+; AIX-P8-32-NEXT: rlwinm r4, r4, 2, 28, 29
+; AIX-P8-32-NEXT: addi r7, r1, -16
; AIX-P8-32-NEXT: lwzx r6, r3, r6
+; AIX-P8-32-NEXT: lwz r3, 0(r3)
; AIX-P8-32-NEXT: stxvw4x v2, 0, r7
; AIX-P8-32-NEXT: stwx r6, r7, r4
-; AIX-P8-32-NEXT: addi r4, r1, -16
-; AIX-P8-32-NEXT: lxvw4x vs0, 0, r7
-; AIX-P8-32-NEXT: lwz r3, 0(r3)
-; AIX-P8-32-NEXT: stxvw4x vs0, 0, r4
-; AIX-P8-32-NEXT: stwx r3, r4, r5
-; AIX-P8-32-NEXT: lxvw4x v2, 0, r4
+; AIX-P8-32-NEXT: stwx r3, r7, r5
+; AIX-P8-32-NEXT: lxvw4x v2, 0, r7
; AIX-P8-32-NEXT: blr
entry:
%add.ptr = getelementptr inbounds i8, ptr %b, i64 65536
>From 79a6a2aad6f0c0b35f403c334259418f0dc99502 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Fri, 17 Oct 2025 23:07:07 -0700
Subject: [PATCH 2/5] [NVPTX][test] Simplify insertelt-dynamic tests
- Using i32 instead of double to reduce output size
- Reducing vector size from 8 to 4 elements
- Add more test cases (dynamic insertelt at beginning/middle/end,
repeated indices)
---
llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll | 649 ++++++++++---------
1 file changed, 335 insertions(+), 314 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
index eb9e1328835fc..c88224701ea3b 100644
--- a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
+++ b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
@@ -3,358 +3,379 @@
; RUN: %if ptxas %{ llc < %s -mcpu=sm_20 | %ptxas-verify %}
target triple = "nvptx64-nvidia-cuda"
-; COM: Save the vector to the stack once.
-define ptx_kernel void @lower_once(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) local_unnamed_addr {
-; CHECK-LABEL: lower_once(
+; Test dynamic insertelt at the beginning of a chain
+define <4 x i32> @dynamic_at_beginning(i32 %idx) {
+; CHECK-LABEL: dynamic_at_beginning(
; CHECK: {
-; CHECK-NEXT: .local .align 8 .b8 __local_depot0[64];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot0[16];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b64 %rd<39>;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot0;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT: ld.param.b64 %rd1, [lower_once_param_0];
-; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [lower_once_param_1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [lower_once_param_1+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [lower_once_param_1+32];
-; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [lower_once_param_1+48];
-; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
-; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
-; CHECK-NEXT: ld.param.b32 %rd14, [lower_once_param_2];
-; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
-; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
-; CHECK-NEXT: add.u64 %rd17, %SP, 0;
-; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
-; CHECK-NEXT: ld.param.b32 %rd19, [lower_once_param_3];
-; CHECK-NEXT: and.b64 %rd20, %rd19, 7;
-; CHECK-NEXT: shl.b64 %rd21, %rd20, 3;
-; CHECK-NEXT: add.s64 %rd22, %rd17, %rd21;
-; CHECK-NEXT: ld.param.b32 %rd23, [lower_once_param_4];
-; CHECK-NEXT: and.b64 %rd24, %rd23, 7;
-; CHECK-NEXT: shl.b64 %rd25, %rd24, 3;
-; CHECK-NEXT: add.s64 %rd26, %rd17, %rd25;
-; CHECK-NEXT: st.b64 [%SP+56], %rd11;
-; CHECK-NEXT: st.b64 [%SP+48], %rd10;
-; CHECK-NEXT: st.b64 [%SP+40], %rd9;
-; CHECK-NEXT: st.b64 [%SP+32], %rd8;
-; CHECK-NEXT: st.b64 [%SP+24], %rd7;
-; CHECK-NEXT: st.b64 [%SP+16], %rd6;
-; CHECK-NEXT: st.b64 [%SP+8], %rd5;
-; CHECK-NEXT: st.b64 [%SP], %rd4;
-; CHECK-NEXT: st.b64 [%rd18], %rd2;
-; CHECK-NEXT: st.b64 [%rd22], %rd3;
-; CHECK-NEXT: st.b64 [%rd26], %rd12;
-; CHECK-NEXT: ld.param.b32 %rd27, [lower_once_param_5];
-; CHECK-NEXT: and.b64 %rd28, %rd27, 7;
-; CHECK-NEXT: shl.b64 %rd29, %rd28, 3;
-; CHECK-NEXT: add.s64 %rd30, %rd17, %rd29;
-; CHECK-NEXT: st.b64 [%rd30], %rd13;
-; CHECK-NEXT: ld.b64 %rd31, [%SP+8];
-; CHECK-NEXT: ld.b64 %rd32, [%SP];
-; CHECK-NEXT: ld.b64 %rd33, [%SP+24];
-; CHECK-NEXT: ld.b64 %rd34, [%SP+16];
-; CHECK-NEXT: ld.b64 %rd35, [%SP+40];
-; CHECK-NEXT: ld.b64 %rd36, [%SP+32];
-; CHECK-NEXT: ld.b64 %rd37, [%SP+56];
-; CHECK-NEXT: ld.b64 %rd38, [%SP+48];
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd38, %rd37};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd36, %rd35};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd34, %rd33};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd32, %rd31};
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_at_beginning_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r2, 20, 30, %r1};
; CHECK-NEXT: ret;
-entry:
- %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
- %element.0 = load double, ptr addrspace(3) %offset.0, align 64
- %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
- %element.1 = load double, ptr addrspace(3) %offset.1, align 8
- %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
- %element.2 = load double, ptr addrspace(3) %offset.2, align 8
- %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
- %element.3 = load double, ptr addrspace(3) %offset.3, align 8
- %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
- %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 %idx1
- %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
- %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx3
- %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
- store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
- ret void
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 1
+ %v2 = insertelement <4 x i32> %v1, i32 30, i32 2
+ ret <4 x i32> %v2
}
-; COM: Save the vector to the stack twice. Because these are in two different
-; slots, the resulting sequences may be non-overlapping even though the
-; insertelt sequences overlap.
-define ptx_kernel void @lower_twice(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) local_unnamed_addr {
-; CHECK-LABEL: lower_twice(
+; Test dynamic insertelt at the end of a chain
+define <4 x i32> @dynamic_at_end(i32 %idx) {
+; CHECK-LABEL: dynamic_at_end(
; CHECK: {
-; CHECK-NEXT: .local .align 8 .b8 __local_depot1[128];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot1[16];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b64 %rd<51>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot1;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT: ld.param.b64 %rd1, [lower_twice_param_0];
-; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [lower_twice_param_1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [lower_twice_param_1+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [lower_twice_param_1+32];
-; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [lower_twice_param_1+48];
-; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
-; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
-; CHECK-NEXT: ld.param.b32 %rd14, [lower_twice_param_2];
-; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
-; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
-; CHECK-NEXT: add.u64 %rd17, %SP, 0;
-; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
-; CHECK-NEXT: add.u64 %rd19, %SP, 64;
-; CHECK-NEXT: add.s64 %rd20, %rd19, %rd16;
-; CHECK-NEXT: ld.param.b32 %rd21, [lower_twice_param_3];
-; CHECK-NEXT: and.b64 %rd22, %rd21, 7;
-; CHECK-NEXT: shl.b64 %rd23, %rd22, 3;
-; CHECK-NEXT: add.s64 %rd24, %rd17, %rd23;
-; CHECK-NEXT: add.s64 %rd25, %rd19, %rd23;
-; CHECK-NEXT: st.b64 [%SP+120], %rd11;
-; CHECK-NEXT: st.b64 [%SP+112], %rd10;
-; CHECK-NEXT: st.b64 [%SP+104], %rd9;
-; CHECK-NEXT: st.b64 [%SP+96], %rd8;
-; CHECK-NEXT: st.b64 [%SP+88], %rd7;
-; CHECK-NEXT: st.b64 [%SP+80], %rd6;
-; CHECK-NEXT: st.b64 [%SP+72], %rd5;
-; CHECK-NEXT: st.b64 [%SP+64], %rd4;
-; CHECK-NEXT: st.b64 [%rd20], %rd2;
-; CHECK-NEXT: st.b64 [%rd25], %rd3;
-; CHECK-NEXT: ld.param.b32 %rd26, [lower_twice_param_4];
-; CHECK-NEXT: and.b64 %rd27, %rd26, 7;
-; CHECK-NEXT: shl.b64 %rd28, %rd27, 3;
-; CHECK-NEXT: add.s64 %rd29, %rd19, %rd28;
-; CHECK-NEXT: st.b64 [%rd29], %rd12;
-; CHECK-NEXT: add.s64 %rd30, %rd17, %rd28;
-; CHECK-NEXT: ld.b64 %rd31, [%SP+72];
-; CHECK-NEXT: ld.b64 %rd32, [%SP+64];
-; CHECK-NEXT: ld.b64 %rd33, [%SP+88];
-; CHECK-NEXT: ld.b64 %rd34, [%SP+80];
-; CHECK-NEXT: ld.b64 %rd35, [%SP+104];
-; CHECK-NEXT: ld.b64 %rd36, [%SP+96];
-; CHECK-NEXT: ld.b64 %rd37, [%SP+120];
-; CHECK-NEXT: ld.b64 %rd38, [%SP+112];
-; CHECK-NEXT: st.b64 [%SP+56], %rd11;
-; CHECK-NEXT: st.b64 [%SP+48], %rd10;
-; CHECK-NEXT: st.b64 [%SP+40], %rd9;
-; CHECK-NEXT: st.b64 [%SP+32], %rd8;
-; CHECK-NEXT: st.b64 [%SP+24], %rd7;
-; CHECK-NEXT: st.b64 [%SP+16], %rd6;
-; CHECK-NEXT: st.b64 [%SP+8], %rd5;
-; CHECK-NEXT: st.b64 [%SP], %rd4;
-; CHECK-NEXT: st.b64 [%rd18], %rd2;
-; CHECK-NEXT: st.b64 [%rd24], %rd3;
-; CHECK-NEXT: st.b64 [%rd30], %rd12;
-; CHECK-NEXT: ld.param.b32 %rd39, [lower_twice_param_5];
-; CHECK-NEXT: and.b64 %rd40, %rd39, 7;
-; CHECK-NEXT: shl.b64 %rd41, %rd40, 3;
-; CHECK-NEXT: add.s64 %rd42, %rd17, %rd41;
-; CHECK-NEXT: st.b64 [%rd42], %rd13;
-; CHECK-NEXT: ld.b64 %rd43, [%SP+8];
-; CHECK-NEXT: ld.b64 %rd44, [%SP];
-; CHECK-NEXT: ld.b64 %rd45, [%SP+24];
-; CHECK-NEXT: ld.b64 %rd46, [%SP+16];
-; CHECK-NEXT: ld.b64 %rd47, [%SP+40];
-; CHECK-NEXT: ld.b64 %rd48, [%SP+32];
-; CHECK-NEXT: ld.b64 %rd49, [%SP+56];
-; CHECK-NEXT: ld.b64 %rd50, [%SP+48];
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd50, %rd49};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd48, %rd47};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd46, %rd45};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd44, %rd43};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1144], {%rd38, %rd37};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1128], {%rd36, %rd35};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1112], {%rd34, %rd33};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1096], {%rd32, %rd31};
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_at_end_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%SP+4], 20;
+; CHECK-NEXT: st.b32 [%SP], 10;
+; CHECK-NEXT: st.b32 [%rd5], 30;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
; CHECK-NEXT: ret;
-entry:
- %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
- %element.0 = load double, ptr addrspace(3) %offset.0, align 64
- %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
- %element.1 = load double, ptr addrspace(3) %offset.1, align 8
- %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
- %element.2 = load double, ptr addrspace(3) %offset.2, align 8
- %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
- %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 1
+ %v2 = insertelement <4 x i32> %v1, i32 30, i32 %idx
+ ret <4 x i32> %v2
+}
-; COM: begin chain 1
- %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
- %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 %idx1
+; Test dynamic insertelt in the middle of a chain
+define <4 x i32> @dynamic_in_middle(i32 %idx) {
+; CHECK-LABEL: dynamic_in_middle(
+; CHECK: {
+; CHECK-NEXT: .local .align 4 .b8 __local_depot2[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot2;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_in_middle_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%SP], 10;
+; CHECK-NEXT: st.b32 [%rd5], 20;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+4];
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r3, %r2, 30, %r1};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx
+ %v2 = insertelement <4 x i32> %v1, i32 30, i32 2
+ ret <4 x i32> %v2
+}
-; COM: interleave a second chain of insertelements
- %vector.build1-2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+; Test repeated dynamic insertelt with the same index
+define <4 x i32> @repeated_same_index(i32 %idx) {
+; CHECK-LABEL: repeated_same_index(
+; CHECK: {
+; CHECK-NEXT: .local .align 4 .b8 __local_depot3[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot3;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [repeated_same_index_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 20;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx
+ ret <4 x i32> %v1
+}
-; COM: continue chain 1
- %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
- %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx3
+; Test multiple dynamic insertelts
+define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
+; CHECK-LABEL: multiple_dynamic(
+; CHECK: {
+; CHECK-NEXT: .local .align 4 .b8 __local_depot4[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<10>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot4;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [multiple_dynamic_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: ld.param.b32 %rd6, [multiple_dynamic_param_1];
+; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
+; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
+; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
+; CHECK-NEXT: st.b32 [%rd9], 20;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1
+ ret <4 x i32> %v1
+}
-; COM: save chain 1
- %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
- store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+; Test chain with all dynamic insertelts
+define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
+; CHECK-LABEL: all_dynamic(
+; CHECK: {
+; CHECK-NEXT: .local .align 4 .b8 __local_depot5[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<18>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot5;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [all_dynamic_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: ld.param.b32 %rd6, [all_dynamic_param_1];
+; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
+; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
+; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
+; CHECK-NEXT: ld.param.b32 %rd10, [all_dynamic_param_2];
+; CHECK-NEXT: and.b64 %rd11, %rd10, 3;
+; CHECK-NEXT: shl.b64 %rd12, %rd11, 2;
+; CHECK-NEXT: add.s64 %rd13, %rd4, %rd12;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: st.b32 [%rd9], 20;
+; CHECK-NEXT: st.b32 [%rd13], 30;
+; CHECK-NEXT: ld.param.b32 %rd14, [all_dynamic_param_3];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 3;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 2;
+; CHECK-NEXT: add.s64 %rd17, %rd4, %rd16;
+; CHECK-NEXT: st.b32 [%rd17], 40;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1
+ %v2 = insertelement <4 x i32> %v1, i32 30, i32 %idx2
+ %v3 = insertelement <4 x i32> %v2, i32 40, i32 %idx3
+ ret <4 x i32> %v3
+}
-; COM: save chain 2
- %location-2 = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1096
- store <8 x double> %vector.build1-2, ptr addrspace(3) %location-2, align 64
- ret void
+; Test mixed constant and dynamic insertelts with high ratio of dynamic ones.
+; Should lower all insertelts to stores.
+define <4 x i32> @mix_high_dynamic_ratio(i32 %idx0, i32 %idx1) {
+; CHECK-LABEL: mix_high_dynamic_ratio(
+; CHECK: {
+; CHECK-NEXT: .local .align 4 .b8 __local_depot6[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<10>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot6;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [mix_high_dynamic_ratio_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: ld.param.b32 %rd6, [mix_high_dynamic_ratio_param_1];
+; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
+; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
+; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
+; CHECK-NEXT: st.b32 [%SP+4], 20;
+; CHECK-NEXT: st.b32 [%rd9], 30;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 1
+ %v2 = insertelement <4 x i32> %v1, i32 30, i32 %idx1
+ ret <4 x i32> %v2
}
-; COM: a chain of insertelts may include dynamic and constant indices. We only
-; reduce the total number of memory operations if there is a high ratio of
-; dynamic to constant insertelts.
+; Test mixed constant and dynamic insertelts with low ratio of dynamic ones.
+; Should handle dynamic insertelt individually.
+define <4 x i32> @mix_low_dynamic_ratio(i32 %idx) {
+; CHECK-LABEL: mix_low_dynamic_ratio(
+; CHECK: {
+; CHECK-NEXT: .local .align 4 .b8 __local_depot7[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot7;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [mix_low_dynamic_ratio_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%SP], 10;
+; CHECK-NEXT: st.b32 [%rd5], 20;
+; CHECK-NEXT: ld.b32 %r1, [%SP+4];
+; CHECK-NEXT: ld.b32 %r2, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r2, %r1, 30, 40};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx
+ %v2 = insertelement <4 x i32> %v1, i32 30, i32 2
+ %v3 = insertelement <4 x i32> %v2, i32 40, i32 3
+ ret <4 x i32> %v3
+}
-; COM: lower all insertelts to stores. This avoids lowering the two dynamic
-; insertelts individually and saves memory.
-define ptx_kernel void @mix_lower_all(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx2) local_unnamed_addr {
-; CHECK-LABEL: mix_lower_all(
+; Test two separate chains that don't interfere
+define void @two_separate_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
+; CHECK-LABEL: two_separate_chains(
; CHECK: {
-; CHECK-NEXT: .local .align 8 .b8 __local_depot2[64];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot8[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b64 %rd<31>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b64 %rd<13>;
; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: mov.b64 %SPL, __local_depot2;
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot8;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT: ld.param.b64 %rd1, [mix_lower_all_param_0];
-; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [mix_lower_all_param_1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [mix_lower_all_param_1+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [mix_lower_all_param_1+32];
-; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [mix_lower_all_param_1+48];
-; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
-; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
-; CHECK-NEXT: ld.param.b32 %rd14, [mix_lower_all_param_2];
-; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
-; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
-; CHECK-NEXT: add.u64 %rd17, %SP, 0;
-; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
-; CHECK-NEXT: st.b64 [%SP+56], %rd11;
-; CHECK-NEXT: st.b64 [%SP+48], %rd10;
-; CHECK-NEXT: st.b64 [%SP+40], %rd9;
-; CHECK-NEXT: st.b64 [%SP+32], %rd8;
-; CHECK-NEXT: st.b64 [%SP+24], %rd7;
-; CHECK-NEXT: st.b64 [%SP+16], %rd6;
-; CHECK-NEXT: st.b64 [%SP+8], %rd5;
-; CHECK-NEXT: st.b64 [%SP], %rd4;
-; CHECK-NEXT: st.b64 [%rd18], %rd2;
-; CHECK-NEXT: st.b64 [%SP+16], %rd12;
-; CHECK-NEXT: st.b64 [%SP+8], %rd3;
-; CHECK-NEXT: ld.param.b32 %rd19, [mix_lower_all_param_3];
-; CHECK-NEXT: and.b64 %rd20, %rd19, 7;
-; CHECK-NEXT: shl.b64 %rd21, %rd20, 3;
-; CHECK-NEXT: add.s64 %rd22, %rd17, %rd21;
-; CHECK-NEXT: st.b64 [%rd22], %rd13;
-; CHECK-NEXT: ld.b64 %rd23, [%SP+8];
-; CHECK-NEXT: ld.b64 %rd24, [%SP];
-; CHECK-NEXT: ld.b64 %rd25, [%SP+24];
-; CHECK-NEXT: ld.b64 %rd26, [%SP+16];
-; CHECK-NEXT: ld.b64 %rd27, [%SP+40];
-; CHECK-NEXT: ld.b64 %rd28, [%SP+32];
-; CHECK-NEXT: ld.b64 %rd29, [%SP+56];
-; CHECK-NEXT: ld.b64 %rd30, [%SP+48];
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd30, %rd29};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd28, %rd27};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd26, %rd25};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd24, %rd23};
+; CHECK-NEXT: ld.param.b32 %rd1, [two_separate_chains_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 16;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: ld.param.b32 %rd6, [two_separate_chains_param_1];
+; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
+; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
+; CHECK-NEXT: add.u64 %rd9, %SP, 0;
+; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
+; CHECK-NEXT: ld.b32 %r1, [%SP+28];
+; CHECK-NEXT: ld.b32 %r2, [%SP+24];
+; CHECK-NEXT: ld.b32 %r3, [%SP+16];
+; CHECK-NEXT: ld.param.b64 %rd11, [two_separate_chains_param_2];
+; CHECK-NEXT: st.b32 [%rd10], 30;
+; CHECK-NEXT: ld.param.b64 %rd12, [two_separate_chains_param_3];
+; CHECK-NEXT: ld.b32 %r4, [%SP+12];
+; CHECK-NEXT: ld.b32 %r5, [%SP+4];
+; CHECK-NEXT: ld.b32 %r6, [%SP];
+; CHECK-NEXT: st.v4.b32 [%rd11], {%r3, 20, %r2, %r1};
+; CHECK-NEXT: st.v4.b32 [%rd12], {%r6, %r5, 40, %r4};
; CHECK-NEXT: ret;
-entry:
- %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
- %element.0 = load double, ptr addrspace(3) %offset.0, align 64
- %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
- %element.1 = load double, ptr addrspace(3) %offset.1, align 8
- %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
- %element.2 = load double, ptr addrspace(3) %offset.2, align 8
- %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
- %element.3 = load double, ptr addrspace(3) %offset.3, align 8
- %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
- %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 1
- %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 2
- %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx2
- %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
- store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+ ; Chain 1
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 1
+
+ ; Chain 2
+ %w0 = insertelement <4 x i32> poison, i32 30, i32 %idx1
+ %w1 = insertelement <4 x i32> %w0, i32 40, i32 2
+
+ store <4 x i32> %v1, ptr %out0
+ store <4 x i32> %w1, ptr %out1
ret void
}
-; COM: lower only the single dynamic insertelt. Lowering the constant
-; insertelts to stores would not reduce the total amount of loads and stores.
-define ptx_kernel void @mix_lower_some(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0) local_unnamed_addr {
-; CHECK-LABEL: mix_lower_some(
+; Test overlapping chains (chain 2 starts from middle of chain 1)
+define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
+; CHECK-LABEL: overlapping_chains(
; CHECK: {
-; CHECK-NEXT: .local .align 8 .b8 __local_depot3[64];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot9[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b64 %rd<25>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b64 %rd<14>;
; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0: // %entry
-; CHECK-NEXT: mov.b64 %SPL, __local_depot3;
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot9;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT: ld.param.b64 %rd1, [mix_lower_some_param_0];
-; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [mix_lower_some_param_1+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [mix_lower_some_param_1+32];
-; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [mix_lower_some_param_1+48];
-; CHECK-NEXT: ld.param.b32 %rd10, [mix_lower_some_param_2];
-; CHECK-NEXT: and.b64 %rd11, %rd10, 7;
-; CHECK-NEXT: shl.b64 %rd12, %rd11, 3;
-; CHECK-NEXT: add.u64 %rd13, %SP, 0;
-; CHECK-NEXT: add.s64 %rd14, %rd13, %rd12;
-; CHECK-NEXT: ld.shared.b64 %rd15, [%rd1+16];
-; CHECK-NEXT: ld.shared.b64 %rd16, [%rd1+24];
-; CHECK-NEXT: ld.shared.b64 %rd17, [%rd1+32];
-; CHECK-NEXT: ld.shared.b64 %rd18, [%rd1+40];
-; CHECK-NEXT: ld.shared.b64 %rd19, [%rd1+48];
-; CHECK-NEXT: ld.shared.b64 %rd20, [%rd1+56];
-; CHECK-NEXT: st.b64 [%SP+56], %rd9;
-; CHECK-NEXT: st.b64 [%SP+48], %rd8;
-; CHECK-NEXT: st.b64 [%SP+40], %rd7;
-; CHECK-NEXT: st.b64 [%SP+32], %rd6;
-; CHECK-NEXT: st.b64 [%SP+24], %rd5;
-; CHECK-NEXT: st.b64 [%SP+16], %rd15;
-; CHECK-NEXT: st.b64 [%SP+8], %rd3;
-; CHECK-NEXT: st.b64 [%SP], %rd2;
-; CHECK-NEXT: st.b64 [%rd14], %rd16;
-; CHECK-NEXT: ld.b64 %rd21, [%SP+8];
-; CHECK-NEXT: ld.b64 %rd22, [%SP];
-; CHECK-NEXT: ld.b64 %rd23, [%SP+24];
-; CHECK-NEXT: ld.b64 %rd24, [%SP+16];
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd19, %rd20};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd17, %rd18};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd24, %rd23};
-; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd22, %rd21};
+; CHECK-NEXT: ld.param.b32 %rd1, [overlapping_chains_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
+; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NEXT: add.u64 %rd4, %SP, 16;
+; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: add.u64 %rd6, %SP, 0;
+; CHECK-NEXT: add.s64 %rd7, %rd6, %rd3;
+; CHECK-NEXT: ld.b32 %r1, [%SP+28];
+; CHECK-NEXT: ld.b32 %r2, [%SP+16];
+; CHECK-NEXT: ld.param.b64 %rd8, [overlapping_chains_param_2];
+; CHECK-NEXT: st.b32 [%rd7], 10;
+; CHECK-NEXT: ld.param.b32 %rd9, [overlapping_chains_param_1];
+; CHECK-NEXT: and.b64 %rd10, %rd9, 3;
+; CHECK-NEXT: shl.b64 %rd11, %rd10, 2;
+; CHECK-NEXT: add.s64 %rd12, %rd6, %rd11;
+; CHECK-NEXT: st.b32 [%SP+4], 20;
+; CHECK-NEXT: st.b32 [%rd12], 30;
+; CHECK-NEXT: ld.param.b64 %rd13, [overlapping_chains_param_3];
+; CHECK-NEXT: ld.b32 %r3, [%SP+12];
+; CHECK-NEXT: ld.b32 %r4, [%SP+8];
+; CHECK-NEXT: ld.b32 %r5, [%SP+4];
+; CHECK-NEXT: ld.b32 %r6, [%SP];
+; CHECK-NEXT: st.v4.b32 [%rd8], {%r2, 20, 40, %r1};
+; CHECK-NEXT: st.v4.b32 [%rd13], {%r6, %r5, %r4, %r3};
; CHECK-NEXT: ret;
-entry:
- %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
- %element.0 = load double, ptr addrspace(3) %offset.0, align 64
- %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
- %element.1 = load double, ptr addrspace(3) %offset.1, align 8
- %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
- %element.2 = load double, ptr addrspace(3) %offset.2, align 8
- %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
- %element.3 = load double, ptr addrspace(3) %offset.3, align 8
- %offset.4 = getelementptr double, ptr addrspace(3) %shared.mem, i32 4
- %element.4 = load double, ptr addrspace(3) %offset.4, align 8
- %offset.5 = getelementptr double, ptr addrspace(3) %shared.mem, i32 5
- %element.5 = load double, ptr addrspace(3) %offset.5, align 8
- %offset.6 = getelementptr double, ptr addrspace(3) %shared.mem, i32 6
- %element.6 = load double, ptr addrspace(3) %offset.6, align 8
- %offset.7 = getelementptr double, ptr addrspace(3) %shared.mem, i32 7
- %element.7 = load double, ptr addrspace(3) %offset.7, align 8
- %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 0
- %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 1
- %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 2
- %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx0
- %vector.build4 = insertelement <8 x double> %vector.build3, double %element.4, i32 4
- %vector.build5 = insertelement <8 x double> %vector.build4, double %element.5, i32 5
- %vector.build6 = insertelement <8 x double> %vector.build5, double %element.6, i32 6
- %vector.build7 = insertelement <8 x double> %vector.build6, double %element.7, i32 7
- %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
- store <8 x double> %vector.build7, ptr addrspace(3) %location, align 64
+ %v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
+ %v1 = insertelement <4 x i32> %v0, i32 20, i32 1
+
+ ; Chain 2 starts from v1
+ %w0 = insertelement <4 x i32> %v1, i32 30, i32 %idx1
+
+ ; Continue chain 1
+ %v2 = insertelement <4 x i32> %v1, i32 40, i32 2
+
+ store <4 x i32> %v2, ptr %out0
+ store <4 x i32> %w0, ptr %out1
ret void
}
>From 481a9f7f068557ec8ada48fac6b0a861982d1fc6 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 21 Oct 2025 17:48:03 -0700
Subject: [PATCH 3/5] [NVPTX] simplify profitability check
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 10 ++--
llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll | 48 ++++---------------
2 files changed, 12 insertions(+), 46 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 96e28353bde04..4ff7791b9e070 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -23502,12 +23502,10 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
NumDynamic += !isa<ConstantSDNode>(InVec.getOperand(2));
}
- // We will lower every insertelt in the sequence to a store. In the
- // default handling, only dynamic insertelts in the sequence will be
- // lowered to a store (+ vector save/load for each). Check that our
- // approach reduces the total number of loads and stores over the default.
- if (2 * VT.getVectorMinNumElements() + Seq.size() <
- NumDynamic * (2 * VT.getVectorMinNumElements() + 1)) {
+ // It always and only makes sense to lower this sequence when we have more
+ // than one dynamic insertelt, since we will not have more than V constant
+ // insertelts, so we will be reducing the total number of stores+loads.
+ if (NumDynamic > 1) {
// In cases where the vector is illegal it will be broken down into
// parts and stored in parts - we should use the alignment for the
// smallest part.
diff --git a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
index c88224701ea3b..ab9f4589de3ab 100644
--- a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
+++ b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
@@ -210,8 +210,8 @@ define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
; Test mixed constant and dynamic insertelts with high ratio of dynamic ones.
; Should lower all insertelts to stores.
-define <4 x i32> @mix_high_dynamic_ratio(i32 %idx0, i32 %idx1) {
-; CHECK-LABEL: mix_high_dynamic_ratio(
+define <4 x i32> @mix_dynamic_constant(i32 %idx0, i32 %idx1) {
+; CHECK-LABEL: mix_dynamic_constant(
; CHECK: {
; CHECK-NEXT: .local .align 4 .b8 __local_depot6[16];
; CHECK-NEXT: .reg .b64 %SP;
@@ -222,13 +222,13 @@ define <4 x i32> @mix_high_dynamic_ratio(i32 %idx0, i32 %idx1) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot6;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT: ld.param.b32 %rd1, [mix_high_dynamic_ratio_param_0];
+; CHECK-NEXT: ld.param.b32 %rd1, [mix_dynamic_constant_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
; CHECK-NEXT: add.u64 %rd4, %SP, 0;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
; CHECK-NEXT: st.b32 [%rd5], 10;
-; CHECK-NEXT: ld.param.b32 %rd6, [mix_high_dynamic_ratio_param_1];
+; CHECK-NEXT: ld.param.b32 %rd6, [mix_dynamic_constant_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
@@ -246,50 +246,18 @@ define <4 x i32> @mix_high_dynamic_ratio(i32 %idx0, i32 %idx1) {
ret <4 x i32> %v2
}
-; Test mixed constant and dynamic insertelts with low ratio of dynamic ones.
-; Should handle dynamic insertelt individually.
-define <4 x i32> @mix_low_dynamic_ratio(i32 %idx) {
-; CHECK-LABEL: mix_low_dynamic_ratio(
-; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot7[16];
-; CHECK-NEXT: .reg .b64 %SP;
-; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<3>;
-; CHECK-NEXT: .reg .b64 %rd<6>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: mov.b64 %SPL, __local_depot7;
-; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
-; CHECK-NEXT: ld.param.b32 %rd1, [mix_low_dynamic_ratio_param_0];
-; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
-; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
-; CHECK-NEXT: add.u64 %rd4, %SP, 0;
-; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
-; CHECK-NEXT: st.b32 [%SP], 10;
-; CHECK-NEXT: st.b32 [%rd5], 20;
-; CHECK-NEXT: ld.b32 %r1, [%SP+4];
-; CHECK-NEXT: ld.b32 %r2, [%SP];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r2, %r1, 30, 40};
-; CHECK-NEXT: ret;
- %v0 = insertelement <4 x i32> poison, i32 10, i32 0
- %v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx
- %v2 = insertelement <4 x i32> %v1, i32 30, i32 2
- %v3 = insertelement <4 x i32> %v2, i32 40, i32 3
- ret <4 x i32> %v3
-}
-
; Test two separate chains that don't interfere
define void @two_separate_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
; CHECK-LABEL: two_separate_chains(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot8[32];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot7[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
; CHECK-NEXT: .reg .b32 %r<7>;
; CHECK-NEXT: .reg .b64 %rd<13>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: mov.b64 %SPL, __local_depot8;
+; CHECK-NEXT: mov.b64 %SPL, __local_depot7;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
; CHECK-NEXT: ld.param.b32 %rd1, [two_separate_chains_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
@@ -331,14 +299,14 @@ define void @two_separate_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
; CHECK-LABEL: overlapping_chains(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot9[32];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot8[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
; CHECK-NEXT: .reg .b32 %r<7>;
; CHECK-NEXT: .reg .b64 %rd<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: mov.b64 %SPL, __local_depot9;
+; CHECK-NEXT: mov.b64 %SPL, __local_depot8;
; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
; CHECK-NEXT: ld.param.b32 %rd1, [overlapping_chains_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
>From f1cac7f9243485f87aab87a552fe577e66d4c8c0 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 21 Oct 2025 18:55:35 -0700
Subject: [PATCH 4/5] [NVPTX][test] include non-byte-aligned element types
---
llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll | 587 ++++++++++++++++---
1 file changed, 516 insertions(+), 71 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
index ab9f4589de3ab..3ad824123c777 100644
--- a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
+++ b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
@@ -100,11 +100,11 @@ define <4 x i32> @dynamic_in_middle(i32 %idx) {
define <4 x i32> @repeated_same_index(i32 %idx) {
; CHECK-LABEL: repeated_same_index(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot3[16];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot3[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<6>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-NEXT: .reg .b64 %rd<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot3;
@@ -112,14 +112,25 @@ define <4 x i32> @repeated_same_index(i32 %idx) {
; CHECK-NEXT: ld.param.b32 %rd1, [repeated_same_index_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
-; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.u64 %rd4, %SP, 16;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: add.u64 %rd6, %SP, 0;
+; CHECK-NEXT: add.s64 %rd7, %rd6, %rd3;
+; CHECK-NEXT: st.b32 [%rd7], 10;
+; CHECK-NEXT: ld.b32 %r1, [%SP];
+; CHECK-NEXT: ld.b32 %r2, [%SP+4];
+; CHECK-NEXT: ld.b32 %r3, [%SP+8];
+; CHECK-NEXT: ld.b32 %r4, [%SP+12];
+; CHECK-NEXT: st.b32 [%SP+28], %r4;
+; CHECK-NEXT: st.b32 [%SP+24], %r3;
+; CHECK-NEXT: st.b32 [%SP+20], %r2;
+; CHECK-NEXT: st.b32 [%SP+16], %r1;
; CHECK-NEXT: st.b32 [%rd5], 20;
-; CHECK-NEXT: ld.b32 %r1, [%SP+12];
-; CHECK-NEXT: ld.b32 %r2, [%SP+8];
-; CHECK-NEXT: ld.b32 %r3, [%SP+4];
-; CHECK-NEXT: ld.b32 %r4, [%SP];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: ld.b32 %r5, [%SP+28];
+; CHECK-NEXT: ld.b32 %r6, [%SP+24];
+; CHECK-NEXT: ld.b32 %r7, [%SP+20];
+; CHECK-NEXT: ld.b32 %r8, [%SP+16];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r8, %r7, %r6, %r5};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx
%v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx
@@ -130,11 +141,11 @@ define <4 x i32> @repeated_same_index(i32 %idx) {
define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
; CHECK-LABEL: multiple_dynamic(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot4[16];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot4[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<10>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-NEXT: .reg .b64 %rd<11>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot4;
@@ -148,13 +159,22 @@ define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
; CHECK-NEXT: ld.param.b32 %rd6, [multiple_dynamic_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
-; CHECK-NEXT: st.b32 [%rd9], 20;
-; CHECK-NEXT: ld.b32 %r1, [%SP+12];
-; CHECK-NEXT: ld.b32 %r2, [%SP+8];
-; CHECK-NEXT: ld.b32 %r3, [%SP+4];
-; CHECK-NEXT: ld.b32 %r4, [%SP];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: add.u64 %rd9, %SP, 16;
+; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
+; CHECK-NEXT: ld.b32 %r1, [%SP];
+; CHECK-NEXT: ld.b32 %r2, [%SP+4];
+; CHECK-NEXT: ld.b32 %r3, [%SP+8];
+; CHECK-NEXT: ld.b32 %r4, [%SP+12];
+; CHECK-NEXT: st.b32 [%SP+28], %r4;
+; CHECK-NEXT: st.b32 [%SP+24], %r3;
+; CHECK-NEXT: st.b32 [%SP+20], %r2;
+; CHECK-NEXT: st.b32 [%SP+16], %r1;
+; CHECK-NEXT: st.b32 [%rd10], 20;
+; CHECK-NEXT: ld.b32 %r5, [%SP+28];
+; CHECK-NEXT: ld.b32 %r6, [%SP+24];
+; CHECK-NEXT: ld.b32 %r7, [%SP+20];
+; CHECK-NEXT: ld.b32 %r8, [%SP+16];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r8, %r7, %r6, %r5};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1
@@ -165,11 +185,11 @@ define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
; CHECK-LABEL: all_dynamic(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot5[16];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot5[64];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<18>;
+; CHECK-NEXT: .reg .b32 %r<17>;
+; CHECK-NEXT: .reg .b64 %rd<21>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot5;
@@ -179,27 +199,54 @@ define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
; CHECK-NEXT: add.u64 %rd4, %SP, 0;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
+; CHECK-NEXT: st.b32 [%rd5], 10;
; CHECK-NEXT: ld.param.b32 %rd6, [all_dynamic_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
-; CHECK-NEXT: ld.param.b32 %rd10, [all_dynamic_param_2];
-; CHECK-NEXT: and.b64 %rd11, %rd10, 3;
-; CHECK-NEXT: shl.b64 %rd12, %rd11, 2;
-; CHECK-NEXT: add.s64 %rd13, %rd4, %rd12;
-; CHECK-NEXT: st.b32 [%rd5], 10;
-; CHECK-NEXT: st.b32 [%rd9], 20;
-; CHECK-NEXT: st.b32 [%rd13], 30;
-; CHECK-NEXT: ld.param.b32 %rd14, [all_dynamic_param_3];
-; CHECK-NEXT: and.b64 %rd15, %rd14, 3;
-; CHECK-NEXT: shl.b64 %rd16, %rd15, 2;
-; CHECK-NEXT: add.s64 %rd17, %rd4, %rd16;
-; CHECK-NEXT: st.b32 [%rd17], 40;
-; CHECK-NEXT: ld.b32 %r1, [%SP+12];
-; CHECK-NEXT: ld.b32 %r2, [%SP+8];
-; CHECK-NEXT: ld.b32 %r3, [%SP+4];
-; CHECK-NEXT: ld.b32 %r4, [%SP];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: add.u64 %rd9, %SP, 16;
+; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
+; CHECK-NEXT: ld.b32 %r1, [%SP];
+; CHECK-NEXT: ld.b32 %r2, [%SP+4];
+; CHECK-NEXT: ld.b32 %r3, [%SP+8];
+; CHECK-NEXT: ld.b32 %r4, [%SP+12];
+; CHECK-NEXT: st.b32 [%SP+28], %r4;
+; CHECK-NEXT: st.b32 [%SP+24], %r3;
+; CHECK-NEXT: st.b32 [%SP+20], %r2;
+; CHECK-NEXT: st.b32 [%SP+16], %r1;
+; CHECK-NEXT: st.b32 [%rd10], 20;
+; CHECK-NEXT: ld.param.b32 %rd11, [all_dynamic_param_2];
+; CHECK-NEXT: and.b64 %rd12, %rd11, 3;
+; CHECK-NEXT: shl.b64 %rd13, %rd12, 2;
+; CHECK-NEXT: add.u64 %rd14, %SP, 32;
+; CHECK-NEXT: add.s64 %rd15, %rd14, %rd13;
+; CHECK-NEXT: ld.b32 %r5, [%SP+16];
+; CHECK-NEXT: ld.b32 %r6, [%SP+20];
+; CHECK-NEXT: ld.b32 %r7, [%SP+24];
+; CHECK-NEXT: ld.b32 %r8, [%SP+28];
+; CHECK-NEXT: st.b32 [%SP+44], %r8;
+; CHECK-NEXT: st.b32 [%SP+40], %r7;
+; CHECK-NEXT: st.b32 [%SP+36], %r6;
+; CHECK-NEXT: st.b32 [%SP+32], %r5;
+; CHECK-NEXT: st.b32 [%rd15], 30;
+; CHECK-NEXT: ld.param.b32 %rd16, [all_dynamic_param_3];
+; CHECK-NEXT: and.b64 %rd17, %rd16, 3;
+; CHECK-NEXT: shl.b64 %rd18, %rd17, 2;
+; CHECK-NEXT: add.u64 %rd19, %SP, 48;
+; CHECK-NEXT: add.s64 %rd20, %rd19, %rd18;
+; CHECK-NEXT: ld.b32 %r9, [%SP+32];
+; CHECK-NEXT: ld.b32 %r10, [%SP+36];
+; CHECK-NEXT: ld.b32 %r11, [%SP+40];
+; CHECK-NEXT: ld.b32 %r12, [%SP+44];
+; CHECK-NEXT: st.b32 [%SP+60], %r12;
+; CHECK-NEXT: st.b32 [%SP+56], %r11;
+; CHECK-NEXT: st.b32 [%SP+52], %r10;
+; CHECK-NEXT: st.b32 [%SP+48], %r9;
+; CHECK-NEXT: st.b32 [%rd20], 40;
+; CHECK-NEXT: ld.b32 %r13, [%SP+60];
+; CHECK-NEXT: ld.b32 %r14, [%SP+56];
+; CHECK-NEXT: ld.b32 %r15, [%SP+52];
+; CHECK-NEXT: ld.b32 %r16, [%SP+48];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r16, %r15, %r14, %r13};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1
@@ -213,11 +260,11 @@ define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
define <4 x i32> @mix_dynamic_constant(i32 %idx0, i32 %idx1) {
; CHECK-LABEL: mix_dynamic_constant(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot6[16];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot6[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<5>;
-; CHECK-NEXT: .reg .b64 %rd<10>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-NEXT: .reg .b64 %rd<11>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot6;
@@ -231,14 +278,21 @@ define <4 x i32> @mix_dynamic_constant(i32 %idx0, i32 %idx1) {
; CHECK-NEXT: ld.param.b32 %rd6, [mix_dynamic_constant_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
-; CHECK-NEXT: st.b32 [%SP+4], 20;
-; CHECK-NEXT: st.b32 [%rd9], 30;
-; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: add.u64 %rd9, %SP, 16;
+; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
+; CHECK-NEXT: ld.b32 %r1, [%SP];
; CHECK-NEXT: ld.b32 %r2, [%SP+8];
-; CHECK-NEXT: ld.b32 %r3, [%SP+4];
-; CHECK-NEXT: ld.b32 %r4, [%SP];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
+; CHECK-NEXT: ld.b32 %r3, [%SP+12];
+; CHECK-NEXT: st.b32 [%SP+28], %r3;
+; CHECK-NEXT: st.b32 [%SP+24], %r2;
+; CHECK-NEXT: st.b32 [%SP+16], %r1;
+; CHECK-NEXT: st.b32 [%SP+20], 20;
+; CHECK-NEXT: st.b32 [%rd10], 30;
+; CHECK-NEXT: ld.b32 %r4, [%SP+28];
+; CHECK-NEXT: ld.b32 %r5, [%SP+24];
+; CHECK-NEXT: ld.b32 %r6, [%SP+20];
+; CHECK-NEXT: ld.b32 %r7, [%SP+16];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r7, %r6, %r5, %r4};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 1
@@ -302,8 +356,8 @@ define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
; CHECK-NEXT: .local .align 4 .b8 __local_depot8[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<7>;
-; CHECK-NEXT: .reg .b64 %rd<14>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-NEXT: .reg .b64 %rd<13>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot8;
@@ -311,28 +365,30 @@ define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
; CHECK-NEXT: ld.param.b32 %rd1, [overlapping_chains_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
-; CHECK-NEXT: add.u64 %rd4, %SP, 16;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
; CHECK-NEXT: st.b32 [%rd5], 10;
-; CHECK-NEXT: add.u64 %rd6, %SP, 0;
-; CHECK-NEXT: add.s64 %rd7, %rd6, %rd3;
-; CHECK-NEXT: ld.b32 %r1, [%SP+28];
-; CHECK-NEXT: ld.b32 %r2, [%SP+16];
-; CHECK-NEXT: ld.param.b64 %rd8, [overlapping_chains_param_2];
-; CHECK-NEXT: st.b32 [%rd7], 10;
-; CHECK-NEXT: ld.param.b32 %rd9, [overlapping_chains_param_1];
-; CHECK-NEXT: and.b64 %rd10, %rd9, 3;
-; CHECK-NEXT: shl.b64 %rd11, %rd10, 2;
-; CHECK-NEXT: add.s64 %rd12, %rd6, %rd11;
-; CHECK-NEXT: st.b32 [%SP+4], 20;
-; CHECK-NEXT: st.b32 [%rd12], 30;
-; CHECK-NEXT: ld.param.b64 %rd13, [overlapping_chains_param_3];
+; CHECK-NEXT: ld.param.b32 %rd6, [overlapping_chains_param_1];
+; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
+; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
+; CHECK-NEXT: add.u64 %rd9, %SP, 16;
+; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
+; CHECK-NEXT: ld.b32 %r1, [%SP];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
; CHECK-NEXT: ld.b32 %r3, [%SP+12];
-; CHECK-NEXT: ld.b32 %r4, [%SP+8];
-; CHECK-NEXT: ld.b32 %r5, [%SP+4];
-; CHECK-NEXT: ld.b32 %r6, [%SP];
-; CHECK-NEXT: st.v4.b32 [%rd8], {%r2, 20, 40, %r1};
-; CHECK-NEXT: st.v4.b32 [%rd13], {%r6, %r5, %r4, %r3};
+; CHECK-NEXT: ld.param.b64 %rd11, [overlapping_chains_param_2];
+; CHECK-NEXT: st.b32 [%SP+28], %r3;
+; CHECK-NEXT: st.b32 [%SP+24], %r2;
+; CHECK-NEXT: st.b32 [%SP+16], %r1;
+; CHECK-NEXT: st.b32 [%SP+20], 20;
+; CHECK-NEXT: st.b32 [%rd10], 30;
+; CHECK-NEXT: ld.param.b64 %rd12, [overlapping_chains_param_3];
+; CHECK-NEXT: ld.b32 %r4, [%SP+28];
+; CHECK-NEXT: ld.b32 %r5, [%SP+24];
+; CHECK-NEXT: ld.b32 %r6, [%SP+20];
+; CHECK-NEXT: ld.b32 %r7, [%SP+16];
+; CHECK-NEXT: st.v4.b32 [%rd11], {%r1, 20, 40, %r3};
+; CHECK-NEXT: st.v4.b32 [%rd12], {%r7, %r6, %r5, %r4};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 1
@@ -347,3 +403,392 @@ define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
store <4 x i32> %w0, ptr %out1
ret void
}
+
+; Test with i1 elements (1-bit, non-byte-aligned)
+define <8 x i1> @dynamic_i1(i32 %idx) {
+; CHECK-LABEL: dynamic_i1(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot9[8];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot9;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i1_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 0x7773U;
+; CHECK-NEXT: ld.b32 %r5, [%SP+4];
+; CHECK-NEXT: prmt.b32 %r6, %r5, 0, 0x7771U;
+; CHECK-NEXT: prmt.b32 %r7, %r5, 0, 0x7772U;
+; CHECK-NEXT: prmt.b32 %r8, %r5, 0, 0x7773U;
+; CHECK-NEXT: st.param.b8 [func_retval0+4], %r5;
+; CHECK-NEXT: st.param.b8 [func_retval0], %r3;
+; CHECK-NEXT: st.param.b8 [func_retval0+7], %r8;
+; CHECK-NEXT: st.param.b8 [func_retval0+6], %r7;
+; CHECK-NEXT: st.param.b8 [func_retval0+5], %r6;
+; CHECK-NEXT: st.param.b8 [func_retval0+3], %r4;
+; CHECK-NEXT: st.param.b8 [func_retval0+2], 1;
+; CHECK-NEXT: st.param.b8 [func_retval0+1], 0;
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i1> poison, i1 1, i32 %idx
+ %v1 = insertelement <8 x i1> %v0, i1 0, i32 1
+ %v2 = insertelement <8 x i1> %v1, i1 1, i32 2
+ ret <8 x i1> %v2
+}
+
+; Test with i2 elements (2-bit, non-byte-aligned)
+define <8 x i2> @dynamic_i2(i32 %idx) {
+; CHECK-LABEL: dynamic_i2(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot10[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b16 %rs<24>;
+; CHECK-NEXT: .reg .b32 %r<10>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot10;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i2_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r3;
+; CHECK-NEXT: and.b16 %rs2, %rs1, 3;
+; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 0x7771U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r4;
+; CHECK-NEXT: and.b16 %rs4, %rs3, 3;
+; CHECK-NEXT: shl.b16 %rs5, %rs4, 2;
+; CHECK-NEXT: or.b16 %rs6, %rs2, %rs5;
+; CHECK-NEXT: prmt.b32 %r5, %r3, 0, 0x7772U;
+; CHECK-NEXT: cvt.u16.u32 %rs7, %r5;
+; CHECK-NEXT: and.b16 %rs8, %rs7, 3;
+; CHECK-NEXT: shl.b16 %rs9, %rs8, 4;
+; CHECK-NEXT: or.b16 %rs10, %rs6, %rs9;
+; CHECK-NEXT: prmt.b32 %r6, %r3, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs11, %r6;
+; CHECK-NEXT: shl.b16 %rs12, %rs11, 6;
+; CHECK-NEXT: or.b16 %rs13, %rs10, %rs12;
+; CHECK-NEXT: st.b8 [%SP+8], %rs13;
+; CHECK-NEXT: ld.b32 %r7, [%SP];
+; CHECK-NEXT: prmt.b32 %r8, %r7, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs14, %r8;
+; CHECK-NEXT: shl.b16 %rs15, %rs14, 6;
+; CHECK-NEXT: and.b16 %rs16, %rs15, 192;
+; CHECK-NEXT: ld.s8 %rs17, [%SP+8];
+; CHECK-NEXT: shl.b16 %rs18, %rs17, 8;
+; CHECK-NEXT: or.b16 %rs19, %rs16, %rs18;
+; CHECK-NEXT: prmt.b32 %r9, %r7, 0, 0x7770U;
+; CHECK-NEXT: st.param.b16 [func_retval0], %r9;
+; CHECK-NEXT: st.param.b16 [func_retval0+8], %rs17;
+; CHECK-NEXT: shr.s16 %rs20, %rs18, 14;
+; CHECK-NEXT: st.param.b16 [func_retval0+14], %rs20;
+; CHECK-NEXT: shr.s16 %rs21, %rs18, 12;
+; CHECK-NEXT: st.param.b16 [func_retval0+12], %rs21;
+; CHECK-NEXT: shr.s16 %rs22, %rs18, 10;
+; CHECK-NEXT: st.param.b16 [func_retval0+10], %rs22;
+; CHECK-NEXT: shr.s16 %rs23, %rs19, 6;
+; CHECK-NEXT: st.param.b16 [func_retval0+6], %rs23;
+; CHECK-NEXT: st.param.b16 [func_retval0+4], 3;
+; CHECK-NEXT: st.param.b16 [func_retval0+2], 2;
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i2> poison, i2 1, i32 %idx
+ %v1 = insertelement <8 x i2> %v0, i2 2, i32 1
+ %v2 = insertelement <8 x i2> %v1, i2 3, i32 2
+ ret <8 x i2> %v2
+}
+
+; Test with i3 elements (3-bit, non-byte-aligned)
+define <8 x i3> @dynamic_i3(i32 %idx) {
+; CHECK-LABEL: dynamic_i3(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot11[8];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<15>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot11;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i3_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: ld.b32 %r4, [%SP+4];
+; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U;
+; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U;
+; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U;
+; CHECK-NEXT: st.param.b32 [func_retval0+12], %r7;
+; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U;
+; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U;
+; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U;
+; CHECK-NEXT: st.param.b32 [func_retval0+8], %r10;
+; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r11;
+; CHECK-NEXT: mov.b16 %rs2, 3;
+; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1};
+; CHECK-NEXT: st.param.b32 [func_retval0+4], %r12;
+; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r13;
+; CHECK-NEXT: mov.b16 %rs4, 2;
+; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4};
+; CHECK-NEXT: st.param.b32 [func_retval0], %r14;
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i3> poison, i3 1, i32 %idx
+ %v1 = insertelement <8 x i3> %v0, i3 2, i32 1
+ %v2 = insertelement <8 x i3> %v1, i3 3, i32 2
+ ret <8 x i3> %v2
+}
+
+; Test with i4 elements (4-bit, non-byte-aligned)
+define <8 x i4> @dynamic_i4(i32 %idx) {
+; CHECK-LABEL: dynamic_i4(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot12[16];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b16 %rs<30>;
+; CHECK-NEXT: .reg .b32 %r<22>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot12;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i4_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: prmt.b32 %r4, %r3, 0, 0x7770U;
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r4;
+; CHECK-NEXT: and.b16 %rs2, %rs1, 15;
+; CHECK-NEXT: prmt.b32 %r5, %r3, 0, 0x7771U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r5;
+; CHECK-NEXT: and.b16 %rs4, %rs3, 15;
+; CHECK-NEXT: shl.b16 %rs5, %rs4, 4;
+; CHECK-NEXT: or.b16 %rs6, %rs2, %rs5;
+; CHECK-NEXT: prmt.b32 %r6, %r3, 0, 0x7772U;
+; CHECK-NEXT: cvt.u16.u32 %rs7, %r6;
+; CHECK-NEXT: and.b16 %rs8, %rs7, 15;
+; CHECK-NEXT: shl.b16 %rs9, %rs8, 8;
+; CHECK-NEXT: or.b16 %rs10, %rs6, %rs9;
+; CHECK-NEXT: prmt.b32 %r7, %r3, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs11, %r7;
+; CHECK-NEXT: shl.b16 %rs12, %rs11, 12;
+; CHECK-NEXT: or.b16 %rs13, %rs10, %rs12;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs13;
+; CHECK-NEXT: ld.b32 %r9, [%SP+4];
+; CHECK-NEXT: prmt.b32 %r10, %r9, 0, 0x7770U;
+; CHECK-NEXT: cvt.u16.u32 %rs14, %r10;
+; CHECK-NEXT: and.b16 %rs15, %rs14, 15;
+; CHECK-NEXT: prmt.b32 %r11, %r9, 0, 0x7771U;
+; CHECK-NEXT: cvt.u16.u32 %rs16, %r11;
+; CHECK-NEXT: and.b16 %rs17, %rs16, 15;
+; CHECK-NEXT: shl.b16 %rs18, %rs17, 4;
+; CHECK-NEXT: or.b16 %rs19, %rs15, %rs18;
+; CHECK-NEXT: prmt.b32 %r12, %r9, 0, 0x7772U;
+; CHECK-NEXT: cvt.u16.u32 %rs20, %r12;
+; CHECK-NEXT: and.b16 %rs21, %rs20, 15;
+; CHECK-NEXT: shl.b16 %rs22, %rs21, 8;
+; CHECK-NEXT: or.b16 %rs23, %rs19, %rs22;
+; CHECK-NEXT: prmt.b32 %r13, %r9, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs24, %r13;
+; CHECK-NEXT: shl.b16 %rs25, %rs24, 12;
+; CHECK-NEXT: or.b16 %rs26, %rs23, %rs25;
+; CHECK-NEXT: cvt.u32.u16 %r14, %rs26;
+; CHECK-NEXT: shl.b32 %r15, %r14, 16;
+; CHECK-NEXT: or.b32 %r16, %r8, %r15;
+; CHECK-NEXT: mov.b32 %r17, {%rs20, %rs24};
+; CHECK-NEXT: st.param.b32 [func_retval0+12], %r17;
+; CHECK-NEXT: mov.b32 %r18, {%rs14, %rs16};
+; CHECK-NEXT: st.param.b32 [func_retval0+8], %r18;
+; CHECK-NEXT: mov.b16 %rs27, 2;
+; CHECK-NEXT: mov.b32 %r19, {%rs1, %rs27};
+; CHECK-NEXT: st.param.b32 [func_retval0], %r19;
+; CHECK-NEXT: shr.u32 %r20, %r16, 12;
+; CHECK-NEXT: cvt.u16.u32 %rs28, %r20;
+; CHECK-NEXT: mov.b16 %rs29, 3;
+; CHECK-NEXT: mov.b32 %r21, {%rs29, %rs28};
+; CHECK-NEXT: st.param.b32 [func_retval0+4], %r21;
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i4> poison, i4 1, i32 %idx
+ %v1 = insertelement <8 x i4> %v0, i4 2, i32 1
+ %v2 = insertelement <8 x i4> %v1, i4 3, i32 2
+ ret <8 x i4> %v2
+}
+
+; Test with i5 elements (5-bit, non-byte-aligned)
+define <8 x i5> @dynamic_i5(i32 %idx) {
+; CHECK-LABEL: dynamic_i5(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot13[8];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<15>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot13;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i5_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: ld.b32 %r4, [%SP+4];
+; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U;
+; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U;
+; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U;
+; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U;
+; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U;
+; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0+8], {%r10, %r7};
+; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r11;
+; CHECK-NEXT: mov.b16 %rs2, 3;
+; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1};
+; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r13;
+; CHECK-NEXT: mov.b16 %rs4, 2;
+; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4};
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r12};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i5> poison, i5 1, i32 %idx
+ %v1 = insertelement <8 x i5> %v0, i5 2, i32 1
+ %v2 = insertelement <8 x i5> %v1, i5 3, i32 2
+ ret <8 x i5> %v2
+}
+
+; Test with i7 elements (7-bit, non-byte-aligned)
+define <8 x i7> @dynamic_i7(i32 %idx) {
+; CHECK-LABEL: dynamic_i7(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot14[8];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<15>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot14;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i7_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: ld.b32 %r4, [%SP+4];
+; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U;
+; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U;
+; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U;
+; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U;
+; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U;
+; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0+8], {%r10, %r7};
+; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r11;
+; CHECK-NEXT: mov.b16 %rs2, 3;
+; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1};
+; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r13;
+; CHECK-NEXT: mov.b16 %rs4, 2;
+; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4};
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r12};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i7> poison, i7 1, i32 %idx
+ %v1 = insertelement <8 x i7> %v0, i7 2, i32 1
+ %v2 = insertelement <8 x i7> %v1, i7 3, i32 2
+ ret <8 x i7> %v2
+}
+
+; Test with i6 elements (6-bit, non-byte-aligned)
+define <8 x i6> @dynamic_i6(i32 %idx) {
+; CHECK-LABEL: dynamic_i6(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot15[8];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b32 %r<15>;
+; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.b64 %SPL, __local_depot15;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b32 %rd1, [dynamic_i6_param_0];
+; CHECK-NEXT: and.b64 %rd2, %rd1, 7;
+; CHECK-NEXT: add.u64 %rd3, %SP, 0;
+; CHECK-NEXT: or.b64 %rd4, %rd3, %rd2;
+; CHECK-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NEXT: st.b8 [%rd4], 1;
+; CHECK-NEXT: ld.b32 %r3, [%SP];
+; CHECK-NEXT: ld.b32 %r4, [%SP+4];
+; CHECK-NEXT: prmt.b32 %r5, %r4, 0, 0x7773U;
+; CHECK-NEXT: prmt.b32 %r6, %r4, 0, 0x7772U;
+; CHECK-NEXT: prmt.b32 %r7, %r6, %r5, 0x5410U;
+; CHECK-NEXT: prmt.b32 %r8, %r4, 0, 0x7771U;
+; CHECK-NEXT: prmt.b32 %r9, %r4, 0, 0x7770U;
+; CHECK-NEXT: prmt.b32 %r10, %r9, %r8, 0x5410U;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0+8], {%r10, %r7};
+; CHECK-NEXT: prmt.b32 %r11, %r3, 0, 0x7773U;
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r11;
+; CHECK-NEXT: mov.b16 %rs2, 3;
+; CHECK-NEXT: mov.b32 %r12, {%rs2, %rs1};
+; CHECK-NEXT: prmt.b32 %r13, %r3, 0, 0x7770U;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r13;
+; CHECK-NEXT: mov.b16 %rs4, 2;
+; CHECK-NEXT: mov.b32 %r14, {%rs3, %rs4};
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r14, %r12};
+; CHECK-NEXT: ret;
+ %v0 = insertelement <8 x i6> poison, i6 1, i32 %idx
+ %v1 = insertelement <8 x i6> %v0, i6 2, i32 1
+ %v2 = insertelement <8 x i6> %v1, i6 3, i32 2
+ ret <8 x i6> %v2
+}
+
+; Test with multiple dynamic insertions on i3 elements
+define <4 x i3> @multiple_dynamic_i3(i32 %idx0, i32 %idx1) {
+; CHECK-LABEL: multiple_dynamic_i3(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [multiple_dynamic_i3_param_0];
+; CHECK-NEXT: shl.b32 %r2, %r1, 3;
+; CHECK-NEXT: bfi.b32 %r3, 1, %r4, %r2, 8;
+; CHECK-NEXT: ld.param.b32 %r5, [multiple_dynamic_i3_param_1];
+; CHECK-NEXT: shl.b32 %r6, %r5, 3;
+; CHECK-NEXT: bfi.b32 %r7, 2, %r3, %r6, 8;
+; CHECK-NEXT: st.param.b16 [func_retval0], %r7;
+; CHECK-NEXT: shr.u32 %r8, %r7, 16;
+; CHECK-NEXT: st.param.b16 [func_retval0+2], %r8;
+; CHECK-NEXT: ret;
+ %v0 = insertelement <4 x i3> poison, i3 1, i32 %idx0
+ %v1 = insertelement <4 x i3> %v0, i3 2, i32 %idx1
+ ret <4 x i3> %v1
+}
>From b6fc7e875e4fb4725c24d2e800230d8ea19250e1 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 28 Oct 2025 12:32:23 -0700
Subject: [PATCH 5/5] [NVPTX] address case where element type is
non-byte-aligned
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 33 ++-
llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll | 198 +++++++-----------
2 files changed, 99 insertions(+), 132 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4ff7791b9e070..9dac19db326cd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -23481,10 +23481,26 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
if (InVec.isUndef() && TLI.shouldSplatInsEltVarIndex(VT))
return DAG.getSplat(VT, DL, InVal);
- // Check if this operation is illegal and will be handled the default way.
- if (TLI.getTypeAction(*DAG.getContext(), VT) ==
- TargetLowering::TypeSplitVector ||
- TLI.isOperationExpand(ISD::INSERT_VECTOR_ELT, VT)) {
+ // Extend this type to be byte-addressable
+ EVT OldVT = VT;
+ EVT EltVT = VT.getVectorElementType();
+ bool IsByteSized = EltVT.isByteSized();
+ if (!IsByteSized) {
+ EltVT =
+ EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VT = VT.changeElementType(EltVT);
+ }
+
+ // Check if this operation will be handled the default way for its type.
+ auto IsTypeDefaultHandled = [this](EVT VT) {
+ return TLI.getTypeAction(*DAG.getContext(), VT) ==
+ TargetLowering::TypeSplitVector ||
+ TLI.isOperationExpand(ISD::INSERT_VECTOR_ELT, VT);
+ };
+
+ // Check if this operation is illegal and will be handled the default way,
+ // even after extending the type to be byte-addressable.
+ if (IsTypeDefaultHandled(OldVT) && IsTypeDefaultHandled(VT)) {
// For each dynamic insertelt, the default way will save the vector to
// the stack, store at an offset, and load the modified vector. This can
// dramatically increase code size if we have a chain of insertelts on a
@@ -23518,6 +23534,8 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// Save the vector to the stack
SDValue InVec = Seq.back()->getOperand(0);
+ if (!IsByteSized)
+ InVec = DAG.getNode(ISD::ANY_EXTEND, DL, VT, InVec);
SDValue Store = DAG.getStore(DAG.getEntryNode(), DL, InVec, StackPtr,
PtrInfo, SmallestAlign);
@@ -23526,6 +23544,10 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
SDValue Elmnt = N->getOperand(1);
SDValue Index = N->getOperand(2);
+ // Check if we have to extend the element type
+ if (!IsByteSized && Elmnt.getValueType().bitsLT(EltVT))
+ Elmnt = DAG.getNode(ISD::ANY_EXTEND, DL, EltVT, Elmnt);
+
// Store the new element. This may be larger than the vector element
// type, so use a truncating store.
SDValue EltPtr =
@@ -23540,7 +23562,8 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
// Load the saved vector from the stack
SDValue Load =
DAG.getLoad(VT, DL, Store, StackPtr, PtrInfo, SmallestAlign);
- return Load.getValue(0);
+ SDValue LoadV = Load.getValue(0);
+ return IsByteSized ? LoadV : DAG.getAnyExtOrTrunc(LoadV, DL, OldVT);
}
}
diff --git a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
index 3ad824123c777..f2ccf3ed65c02 100644
--- a/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
+++ b/llvm/test/CodeGen/NVPTX/insertelt-dynamic.ll
@@ -100,11 +100,11 @@ define <4 x i32> @dynamic_in_middle(i32 %idx) {
define <4 x i32> @repeated_same_index(i32 %idx) {
; CHECK-LABEL: repeated_same_index(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot3[32];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot3[16];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .b64 %rd<8>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<6>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot3;
@@ -112,25 +112,14 @@ define <4 x i32> @repeated_same_index(i32 %idx) {
; CHECK-NEXT: ld.param.b32 %rd1, [repeated_same_index_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
-; CHECK-NEXT: add.u64 %rd4, %SP, 16;
+; CHECK-NEXT: add.u64 %rd4, %SP, 0;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
-; CHECK-NEXT: add.u64 %rd6, %SP, 0;
-; CHECK-NEXT: add.s64 %rd7, %rd6, %rd3;
-; CHECK-NEXT: st.b32 [%rd7], 10;
-; CHECK-NEXT: ld.b32 %r1, [%SP];
-; CHECK-NEXT: ld.b32 %r2, [%SP+4];
-; CHECK-NEXT: ld.b32 %r3, [%SP+8];
-; CHECK-NEXT: ld.b32 %r4, [%SP+12];
-; CHECK-NEXT: st.b32 [%SP+28], %r4;
-; CHECK-NEXT: st.b32 [%SP+24], %r3;
-; CHECK-NEXT: st.b32 [%SP+20], %r2;
-; CHECK-NEXT: st.b32 [%SP+16], %r1;
; CHECK-NEXT: st.b32 [%rd5], 20;
-; CHECK-NEXT: ld.b32 %r5, [%SP+28];
-; CHECK-NEXT: ld.b32 %r6, [%SP+24];
-; CHECK-NEXT: ld.b32 %r7, [%SP+20];
-; CHECK-NEXT: ld.b32 %r8, [%SP+16];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r8, %r7, %r6, %r5};
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx
%v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx
@@ -141,11 +130,11 @@ define <4 x i32> @repeated_same_index(i32 %idx) {
define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
; CHECK-LABEL: multiple_dynamic(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot4[32];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot4[16];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<9>;
-; CHECK-NEXT: .reg .b64 %rd<11>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<10>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot4;
@@ -159,22 +148,13 @@ define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
; CHECK-NEXT: ld.param.b32 %rd6, [multiple_dynamic_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.u64 %rd9, %SP, 16;
-; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
-; CHECK-NEXT: ld.b32 %r1, [%SP];
-; CHECK-NEXT: ld.b32 %r2, [%SP+4];
-; CHECK-NEXT: ld.b32 %r3, [%SP+8];
-; CHECK-NEXT: ld.b32 %r4, [%SP+12];
-; CHECK-NEXT: st.b32 [%SP+28], %r4;
-; CHECK-NEXT: st.b32 [%SP+24], %r3;
-; CHECK-NEXT: st.b32 [%SP+20], %r2;
-; CHECK-NEXT: st.b32 [%SP+16], %r1;
-; CHECK-NEXT: st.b32 [%rd10], 20;
-; CHECK-NEXT: ld.b32 %r5, [%SP+28];
-; CHECK-NEXT: ld.b32 %r6, [%SP+24];
-; CHECK-NEXT: ld.b32 %r7, [%SP+20];
-; CHECK-NEXT: ld.b32 %r8, [%SP+16];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r8, %r7, %r6, %r5};
+; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
+; CHECK-NEXT: st.b32 [%rd9], 20;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1
@@ -185,11 +165,11 @@ define <4 x i32> @multiple_dynamic(i32 %idx0, i32 %idx1) {
define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
; CHECK-LABEL: all_dynamic(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot5[64];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot5[16];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<17>;
-; CHECK-NEXT: .reg .b64 %rd<21>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<18>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot5;
@@ -199,54 +179,27 @@ define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
; CHECK-NEXT: add.u64 %rd4, %SP, 0;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
-; CHECK-NEXT: st.b32 [%rd5], 10;
; CHECK-NEXT: ld.param.b32 %rd6, [all_dynamic_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.u64 %rd9, %SP, 16;
-; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
-; CHECK-NEXT: ld.b32 %r1, [%SP];
-; CHECK-NEXT: ld.b32 %r2, [%SP+4];
-; CHECK-NEXT: ld.b32 %r3, [%SP+8];
-; CHECK-NEXT: ld.b32 %r4, [%SP+12];
-; CHECK-NEXT: st.b32 [%SP+28], %r4;
-; CHECK-NEXT: st.b32 [%SP+24], %r3;
-; CHECK-NEXT: st.b32 [%SP+20], %r2;
-; CHECK-NEXT: st.b32 [%SP+16], %r1;
-; CHECK-NEXT: st.b32 [%rd10], 20;
-; CHECK-NEXT: ld.param.b32 %rd11, [all_dynamic_param_2];
-; CHECK-NEXT: and.b64 %rd12, %rd11, 3;
-; CHECK-NEXT: shl.b64 %rd13, %rd12, 2;
-; CHECK-NEXT: add.u64 %rd14, %SP, 32;
-; CHECK-NEXT: add.s64 %rd15, %rd14, %rd13;
-; CHECK-NEXT: ld.b32 %r5, [%SP+16];
-; CHECK-NEXT: ld.b32 %r6, [%SP+20];
-; CHECK-NEXT: ld.b32 %r7, [%SP+24];
-; CHECK-NEXT: ld.b32 %r8, [%SP+28];
-; CHECK-NEXT: st.b32 [%SP+44], %r8;
-; CHECK-NEXT: st.b32 [%SP+40], %r7;
-; CHECK-NEXT: st.b32 [%SP+36], %r6;
-; CHECK-NEXT: st.b32 [%SP+32], %r5;
-; CHECK-NEXT: st.b32 [%rd15], 30;
-; CHECK-NEXT: ld.param.b32 %rd16, [all_dynamic_param_3];
-; CHECK-NEXT: and.b64 %rd17, %rd16, 3;
-; CHECK-NEXT: shl.b64 %rd18, %rd17, 2;
-; CHECK-NEXT: add.u64 %rd19, %SP, 48;
-; CHECK-NEXT: add.s64 %rd20, %rd19, %rd18;
-; CHECK-NEXT: ld.b32 %r9, [%SP+32];
-; CHECK-NEXT: ld.b32 %r10, [%SP+36];
-; CHECK-NEXT: ld.b32 %r11, [%SP+40];
-; CHECK-NEXT: ld.b32 %r12, [%SP+44];
-; CHECK-NEXT: st.b32 [%SP+60], %r12;
-; CHECK-NEXT: st.b32 [%SP+56], %r11;
-; CHECK-NEXT: st.b32 [%SP+52], %r10;
-; CHECK-NEXT: st.b32 [%SP+48], %r9;
-; CHECK-NEXT: st.b32 [%rd20], 40;
-; CHECK-NEXT: ld.b32 %r13, [%SP+60];
-; CHECK-NEXT: ld.b32 %r14, [%SP+56];
-; CHECK-NEXT: ld.b32 %r15, [%SP+52];
-; CHECK-NEXT: ld.b32 %r16, [%SP+48];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r16, %r15, %r14, %r13};
+; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
+; CHECK-NEXT: ld.param.b32 %rd10, [all_dynamic_param_2];
+; CHECK-NEXT: and.b64 %rd11, %rd10, 3;
+; CHECK-NEXT: shl.b64 %rd12, %rd11, 2;
+; CHECK-NEXT: add.s64 %rd13, %rd4, %rd12;
+; CHECK-NEXT: st.b32 [%rd5], 10;
+; CHECK-NEXT: st.b32 [%rd9], 20;
+; CHECK-NEXT: st.b32 [%rd13], 30;
+; CHECK-NEXT: ld.param.b32 %rd14, [all_dynamic_param_3];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 3;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 2;
+; CHECK-NEXT: add.s64 %rd17, %rd4, %rd16;
+; CHECK-NEXT: st.b32 [%rd17], 40;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
+; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 %idx1
@@ -260,11 +213,11 @@ define <4 x i32> @all_dynamic(i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) {
define <4 x i32> @mix_dynamic_constant(i32 %idx0, i32 %idx1) {
; CHECK-LABEL: mix_dynamic_constant(
; CHECK: {
-; CHECK-NEXT: .local .align 4 .b8 __local_depot6[32];
+; CHECK-NEXT: .local .align 4 .b8 __local_depot6[16];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<8>;
-; CHECK-NEXT: .reg .b64 %rd<11>;
+; CHECK-NEXT: .reg .b32 %r<5>;
+; CHECK-NEXT: .reg .b64 %rd<10>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot6;
@@ -278,21 +231,14 @@ define <4 x i32> @mix_dynamic_constant(i32 %idx0, i32 %idx1) {
; CHECK-NEXT: ld.param.b32 %rd6, [mix_dynamic_constant_param_1];
; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.u64 %rd9, %SP, 16;
-; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
-; CHECK-NEXT: ld.b32 %r1, [%SP];
+; CHECK-NEXT: add.s64 %rd9, %rd4, %rd8;
+; CHECK-NEXT: st.b32 [%SP+4], 20;
+; CHECK-NEXT: st.b32 [%rd9], 30;
+; CHECK-NEXT: ld.b32 %r1, [%SP+12];
; CHECK-NEXT: ld.b32 %r2, [%SP+8];
-; CHECK-NEXT: ld.b32 %r3, [%SP+12];
-; CHECK-NEXT: st.b32 [%SP+28], %r3;
-; CHECK-NEXT: st.b32 [%SP+24], %r2;
-; CHECK-NEXT: st.b32 [%SP+16], %r1;
-; CHECK-NEXT: st.b32 [%SP+20], 20;
-; CHECK-NEXT: st.b32 [%rd10], 30;
-; CHECK-NEXT: ld.b32 %r4, [%SP+28];
-; CHECK-NEXT: ld.b32 %r5, [%SP+24];
-; CHECK-NEXT: ld.b32 %r6, [%SP+20];
-; CHECK-NEXT: ld.b32 %r7, [%SP+16];
-; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r7, %r6, %r5, %r4};
+; CHECK-NEXT: ld.b32 %r3, [%SP+4];
+; CHECK-NEXT: ld.b32 %r4, [%SP];
+; CHECK-NEXT: st.param.v4.b32 [func_retval0], {%r4, %r3, %r2, %r1};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 1
@@ -356,8 +302,8 @@ define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
; CHECK-NEXT: .local .align 4 .b8 __local_depot8[32];
; CHECK-NEXT: .reg .b64 %SP;
; CHECK-NEXT: .reg .b64 %SPL;
-; CHECK-NEXT: .reg .b32 %r<8>;
-; CHECK-NEXT: .reg .b64 %rd<13>;
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b64 %rd<14>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: mov.b64 %SPL, __local_depot8;
@@ -365,30 +311,28 @@ define void @overlapping_chains(i32 %idx0, i32 %idx1, ptr %out0, ptr %out1) {
; CHECK-NEXT: ld.param.b32 %rd1, [overlapping_chains_param_0];
; CHECK-NEXT: and.b64 %rd2, %rd1, 3;
; CHECK-NEXT: shl.b64 %rd3, %rd2, 2;
-; CHECK-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NEXT: add.u64 %rd4, %SP, 16;
; CHECK-NEXT: add.s64 %rd5, %rd4, %rd3;
; CHECK-NEXT: st.b32 [%rd5], 10;
-; CHECK-NEXT: ld.param.b32 %rd6, [overlapping_chains_param_1];
-; CHECK-NEXT: and.b64 %rd7, %rd6, 3;
-; CHECK-NEXT: shl.b64 %rd8, %rd7, 2;
-; CHECK-NEXT: add.u64 %rd9, %SP, 16;
-; CHECK-NEXT: add.s64 %rd10, %rd9, %rd8;
-; CHECK-NEXT: ld.b32 %r1, [%SP];
-; CHECK-NEXT: ld.b32 %r2, [%SP+8];
+; CHECK-NEXT: add.u64 %rd6, %SP, 0;
+; CHECK-NEXT: add.s64 %rd7, %rd6, %rd3;
+; CHECK-NEXT: ld.b32 %r1, [%SP+28];
+; CHECK-NEXT: ld.b32 %r2, [%SP+16];
+; CHECK-NEXT: ld.param.b64 %rd8, [overlapping_chains_param_2];
+; CHECK-NEXT: st.b32 [%rd7], 10;
+; CHECK-NEXT: ld.param.b32 %rd9, [overlapping_chains_param_1];
+; CHECK-NEXT: and.b64 %rd10, %rd9, 3;
+; CHECK-NEXT: shl.b64 %rd11, %rd10, 2;
+; CHECK-NEXT: add.s64 %rd12, %rd6, %rd11;
+; CHECK-NEXT: st.b32 [%SP+4], 20;
+; CHECK-NEXT: st.b32 [%rd12], 30;
+; CHECK-NEXT: ld.param.b64 %rd13, [overlapping_chains_param_3];
; CHECK-NEXT: ld.b32 %r3, [%SP+12];
-; CHECK-NEXT: ld.param.b64 %rd11, [overlapping_chains_param_2];
-; CHECK-NEXT: st.b32 [%SP+28], %r3;
-; CHECK-NEXT: st.b32 [%SP+24], %r2;
-; CHECK-NEXT: st.b32 [%SP+16], %r1;
-; CHECK-NEXT: st.b32 [%SP+20], 20;
-; CHECK-NEXT: st.b32 [%rd10], 30;
-; CHECK-NEXT: ld.param.b64 %rd12, [overlapping_chains_param_3];
-; CHECK-NEXT: ld.b32 %r4, [%SP+28];
-; CHECK-NEXT: ld.b32 %r5, [%SP+24];
-; CHECK-NEXT: ld.b32 %r6, [%SP+20];
-; CHECK-NEXT: ld.b32 %r7, [%SP+16];
-; CHECK-NEXT: st.v4.b32 [%rd11], {%r1, 20, 40, %r3};
-; CHECK-NEXT: st.v4.b32 [%rd12], {%r7, %r6, %r5, %r4};
+; CHECK-NEXT: ld.b32 %r4, [%SP+8];
+; CHECK-NEXT: ld.b32 %r5, [%SP+4];
+; CHECK-NEXT: ld.b32 %r6, [%SP];
+; CHECK-NEXT: st.v4.b32 [%rd8], {%r2, 20, 40, %r1};
+; CHECK-NEXT: st.v4.b32 [%rd13], {%r6, %r5, %r4, %r3};
; CHECK-NEXT: ret;
%v0 = insertelement <4 x i32> poison, i32 10, i32 %idx0
%v1 = insertelement <4 x i32> %v0, i32 20, i32 1
More information about the llvm-commits
mailing list