[llvm] 445973c - [LegalizeTypes] Handle non byte-sized elt types when splitting INSERT/EXTRACT_VECTOR_ELT (#93357)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 13 02:09:22 PDT 2024
Author: Björn Pettersson
Date: 2024-06-13T11:09:18+02:00
New Revision: 445973caceea9154b7f05a0b574ced346955be87
URL: https://github.com/llvm/llvm-project/commit/445973caceea9154b7f05a0b574ced346955be87
DIFF: https://github.com/llvm/llvm-project/commit/445973caceea9154b7f05a0b574ced346955be87.diff
LOG: [LegalizeTypes] Handle non byte-sized elt types when splitting INSERT/EXTRACT_VECTOR_ELT (#93357)
DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT and
DAGTypeLegalizer::SplitVecRes_EXTRACT_VECTOR_ELT did not handle
non byte-sized elements properly. In fact, it only dealt with
elements smaller than 8 bits (as well as byte-sized elements).
This patch generalizes the support for non byte-sized elements by
always widening the vector elements to the next "round integer type"
(a power of 2 bit size). This should make sure that we can access a
single element via a simple byte-addressed scalar load/store.
Also removing a suspicious CustomLowerNode call from
SplitVecRes_INSERT_VECTOR_ELT. Considering that it did not reset
the Lo/Hi out arguments before the return I think that
DAGTypeLegalizer::SplitVectorResult could be fooled into registering
the input vector as being the result. This should however not have
caused any problems since DAGTypeLegalizer::SplitVectorResult is
doing the same CustomLowerNode call, making the code removed by
this patch redundant.
Added:
llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll
Modified:
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 328f26c65ac76..52ef6209bc5fb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1839,17 +1839,12 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
}
}
- // See if the target wants to custom expand this node.
- if (CustomLowerNode(N, N->getValueType(0), true))
- return;
-
// Make the vector elements byte-addressable if they aren't already.
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
- if (VecVT.getScalarSizeInBits() < 8) {
- EltVT = MVT::i8;
- VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- VecVT.getVectorElementCount());
+ if (!EltVT.isByteSized()) {
+ EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VecVT = VecVT.changeElementType(EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
// Extend the element type to match if needed.
if (EltVT.bitsGT(Elt.getValueType()))
@@ -3457,11 +3452,13 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Make the vector elements byte-addressable if they aren't already.
SDLoc dl(N);
EVT EltVT = VecVT.getVectorElementType();
- if (VecVT.getScalarSizeInBits() < 8) {
- EltVT = MVT::i8;
- VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- VecVT.getVectorElementCount());
+ if (!EltVT.isByteSized()) {
+ EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VecVT = VecVT.changeElementType(EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
+ SDValue NewExtract =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx);
+ return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0));
}
// Store the vector to the stack.
@@ -3479,13 +3476,9 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
- // FIXME: This is to handle i1 vectors with elements promoted to i8.
- // i1 vector handling needs general improvement.
- if (N->getValueType(0).bitsLT(EltVT)) {
- SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
- MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
- return DAG.getZExtOrTrunc(Load, dl, N->getValueType(0));
- }
+ // EXTRACT_VECTOR_ELT can extend the element type to the width of the return
+ // type, leaving the high bits undefined. But it can't truncate.
+ assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT.");
return DAG.getExtLoad(
ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
diff --git a/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll b/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
index 72ee660dc2adb..5b03240e3dc49 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
@@ -8,27 +8,28 @@ define i1 @extractloadi1(ptr %ptr, i32 %idx) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_ubyte v0, v[0:1]
-; CHECK-NEXT: v_and_b32_e32 v1, 7, v2
-; CHECK-NEXT: v_lshr_b32_e64 v2, s32, 6
-; CHECK-NEXT: v_or_b32_e32 v1, v2, v1
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_bfe_u32 v2, v0, 1, 1
-; CHECK-NEXT: v_bfe_u32 v3, v0, 2, 2
-; CHECK-NEXT: v_bfe_u32 v4, v0, 3, 1
-; CHECK-NEXT: v_lshrrev_b32_e32 v5, 4, v0
-; CHECK-NEXT: v_bfe_u32 v6, v0, 5, 1
-; CHECK-NEXT: v_lshrrev_b32_e32 v7, 6, v0
-; CHECK-NEXT: v_lshrrev_b32_e32 v8, 7, v0
-; CHECK-NEXT: buffer_store_byte v0, off, s[0:3], s32
-; CHECK-NEXT: buffer_store_byte v8, off, s[0:3], s32 offset:7
-; CHECK-NEXT: buffer_store_byte v7, off, s[0:3], s32 offset:6
-; CHECK-NEXT: buffer_store_byte v6, off, s[0:3], s32 offset:5
-; CHECK-NEXT: buffer_store_byte v5, off, s[0:3], s32 offset:4
-; CHECK-NEXT: buffer_store_byte v4, off, s[0:3], s32 offset:3
-; CHECK-NEXT: buffer_store_byte v3, off, s[0:3], s32 offset:2
-; CHECK-NEXT: buffer_store_byte v2, off, s[0:3], s32 offset:1
-; CHECK-NEXT: buffer_load_ubyte v0, v1, s[0:3], 0 offen
-; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 2, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, 5, v0
+; CHECK-NEXT: v_and_b32_e32 v4, 2, v0
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 6, v0
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 4, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, 3, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v8, 1, v0
+; CHECK-NEXT: v_or_b32_e32 v1, v1, v3
+; CHECK-NEXT: v_and_b32_e32 v3, 0x100, v7
+; CHECK-NEXT: v_and_b32_e32 v7, 0x100, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v4, 7, v4
+; CHECK-NEXT: v_or_b32_e32 v3, v6, v3
+; CHECK-NEXT: v_or_b32_e32 v5, v5, v7
+; CHECK-NEXT: v_or_b32_e32 v0, v0, v4
+; CHECK-NEXT: v_and_b32_e32 v1, 0x103, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, 16, v1
+; CHECK-NEXT: v_or_b32_e32 v1, v3, v4
+; CHECK-NEXT: v_or_b32_e32 v0, v0, v5
+; CHECK-NEXT: v_lshlrev_b32_e32 v2, 3, v2
+; CHECK-NEXT: v_lshr_b64 v[0:1], v[0:1], v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%val = load <8 x i1>, ptr %ptr
%ret = extractelement <8 x i1> %val, i32 %idx
diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index 1ba7e706d1325..db3ea4df52981 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -410,17 +410,17 @@ entry:
}
; GCN-LABEL: {{^}}bit4_extelt:
-; FIXME: One v_mov_b32_e32 vN, 0 should suffice
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0
-; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
-; GCN-DAG: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
-; GCN-DAG: buffer_store_byte [[ZERO]],
-; GCN-DAG: buffer_store_byte [[ONE]],
-; GCN-DAG: buffer_store_byte [[ZERO]],
-; GCN-DAG: buffer_store_byte [[ONE]],
-; GCN: buffer_load_ubyte [[LOAD:v[0-9]+]],
-; GCN: v_and_b32_e32 [[RES:v[0-9]+]], 1, [[LOAD]]
-; GCN: flat_store_dword v[{{[0-9:]+}}], [[RES]]
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dword s2, s[0:1], 0x2c
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshl_b32 s2, s2, 3
+; GCN-NEXT: s_lshr_b32 s2, 0x1000100, s2
+; GCN-NEXT: s_and_b32 s2, s2, 1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: flat_store_dword v[0:1], v2
define amdgpu_kernel void @bit4_extelt(ptr addrspace(1) %out, i32 %sel) {
entry:
%ext = extractelement <4 x i1> <i1 0, i1 1, i1 0, i1 1>, i32 %sel
diff --git a/llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll b/llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll
new file mode 100644
index 0000000000000..7b517c2ca574f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64-- -o - %s| FileCheck %s
+
+; Verify that we support non byte-sized elements, together with variable index.
+
+define void @Legalize_SplitVectorResult_insert_i28(i28 %elt, i16 %idx, ptr %p1, ptr %p2) nounwind {
+; CHECK-LABEL: Legalize_SplitVectorResult_insert_i28:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: andl $7, %esi
+; CHECK-NEXT: movl %edi, -40(%rsp,%rsi,4)
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [268435455,268435455,268435455,268435455]
+; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; CHECK-NEXT: andps %xmm0, %xmm1
+; CHECK-NEXT: andps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: movaps %xmm0, 16(%rcx)
+; CHECK-NEXT: movaps %xmm1, (%rcx)
+; CHECK-NEXT: retq
+ %vec1 = insertelement <8 x i28> zeroinitializer, i28 %elt, i16 %idx
+ %vec2 = zext <8 x i28> %vec1 to <8 x i32>
+ store <8 x i32> %vec2, ptr %p2
+ ret void
+}
+
+define void @Legalize_SplitVectorResult_extract_i12(i16 %idx, ptr %p1, ptr %p2) nounwind {
+; CHECK-LABEL: Legalize_SplitVectorResult_extract_i12:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: movaps (%rsi), %xmm0
+; CHECK-NEXT: movaps 16(%rsi), %xmm1
+; CHECK-NEXT: movaps 32(%rsi), %xmm2
+; CHECK-NEXT: movaps 48(%rsi), %xmm3
+; CHECK-NEXT: movaps 64(%rsi), %xmm4
+; CHECK-NEXT: movaps 80(%rsi), %xmm5
+; CHECK-NEXT: movaps 96(%rsi), %xmm6
+; CHECK-NEXT: movaps 112(%rsi), %xmm7
+; CHECK-NEXT: movaps %xmm7, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm6, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm5, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: andl $63, %edi
+; CHECK-NEXT: movzwl -128(%rsp,%rdi,2), %eax
+; CHECK-NEXT: andl $4095, %eax # imm = 0xFFF
+; CHECK-NEXT: movw %ax, (%rdx)
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %vec = load <64 x i16>, ptr %p1
+ %trunc = trunc <64 x i16> %vec to <64 x i12>
+ %elt = extractelement <64 x i12> %trunc, i16 %idx
+ store i12 %elt, ptr %p2
+ ret void
+}
More information about the llvm-commits
mailing list