[llvm] [LegalizeTypes] Handle non byte-sized elt types when splitting INSERT/EXTRACT_VECTOR_ELT (PR #93357)
Björn Pettersson via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 12 12:54:51 PDT 2024
https://github.com/bjope updated https://github.com/llvm/llvm-project/pull/93357
>From 08cad581538ea979a24145e127a6d2e8cbeee01b Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Sat, 25 May 2024 01:35:12 +0200
Subject: [PATCH 1/4] [LegalizeTypes] Handle non byte-sized elt types when
splitting INSERT/EXTRACT_VECTOR_ELT
DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT and
DAGTypeLegalizer::SplitVecRes_EXTRACT_VECTOR_ELT did not handle
non byte-sized elements properly. In fact, it only dealt with
elements smaller than 8 bits (as well as byte-sized elements).
This patch generalizes the support for non byte-sized elements by
always extending the vector elements to match the store size for
the element type when legalizing via a stack temporary. This should
make sure that we can access a single element via a simple
byte-addressed scalar load/store.
---
llvm/include/llvm/CodeGen/ValueTypes.h | 9 +++
.../SelectionDAG/LegalizeVectorTypes.cpp | 16 ++---
.../CodeGen/X86/legalize-ins-ext-vec-elt.ll | 60 +++++++++++++++++++
3 files changed, 75 insertions(+), 10 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h
index dab6c421bf6e6..9ad582c8ab7a9 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -432,6 +432,15 @@ namespace llvm {
return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
}
+ /// Return a VT for an integer vector type with the size of the elements
+ /// extended to the store size. The typed returned may be an extended
+ /// type.
+ EVT getStoreSizedIntegerVectorElementType(LLVMContext &Context) const {
+ EVT EltVT = getVectorElementType();
+ EltVT = EVT::getIntegerVT(Context, EltVT.getStoreSizeInBits());
+ return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
+ }
+
// Return a VT for a vector type with the same element type but
// half the number of elements. The type returned may be an
// extended type.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 40e621f0db220..28ecd229bf398 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1832,10 +1832,9 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Make the vector elements byte-addressable if they aren't already.
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
- if (VecVT.getScalarSizeInBits() < 8) {
- EltVT = MVT::i8;
- VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- VecVT.getVectorElementCount());
+ if (!EltVT.isByteSized()) {
+ VecVT = VecVT.getStoreSizedIntegerVectorElementType(*DAG.getContext());
+ EltVT = VecVT.getVectorElementType();
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
// Extend the element type to match if needed.
if (EltVT.bitsGT(Elt.getValueType()))
@@ -3443,10 +3442,9 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Make the vector elements byte-addressable if they aren't already.
SDLoc dl(N);
EVT EltVT = VecVT.getVectorElementType();
- if (VecVT.getScalarSizeInBits() < 8) {
- EltVT = MVT::i8;
- VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
- VecVT.getVectorElementCount());
+ if (!EltVT.isByteSized()) {
+ VecVT = VecVT.getStoreSizedIntegerVectorElementType(*DAG.getContext());
+ EltVT = VecVT.getVectorElementType();
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
}
@@ -3465,8 +3463,6 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
- // FIXME: This is to handle i1 vectors with elements promoted to i8.
- // i1 vector handling needs general improvement.
if (N->getValueType(0).bitsLT(EltVT)) {
SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
diff --git a/llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll b/llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll
new file mode 100644
index 0000000000000..7b517c2ca574f
--- /dev/null
+++ b/llvm/test/CodeGen/X86/legalize-ins-ext-vec-elt.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64-- -o - %s| FileCheck %s
+
+; Verify that we support non byte-sized elements, together with variable index.
+
+define void @Legalize_SplitVectorResult_insert_i28(i28 %elt, i16 %idx, ptr %p1, ptr %p2) nounwind {
+; CHECK-LABEL: Legalize_SplitVectorResult_insert_i28:
+; CHECK: # %bb.0:
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: andl $7, %esi
+; CHECK-NEXT: movl %edi, -40(%rsp,%rsi,4)
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [268435455,268435455,268435455,268435455]
+; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; CHECK-NEXT: andps %xmm0, %xmm1
+; CHECK-NEXT: andps -{{[0-9]+}}(%rsp), %xmm0
+; CHECK-NEXT: movaps %xmm0, 16(%rcx)
+; CHECK-NEXT: movaps %xmm1, (%rcx)
+; CHECK-NEXT: retq
+ %vec1 = insertelement <8 x i28> zeroinitializer, i28 %elt, i16 %idx
+ %vec2 = zext <8 x i28> %vec1 to <8 x i32>
+ store <8 x i32> %vec2, ptr %p2
+ ret void
+}
+
+define void @Legalize_SplitVectorResult_extract_i12(i16 %idx, ptr %p1, ptr %p2) nounwind {
+; CHECK-LABEL: Legalize_SplitVectorResult_extract_i12:
+; CHECK: # %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: movaps (%rsi), %xmm0
+; CHECK-NEXT: movaps 16(%rsi), %xmm1
+; CHECK-NEXT: movaps 32(%rsi), %xmm2
+; CHECK-NEXT: movaps 48(%rsi), %xmm3
+; CHECK-NEXT: movaps 64(%rsi), %xmm4
+; CHECK-NEXT: movaps 80(%rsi), %xmm5
+; CHECK-NEXT: movaps 96(%rsi), %xmm6
+; CHECK-NEXT: movaps 112(%rsi), %xmm7
+; CHECK-NEXT: movaps %xmm7, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm6, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm5, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: andl $63, %edi
+; CHECK-NEXT: movzwl -128(%rsp,%rdi,2), %eax
+; CHECK-NEXT: andl $4095, %eax # imm = 0xFFF
+; CHECK-NEXT: movw %ax, (%rdx)
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %vec = load <64 x i16>, ptr %p1
+ %trunc = trunc <64 x i16> %vec to <64 x i12>
+ %elt = extractelement <64 x i12> %trunc, i16 %idx
+ store i12 %elt, ptr %p2
+ ret void
+}
>From 789d628ca2ee446ba33df5a6ae94b2ee1ddf984b Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Wed, 29 May 2024 14:33:40 +0200
Subject: [PATCH 2/4] To be squashed: Round up element size to next power-of-2.
Also adding back a FIXME comment that I had removed (I think
that FIXME might be valid still, although I don't really know
the context).
---
llvm/include/llvm/CodeGen/ValueTypes.h | 9 ---------
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 10 ++++++----
2 files changed, 6 insertions(+), 13 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h
index 9ad582c8ab7a9..dab6c421bf6e6 100644
--- a/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -432,15 +432,6 @@ namespace llvm {
return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
}
- /// Return a VT for an integer vector type with the size of the elements
- /// extended to the store size. The typed returned may be an extended
- /// type.
- EVT getStoreSizedIntegerVectorElementType(LLVMContext &Context) const {
- EVT EltVT = getVectorElementType();
- EltVT = EVT::getIntegerVT(Context, EltVT.getStoreSizeInBits());
- return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
- }
-
// Return a VT for a vector type with the same element type but
// half the number of elements. The type returned may be an
// extended type.
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 28ecd229bf398..bfe5a7593e2c1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1833,8 +1833,8 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
if (!EltVT.isByteSized()) {
- VecVT = VecVT.getStoreSizedIntegerVectorElementType(*DAG.getContext());
- EltVT = VecVT.getVectorElementType();
+ EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VecVT = VecVT.changeElementType(EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
// Extend the element type to match if needed.
if (EltVT.bitsGT(Elt.getValueType()))
@@ -3443,8 +3443,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc dl(N);
EVT EltVT = VecVT.getVectorElementType();
if (!EltVT.isByteSized()) {
- VecVT = VecVT.getStoreSizedIntegerVectorElementType(*DAG.getContext());
- EltVT = VecVT.getVectorElementType();
+ EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
+ VecVT = VecVT.changeElementType(EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
}
@@ -3463,6 +3463,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
+ // FIXME: This is to handle i1 vectors with elements promoted to i8.
+ // i1 vector handling needs general improvement.
if (N->getValueType(0).bitsLT(EltVT)) {
SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
>From b08f82953225115bd5ec5a840466ea52075a1f84 Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Wed, 12 Jun 2024 16:40:10 +0200
Subject: [PATCH 3/4] More things to squash...
Remove CustomLowerNode from DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT.
This should already be done by DAGTypeLegalizer::SplitVectorResult,
so it should have been dead code. But it was also wrong as the
returned Lo/Hi values could have been set by the GetSplitVector. So
without resetting Lo we would be in trouble.
Changed DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT to return
a new EXTRACT_VECTOR_ELT when widening elements to be byte sized.
This allows further custom legalization etc to kick in. This was
proposed by Eli.
Replaced some old N->getValueType(0).bitsLT(EltVT) handling by
an assert. That scenario should not be possible any longer
given the other changes to SplitVecOp_EXTRACT_VECTOR_ELT.
---
.../SelectionDAG/LegalizeVectorTypes.cpp | 17 +++-----
.../sve-extract-fixed-from-scalable-vector.ll | 38 ++++++++---------
llvm/test/CodeGen/AMDGPU/extract-load-i1.ll | 41 ++++++++++---------
.../CodeGen/AMDGPU/extract_vector_dynelt.ll | 22 +++++-----
4 files changed, 56 insertions(+), 62 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index bfe5a7593e2c1..a5fd5ced18fca 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1825,10 +1825,6 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
}
}
- // See if the target wants to custom expand this node.
- if (CustomLowerNode(N, N->getValueType(0), true))
- return;
-
// Make the vector elements byte-addressable if they aren't already.
EVT VecVT = Vec.getValueType();
EVT EltVT = VecVT.getVectorElementType();
@@ -3446,6 +3442,9 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
EltVT = EltVT.changeTypeToInteger().getRoundIntegerType(*DAG.getContext());
VecVT = VecVT.changeElementType(EltVT);
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
+ SDValue NewExtract =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx);
+ return DAG.getZExtOrTrunc(NewExtract, dl, N->getValueType(0));
}
// Store the vector to the stack.
@@ -3463,13 +3462,9 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
// Load back the required element.
StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
- // FIXME: This is to handle i1 vectors with elements promoted to i8.
- // i1 vector handling needs general improvement.
- if (N->getValueType(0).bitsLT(EltVT)) {
- SDValue Load = DAG.getLoad(EltVT, dl, Store, StackPtr,
- MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
- return DAG.getZExtOrTrunc(Load, dl, N->getValueType(0));
- }
+ // EXTRACT_VECTOR_ELT can extend the element type to the width of the return
+ // type, leaving the high bits undefined. But it can't truncate.
+ assert(N->getValueType(0).bitsGE(EltVT) && "Illegal EXTRACT_VECTOR_ELT.");
return DAG.getExtLoad(
ISD::EXTLOAD, dl, N->getValueType(0), Store, StackPtr,
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
index 641050ae69d9b..a7ec919e6a932 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
@@ -178,9 +178,8 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) {
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: addvl x9, sp, #2
; CHECK-NEXT: ptrue p2.b
-; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: st1b { z0.b }, p2, [sp, #1, mul vl]
; CHECK-NEXT: st1b { z1.b }, p2, [sp]
; CHECK-NEXT: st1b { z0.b }, p2, [sp, #3, mul vl]
@@ -189,16 +188,16 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) {
; CHECK-NEXT: st1b { z1.b }, p2, [sp, #4, mul vl]
; CHECK-NEXT: st1b { z0.b }, p2, [sp, #7, mul vl]
; CHECK-NEXT: st1b { z1.b }, p2, [sp, #6, mul vl]
-; CHECK-NEXT: ld1 { v0.b }[0], [x8]
-; CHECK-NEXT: addvl x8, sp, #2
-; CHECK-NEXT: add x8, x8, #17
-; CHECK-NEXT: ld1 { v0.b }[2], [x8]
+; CHECK-NEXT: ldrb w8, [sp, #16]
+; CHECK-NEXT: ldrb w9, [x9, #17]
+; CHECK-NEXT: fmov s0, w8
; CHECK-NEXT: addvl x8, sp, #4
-; CHECK-NEXT: add x8, x8, #18
-; CHECK-NEXT: ld1 { v0.b }[4], [x8]
+; CHECK-NEXT: ldrb w8, [x8, #18]
+; CHECK-NEXT: mov v0.h[1], w9
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: addvl x8, sp, #6
-; CHECK-NEXT: add x8, x8, #19
-; CHECK-NEXT: ld1 { v0.b }[6], [x8]
+; CHECK-NEXT: ldrb w8, [x8, #19]
+; CHECK-NEXT: mov v0.h[3], w8
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -234,8 +233,7 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: add x8, x8, #16
+; CHECK-NEXT: addvl x9, sp, #2
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #1, mul vl]
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #3, mul vl]
@@ -244,16 +242,16 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) {
; CHECK-NEXT: st1b { z0.b }, p0, [sp, #4, mul vl]
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #7, mul vl]
; CHECK-NEXT: st1b { z0.b }, p0, [sp, #6, mul vl]
-; CHECK-NEXT: ld1 { v0.b }[0], [x8]
-; CHECK-NEXT: addvl x8, sp, #2
-; CHECK-NEXT: add x8, x8, #17
-; CHECK-NEXT: ld1 { v0.b }[2], [x8]
+; CHECK-NEXT: ldrb w8, [sp, #16]
+; CHECK-NEXT: ldrb w9, [x9, #17]
+; CHECK-NEXT: fmov s0, w8
; CHECK-NEXT: addvl x8, sp, #4
-; CHECK-NEXT: add x8, x8, #18
-; CHECK-NEXT: ld1 { v0.b }[4], [x8]
+; CHECK-NEXT: ldrb w8, [x8, #18]
+; CHECK-NEXT: mov v0.h[1], w9
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: addvl x8, sp, #6
-; CHECK-NEXT: add x8, x8, #19
-; CHECK-NEXT: ld1 { v0.b }[6], [x8]
+; CHECK-NEXT: ldrb w8, [x8, #19]
+; CHECK-NEXT: mov v0.h[3], w8
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll b/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
index 72ee660dc2adb..5b03240e3dc49 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-load-i1.ll
@@ -8,27 +8,28 @@ define i1 @extractloadi1(ptr %ptr, i32 %idx) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_load_ubyte v0, v[0:1]
-; CHECK-NEXT: v_and_b32_e32 v1, 7, v2
-; CHECK-NEXT: v_lshr_b32_e64 v2, s32, 6
-; CHECK-NEXT: v_or_b32_e32 v1, v2, v1
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_bfe_u32 v2, v0, 1, 1
-; CHECK-NEXT: v_bfe_u32 v3, v0, 2, 2
-; CHECK-NEXT: v_bfe_u32 v4, v0, 3, 1
-; CHECK-NEXT: v_lshrrev_b32_e32 v5, 4, v0
-; CHECK-NEXT: v_bfe_u32 v6, v0, 5, 1
-; CHECK-NEXT: v_lshrrev_b32_e32 v7, 6, v0
-; CHECK-NEXT: v_lshrrev_b32_e32 v8, 7, v0
-; CHECK-NEXT: buffer_store_byte v0, off, s[0:3], s32
-; CHECK-NEXT: buffer_store_byte v8, off, s[0:3], s32 offset:7
-; CHECK-NEXT: buffer_store_byte v7, off, s[0:3], s32 offset:6
-; CHECK-NEXT: buffer_store_byte v6, off, s[0:3], s32 offset:5
-; CHECK-NEXT: buffer_store_byte v5, off, s[0:3], s32 offset:4
-; CHECK-NEXT: buffer_store_byte v4, off, s[0:3], s32 offset:3
-; CHECK-NEXT: buffer_store_byte v3, off, s[0:3], s32 offset:2
-; CHECK-NEXT: buffer_store_byte v2, off, s[0:3], s32 offset:1
-; CHECK-NEXT: buffer_load_ubyte v0, v1, s[0:3], 0 offen
-; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_lshrrev_b32_e32 v1, 2, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v3, 5, v0
+; CHECK-NEXT: v_and_b32_e32 v4, 2, v0
+; CHECK-NEXT: v_lshrrev_b32_e32 v5, 6, v0
+; CHECK-NEXT: v_lshrrev_b32_e32 v6, 4, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v7, 3, v0
+; CHECK-NEXT: v_lshlrev_b32_e32 v8, 1, v0
+; CHECK-NEXT: v_or_b32_e32 v1, v1, v3
+; CHECK-NEXT: v_and_b32_e32 v3, 0x100, v7
+; CHECK-NEXT: v_and_b32_e32 v7, 0x100, v8
+; CHECK-NEXT: v_lshlrev_b32_e32 v4, 7, v4
+; CHECK-NEXT: v_or_b32_e32 v3, v6, v3
+; CHECK-NEXT: v_or_b32_e32 v5, v5, v7
+; CHECK-NEXT: v_or_b32_e32 v0, v0, v4
+; CHECK-NEXT: v_and_b32_e32 v1, 0x103, v1
+; CHECK-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; CHECK-NEXT: v_lshlrev_b32_e32 v5, 16, v1
+; CHECK-NEXT: v_or_b32_e32 v1, v3, v4
+; CHECK-NEXT: v_or_b32_e32 v0, v0, v5
+; CHECK-NEXT: v_lshlrev_b32_e32 v2, 3, v2
+; CHECK-NEXT: v_lshr_b64 v[0:1], v[0:1], v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%val = load <8 x i1>, ptr %ptr
%ret = extractelement <8 x i1> %val, i32 %idx
diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index 1ba7e706d1325..db3ea4df52981 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -410,17 +410,17 @@ entry:
}
; GCN-LABEL: {{^}}bit4_extelt:
-; FIXME: One v_mov_b32_e32 vN, 0 should suffice
-; GCN: v_mov_b32_e32 [[FI:v[0-9]+]], 0
-; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
-; GCN-DAG: v_mov_b32_e32 [[ONE:v[0-9]+]], 1
-; GCN-DAG: buffer_store_byte [[ZERO]],
-; GCN-DAG: buffer_store_byte [[ONE]],
-; GCN-DAG: buffer_store_byte [[ZERO]],
-; GCN-DAG: buffer_store_byte [[ONE]],
-; GCN: buffer_load_ubyte [[LOAD:v[0-9]+]],
-; GCN: v_and_b32_e32 [[RES:v[0-9]+]], 1, [[LOAD]]
-; GCN: flat_store_dword v[{{[0-9:]+}}], [[RES]]
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dword s2, s[0:1], 0x2c
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshl_b32 s2, s2, 3
+; GCN-NEXT: s_lshr_b32 s2, 0x1000100, s2
+; GCN-NEXT: s_and_b32 s2, s2, 1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
+; GCN-NEXT: flat_store_dword v[0:1], v2
define amdgpu_kernel void @bit4_extelt(ptr addrspace(1) %out, i32 %sel) {
entry:
%ext = extractelement <4 x i1> <i1 0, i1 1, i1 0, i1 1>, i32 %sel
>From 0bad49499d10b4390d91cfb3beab85a1f3a1ef0e Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Wed, 12 Jun 2024 21:27:32 +0200
Subject: [PATCH 4/4] fixup: use getAnyExtOrTrunc
---
.../SelectionDAG/LegalizeVectorTypes.cpp | 2 +-
.../sve-extract-fixed-from-scalable-vector.ll | 38 ++++++++++---------
2 files changed, 21 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index a5fd5ced18fca..083ce9dd4fbab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3444,7 +3444,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) {
Vec = DAG.getNode(ISD::ANY_EXTEND, dl, VecVT, Vec);
SDValue NewExtract =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec, Idx);
- return DAG.getZExtOrTrunc(NewExtract, dl, N->getValueType(0));
+ return DAG.getAnyExtOrTrunc(NewExtract, dl, N->getValueType(0));
}
// Store the vector to the stack.
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
index a7ec919e6a932..641050ae69d9b 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
@@ -178,8 +178,9 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) {
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: addvl x9, sp, #2
+; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: ptrue p2.b
+; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: st1b { z0.b }, p2, [sp, #1, mul vl]
; CHECK-NEXT: st1b { z1.b }, p2, [sp]
; CHECK-NEXT: st1b { z0.b }, p2, [sp, #3, mul vl]
@@ -188,16 +189,16 @@ define <4 x i1> @extract_v4i1_nxv32i1_16(<vscale x 32 x i1> %arg) {
; CHECK-NEXT: st1b { z1.b }, p2, [sp, #4, mul vl]
; CHECK-NEXT: st1b { z0.b }, p2, [sp, #7, mul vl]
; CHECK-NEXT: st1b { z1.b }, p2, [sp, #6, mul vl]
-; CHECK-NEXT: ldrb w8, [sp, #16]
-; CHECK-NEXT: ldrb w9, [x9, #17]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ld1 { v0.b }[0], [x8]
+; CHECK-NEXT: addvl x8, sp, #2
+; CHECK-NEXT: add x8, x8, #17
+; CHECK-NEXT: ld1 { v0.b }[2], [x8]
; CHECK-NEXT: addvl x8, sp, #4
-; CHECK-NEXT: ldrb w8, [x8, #18]
-; CHECK-NEXT: mov v0.h[1], w9
-; CHECK-NEXT: mov v0.h[2], w8
+; CHECK-NEXT: add x8, x8, #18
+; CHECK-NEXT: ld1 { v0.b }[4], [x8]
; CHECK-NEXT: addvl x8, sp, #6
-; CHECK-NEXT: ldrb w8, [x8, #19]
-; CHECK-NEXT: mov v0.h[3], w8
+; CHECK-NEXT: add x8, x8, #19
+; CHECK-NEXT: ld1 { v0.b }[6], [x8]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -233,7 +234,8 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 64 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: ptrue p0.b
-; CHECK-NEXT: addvl x9, sp, #2
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: add x8, x8, #16
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #1, mul vl]
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #3, mul vl]
@@ -242,16 +244,16 @@ define <4 x i3> @extract_v4i3_nxv32i3_16(<vscale x 32 x i3> %arg) {
; CHECK-NEXT: st1b { z0.b }, p0, [sp, #4, mul vl]
; CHECK-NEXT: st1b { z1.b }, p0, [sp, #7, mul vl]
; CHECK-NEXT: st1b { z0.b }, p0, [sp, #6, mul vl]
-; CHECK-NEXT: ldrb w8, [sp, #16]
-; CHECK-NEXT: ldrb w9, [x9, #17]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ld1 { v0.b }[0], [x8]
+; CHECK-NEXT: addvl x8, sp, #2
+; CHECK-NEXT: add x8, x8, #17
+; CHECK-NEXT: ld1 { v0.b }[2], [x8]
; CHECK-NEXT: addvl x8, sp, #4
-; CHECK-NEXT: ldrb w8, [x8, #18]
-; CHECK-NEXT: mov v0.h[1], w9
-; CHECK-NEXT: mov v0.h[2], w8
+; CHECK-NEXT: add x8, x8, #18
+; CHECK-NEXT: ld1 { v0.b }[4], [x8]
; CHECK-NEXT: addvl x8, sp, #6
-; CHECK-NEXT: ldrb w8, [x8, #19]
-; CHECK-NEXT: mov v0.h[3], w8
+; CHECK-NEXT: add x8, x8, #19
+; CHECK-NEXT: ld1 { v0.b }[6], [x8]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: addvl sp, sp, #8
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
More information about the llvm-commits
mailing list