[Openmp-commits] [libcxx] [flang] [mlir] [libc] [openmp] [libcxxabi] [clang-tools-extra] [clang] [llvm] [compiler-rt] [AArch64] Add custom lowering for load <3 x i8>. (PR #78632)
Florian Hahn via Openmp-commits
openmp-commits at lists.llvm.org
Mon Jan 22 08:05:55 PST 2024
https://github.com/fhahn updated https://github.com/llvm/llvm-project/pull/78632
From a786cdedc2c9a9898cd0b80d84f5b11aace5da1c Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 28 Nov 2023 15:44:02 +0000
Subject: [PATCH 1/2] [AArch64] Add custom lowering for load <3 x i8>.
Add custom combine to lower load <3 x i8> as the more efficient sequence
below:
ldrb wX, [x0, #2]
ldrh wY, [x0]
orr wX, wY, wX, lsl #16
fmov s0, wX
At the moment, there are almost no cases in which such vector operations
will be generated automatically. The motivating case is non-power-of-2
SLP vectorization: https://github.com/llvm/llvm-project/pull/77790
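For reference, a minimal IR input that the new combine targets (a reduced,
hypothetical example, not taken verbatim from the tests; the combine only
fires for a <3 x i8> load with alignment below 4 on little-endian targets):

  define <3 x i8> @load_v3i8(ptr %src) {
    %l = load <3 x i8>, ptr %src, align 1
    ret <3 x i8> %l
  }

With bytes 0x01 0x02 0x03 at %src, the ldrh yields 0x0201, the ldrb yields
0x03, and the orr with lsl #16 packs them into 0x00030201 before the fmov
moves the result into s0.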
---
.../Target/AArch64/AArch64ISelLowering.cpp | 54 ++++++++++++++++++-
.../AArch64/vec3-loads-ext-trunc-stores.ll | 44 +++++----------
2 files changed, 65 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8a6f1dc7487bae8..e1139c2fede8e41 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21095,6 +21095,50 @@ static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {
return SDValue();
}
+// A custom combine to lower load <3 x i8> as the more efficient sequence
+// below:
+// ldrb wX, [x0, #2]
+// ldrh wY, [x0]
+// orr wX, wY, wX, lsl #16
+// fmov s0, wX
+//
+static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
+ EVT MemVT = LD->getMemoryVT();
+ if (MemVT != EVT::getVectorVT(*DAG.getContext(), MVT::i8, 3) ||
+ LD->getOriginalAlign() >= 4)
+ return SDValue();
+
+ SDLoc DL(LD);
+ SDValue Chain = LD->getChain();
+ SDValue BasePtr = LD->getBasePtr();
+
+ // Load 2 x i8, then 1 x i8.
+ SDValue L16 = DAG.getLoad(MVT::i16, DL, Chain, BasePtr, LD->getPointerInfo(),
+ LD->getOriginalAlign());
+ SDValue L8 =
+ DAG.getLoad(MVT::i8, DL, Chain,
+ DAG.getMemBasePlusOffset(BasePtr, TypeSize::getFixed(2), DL),
+ LD->getPointerInfo(), LD->getOriginalAlign());
+
+ // Extend to i32.
+ SDValue Ext16 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, L16);
+ SDValue Ext8 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, L8);
+
+ // Pack 2 x i8 and 1 x i8 into an i32 and convert to v4i8.
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, MVT::i32, Ext8,
+ DAG.getConstant(16, DL, MVT::i32));
+ SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, Ext16, Shl);
+ SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v4i8, Or);
+
+ // Extract v3i8 again.
+ SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MemVT, Cast,
+ DAG.getConstant(0, DL, MVT::i64));
+ SDValue TokenFactor =
+ DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ {L16.getValue(1), L8.getValue(1)});
+ return DAG.getMergeValues({Extract, TokenFactor}, DL);
+}
+
// Perform TBI simplification if supported by the target and try to break up
// nontemporal loads larger than 256-bits loads for odd types so LDNPQ 256-bit
// load instructions can be selected.
@@ -21106,10 +21150,16 @@ static SDValue performLOADCombine(SDNode *N,
performTBISimplification(N->getOperand(1), DCI, DAG);
LoadSDNode *LD = cast<LoadSDNode>(N);
- EVT MemVT = LD->getMemoryVT();
- if (LD->isVolatile() || !LD->isNonTemporal() || !Subtarget->isLittleEndian())
+ if (LD->isVolatile() || !Subtarget->isLittleEndian())
+ return SDValue(N, 0);
+
+ if (SDValue Res = combineV3I8LoadExt(LD, DAG))
+ return Res;
+
+ if (!LD->isNonTemporal())
return SDValue(N, 0);
+ EVT MemVT = LD->getMemoryVT();
if (MemVT.isScalableVector() || MemVT.getSizeInBits() <= 256 ||
MemVT.getSizeInBits() % 256 == 0 ||
256 % MemVT.getScalarSizeInBits() != 0)
diff --git a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
index 9eeb194409df6fa..7cac4134f0e1598 100644
--- a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -5,19 +5,10 @@
define <16 x i8> @load_v3i8(ptr %src, ptr %dst) {
; CHECK-LABEL: load_v3i8:
; CHECK: ; %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: ldr s0, [sp, #12]
-; CHECK-NEXT: ushll.8h v0, v0, #0
-; CHECK-NEXT: umov.h w8, v0[0]
-; CHECK-NEXT: umov.h w9, v0[1]
+; CHECK-NEXT: ldrb w8, [x0, #2]
+; CHECK-NEXT: ldrh w9, [x0]
+; CHECK-NEXT: orr w8, w9, w8, lsl #16
; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: add x8, x0, #2
-; CHECK-NEXT: mov.b v0[1], w9
-; CHECK-NEXT: ld1.b { v0 }[2], [x8]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8:
@@ -47,19 +38,14 @@ define <16 x i8> @load_v3i8(ptr %src, ptr %dst) {
define <4 x i32> @load_v3i8_to_4xi32(ptr %src, ptr %dst) {
; CHECK-LABEL: load_v3i8_to_4xi32:
; CHECK: ; %bb.0:
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldrh w8, [x0]
+; CHECK-NEXT: ldrb w8, [x0, #2]
+; CHECK-NEXT: ldrh w9, [x0]
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: ldr s0, [sp, #12]
-; CHECK-NEXT: ldrsb w8, [x0, #2]
-; CHECK-NEXT: ushll.8h v0, v0, #0
-; CHECK-NEXT: mov.h v0[1], v0[1]
-; CHECK-NEXT: mov.h v0[2], w8
+; CHECK-NEXT: orr w8, w9, w8, lsl #16
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: zip1.8b v0, v0, v0
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32:
@@ -193,19 +179,15 @@ entry:
define void @load_ext_to_64bits(ptr %src, ptr %dst) {
; CHECK-LABEL: load_ext_to_64bits:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: ldrh w8, [x0]
-; CHECK-NEXT: strh w8, [sp, #12]
-; CHECK-NEXT: add x8, x0, #2
-; CHECK-NEXT: ldr s0, [sp, #12]
-; CHECK-NEXT: ushll.8h v0, v0, #0
-; CHECK-NEXT: ld1.b { v0 }[4], [x8]
+; CHECK-NEXT: ldrb w8, [x0, #2]
+; CHECK-NEXT: ldrh w9, [x0]
+; CHECK-NEXT: orr w8, w9, w8, lsl #16
+; CHECK-NEXT: fmov s0, w8
; CHECK-NEXT: add x8, x1, #4
+; CHECK-NEXT: zip1.8b v0, v0, v0
; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: st1.h { v0 }[2], [x8]
; CHECK-NEXT: str s0, [x1]
-; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_ext_to_64bits:
From 192233f0fda044c759054ae9d79c5b33d66fb1af Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Fri, 19 Jan 2024 16:49:34 +0000
Subject: [PATCH 2/2] !fixup adjust alignment and pointer info
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e1139c2fede8e41..95bc6b5cdff57d3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -21115,10 +21115,10 @@ static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {
// Load 2 x i8, then 1 x i8.
SDValue L16 = DAG.getLoad(MVT::i16, DL, Chain, BasePtr, LD->getPointerInfo(),
LD->getOriginalAlign());
- SDValue L8 =
- DAG.getLoad(MVT::i8, DL, Chain,
- DAG.getMemBasePlusOffset(BasePtr, TypeSize::getFixed(2), DL),
- LD->getPointerInfo(), LD->getOriginalAlign());
+ TypeSize Offset2 = TypeSize::getFixed(2);
+ SDValue L8 = DAG.getLoad(
+ MVT::i8, DL, Chain, DAG.getMemBasePlusOffset(BasePtr, Offset2, DL),
+ LD->getPointerInfo(), commonAlignment(LD->getOriginalAlign(), Offset2));
// Extend to i32.
SDValue Ext16 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, L16);
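For intuition on the alignment adjustment: commonAlignment(A, Offset) from
llvm/Support/Alignment.h returns the alignment that is provable for an
address at the given byte offset from an A-aligned base. Assuming the usual
MinAlign semantics, commonAlignment(Align(4), 2) is Align(2),
commonAlignment(Align(2), 2) is Align(2), and commonAlignment(Align(1), 2)
is Align(1), so the i8 load at BasePtr + 2 now carries the alignment that
actually holds at that address instead of reusing the original load's
alignment unchanged.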