[llvm] [AArch64] Lower extending sitofp using tbl (PR #92528)
Momchil Velikov via llvm-commits
llvm-commits at lists.llvm.org
Fri May 31 03:40:05 PDT 2024
https://github.com/momchil-velikov updated https://github.com/llvm/llvm-project/pull/92528
>From 16166436a8e9c2fa528f9bd0a47d8251362a3c8c Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Fri, 17 May 2024 11:50:31 +0100
Subject: [PATCH 1/3] [AArch64] Refactor creation of a shuffle mask for TBL
(NFC)
---
.../Target/AArch64/AArch64ISelLowering.cpp | 88 +++++++++++--------
1 file changed, 50 insertions(+), 38 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ac6f1e07c4184..7f64d09c00f1a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15756,48 +15756,51 @@ bool AArch64TargetLowering::shouldSinkOperands(
return false;
}
-static bool createTblShuffleForZExt(ZExtInst *ZExt, FixedVectorType *DstTy,
- bool IsLittleEndian) {
- Value *Op = ZExt->getOperand(0);
- auto *SrcTy = cast<FixedVectorType>(Op->getType());
- auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
- auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
+static bool createTblShuffleMask(unsigned SrcWidth, unsigned DstWidth,
+ unsigned NumElts, bool IsLittleEndian,
+ SmallVectorImpl<int> &Mask) {
if (DstWidth % 8 != 0 || DstWidth <= 16 || DstWidth >= 64)
return false;
- assert(DstWidth % SrcWidth == 0 &&
- "TBL lowering is not supported for a ZExt instruction with this "
- "source & destination element type.");
- unsigned ZExtFactor = DstWidth / SrcWidth;
+ if (DstWidth % SrcWidth != 0)
+ return false;
+
+ unsigned Factor = DstWidth / SrcWidth;
+ unsigned MaskLen = NumElts * Factor;
+
+ Mask.clear();
+ Mask.resize(MaskLen, NumElts);
+
+ unsigned SrcIndex = 0;
+ for (unsigned I = 0; I < MaskLen; I += Factor)
+ Mask[I] = SrcIndex++;
+
+ if (!IsLittleEndian)
+ std::rotate(Mask.rbegin(), Mask.rbegin() + Factor - 1, Mask.rend());
+
+ return true;
+}
+
+static Value *createTblShuffleForZExt(IRBuilderBase &Builder, Value *Op,
+ FixedVectorType *ZExtTy,
+ FixedVectorType *DstTy,
+ bool IsLittleEndian) {
+ auto *SrcTy = cast<FixedVectorType>(Op->getType());
unsigned NumElts = SrcTy->getNumElements();
- IRBuilder<> Builder(ZExt);
+ auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
+ auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
+
SmallVector<int> Mask;
- // Create a mask that selects <0,...,Op[i]> for each lane of the destination
- // vector to replace the original ZExt. This can later be lowered to a set of
- // tbl instructions.
- for (unsigned i = 0; i < NumElts * ZExtFactor; i++) {
- if (IsLittleEndian) {
- if (i % ZExtFactor == 0)
- Mask.push_back(i / ZExtFactor);
- else
- Mask.push_back(NumElts);
- } else {
- if ((i + 1) % ZExtFactor == 0)
- Mask.push_back((i - ZExtFactor + 1) / ZExtFactor);
- else
- Mask.push_back(NumElts);
- }
- }
+ if (!createTblShuffleMask(SrcWidth, DstWidth, NumElts, IsLittleEndian, Mask))
+ return nullptr;
auto *FirstEltZero = Builder.CreateInsertElement(
PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0));
Value *Result = Builder.CreateShuffleVector(Op, FirstEltZero, Mask);
Result = Builder.CreateBitCast(Result, DstTy);
- if (DstTy != ZExt->getType())
- Result = Builder.CreateZExt(Result, ZExt->getType());
- ZExt->replaceAllUsesWith(Result);
- ZExt->eraseFromParent();
- return true;
+ if (DstTy != ZExtTy)
+ Result = Builder.CreateZExt(Result, ZExtTy);
+ return Result;
}
static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
@@ -15962,21 +15965,30 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
DstTy = TruncDstType;
}
-
- return createTblShuffleForZExt(ZExt, DstTy, Subtarget->isLittleEndian());
+ IRBuilder<> Builder(ZExt);
+ Value *Result = createTblShuffleForZExt(
+ Builder, ZExt->getOperand(0), cast<FixedVectorType>(ZExt->getType()),
+ DstTy, Subtarget->isLittleEndian());
+ if (!Result)
+ return false;
+ ZExt->replaceAllUsesWith(Result);
+ ZExt->eraseFromParent();
+ return true;
}
auto *UIToFP = dyn_cast<UIToFPInst>(I);
if (UIToFP && SrcTy->getElementType()->isIntegerTy(8) &&
DstTy->getElementType()->isFloatTy()) {
IRBuilder<> Builder(I);
- auto *ZExt = cast<ZExtInst>(
- Builder.CreateZExt(I->getOperand(0), VectorType::getInteger(DstTy)));
+ Value *ZExt = createTblShuffleForZExt(
+ Builder, I->getOperand(0), FixedVectorType::getInteger(DstTy),
+ FixedVectorType::getInteger(DstTy), Subtarget->isLittleEndian());
+ if (!ZExt)
+ return false;
auto *UI = Builder.CreateUIToFP(ZExt, DstTy);
I->replaceAllUsesWith(UI);
I->eraseFromParent();
- return createTblShuffleForZExt(ZExt, cast<FixedVectorType>(ZExt->getType()),
- Subtarget->isLittleEndian());
+ return true;
}
// Convert 'fptoui <(8|16) x float> to <(8|16) x i8>' to a wide fptoui
>From af2dab51fb52bcdce46c0774cadbc9b69bd7f8b1 Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Tue, 28 May 2024 14:35:33 +0100
Subject: [PATCH 2/3] [fixup] Simplify code
Change-Id: I0e51f00113d0692d5d3751c89eac8167511d1d9e
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7f64d09c00f1a..04fbaf30a4b0d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15772,12 +15772,9 @@ static bool createTblShuffleMask(unsigned SrcWidth, unsigned DstWidth,
Mask.resize(MaskLen, NumElts);
unsigned SrcIndex = 0;
- for (unsigned I = 0; I < MaskLen; I += Factor)
+ for (unsigned I = IsLittleEndian ? 0 : Factor - 1; I < MaskLen; I += Factor)
Mask[I] = SrcIndex++;
- if (!IsLittleEndian)
- std::rotate(Mask.rbegin(), Mask.rbegin() + Factor - 1, Mask.rend());
-
return true;
}
>From e8e2ab0156ac14f1781cdc40b0adc5b6ce3cc40c Mon Sep 17 00:00:00 2001
From: Momchil Velikov <momchil.velikov at arm.com>
Date: Tue, 21 May 2024 15:57:16 +0100
Subject: [PATCH 3/3] [AArch64] Lower extending sitofp using tbl
In a manner similar to https://reviews.llvm.org/D133494,
use `TBL` to place bytes in the *upper* part of `i32` elements
and then convert to float using the fixed-point `scvtf`, i.e.
    scvtf Vd.4s, Vn.4s, #24
Change-Id: Ib9df3e4243612cbee8560907b24b14e76b61f265
---
llvm/lib/CodeGen/CodeGenPrepare.cpp | 3 +-
.../Target/AArch64/AArch64ISelLowering.cpp | 35 ++++
llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll | 196 ++++++++++++++++++
3 files changed, 233 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 339a1f1f2f002..4c52dbfa23903 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -8333,7 +8333,8 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
return true;
- if ((isa<UIToFPInst>(I) || isa<FPToUIInst>(I) || isa<TruncInst>(I)) &&
+ if ((isa<UIToFPInst>(I) || isa<SIToFPInst>(I) || isa<FPToUIInst>(I) ||
+ isa<TruncInst>(I)) &&
TLI->optimizeExtendOrTruncateConversion(
I, LI->getLoopFor(I->getParent()), *TTI))
return true;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 04fbaf30a4b0d..72ad160528501 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15800,6 +15800,24 @@ static Value *createTblShuffleForZExt(IRBuilderBase &Builder, Value *Op,
return Result;
}
+static Value *createTblShuffleForSExt(IRBuilderBase &Builder, Value *Op,
+ FixedVectorType *DstTy,
+ bool IsLittleEndian) {
+ auto *SrcTy = cast<FixedVectorType>(Op->getType());
+ auto SrcWidth = cast<IntegerType>(SrcTy->getElementType())->getBitWidth();
+ auto DstWidth = cast<IntegerType>(DstTy->getElementType())->getBitWidth();
+
+ SmallVector<int> Mask;
+ if (!createTblShuffleMask(SrcWidth, DstWidth, SrcTy->getNumElements(),
+ !IsLittleEndian, Mask))
+ return nullptr;
+
+ auto *FirstEltZero = Builder.CreateInsertElement(
+ PoisonValue::get(SrcTy), Builder.getInt8(0), uint64_t(0));
+
+ return Builder.CreateShuffleVector(Op, FirstEltZero, Mask);
+}
+
static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {
IRBuilder<> Builder(TI);
SmallVector<Value *> Parts;
@@ -15988,6 +16006,23 @@ bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
return true;
}
+ auto *SIToFP = dyn_cast<SIToFPInst>(I);
+ if (SIToFP && SrcTy->getElementType()->isIntegerTy(8) &&
+ DstTy->getElementType()->isFloatTy()) {
+ IRBuilder<> Builder(I);
+ auto *Shuffle = createTblShuffleForSExt(Builder, I->getOperand(0),
+ FixedVectorType::getInteger(DstTy),
+ Subtarget->isLittleEndian());
+ if (!Shuffle)
+ return false;
+ auto *Cast = Builder.CreateBitCast(Shuffle, VectorType::getInteger(DstTy));
+ auto *AShr = Builder.CreateAShr(Cast, 24, "", true);
+ auto *SI = Builder.CreateSIToFP(AShr, DstTy);
+ I->replaceAllUsesWith(SI);
+ I->eraseFromParent();
+ return true;
+ }
+
// Convert 'fptoui <(8|16) x float> to <(8|16) x i8>' to a wide fptoui
// followed by a truncate lowered to using tbl.4.
auto *FPToUI = dyn_cast<FPToUIInst>(I);
diff --git a/llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll b/llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll
new file mode 100644
index 0000000000000..b7e8a3a3defe4
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sitofp-to-tbl.ll
@@ -0,0 +1,196 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+target triple = "aarch64-linux"
+
+; CHECK-LABEL: .LCPI0_0:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 6
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 7
+; CHECK-NEXT: .LCPI0_1:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+
+define void @sitofp_v8i8_to_v8f32(ptr %src, ptr %dst) {
+; CHECK-LABEL: sitofp_v8i8_to_v8f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: adrp x8, .LCPI0_0
+; CHECK-NEXT: adrp x9, .LCPI0_1
+; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI0_1]
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: .LBB0_1: // %loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldr d2, [x0, x8, lsl #3]
+; CHECK-NEXT: add x9, x1, x8, lsl #5
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: cmp x8, #1000
+; CHECK-NEXT: tbl v3.16b, { v2.16b }, v0.16b
+; CHECK-NEXT: tbl v2.16b, { v2.16b }, v1.16b
+; CHECK-NEXT: scvtf v3.4s, v3.4s, #24
+; CHECK-NEXT: scvtf v2.4s, v2.4s, #24
+; CHECK-NEXT: stp q2, q3, [x9]
+; CHECK-NEXT: b.eq .LBB0_1
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds <8 x i8>, ptr %src, i64 %iv
+ %l = load <8 x i8>, ptr %gep.src
+ %conv = sitofp <8 x i8> %l to <8 x float>
+ %gep.dst = getelementptr inbounds <8 x float>, ptr %dst, i64 %iv
+ store <8 x float> %conv, ptr %gep.dst
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+; CHECK-LABEL: .LCPI1_0:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 12
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 13
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 14
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 15
+; CHECK-NEXT: .LCPI1_1:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 9
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 10
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 11
+; CHECK-NEXT: .LCPI1_2:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 4
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 6
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 7
+; CHECK-NEXT: .LCPI1_3:
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 2
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 255
+; CHECK-NEXT: .byte 3
+
+define void @sitofp_v16i8_to_v16f32(ptr %src, ptr %dst) {
+; CHECK-LABEL: sitofp_v16i8_to_v16f32:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: adrp x8, .LCPI1_0
+; CHECK-NEXT: adrp x9, .LCPI1_1
+; CHECK-NEXT: adrp x10, .LCPI1_2
+; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI1_0]
+; CHECK-NEXT: adrp x8, .LCPI1_3
+; CHECK-NEXT: ldr q1, [x9, :lo12:.LCPI1_1]
+; CHECK-NEXT: ldr q2, [x10, :lo12:.LCPI1_2]
+; CHECK-NEXT: ldr q3, [x8, :lo12:.LCPI1_3]
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: .LBB1_1: // %loop
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ldr q4, [x0, x8, lsl #4]
+; CHECK-NEXT: add x9, x1, x8, lsl #6
+; CHECK-NEXT: add x8, x8, #1
+; CHECK-NEXT: cmp x8, #1000
+; CHECK-NEXT: tbl v5.16b, { v4.16b }, v0.16b
+; CHECK-NEXT: tbl v6.16b, { v4.16b }, v1.16b
+; CHECK-NEXT: tbl v7.16b, { v4.16b }, v2.16b
+; CHECK-NEXT: tbl v4.16b, { v4.16b }, v3.16b
+; CHECK-NEXT: scvtf v5.4s, v5.4s, #24
+; CHECK-NEXT: scvtf v6.4s, v6.4s, #24
+; CHECK-NEXT: scvtf v7.4s, v7.4s, #24
+; CHECK-NEXT: scvtf v4.4s, v4.4s, #24
+; CHECK-NEXT: stp q6, q5, [x9, #32]
+; CHECK-NEXT: stp q4, q7, [x9]
+; CHECK-NEXT: b.eq .LBB1_1
+; CHECK-NEXT: // %bb.2: // %exit
+; CHECK-NEXT: ret
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds <16 x i8>, ptr %src, i64 %iv
+ %l = load <16 x i8>, ptr %gep.src
+ %conv = sitofp <16 x i8> %l to <16 x float>
+ %gep.dst = getelementptr inbounds <16 x float>, ptr %dst, i64 %iv
+ store <16 x float> %conv, ptr %gep.dst
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
More information about the llvm-commits
mailing list