[llvm] [LLVM][CodeGen][SVE] Don't combine shifts at the expense of addressing modes. (PR #149873)
Paul Walker via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 23 04:42:35 PDT 2025
https://github.com/paulwalker-arm updated https://github.com/llvm/llvm-project/pull/149873
>From 375e9a51022d0d6e307ab831f2c89cd763f947a4 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Mon, 21 Jul 2025 18:46:29 +0100
Subject: [PATCH 1/4] Add tests that fail because of an invalid size test.
Without the rest of this PR the new tests crash the compiler; the current
output shows the result of simply disabling the combine for scalable vectors.
---
llvm/test/CodeGen/AArch64/arm64-fold-lshr.ll | 15 ++++++
.../sve-ld1-addressing-mode-reg-reg.ll | 53 +++++++++++++++++++
2 files changed, 68 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/arm64-fold-lshr.ll b/llvm/test/CodeGen/AArch64/arm64-fold-lshr.ll
index 9dfc8df703ce6..9666c5cc298ff 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fold-lshr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fold-lshr.ll
@@ -136,3 +136,18 @@ entry:
%0 = load i64, ptr %arrayidx, align 8
ret i64 %0
}
+
+define <2 x i64> @loadv2i64_shr1(i64 %a, i64 %b, ptr %table) {
+; CHECK-LABEL: loadv2i64_shr1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mul x8, x1, x0
+; CHECK-NEXT: lsr x8, x8, #1
+; CHECK-NEXT: ldr q0, [x2, x8, lsl #4]
+; CHECK-NEXT: ret
+entry:
+ %mul = mul i64 %b, %a
+ %shr = lshr i64 %mul, 1
+ %arrayidx = getelementptr inbounds <2 x i64>, ptr %table, i64 %shr
+ %0 = load <2 x i64>, ptr %arrayidx, align 16
+ ret <2 x i64> %0
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
index 05abfa319d389..7079054c0a635 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
@@ -268,6 +268,21 @@ define <vscale x 2 x bfloat> @ld1_nxv2bf16(ptr %addr, i64 %off) {
ret <vscale x 2 x bfloat> %val
}
+; Ensure we don't lose the free shift when using indexed addressing.
+define <vscale x 2 x bfloat> @ld1_nxv2bf16_double_shift(ptr %addr, i64 %off) {
+; CHECK-LABEL: ld1_nxv2bf16_double_shift:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr x8, x1, #6
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: add x8, x0, x8, lsl #1
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x8]
+; CHECK-NEXT: ret
+ %off2 = lshr i64 %off, 6
+ %ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off2
+ %val = load volatile <vscale x 2 x bfloat>, ptr %ptr
+ ret <vscale x 2 x bfloat> %val
+}
+
; LD1W
define <vscale x 4 x i32> @ld1_nxv4i32(ptr %addr, i64 %off) {
@@ -327,6 +342,21 @@ define <vscale x 2 x float> @ld1_nxv2f32(ptr %addr, i64 %off) {
ret <vscale x 2 x float> %val
}
+; Ensure we don't lose the free shift when using indexed addressing.
+define <vscale x 2 x float> @ld1_nxv2f32_double_shift(ptr %addr, i64 %off) {
+; CHECK-LABEL: ld1_nxv2f32_double_shift:
+; CHECK: // %bb.0:
+; CHECK-NEXT: lsr x8, x1, #6
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: add x8, x0, x8, lsl #2
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x8]
+; CHECK-NEXT: ret
+ %off2 = lshr i64 %off, 6
+ %ptr = getelementptr inbounds float, ptr %addr, i64 %off2
+ %val = load volatile <vscale x 2 x float>, ptr %ptr
+ ret <vscale x 2 x float> %val
+}
+
; LD1D
define <vscale x 2 x i64> @ld1_nxv2i64(ptr %addr, i64 %off) {
@@ -350,3 +380,26 @@ define <vscale x 2 x double> @ld1_nxv2f64(ptr %addr, i64 %off) {
%val = load volatile <vscale x 2 x double>, ptr %ptr
ret <vscale x 2 x double> %val
}
+
+; Ensure we don't lose the free shift when using indexed addressing.
+define <vscale x 2 x double> @ld1_nxv2f64_double_shift(ptr %addr, i64 %off) {
+; CHECK-LE-LABEL: ld1_nxv2f64_double_shift:
+; CHECK-LE: // %bb.0:
+; CHECK-LE-NEXT: lsr x8, x1, #3
+; CHECK-LE-NEXT: ptrue p0.b
+; CHECK-LE-NEXT: and x8, x8, #0x1ffffffffffffff8
+; CHECK-LE-NEXT: ld1b { z0.b }, p0/z, [x0, x8]
+; CHECK-LE-NEXT: ret
+;
+; CHECK-BE-LABEL: ld1_nxv2f64_double_shift:
+; CHECK-BE: // %bb.0:
+; CHECK-BE-NEXT: lsr x8, x1, #6
+; CHECK-BE-NEXT: ptrue p0.d
+; CHECK-BE-NEXT: add x8, x0, x8, lsl #3
+; CHECK-BE-NEXT: ld1d { z0.d }, p0/z, [x8]
+; CHECK-BE-NEXT: ret
+ %off2 = lshr i64 %off, 6
+ %ptr = getelementptr inbounds double, ptr %addr, i64 %off2
+ %val = load volatile <vscale x 2 x double>, ptr %ptr
+ ret <vscale x 2 x double> %val
+}
>From f2373ffff401751349c2c74bd87de388d996bbdf Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Mon, 21 Jul 2025 19:08:33 +0100
Subject: [PATCH 2/4] [LLVM][CodeGen][SVE] Don't combine shifts at the expense
 of addressing modes.
Fixes https://github.com/llvm/llvm-project/issues/149654
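
For context: the SVE ld1b/ld1h/ld1w/ld1d forms can scale a register offset
by the element size (e.g. ld1d { z0.d }, p0/z, [x0, x8, lsl #3]), so when the
shl amount of an (srl + shl) pair matches that element size the shift is free
and folding the pair into a mask only adds an instruction. A minimal
standalone sketch of that test, using the SelectionDAG types from the diff
below; the helper name shiftFoldsIntoSVEAddressing is made up for
illustration and is not part of the patch:

#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Illustrative helper (not in the patch): returns true when the shl amount
// of the shift pair matches the element size of a scalable-vector load, so
// the shift can instead be folded into the LSL-scaled reg+reg addressing
// mode of the load.
static bool shiftFoldsIntoSVEAddressing(const LoadSDNode *Load,
                                        unsigned ShlAmt) {
  // An index scaled by (1 << ShlAmt) bytes covers (8 << ShlAmt) bits.
  return Load->getValueType(0).isScalableVector() &&
         (8ULL << ShlAmt) == Load->getMemoryVT().getScalarSizeInBits();
}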
---
.../Target/AArch64/AArch64ISelLowering.cpp | 15 ++++++---
.../sve-ld1-addressing-mode-reg-reg.ll | 31 ++++++-------------
2 files changed, 21 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4f13a14d24649..eb0c9a8d6b4b2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18010,10 +18010,17 @@ bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
unsigned ShlAmt = C2->getZExtValue();
if (auto ShouldADD = *N->user_begin();
ShouldADD->getOpcode() == ISD::ADD && ShouldADD->hasOneUse()) {
- if (auto ShouldLOAD = dyn_cast<LoadSDNode>(*ShouldADD->user_begin())) {
- unsigned ByteVT = ShouldLOAD->getMemoryVT().getSizeInBits() / 8;
- if ((1ULL << ShlAmt) == ByteVT &&
- isIndexedLoadLegal(ISD::PRE_INC, ShouldLOAD->getMemoryVT()))
+ if (auto Load = dyn_cast<LoadSDNode>(*ShouldADD->user_begin())) {
+ TypeSize Size = Load->getMemoryVT().getSizeInBits();
+ // NOTE: +3 to account for bytes->bits transition.
+ if (TypeSize::getFixed(1ULL << (ShlAmt + 3)) == Size &&
+ isIndexedLoadLegal(ISD::PRE_INC, Load->getMemoryVT()))
+ return false;
+
+ unsigned ScalarSize = Load->getMemoryVT().getScalarSizeInBits();
+ // NOTE: +3 to account for bytes->bits transition.
+ if ((1ULL << (ShlAmt + 3)) == ScalarSize &&
+ Load->getValueType(0).isScalableVector())
return false;
}
}
diff --git a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
index 7079054c0a635..29e94dd6c5242 100644
--- a/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ld1-addressing-mode-reg-reg.ll
@@ -272,10 +272,9 @@ define <vscale x 2 x bfloat> @ld1_nxv2bf16(ptr %addr, i64 %off) {
define <vscale x 2 x bfloat> @ld1_nxv2bf16_double_shift(ptr %addr, i64 %off) {
; CHECK-LABEL: ld1_nxv2bf16_double_shift:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x1, #6
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: add x8, x0, x8, lsl #1
-; CHECK-NEXT: ld1h { z0.d }, p0/z, [x8]
+; CHECK-NEXT: lsr x8, x1, #6
+; CHECK-NEXT: ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
; CHECK-NEXT: ret
%off2 = lshr i64 %off, 6
%ptr = getelementptr inbounds bfloat, ptr %addr, i64 %off2
@@ -346,10 +345,9 @@ define <vscale x 2 x float> @ld1_nxv2f32(ptr %addr, i64 %off) {
define <vscale x 2 x float> @ld1_nxv2f32_double_shift(ptr %addr, i64 %off) {
; CHECK-LABEL: ld1_nxv2f32_double_shift:
; CHECK: // %bb.0:
-; CHECK-NEXT: lsr x8, x1, #6
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: add x8, x0, x8, lsl #2
-; CHECK-NEXT: ld1w { z0.d }, p0/z, [x8]
+; CHECK-NEXT: lsr x8, x1, #6
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
; CHECK-NEXT: ret
%off2 = lshr i64 %off, 6
%ptr = getelementptr inbounds float, ptr %addr, i64 %off2
@@ -383,21 +381,12 @@ define <vscale x 2 x double> @ld1_nxv2f64(ptr %addr, i64 %off) {
; Ensure we don't lose the free shift when using indexed addressing.
define <vscale x 2 x double> @ld1_nxv2f64_double_shift(ptr %addr, i64 %off) {
-; CHECK-LE-LABEL: ld1_nxv2f64_double_shift:
-; CHECK-LE: // %bb.0:
-; CHECK-LE-NEXT: lsr x8, x1, #3
-; CHECK-LE-NEXT: ptrue p0.b
-; CHECK-LE-NEXT: and x8, x8, #0x1ffffffffffffff8
-; CHECK-LE-NEXT: ld1b { z0.b }, p0/z, [x0, x8]
-; CHECK-LE-NEXT: ret
-;
-; CHECK-BE-LABEL: ld1_nxv2f64_double_shift:
-; CHECK-BE: // %bb.0:
-; CHECK-BE-NEXT: lsr x8, x1, #6
-; CHECK-BE-NEXT: ptrue p0.d
-; CHECK-BE-NEXT: add x8, x0, x8, lsl #3
-; CHECK-BE-NEXT: ld1d { z0.d }, p0/z, [x8]
-; CHECK-BE-NEXT: ret
+; CHECK-LABEL: ld1_nxv2f64_double_shift:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: lsr x8, x1, #6
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-NEXT: ret
%off2 = lshr i64 %off, 6
%ptr = getelementptr inbounds double, ptr %addr, i64 %off2
%val = load volatile <vscale x 2 x double>, ptr %ptr
>From 8b86cd7ce81e73840cd4f916afd3723321ada758 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Wed, 23 Jul 2025 11:04:25 +0000
Subject: [PATCH 3/4] Improve shift readability and reduce getMemoryVT calls.
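
As a sanity check on the rewrite: 8ULL << ShlAmt and the old
1ULL << (ShlAmt + 3) are the same bytes-to-bits conversion, since shifting
left by three more bits multiplies by eight. For example:

// ShlAmt == 3 (8-byte elements): both forms give 64 bits.
static_assert((1ULL << (3 + 3)) == (8ULL << 3),
              "bytes->bits: (1 << (n + 3)) == (8 << n)");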
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index eb0c9a8d6b4b2..5a94e15158a95 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18011,15 +18011,15 @@ bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
if (auto ShouldADD = *N->user_begin();
ShouldADD->getOpcode() == ISD::ADD && ShouldADD->hasOneUse()) {
if (auto Load = dyn_cast<LoadSDNode>(*ShouldADD->user_begin())) {
- TypeSize Size = Load->getMemoryVT().getSizeInBits();
- // NOTE: +3 to account for bytes->bits transition.
- if (TypeSize::getFixed(1ULL << (ShlAmt + 3)) == Size &&
- isIndexedLoadLegal(ISD::PRE_INC, Load->getMemoryVT()))
+ EVT MemVT = Load->getMemoryVT();
+
+ TypeSize Size = MemVT.getSizeInBits();
+ if (TypeSize::getFixed(8ULL << ShlAmt) == Size &&
+ isIndexedLoadLegal(ISD::PRE_INC, MemVT))
return false;
- unsigned ScalarSize = Load->getMemoryVT().getScalarSizeInBits();
- // NOTE: +3 to account for bytes->bits transition.
- if ((1ULL << (ShlAmt + 3)) == ScalarSize &&
+ unsigned ScalarSize = MemVT.getScalarSizeInBits();
+ if ((8ULL << ShlAmt) == ScalarSize &&
Load->getValueType(0).isScalableVector())
return false;
}
>From 53dfdd333b580b727d4cbc91552fab7a436e439c Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Wed, 23 Jul 2025 11:21:21 +0000
Subject: [PATCH 4/4] Simplify code to remove need for TypeSize.
---
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5a94e15158a95..2d8ea4b32efc0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -18013,15 +18013,11 @@ bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
if (auto Load = dyn_cast<LoadSDNode>(*ShouldADD->user_begin())) {
EVT MemVT = Load->getMemoryVT();
- TypeSize Size = MemVT.getSizeInBits();
- if (TypeSize::getFixed(8ULL << ShlAmt) == Size &&
- isIndexedLoadLegal(ISD::PRE_INC, MemVT))
- return false;
+ if (Load->getValueType(0).isScalableVector())
+ return (8ULL << ShlAmt) != MemVT.getScalarSizeInBits();
- unsigned ScalarSize = MemVT.getScalarSizeInBits();
- if ((8ULL << ShlAmt) == ScalarSize &&
- Load->getValueType(0).isScalableVector())
- return false;
+ if (isIndexedLoadLegal(ISD::PRE_INC, MemVT))
+ return (8ULL << ShlAmt) != MemVT.getFixedSizeInBits();
}
}
}
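
With all four patches applied, the touched region of
shouldFoldConstantShiftPairToMask reads roughly as follows. This is a
consolidated reading of the hunks above rather than a verbatim copy of
AArch64ISelLowering.cpp; N, C2 and isIndexedLoadLegal come from the
surrounding function.

  unsigned ShlAmt = C2->getZExtValue();
  if (auto ShouldADD = *N->user_begin();
      ShouldADD->getOpcode() == ISD::ADD && ShouldADD->hasOneUse()) {
    if (auto Load = dyn_cast<LoadSDNode>(*ShouldADD->user_begin())) {
      EVT MemVT = Load->getMemoryVT();

      // Scalable loads: keep the shift (return false, i.e. don't fold to a
      // mask) when it matches the element size, so it folds into the
      // LSL-scaled reg+reg addressing mode instead.
      if (Load->getValueType(0).isScalableVector())
        return (8ULL << ShlAmt) != MemVT.getScalarSizeInBits();

      // Fixed-length loads: keep the shift when it matches the full access
      // size and an indexed load is legal for this memory type.
      if (isIndexedLoadLegal(ISD::PRE_INC, MemVT))
        return (8ULL << ShlAmt) != MemVT.getFixedSizeInBits();
    }
  }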