[llvm] [AArch64] Support scalable offsets with isLegalAddressingMode (PR #83255)

Graham Hunter via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 6 02:28:04 PST 2024


https://github.com/huntergr-arm updated https://github.com/llvm/llvm-project/pull/83255

From 6d671f87592b3f3e3091efd27d250582f9e179b3 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Fri, 23 Feb 2024 14:02:13 +0000
Subject: [PATCH 1/5] [TTI][TLI][NFC] Add 'OffsetIsScalable' to
 isLegalAddressingMode

Adds a new parameter to the TTI version of the function, along with
a matching field in TLI's AddrMode struct.

This extra bool indicates that BaseOffset should be treated as a
scalable quantity, meaning it is multiplied by 'vscale' to obtain
the actual offset at runtime.
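
For illustration, a caller might query the hook with a scalable offset
as in the sketch below (assuming an existing TargetTransformInfo
instance 'TTI' and a scalable vector type 'VecTy'; neither is part of
this patch):

  // Ask whether reg + 16 * vscale bytes is a legal address for
  // accesses of VecTy. With OffsetIsScalable=false the same 16 would
  // be a plain fixed byte offset.
  bool Legal = TTI.isLegalAddressingMode(VecTy, /*BaseGV=*/nullptr,
                                         /*BaseOffset=*/16,
                                         /*HasBaseReg=*/true,
                                         /*Scale=*/0, /*AddrSpace=*/0,
                                         /*I=*/nullptr,
                                         /*OffsetIsScalable=*/true);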
---
 llvm/include/llvm/Analysis/TargetTransformInfo.h       | 10 ++++++----
 llvm/include/llvm/Analysis/TargetTransformInfoImpl.h   |  3 ++-
 llvm/include/llvm/CodeGen/BasicTTIImpl.h               |  4 +++-
 llvm/include/llvm/CodeGen/TargetLowering.h             |  1 +
 llvm/lib/Analysis/TargetTransformInfo.cpp              |  5 +++--
 llvm/lib/CodeGen/TargetLoweringBase.cpp                |  4 ++++
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp        |  4 ++++
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp              |  4 ++++
 llvm/lib/Target/ARC/ARCISelLowering.cpp                |  4 ++++
 llvm/lib/Target/ARM/ARMISelLowering.cpp                |  4 ++++
 llvm/lib/Target/AVR/AVRISelLowering.cpp                |  4 ++++
 llvm/lib/Target/BPF/BPFISelLowering.cpp                |  4 ++++
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp        |  4 ++++
 llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp    |  4 ++++
 llvm/lib/Target/Mips/MipsISelLowering.cpp              |  4 ++++
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp            |  4 ++++
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp            |  4 ++++
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp            |  4 ++++
 llvm/lib/Target/SystemZ/SystemZISelLowering.cpp        |  4 ++++
 .../lib/Target/WebAssembly/WebAssemblyISelLowering.cpp |  4 ++++
 llvm/lib/Target/X86/X86ISelLowering.cpp                |  4 ++++
 llvm/lib/Target/XCore/XCoreISelLowering.cpp            |  4 ++++
 llvm/unittests/Target/AArch64/AddressingModes.cpp      |  4 +++-
 23 files changed, 86 insertions(+), 9 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 58577a6b6eb5c0..0cfda084805209 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -711,7 +711,8 @@ class TargetTransformInfo {
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale,
                              unsigned AddrSpace = 0,
-                             Instruction *I = nullptr) const;
+                             Instruction *I = nullptr,
+                             bool OffsetIsScalable = false) const;
 
   /// Return true if LSR cost of C1 is lower than C2.
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
@@ -1839,7 +1840,8 @@ class TargetTransformInfo::Concept {
   virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                      int64_t BaseOffset, bool HasBaseReg,
                                      int64_t Scale, unsigned AddrSpace,
-                                     Instruction *I) = 0;
+                                     Instruction *I,
+                                     bool OffsetIsScalable) = 0;
   virtual bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                              const TargetTransformInfo::LSRCost &C2) = 0;
   virtual bool isNumRegsMajorCostOfLSR() = 0;
@@ -2300,9 +2302,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
   }
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
-                             Instruction *I) override {
+                             Instruction *I, bool OffsetIsScalable) override {
     return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
-                                      AddrSpace, I);
+                                      AddrSpace, I, OffsetIsScalable);
   }
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                      const TargetTransformInfo::LSRCost &C2) override {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 13379cc126a40c..dd9265be3eeeec 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -220,7 +220,8 @@ class TargetTransformInfoImplBase {
 
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
-                             Instruction *I = nullptr) const {
+                             Instruction *I = nullptr,
+                             bool OffsetIsScalable = false) const {
     // Guess that only reg and reg+reg addressing is allowed. This heuristic is
     // taken from the implementation of LSR.
     return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 61f6564e8cd79b..4bb0255c2bcf70 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -334,12 +334,14 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
 
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale,
-                             unsigned AddrSpace, Instruction *I = nullptr) {
+                             unsigned AddrSpace, Instruction *I = nullptr,
+                             bool OffsetIsScalable = false) {
     TargetLoweringBase::AddrMode AM;
     AM.BaseGV = BaseGV;
     AM.BaseOffs = BaseOffset;
     AM.HasBaseReg = HasBaseReg;
     AM.Scale = Scale;
+    AM.OffsetIsScalable = OffsetIsScalable;
     return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
   }
 
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index f2e00aab8d5da2..90bfa2983d2cb1 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2696,6 +2696,7 @@ class TargetLoweringBase {
     int64_t      BaseOffs = 0;
     bool         HasBaseReg = false;
     int64_t      Scale = 0;
+    bool         OffsetIsScalable = false;
     AddrMode() = default;
   };
 
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 1f11f0d7dd620e..a2562a7edebaf0 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -403,9 +403,10 @@ bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                 int64_t BaseOffset,
                                                 bool HasBaseReg, int64_t Scale,
                                                 unsigned AddrSpace,
-                                                Instruction *I) const {
+                                                Instruction *I,
+                                                bool OffsetIsScalable) const {
   return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
-                                        Scale, AddrSpace, I);
+                                        Scale, AddrSpace, I, OffsetIsScalable);
 }
 
 bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 646c0c345e54e0..cef358890252de 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -2008,6 +2008,10 @@ bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
   // The default implementation of this implements a conservative RISCy, r+r and
   // r+i addr mode.
 
+  // Scalable offsets not supported
+  if (AM.OffsetIsScalable)
+    return false;
+
   // Allows a sign-extended 16-bit immediate field.
   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
     return false;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 3b92e95d7c2876..e7713ac804f6bc 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16345,6 +16345,10 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
   //  reg1 + reg2
   //  reg + SIZE_IN_BYTES * reg
 
+  // No scalable offsets allowed.
+  if (AMode.OffsetIsScalable)
+    return false;
+
   // No global is ever allowed as a base.
   if (AMode.BaseGV)
     return false;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 84ef9679ab9563..f632467d3568f2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1531,6 +1531,10 @@ bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp
index 5dd343d97b80c2..8570406425b554 100644
--- a/llvm/lib/Target/ARC/ARCISelLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp
@@ -737,6 +737,10 @@ bool ARCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   return AM.Scale == 0;
 }
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b98006ed0cb3f4..2c7a602b7c4c63 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -19645,6 +19645,10 @@ bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   EVT VT = getValueType(DL, Ty, true);
   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
     return false;
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index f91e77adb8f810..74fa09c409acfd 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -1058,6 +1058,10 @@ bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   int64_t Offs = AM.BaseOffs;
 
   // Allow absolute addresses.
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index 4d8ace7c1ece02..f6f91cd8ee9bf3 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -920,6 +920,10 @@ bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 13691053ddd707..a9f83f03f0c74d 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3659,6 +3659,10 @@ bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                   const AddrMode &AM, Type *Ty,
                                                   unsigned AS, Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   if (Ty->isSized()) {
     // When LSR detects uses of the same base address to access different
     // types (e.g. unions), it will assume a conservative type for these
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 3324dd2e8fc217..24e44cf398684d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -4895,6 +4895,10 @@ bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
   //  4. reg1 + reg2
   // TODO: Add more checks after support vector extension.
 
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 97e830cec27cad..c35b61d22bfe5a 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -4292,6 +4292,10 @@ bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 66a101036f9134..a4d61511d9b7c2 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5092,6 +5092,10 @@ bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
   // - [areg+immoff]
   // - [immAddr]
 
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   if (AM.BaseGV) {
     return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 51becf1d5b8584..1a2d07db850bb4 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -16814,6 +16814,10 @@ bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // Vector type r+i form is supported since power9 as DQ form. We don't check
   // the offset matching DQ form requirement(off % 16 == 0), because on PowerPC,
   // imm form is preferred and the offset can be adjusted to use imm form later
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dde1882f5eea83..a41738fb718f44 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1753,6 +1753,10 @@ bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                 const AddrMode &AM, Type *Ty,
                                                 unsigned AS,
                                                 Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 3b85a6ac0371ed..5feba75d92f54a 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1058,6 +1058,10 @@ supportedAddressingMode(Instruction *I, bool HasVector) {
 
 bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
        const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // Punt on globals for now, although they can be used in limited
   // RELATIVE LONG cases.
   if (AM.BaseGV)
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 7c47790d1e3515..51425a684a4145 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -784,6 +784,10 @@ bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                       const AddrMode &AM,
                                                       Type *Ty, unsigned AS,
                                                       Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // WebAssembly offsets are added as unsigned without wrapping. The
   // isLegalAddressingMode gives us no way to determine if wrapping could be
   // happening, so we approximate this by accepting only non-negative offsets.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0722c402348ee0..30674cf8deeabe 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33664,6 +33664,10 @@ bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // X86 supports extremely general addressing modes.
   CodeModel::Model M = getTargetMachine().getCodeModel();
 
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 18feeaadb03c83..e3518b2707e122 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1787,6 +1787,10 @@ bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                 const AddrMode &AM, Type *Ty,
                                                 unsigned AS,
                                                 Instruction *I) const {
+  // No scalable offsets allowed.
+  if (AM.OffsetIsScalable)
+    return false;
+
   if (Ty->getTypeID() == Type::VoidTyID)
     return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
 
diff --git a/llvm/unittests/Target/AArch64/AddressingModes.cpp b/llvm/unittests/Target/AArch64/AddressingModes.cpp
index 284ea7ae9233ed..e893e98f6c2621 100644
--- a/llvm/unittests/Target/AArch64/AddressingModes.cpp
+++ b/llvm/unittests/Target/AArch64/AddressingModes.cpp
@@ -13,11 +13,13 @@ using namespace llvm;
 namespace {
 
 struct AddrMode : public TargetLowering::AddrMode {
-  constexpr AddrMode(GlobalValue *GV, int64_t Offs, bool HasBase, int64_t S) {
+  constexpr AddrMode(GlobalValue *GV, int64_t Offs, bool HasBase, int64_t S,
+                     bool ScalableOffset = false) {
     BaseGV = GV;
     BaseOffs = Offs;
     HasBaseReg = HasBase;
     Scale = S;
+    OffsetIsScalable = ScalableOffset;
   }
 };
 struct TestCase {

From fb41f36cd75e6a435823d9bf27035d891b5e8b3c Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Fri, 23 Feb 2024 14:45:19 +0000
Subject: [PATCH 2/5] [NFC] Tests for isLegalAddressingMode scalable offsets

---
 .../Target/AArch64/AddressingModes.cpp        | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/llvm/unittests/Target/AArch64/AddressingModes.cpp b/llvm/unittests/Target/AArch64/AddressingModes.cpp
index e893e98f6c2621..2c7536e8ea0c6c 100644
--- a/llvm/unittests/Target/AArch64/AddressingModes.cpp
+++ b/llvm/unittests/Target/AArch64/AddressingModes.cpp
@@ -155,6 +155,41 @@ const std::initializer_list<TestCase> Tests = {
     {{nullptr, 4096 + 1, true, 0}, 8, false},
 
 };
+
+struct SVETestCase {
+  AddrMode AM;
+  unsigned TypeBits;
+  unsigned NumElts;
+  bool Result;
+};
+
+const std::initializer_list<SVETestCase> SVETests = {
+    // {BaseGV, BaseOffs, HasBaseReg, Scale, Scalable}, EltBits, Count, Result
+    // Test immediate range -- [-8,7] vector's worth.
+    // <vscale x 16 x i8>, increment by one vector
+    {{nullptr, 16, true, 0, true}, 8, 16, false},
+    // <vscale x 4 x i32>, increment by eight vectors
+    {{nullptr, 128, true, 0, true}, 32, 4, false},
+    // <vscale x 8 x i16>, increment by seven vectors
+    {{nullptr, 112, true, 0, true}, 16, 8, false},
+    // <vscale x 2 x i64>, decrement by eight vectors
+    {{nullptr, -128, true, 0, true}, 64, 2, false},
+    // <vscale x 16 x i8>, decrement by nine vectors
+    {{nullptr, -144, true, 0, true}, 8, 16, false},
+
+    // Half the size of a vector register, but allowable with extending
+    // loads and truncating stores
+    // <vscale x 8 x i8>, increment by three vectors
+    {{nullptr, 24, true, 0, true}, 8, 8, false},
+
+    // Test invalid types or offsets
+    // <vscale x 5 x i32>, increment by one vector (base size > 16B)
+    {{nullptr, 20, true, 0, true}, 32, 5, false},
+    // <vscale x 8 x i16>, increment by half a vector
+    {{nullptr, 8, true, 0, true}, 16, 8, false},
+    // <vscale x 3 x i8>, increment by 3 vectors (non-power-of-two)
+    {{nullptr, 9, true, 0, true}, 8, 3, false},
+};
 } // namespace
 
 TEST(AddressingModes, AddressingModes) {
@@ -181,4 +216,11 @@ TEST(AddressingModes, AddressingModes) {
     Type *Typ = Type::getIntNTy(Ctx, Test.TypeBits);
     ASSERT_EQ(TLI->isLegalAddressingMode(DL, Test.AM, Typ, 0), Test.Result);
   }
+
+  for (const auto &SVETest : SVETests) {
+    Type *Ty = VectorType::get(Type::getIntNTy(Ctx, SVETest.TypeBits),
+                               ElementCount::getScalable(SVETest.NumElts));
+    ASSERT_EQ(TLI->isLegalAddressingMode(DL, SVETest.AM, Ty, 0),
+              SVETest.Result);
+  }
 }

From 662cfe1429bbbedf287070f9d2b49bb56e874cd6 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Fri, 23 Feb 2024 15:49:03 +0000
Subject: [PATCH 3/5] [AArch64] Support scalable offsets with
 isLegalAddressingMode

Given a base register and a scalable offset (a multiple of vscale),
return true if the offset falls within the immediate range valid for
the in-memory size of the vector type, e.g. `[X0, #1, mul vl]`
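
The check reduces to the following standalone sketch (the names and
the free function are illustrative only; the patch itself reads these
values from the AddrMode and the DataLayout):

  #include <cstdint>

  // True if a vscale-scaled byte offset BaseOffs can be folded into an
  // SVE [reg, #imm, mul vl] access of a vector whose known-minimum
  // size is VecNumBytes bytes.
  static bool isLegalScalableImmOffset(int64_t BaseOffs,
                                       uint64_t VecNumBytes) {
    // Vector must be at most one 128-bit SVE register per vscale and a
    // power of two in size, and the offset a whole number of vectors.
    if (VecNumBytes == 0 || VecNumBytes > 16 ||
        (VecNumBytes & (VecNumBytes - 1)) != 0)
      return false;
    if (BaseOffs % (int64_t)VecNumBytes != 0)
      return false;
    // The immediate multiplier must lie in the range [-8, 7].
    int64_t Idx = BaseOffs / (int64_t)VecNumBytes;
    return Idx >= -8 && Idx <= 7;
  }

For example, a <vscale x 16 x i8> access with a 16 * vscale byte
offset gives Idx = 1, i.e. [X0, #1, mul vl], while 128 * vscale bytes
on a <vscale x 4 x i32> gives Idx = 8 and is rejected.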
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 20 +++++++++++++++----
 .../Target/AArch64/AddressingModes.cpp        |  8 ++++----
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e7713ac804f6bc..c3c5c6ddff4025 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16345,10 +16345,6 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
   //  reg1 + reg2
   //  reg + SIZE_IN_BYTES * reg
 
-  // No scalable offsets allowed.
-  if (AMode.OffsetIsScalable)
-    return false;
-
   // No global is ever allowed as a base.
   if (AMode.BaseGV)
     return false;
@@ -16378,6 +16374,18 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
 
   if (Ty->isScalableTy()) {
     if (isa<ScalableVectorType>(Ty)) {
+      // See if we have a foldable vscale-based offset, for vector types which
+      // are either legal or smaller than the minimum; more work will be
+      // required if we need to consider addressing for types which need
+      // legalization by splitting.
+      uint64_t VecNumBytes = DL.getTypeSizeInBits(Ty).getKnownMinValue() / 8;
+      if (AM.HasBaseReg && AM.BaseOffs != 0 && AM.OffsetIsScalable &&
+          !AM.Scale && (AM.BaseOffs % VecNumBytes == 0) && VecNumBytes <= 16 &&
+          isPowerOf2_64(VecNumBytes)) {
+        int64_t Idx = AM.BaseOffs / (int64_t)VecNumBytes;
+        return Idx >= -8 && Idx <= 7;
+      }
+
       uint64_t VecElemNumBytes =
           DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
       return AM.HasBaseReg && !AM.BaseOffs &&
@@ -16387,6 +16395,10 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
     return AM.HasBaseReg && !AM.BaseOffs && !AM.Scale;
   }
 
+  // No scalable offsets allowed for non-scalable types.
+  if (AM.OffsetIsScalable)
+    return false;
+
   // check reg + imm case:
   // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12
   uint64_t NumBytes = 0;
diff --git a/llvm/unittests/Target/AArch64/AddressingModes.cpp b/llvm/unittests/Target/AArch64/AddressingModes.cpp
index 2c7536e8ea0c6c..0c086b83242600 100644
--- a/llvm/unittests/Target/AArch64/AddressingModes.cpp
+++ b/llvm/unittests/Target/AArch64/AddressingModes.cpp
@@ -167,20 +167,20 @@ const std::initializer_list<SVETestCase> SVETests = {
     // {BaseGV, BaseOffs, HasBaseReg, Scale, Scalable}, EltBits, Count, Result
     // Test immediate range -- [-8,7] vector's worth.
     // <vscale x 16 x i8>, increment by one vector
-    {{nullptr, 16, true, 0, true}, 8, 16, false},
+    {{nullptr, 16, true, 0, true}, 8, 16, true},
     // <vscale x 4 x i32>, increment by eight vectors
     {{nullptr, 128, true, 0, true}, 32, 4, false},
     // <vscale x 8 x i16>, increment by seven vectors
-    {{nullptr, 112, true, 0, true}, 16, 8, false},
+    {{nullptr, 112, true, 0, true}, 16, 8, true},
     // <vscale x 2 x i64>, decrement by eight vectors
-    {{nullptr, -128, true, 0, true}, 64, 2, false},
+    {{nullptr, -128, true, 0, true}, 64, 2, true},
     // <vscale x 16 x i8>, decrement by nine vectors
     {{nullptr, -144, true, 0, true}, 8, 16, false},
 
     // Half the size of a vector register, but allowable with extending
     // loads and truncating stores
     // <vscale x 8 x i8>, increment by three vectors
-    {{nullptr, 24, true, 0, true}, 8, 8, false},
+    {{nullptr, 24, true, 0, true}, 8, 8, true},
 
     // Test invalid types or offsets
     // <vscale x 5 x i32>, increment by one vector (base size > 16B)

From 5236e040143bf3a29dc3f28ab15bc55b3fdda1d3 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Thu, 29 Feb 2024 15:04:22 +0000
Subject: [PATCH 4/5] Fix formatting

---
 llvm/include/llvm/Analysis/TargetTransformInfo.h | 6 ++----
 llvm/include/llvm/CodeGen/BasicTTIImpl.h         | 4 ++--
 llvm/include/llvm/CodeGen/TargetLowering.h       | 2 +-
 3 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 0cfda084805209..68158255bd6127 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -710,8 +710,7 @@ class TargetTransformInfo {
   /// TODO: Handle pre/postinc as well.
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale,
-                             unsigned AddrSpace = 0,
-                             Instruction *I = nullptr,
+                             unsigned AddrSpace = 0, Instruction *I = nullptr,
                              bool OffsetIsScalable = false) const;
 
   /// Return true if LSR cost of C1 is lower than C2.
@@ -1840,8 +1839,7 @@ class TargetTransformInfo::Concept {
   virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                      int64_t BaseOffset, bool HasBaseReg,
                                      int64_t Scale, unsigned AddrSpace,
-                                     Instruction *I,
-                                     bool OffsetIsScalable) = 0;
+                                     Instruction *I, bool OffsetIsScalable) = 0;
   virtual bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                              const TargetTransformInfo::LSRCost &C2) = 0;
   virtual bool isNumRegsMajorCostOfLSR() = 0;
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 4bb0255c2bcf70..70c41f7b5ac8f2 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -333,8 +333,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   }
 
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
-                             bool HasBaseReg, int64_t Scale,
-                             unsigned AddrSpace, Instruction *I = nullptr,
+                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
+                             Instruction *I = nullptr,
                              bool OffsetIsScalable = false) {
     TargetLoweringBase::AddrMode AM;
     AM.BaseGV = BaseGV;
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 90bfa2983d2cb1..c4dd714930acd5 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2696,7 +2696,7 @@ class TargetLoweringBase {
     int64_t      BaseOffs = 0;
     bool         HasBaseReg = false;
     int64_t      Scale = 0;
-    bool         OffsetIsScalable = false;
+    bool OffsetIsScalable = false;
     AddrMode() = default;
   };
 

From 8f3297a6f2947bc868c45a5bfdd596209991db5f Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 5 Mar 2024 08:29:55 +0000
Subject: [PATCH 5/5] Remove changes from targets without scalable vectors, use
 separate offset.
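
With a separate field, a fixed and a scalable component can now be
expressed at the same time. A minimal sketch of populating the
reworked struct (assuming a TargetLowering pointer 'TLI', a DataLayout
'DL' and a scalable vector type 'VecTy' are in scope):

  TargetLoweringBase::AddrMode AM;
  AM.HasBaseReg = true;
  AM.BaseOffs = 0;         // fixed byte offset
  AM.ScalableOffset = 16;  // 16 * vscale bytes, e.g. one <vscale x 16 x i8>
  bool Legal = TLI->isLegalAddressingMode(DL, AM, VecTy, /*AddrSpace=*/0);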

---
 .../llvm/Analysis/TargetTransformInfo.h       |  9 +++---
 .../llvm/Analysis/TargetTransformInfoImpl.h   |  2 +-
 llvm/include/llvm/CodeGen/BasicTTIImpl.h      |  4 +--
 llvm/include/llvm/CodeGen/TargetLowering.h    |  2 +-
 llvm/lib/Analysis/TargetTransformInfo.cpp     |  4 +--
 llvm/lib/CodeGen/TargetLoweringBase.cpp       |  2 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    | 10 +++----
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  7 ++---
 llvm/lib/Target/ARC/ARCISelLowering.cpp       |  4 ---
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |  7 ++---
 llvm/lib/Target/AVR/AVRISelLowering.cpp       |  4 ---
 llvm/lib/Target/BPF/BPFISelLowering.cpp       |  4 ---
 .../Target/Hexagon/HexagonISelLowering.cpp    |  7 ++---
 .../LoongArch/LoongArchISelLowering.cpp       |  4 ---
 llvm/lib/Target/Mips/MipsISelLowering.cpp     |  4 ---
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   |  4 ---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |  4 ---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  4 +--
 .../Target/SystemZ/SystemZISelLowering.cpp    |  8 ++----
 .../WebAssembly/WebAssemblyISelLowering.cpp   |  4 ---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  4 ---
 llvm/lib/Target/XCore/XCoreISelLowering.cpp   |  4 ---
 .../Target/AArch64/AddressingModes.cpp        | 28 +++++++++++--------
 23 files changed, 44 insertions(+), 90 deletions(-)

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 68158255bd6127..2912bc62c2e61c 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -711,7 +711,7 @@ class TargetTransformInfo {
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale,
                              unsigned AddrSpace = 0, Instruction *I = nullptr,
-                             bool OffsetIsScalable = false) const;
+                             int64_t ScalableOffset = 0) const;
 
   /// Return true if LSR cost of C1 is lower than C2.
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
@@ -1839,7 +1839,8 @@ class TargetTransformInfo::Concept {
   virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                      int64_t BaseOffset, bool HasBaseReg,
                                      int64_t Scale, unsigned AddrSpace,
-                                     Instruction *I, bool OffsetIsScalable) = 0;
+                                     Instruction *I,
+                                     int64_t ScalableOffset) = 0;
   virtual bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                              const TargetTransformInfo::LSRCost &C2) = 0;
   virtual bool isNumRegsMajorCostOfLSR() = 0;
@@ -2300,9 +2301,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
   }
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
-                             Instruction *I, bool OffsetIsScalable) override {
+                             Instruction *I, int64_t ScalableOffset) override {
     return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
-                                      AddrSpace, I, OffsetIsScalable);
+                                      AddrSpace, I, ScalableOffset);
   }
   bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                      const TargetTransformInfo::LSRCost &C2) override {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index dd9265be3eeeec..22e448a113ba93 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -221,7 +221,7 @@ class TargetTransformInfoImplBase {
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                              Instruction *I = nullptr,
-                             bool OffsetIsScalable = false) const {
+                             int64_t ScalableOffset = 0) const {
     // Guess that only reg and reg+reg addressing is allowed. This heuristic is
     // taken from the implementation of LSR.
     return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 70c41f7b5ac8f2..721900038ddd57 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -335,13 +335,13 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
   bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                              bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                              Instruction *I = nullptr,
-                             bool OffsetIsScalable = false) {
+                             int64_t ScalableOffset = 0) {
     TargetLoweringBase::AddrMode AM;
     AM.BaseGV = BaseGV;
     AM.BaseOffs = BaseOffset;
     AM.HasBaseReg = HasBaseReg;
     AM.Scale = Scale;
-    AM.OffsetIsScalable = OffsetIsScalable;
+    AM.ScalableOffset = ScalableOffset;
     return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
   }
 
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index c4dd714930acd5..c9f8bc8fd466d1 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2696,7 +2696,7 @@ class TargetLoweringBase {
     int64_t      BaseOffs = 0;
     bool         HasBaseReg = false;
     int64_t      Scale = 0;
-    bool OffsetIsScalable = false;
+    int64_t ScalableOffset = 0;
     AddrMode() = default;
   };
 
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index a2562a7edebaf0..a2a984ac18cee9 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -404,9 +404,9 @@ bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                 bool HasBaseReg, int64_t Scale,
                                                 unsigned AddrSpace,
                                                 Instruction *I,
-                                                bool OffsetIsScalable) const {
+                                                int64_t ScalableOffset) const {
   return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
-                                        Scale, AddrSpace, I, OffsetIsScalable);
+                                        Scale, AddrSpace, I, ScalableOffset);
 }
 
 bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index cef358890252de..d6e7d0233b40d8 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -2009,7 +2009,7 @@ bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
   // r+i addr mode.
 
   // Scalable offsets not supported
-  if (AM.OffsetIsScalable)
+  if (AM.ScalableOffset)
     return false;
 
   // Allows a sign-extended 16-bit immediate field.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c3c5c6ddff4025..edb98843937ebd 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -16379,16 +16379,16 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
       // required if we need to consider addressing for types which need
       // legalization by splitting.
       uint64_t VecNumBytes = DL.getTypeSizeInBits(Ty).getKnownMinValue() / 8;
-      if (AM.HasBaseReg && AM.BaseOffs != 0 && AM.OffsetIsScalable &&
-          !AM.Scale && (AM.BaseOffs % VecNumBytes == 0) && VecNumBytes <= 16 &&
+      if (AM.HasBaseReg && !AM.BaseOffs && AM.ScalableOffset && !AM.Scale &&
+          (AM.ScalableOffset % VecNumBytes == 0) && VecNumBytes <= 16 &&
           isPowerOf2_64(VecNumBytes)) {
-        int64_t Idx = AM.BaseOffs / (int64_t)VecNumBytes;
+        int64_t Idx = AM.ScalableOffset / (int64_t)VecNumBytes;
         return Idx >= -8 && Idx <= 7;
       }
 
       uint64_t VecElemNumBytes =
           DL.getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()) / 8;
-      return AM.HasBaseReg && !AM.BaseOffs &&
+      return AM.HasBaseReg && !AM.BaseOffs && !AM.ScalableOffset &&
              (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
     }
 
@@ -16396,7 +16396,7 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
   }
 
   // No scalable offsets allowed for non-scalable types.
-  if (AM.OffsetIsScalable)
+  if (AM.ScalableOffset)
     return false;
 
   // check reg + imm case:
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f632467d3568f2..8b162a9d05e51f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1530,11 +1530,8 @@ bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
 
 bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
-                                             unsigned AS, Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
+                                             unsigned AS,
+                                             Instruction *I) const {
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp
index 8570406425b554..5dd343d97b80c2 100644
--- a/llvm/lib/Target/ARC/ARCISelLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp
@@ -737,10 +737,6 @@ bool ARCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   return AM.Scale == 0;
 }
 
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 2c7a602b7c4c63..1ef150e1afdbda 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -19644,11 +19644,8 @@ bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
 /// by AM is legal for this target, for a load/store of the specified type.
 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
-                                              unsigned AS, Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
+                                              unsigned AS,
+                                              Instruction *I) const {
   EVT VT = getValueType(DL, Ty, true);
   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
     return false;
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 74fa09c409acfd..f91e77adb8f810 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -1058,10 +1058,6 @@ bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   int64_t Offs = AM.BaseOffs;
 
   // Allow absolute addresses.
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index f6f91cd8ee9bf3..4d8ace7c1ece02 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -920,10 +920,6 @@ bool BPFTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index a9f83f03f0c74d..32aac128cd2c0d 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3658,11 +3658,8 @@ bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
 /// AM is legal for this target, for a load/store of the specified type.
 bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                   const AddrMode &AM, Type *Ty,
-                                                  unsigned AS, Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
+                                                  unsigned AS,
+                                                  Instruction *I) const {
   if (Ty->isSized()) {
     // When LSR detects uses of the same base address to access different
     // types (e.g. unions), it will assume a conservative type for these
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 24e44cf398684d..3324dd2e8fc217 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -4895,10 +4895,6 @@ bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
   //  4. reg1 + reg2
   // TODO: Add more checks after support vector extension.
 
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index c35b61d22bfe5a..97e830cec27cad 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -4292,10 +4292,6 @@ bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   // No global is ever allowed as a base.
   if (AM.BaseGV)
     return false;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index a4d61511d9b7c2..66a101036f9134 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -5092,10 +5092,6 @@ bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
   // - [areg+immoff]
   // - [immAddr]
 
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   if (AM.BaseGV) {
     return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 1a2d07db850bb4..51becf1d5b8584 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -16814,10 +16814,6 @@ bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   // Vector type r+i form is supported since power9 as DQ form. We don't check
   // the offset matching DQ form requirement(off % 16 == 0), because on PowerPC,
   // imm form is preferred and the offset can be adjusted to use imm form later
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a41738fb718f44..2330621ba992d5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1753,8 +1753,8 @@ bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                 const AddrMode &AM, Type *Ty,
                                                 unsigned AS,
                                                 Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
+  // No scalable offsets allowed yet.
+  if (AM.ScalableOffset)
     return false;
 
   // No global is ever allowed as a base.
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 5feba75d92f54a..d617a834e79bde 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1057,11 +1057,9 @@ supportedAddressingMode(Instruction *I, bool HasVector) {
 }
 
 bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
-       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
+                                                  const AddrMode &AM, Type *Ty,
+                                                  unsigned AS,
+                                                  Instruction *I) const {
   // Punt on globals for now, although they can be used in limited
   // RELATIVE LONG cases.
   if (AM.BaseGV)
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 51425a684a4145..7c47790d1e3515 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -784,10 +784,6 @@ bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                       const AddrMode &AM,
                                                       Type *Ty, unsigned AS,
                                                       Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   // WebAssembly offsets are added as unsigned without wrapping. The
   // isLegalAddressingMode gives us no way to determine if wrapping could be
   // happening, so we approximate this by accepting only non-negative offsets.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 30674cf8deeabe..0722c402348ee0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33664,10 +33664,6 @@ bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS,
                                               Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   // X86 supports extremely general addressing modes.
   CodeModel::Model M = getTargetMachine().getCodeModel();
 
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index e3518b2707e122..18feeaadb03c83 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1787,10 +1787,6 @@ bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                 const AddrMode &AM, Type *Ty,
                                                 unsigned AS,
                                                 Instruction *I) const {
-  // No scalable offsets allowed.
-  if (AM.OffsetIsScalable)
-    return false;
-
   if (Ty->getTypeID() == Type::VoidTyID)
     return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
 
diff --git a/llvm/unittests/Target/AArch64/AddressingModes.cpp b/llvm/unittests/Target/AArch64/AddressingModes.cpp
index 0c086b83242600..0af18d886791a1 100644
--- a/llvm/unittests/Target/AArch64/AddressingModes.cpp
+++ b/llvm/unittests/Target/AArch64/AddressingModes.cpp
@@ -14,12 +14,12 @@ namespace {
 
 struct AddrMode : public TargetLowering::AddrMode {
   constexpr AddrMode(GlobalValue *GV, int64_t Offs, bool HasBase, int64_t S,
-                     bool ScalableOffset = false) {
+                     int64_t SOffs = 0) {
     BaseGV = GV;
     BaseOffs = Offs;
     HasBaseReg = HasBase;
     Scale = S;
-    OffsetIsScalable = ScalableOffset;
+    ScalableOffset = SOffs;
   }
 };
 struct TestCase {
@@ -164,31 +164,35 @@ struct SVETestCase {
 };
 
 const std::initializer_list<SVETestCase> SVETests = {
-    // {BaseGV, BaseOffs, HasBaseReg, Scale, Scalable}, EltBits, Count, Result
+    // {BaseGV, BaseOffs, HasBaseReg, Scale, SOffs}, EltBits, Count, Result
     // Test immediate range -- [-8,7] vector's worth.
     // <vscale x 16 x i8>, increment by one vector
-    {{nullptr, 16, true, 0, true}, 8, 16, true},
+    {{nullptr, 0, true, 0, 16}, 8, 16, true},
     // <vscale x 4 x i32>, increment by eight vectors
-    {{nullptr, 128, true, 0, true}, 32, 4, false},
+    {{nullptr, 0, true, 0, 128}, 32, 4, false},
     // <vscale x 8 x i16>, increment by seven vectors
-    {{nullptr, 112, true, 0, true}, 16, 8, true},
+    {{nullptr, 0, true, 0, 112}, 16, 8, true},
     // <vscale x 2 x i64>, decrement by eight vectors
-    {{nullptr, -128, true, 0, true}, 64, 2, true},
+    {{nullptr, 0, true, 0, -128}, 64, 2, true},
     // <vscale x 16 x i8>, decrement by nine vectors
-    {{nullptr, -144, true, 0, true}, 8, 16, false},
+    {{nullptr, 0, true, 0, -144}, 8, 16, false},
 
     // Half the size of a vector register, but allowable with extending
     // loads and truncating stores
     // <vscale x 8 x i8>, increment by three vectors
-    {{nullptr, 24, true, 0, true}, 8, 8, true},
+    {{nullptr, 0, true, 0, 24}, 8, 8, true},
 
     // Test invalid types or offsets
     // <vscale x 5 x i32>, increment by one vector (base size > 16B)
-    {{nullptr, 20, true, 0, true}, 32, 5, false},
+    {{nullptr, 0, true, 0, 20}, 32, 5, false},
     // <vscale x 8 x i16>, increment by half a vector
-    {{nullptr, 8, true, 0, true}, 16, 8, false},
+    {{nullptr, 0, true, 0, 8}, 16, 8, false},
     // <vscale x 3 x i8>, increment by 3 vectors (non-power-of-two)
-    {{nullptr, 9, true, 0, true}, 8, 3, false},
+    {{nullptr, 0, true, 0, 9}, 8, 3, false},
+
+    // Scalable and fixed offsets
+    // <vscale x 16 x i8>, increment by 32 then decrement by vscale x 16
+    {{nullptr, 32, true, 0, -16}, 8, 16, false},
 };
 } // namespace
 


