[llvm] [Codegen] Add a separate stack ID for scalable predicates (PR #142390)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 10 07:28:42 PDT 2025


https://github.com/MacDue updated https://github.com/llvm/llvm-project/pull/142390

>From bed04e2fec9b8d0beb5fa38cd90c77db3ace6fe9 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Wed, 21 May 2025 13:14:19 +0000
Subject: [PATCH] [Codegen] Add a separate stack ID for scalable predicates

This splits out "ScalablePredicateVector" from the "ScalableVector" StackID.
This is primarily to allow easy differentiation between vectors and
predicates (without inspecting instructions).

This new stack ID is not used in many places yet, but will be used in
a later patch to mark stack slots that are known to contain predicates.

Change-Id: I92c4c96af517ab2cfcf0a6eb9a853c2bac9de342
---
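
A minimal sketch, not part of the patch, of how the new ID composes with the
existing queries; isPredicateSpillSlot is a hypothetical helper used only for
illustration, while the other names are the ones introduced below:

  // Targeted check: only predicate (PPR/PNR) slots carry the new stack ID.
  static bool isPredicateSpillSlot(const MachineFrameInfo &MFI, int FI) {
    return MFI.getStackID(FI) == TargetStackID::ScalablePredicateVector;
  }

  // The isScalableStackID() helper added in this patch matches both
  // TargetStackID::ScalableVector and TargetStackID::ScalablePredicateVector,
  // so existing "is this an SVE object?" checks keep their current behaviour.
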
 llvm/include/llvm/CodeGen/MIRYamlMapping.h    |  2 ++
 llvm/include/llvm/CodeGen/MachineFrameInfo.h  |  9 +++++-
 .../llvm/CodeGen/TargetFrameLowering.h        |  1 +
 .../CodeGen/StackFrameLayoutAnalysisPass.cpp  |  2 +-
 .../Target/AArch64/AArch64FrameLowering.cpp   | 32 +++++++++----------
 .../lib/Target/AArch64/AArch64FrameLowering.h |  4 ++-
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    |  4 +--
 .../Target/AArch64/AArch64ISelLowering.cpp    | 13 +++++---
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp  |  8 ++---
 .../AArch64/AArch64PrologueEpilogue.cpp       |  4 +--
 .../AArch64/debug-info-sve-dbg-declare.mir    |  8 ++---
 .../AArch64/debug-info-sve-dbg-value.mir      |  4 +--
 llvm/test/CodeGen/AArch64/framelayout-sve.mir | 12 +++----
 llvm/test/CodeGen/AArch64/spillfill-sve.mir   | 10 +++---
 .../AArch64/sve-calling-convention-byref.ll   | 10 +++---
 15 files changed, 69 insertions(+), 54 deletions(-)
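
In MIR, the new ID is printed and parsed as "scalable-predicate-vector". As a
rough example (adapted from the updated tests below), a predicate spill slot
now looks like:

  stack:
    - { id: 0, type: spill-slot, size: 2, alignment: 2,
        stack-id: scalable-predicate-vector }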

diff --git a/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
index a91c26ee1122a..d61dfda4f003d 100644
--- a/llvm/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -378,6 +378,8 @@ struct ScalarEnumerationTraits<TargetStackID::Value> {
     IO.enumCase(ID, "default", TargetStackID::Default);
     IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
     IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
+    IO.enumCase(ID, "scalable-predicate-vector",
+                TargetStackID::ScalablePredicateVector);
     IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
     IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
   }
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
index e666001035deb..32ccfd28d8a41 100644
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -494,7 +494,14 @@ class MachineFrameInfo {
   /// Should this stack ID be considered in MaxAlignment.
   bool contributesToMaxAlignment(uint8_t StackID) {
     return StackID == TargetStackID::Default ||
-           StackID == TargetStackID::ScalableVector;
+           StackID == TargetStackID::ScalableVector ||
+           StackID == TargetStackID::ScalablePredicateVector;
+  }
+
+  bool isScalableStackID(int ObjectIdx) const {
+    uint8_t StackID = getStackID(ObjectIdx);
+    return StackID == TargetStackID::ScalableVector ||
+           StackID == TargetStackID::ScalablePredicateVector;
   }
 
   /// setObjectAlignment - Change the alignment of the specified stack object.
diff --git a/llvm/include/llvm/CodeGen/TargetFrameLowering.h b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
index 0e29e45752a9f..75696faf114cc 100644
--- a/llvm/include/llvm/CodeGen/TargetFrameLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetFrameLowering.h
@@ -32,6 +32,7 @@ enum Value {
   SGPRSpill = 1,
   ScalableVector = 2,
   WasmLocal = 3,
+  ScalablePredicateVector = 4,
   NoAlloc = 255
 };
 }
diff --git a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
index 096a33c17cb4b..ec75dc3e3871c 100644
--- a/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
+++ b/llvm/lib/CodeGen/StackFrameLayoutAnalysisPass.cpp
@@ -72,7 +72,7 @@ struct StackFrameLayoutAnalysis {
         : Slot(Idx), Size(MFI.getObjectSize(Idx)),
           Align(MFI.getObjectAlign(Idx).value()), Offset(Offset),
           SlotTy(Invalid), Scalable(false) {
-      Scalable = MFI.getStackID(Idx) == TargetStackID::ScalableVector;
+      Scalable = MFI.isScalableStackID(Idx);
       if (MFI.isSpillSlotObjectIndex(Idx))
         SlotTy = SlotType::Spill;
       else if (MFI.isFixedObjectIndex(Idx))
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 175b5e04d82ff..ca75f2bc1e49d 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -329,7 +329,7 @@ static int64_t getArgumentStackToRestore(MachineFunction &MF,
 static bool produceCompactUnwindFrame(const AArch64FrameLowering &,
                                       MachineFunction &MF);
 
-// Conservatively, returns true if the function is likely to have an SVE vectors
+// Conservatively, returns true if the function is likely to have SVE vectors
 // on the stack. This function is safe to be called before callee-saves or
 // object offsets have been determined.
 static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
@@ -343,7 +343,7 @@ static bool isLikelyToHaveSVEStack(const AArch64FrameLowering &AFL,
 
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   for (int FI = MFI.getObjectIndexBegin(); FI < MFI.getObjectIndexEnd(); FI++) {
-    if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FI))
       return true;
   }
 
@@ -726,8 +726,7 @@ static void emitCalleeSavedRestores(MachineBasicBlock &MBB,
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameDestroy);
 
   for (const auto &Info : CSI) {
-    if (SVE !=
-        (MFI.getStackID(Info.getFrameIdx()) == TargetStackID::ScalableVector))
+    if (SVE != MFI.isScalableStackID(Info.getFrameIdx()))
       continue;
 
     MCRegister Reg = Info.getReg();
@@ -2141,7 +2140,7 @@ AArch64FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
   const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
   bool FPAfterSVECalleeSaves =
       isTargetWindows(MF) && AFI->getSVECalleeSavedStackSize();
-  if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+  if (MFI.isScalableStackID(FI)) {
     if (FPAfterSVECalleeSaves &&
         -ObjectOffset <= (int64_t)AFI->getSVECalleeSavedStackSize())
       return StackOffset::getScalable(ObjectOffset);
@@ -2207,7 +2206,7 @@ StackOffset AArch64FrameLowering::resolveFrameIndexReference(
   const auto &MFI = MF.getFrameInfo();
   int64_t ObjectOffset = MFI.getObjectOffset(FI);
   bool isFixed = MFI.isFixedObjectIndex(FI);
-  bool isSVE = MFI.getStackID(FI) == TargetStackID::ScalableVector;
+  bool isSVE = MFI.isScalableStackID(FI);
   return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
                                      PreferFP, ForSimm);
 }
@@ -2934,10 +2933,14 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
     }
     // Update the StackIDs of the SVE stack slots.
     MachineFrameInfo &MFI = MF.getFrameInfo();
-    if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR) {
+    if (RPI.Type == RegPairInfo::ZPR) {
       MFI.setStackID(FrameIdxReg1, TargetStackID::ScalableVector);
       if (RPI.isPaired())
         MFI.setStackID(FrameIdxReg2, TargetStackID::ScalableVector);
+    } else if (RPI.Type == RegPairInfo::PPR) {
+      MFI.setStackID(FrameIdxReg1, TargetStackID::ScalablePredicateVector);
+      if (RPI.isPaired())
+        MFI.setStackID(FrameIdxReg2, TargetStackID::ScalablePredicateVector);
     }
   }
   return true;
@@ -3145,8 +3148,7 @@ void AArch64FrameLowering::determineStackHazardSlot(
       for (auto &MI : MBB) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
         if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-              AArch64InstrInfo::isFpOrNEON(MI))
+          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
             FrameObjects[*FI] |= 2;
           else
             FrameObjects[*FI] |= 1;
@@ -3591,7 +3593,7 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
 #ifndef NDEBUG
   // First process all fixed stack objects.
   for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
-    assert(MFI.getStackID(I) != TargetStackID::ScalableVector &&
+    assert(!MFI.isScalableStackID(I) &&
            "SVE vectors should never be passed on the stack by value, only by "
            "reference.");
 #endif
@@ -3625,12 +3627,11 @@ static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
   int StackProtectorFI = -1;
   if (MFI.hasStackProtectorIndex()) {
     StackProtectorFI = MFI.getStackProtectorIndex();
-    if (MFI.getStackID(StackProtectorFI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(StackProtectorFI))
       ObjectsToAllocate.push_back(StackProtectorFI);
   }
   for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
-    unsigned StackID = MFI.getStackID(I);
-    if (StackID != TargetStackID::ScalableVector)
+    if (!MFI.isScalableStackID(I))
       continue;
     if (I == StackProtectorFI)
       continue;
@@ -4634,8 +4635,7 @@ void AArch64FrameLowering::orderFrameObjects(
       if (AFI.hasStackHazardSlotIndex()) {
         std::optional<int> FI = getLdStFrameID(MI, MFI);
         if (FI && *FI >= 0 && *FI < (int)FrameObjects.size()) {
-          if (MFI.getStackID(*FI) == TargetStackID::ScalableVector ||
-              AArch64InstrInfo::isFpOrNEON(MI))
+          if (MFI.isScalableStackID(*FI) || AArch64InstrInfo::isFpOrNEON(MI))
             FrameObjects[*FI].Accesses |= FrameObject::AccessFPR;
           else
             FrameObjects[*FI].Accesses |= FrameObject::AccessGPR;
@@ -4993,7 +4993,7 @@ void AArch64FrameLowering::emitRemarks(
           }
 
           unsigned RegTy = StackAccess::AccessType::GPR;
-          if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector) {
+          if (MFI.isScalableStackID(FrameIdx)) {
             // SPILL_PPR_TO_ZPR_SLOT_PSEUDO and FILL_PPR_FROM_ZPR_SLOT_PSEUDO
             // spill/fill the predicate as a data vector (so are an FPR access).
             if (MI.getOpcode() != AArch64::SPILL_PPR_TO_ZPR_SLOT_PSEUDO &&
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.h b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
index a9d65441a4e30..5cf24f5e22919 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.h
@@ -123,6 +123,7 @@ class AArch64FrameLowering : public TargetFrameLowering {
       return false;
     case TargetStackID::Default:
     case TargetStackID::ScalableVector:
+    case TargetStackID::ScalablePredicateVector:
     case TargetStackID::NoAlloc:
       return true;
     }
@@ -131,7 +132,8 @@ class AArch64FrameLowering : public TargetFrameLowering {
   bool isStackIdSafeForLocalArea(unsigned StackId) const override {
     // We don't support putting SVE objects into the pre-allocated local
     // frame block at the moment.
-    return StackId != TargetStackID::ScalableVector;
+    return (StackId != TargetStackID::ScalableVector &&
+            StackId != TargetStackID::ScalablePredicateVector);
   }
 
   friend class AArch64PrologueEmitter;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 6fdc981fc21a5..66833bba33d46 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7497,7 +7497,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
     int FI = cast<FrameIndexSDNode>(N)->getIndex();
     // We can only encode VL scaled offsets, so only fold in frame indexes
     // referencing SVE objects.
-    if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
+    if (MFI.isScalableStackID(FI)) {
       Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
       OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
       return true;
@@ -7543,7 +7543,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
     int FI = cast<FrameIndexSDNode>(Base)->getIndex();
     // We can only encode VL scaled offsets, so only fold in frame indexes
     // referencing SVE objects.
-    if (MFI.getStackID(FI) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FI))
       Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL));
   }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5ffaf2c49b4c0..6b6fbe9bd0e13 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9115,8 +9115,7 @@ void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
       (MI.getOpcode() == AArch64::ADDXri ||
        MI.getOpcode() == AArch64::SUBXri)) {
     const MachineOperand &MO = MI.getOperand(1);
-    if (MO.isFI() && MF.getFrameInfo().getStackID(MO.getIndex()) ==
-                         TargetStackID::ScalableVector)
+    if (MO.isFI() && MF.getFrameInfo().isScalableStackID(MO.getIndex()))
       MI.addOperand(MachineOperand::CreateReg(AArch64::VG, /*IsDef=*/false,
                                               /*IsImplicit=*/true));
   }
@@ -9565,8 +9564,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
       Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
       MachineFrameInfo &MFI = MF.getFrameInfo();
       int FI = MFI.CreateStackObject(StoreSize, Alignment, false);
-      if (isScalable)
-        MFI.setStackID(FI, TargetStackID::ScalableVector);
+      if (isScalable) {
+        bool IsPred = VA.getValVT() == MVT::aarch64svcount ||
+                      VA.getValVT().getVectorElementType() == MVT::i1;
+        MFI.setStackID(FI, IsPred ? TargetStackID::ScalablePredicateVector
+                                  : TargetStackID::ScalableVector);
+      }
 
       MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
       SDValue Ptr = DAG.getFrameIndex(
@@ -29393,7 +29396,7 @@ void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
   // than doing it here in finalizeLowering.
   if (MFI.hasStackProtectorIndex()) {
     for (unsigned int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
-      if (MFI.getStackID(i) == TargetStackID::ScalableVector &&
+      if (MFI.isScalableStackID(i) &&
           MFI.getObjectSSPLayout(i) != MachineFrameInfo::SSPLK_None) {
         MFI.setStackID(MFI.getStackProtectorIndex(),
                        TargetStackID::ScalableVector);
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index e56fe90259d5c..4db4c5325a953 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -5582,7 +5582,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
       assert(Subtarget.isSVEorStreamingSVEAvailable() &&
              "Unexpected register store without SVE store instructions");
       Opc = AArch64::STR_PXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
     }
     break;
   }
@@ -5597,7 +5597,7 @@ void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
       Opc = AArch64::STRSui;
     else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
       Opc = AArch64::STR_PPXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
     }
     break;
   case 8:
@@ -5767,7 +5767,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
       if (IsPNR)
         PNRReg = DestReg;
       Opc = AArch64::LDR_PXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
     }
     break;
   }
@@ -5782,7 +5782,7 @@ void AArch64InstrInfo::loadRegFromStackSlot(
       Opc = AArch64::LDRSui;
     else if (AArch64::PPR2RegClass.hasSubClassEq(RC)) {
       Opc = AArch64::LDR_PPXI;
-      StackID = TargetStackID::ScalableVector;
+      StackID = TargetStackID::ScalablePredicateVector;
     }
     break;
   case 8:
diff --git a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
index af424987b8ddb..ad2dc374e6f55 100644
--- a/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PrologueEpilogue.cpp
@@ -739,7 +739,7 @@ void AArch64PrologueEmitter::emitCalleeSavedGPRLocations(
   CFIInstBuilder CFIBuilder(MBB, MBBI, MachineInstr::FrameSetup);
   for (const auto &Info : CSI) {
     unsigned FrameIdx = Info.getFrameIdx();
-    if (MFI.getStackID(FrameIdx) == TargetStackID::ScalableVector)
+    if (MFI.isScalableStackID(FrameIdx))
       continue;
 
     assert(!Info.isSpilledToReg() && "Spilling to registers not implemented");
@@ -772,7 +772,7 @@ void AArch64PrologueEmitter::emitCalleeSavedSVELocations(
   }
 
   for (const auto &Info : CSI) {
-    if (MFI.getStackID(Info.getFrameIdx()) != TargetStackID::ScalableVector)
+    if (!MFI.isScalableStackID(Info.getFrameIdx()))
       continue;
 
     // Not all unwinders may know about SVE registers, so assume the lowest
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
index aca2816225e3e..7fd0cee068fd1 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-declare.mir
@@ -164,10 +164,10 @@ stack:
   - { id: 1, name: z1.addr, size: 16, alignment: 16, stack-id: scalable-vector,
       debug-info-variable: '!31', debug-info-expression: '!DIExpression()',
       debug-info-location: '!32' }
-  - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 2, name: p0.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!33', debug-info-expression: '!DIExpression()',
       debug-info-location: '!34' }
-  - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 3, name: p1.addr, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!35', debug-info-expression: '!DIExpression()',
       debug-info-location: '!36' }
   - { id: 4, name: w0.addr, size: 4, alignment: 4, local-offset: -4, debug-info-variable: '!37',
@@ -181,10 +181,10 @@ stack:
   - { id: 7, name: localv1, size: 16, alignment: 16, stack-id: scalable-vector,
       debug-info-variable: '!45', debug-info-expression: '!DIExpression()',
       debug-info-location: '!46' }
-  - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 8, name: localp0, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!48', debug-info-expression: '!DIExpression()',
       debug-info-location: '!49' }
-  - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-vector,
+  - { id: 9, name: localp1, size: 2, alignment: 2, stack-id: scalable-predicate-vector,
       debug-info-variable: '!51', debug-info-expression: '!DIExpression()',
       debug-info-location: '!52' }
 machineFunctionInfo: {}
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
index 0ea180b20730f..41ba5542150ab 100644
--- a/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-dbg-value.mir
@@ -96,8 +96,8 @@ stack:
   - { id: 1, size: 8,  alignment: 8 }
   - { id: 2, size: 16, alignment: 16, stack-id: scalable-vector }
   - { id: 3, size: 16, alignment: 16, stack-id: scalable-vector }
-  - { id: 4, size: 2,  alignment: 2,  stack-id: scalable-vector }
-  - { id: 5, size: 2,  alignment: 2,  stack-id: scalable-vector }
+  - { id: 4, size: 2,  alignment: 2,  stack-id: scalable-predicate-vector }
+  - { id: 5, size: 2,  alignment: 2,  stack-id: scalable-predicate-vector }
 machineFunctionInfo: {}
 body:             |
   bb.0.entry:
diff --git a/llvm/test/CodeGen/AArch64/framelayout-sve.mir b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
index 03a6aabffaaf1..110141609a59e 100644
--- a/llvm/test/CodeGen/AArch64/framelayout-sve.mir
+++ b/llvm/test/CodeGen/AArch64/framelayout-sve.mir
@@ -1215,19 +1215,19 @@ body:             |
 # CHECK:        - { id: 2, name: '', type: default, offset: -112, size: 16, alignment: 16,
 # CHECK-NEXT:       stack-id: scalable-vector,
 # CHECK:        - { id: 3, name: '', type: default, offset: -114, size: 2, alignment: 2,
-# CHECK-NEXT:       stack-id: scalable-vector,
+# CHECK-NEXT:       stack-id: scalable-predicate-vector,
 # CHECK:        - { id: 4, name: '', type: spill-slot, offset: -144, size: 16, alignment: 16,
 # CHECK-NEXT:       stack-id: scalable-vector,
 # CHECK:        - { id: 5, name: '', type: spill-slot, offset: -146, size: 2, alignment: 2,
-# CHECK-NEXT:       stack-id: scalable-vector,
+# CHECK-NEXT:       stack-id: scalable-predicate-vector,
 # CHECK:        - { id: 6, name: '', type: spill-slot, offset: -16, size: 16, alignment: 16,
 # CHECK-NEXT:       stack-id: scalable-vector, callee-saved-register: '$z8',
 # CHECK:        - { id: 7, name: '', type: spill-slot, offset: -32, size: 16, alignment: 16,
 # CHECK-NEXT:       stack-id: scalable-vector, callee-saved-register: '$z23',
 # CHECK:        - { id: 8, name: '', type: spill-slot, offset: -34, size: 2, alignment: 2,
-# CHECK-NEXT:       stack-id: scalable-vector, callee-saved-register: '$p4',
+# CHECK-NEXT:       stack-id: scalable-predicate-vector, callee-saved-register: '$p4',
 # CHECK:        - { id: 9, name: '', type: spill-slot, offset: -36, size: 2, alignment: 2,
-# CHECK-NEXT:       stack-id: scalable-vector, callee-saved-register: '$p15',
+# CHECK-NEXT:       stack-id: scalable-predicate-vector, callee-saved-register: '$p15',
 # CHECK:        - { id: 10, name: '', type: spill-slot, offset: -16, size: 8, alignment: 16,
 # CHECK-NEXT:       stack-id: default, callee-saved-register: '$fp',
 #
@@ -1295,9 +1295,9 @@ stack:
   - { id: 0, type: default,    size:  32, alignment: 16, stack-id: scalable-vector }
   - { id: 1, type: default,    size:   4, alignment:  2, stack-id: scalable-vector }
   - { id: 2, type: default,    size:  16, alignment: 16, stack-id: scalable-vector }
-  - { id: 3, type: default,    size:   2, alignment:  2, stack-id: scalable-vector }
+  - { id: 3, type: default,    size:   2, alignment:  2, stack-id: scalable-predicate-vector }
   - { id: 4, type: spill-slot, size:  16, alignment: 16, stack-id: scalable-vector }
-  - { id: 5, type: spill-slot, size:   2, alignment:  2, stack-id: scalable-vector }
+  - { id: 5, type: spill-slot, size:   2, alignment:  2, stack-id: scalable-predicate-vector }
 body:             |
   bb.0.entry:
 
diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.mir b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
index 2b16dd0f29ecc..5569175fb58fc 100644
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.mir
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.mir
@@ -39,7 +39,7 @@ body:             |
     ; CHECK-LABEL: name: spills_fills_stack_id_ppr
     ; CHECK: stack:
     ; CHECK:      - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
-    ; CHECK-NEXT:     stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT:     stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_ppr
     ; EXPAND: STR_PXI $p0, $sp, 7
@@ -82,7 +82,7 @@ body:             |
     ; CHECK-LABEL: name: spills_fills_stack_id_ppr2
     ; CHECK: stack:
     ; CHECK:      - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
-    ; CHECK-NEXT:     stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT:     stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_ppr2
     ; EXPAND: STR_PXI $p0, $sp, 6
@@ -127,7 +127,7 @@ body:             |
     ; CHECK-LABEL: name: spills_fills_stack_id_ppr2
     ; CHECK: stack:
     ; CHECK:      - { id: 0, name: '', type: spill-slot, offset: 0, size: 4, alignment: 2
-    ; CHECK-NEXT:     stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT:     stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_ppr2mul2
     ; EXPAND: STR_PXI $p0, $sp, 6
@@ -172,7 +172,7 @@ body:             |
     ; CHECK-LABEL: name: spills_fills_stack_id_pnr
     ; CHECK: stack:
     ; CHECK:      - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
-    ; CHECK-NEXT:     stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT:     stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_pnr
     ; EXPAND: STR_PXI $pn0, $sp, 7
@@ -211,7 +211,7 @@ body:             |
     ; CHECK-LABEL: name: spills_fills_stack_id_virtreg_pnr
     ; CHECK: stack:
     ; CHECK:      - { id: 0, name: '', type: spill-slot, offset: 0, size: 2, alignment: 2
-    ; CHECK-NEXT:     stack-id: scalable-vector, callee-saved-register: ''
+    ; CHECK-NEXT:     stack-id: scalable-predicate-vector, callee-saved-register: ''
 
     ; EXPAND-LABEL: name: spills_fills_stack_id_virtreg_pnr
     ; EXPAND: renamable $pn8 = WHILEGE_CXX_B
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
index 7bddd1d70aaa8..cc63c7ffc0c1e 100644
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -56,9 +56,9 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_many_svepred_arg(<
 ; CHECK: name: caller_with_many_svepred_arg
 ; CHECK: stack:
 ; CHECK:      - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT:     stack-id: scalable-vector
+; CHECK-NEXT:     stack-id: scalable-predicate-vector
 ; CHECK:      - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT:     stack-id: scalable-vector
+; CHECK-NEXT:     stack-id: scalable-predicate-vector
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0
 ; CHECK-DAG: [[BASE1:%[0-9]+]]:gpr64sp = ADDXri %stack.0, 0
@@ -90,7 +90,7 @@ define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_svepred_arg_1xv16i
 ; CHECK: name: caller_with_svepred_arg_1xv16i1_4xv16i1
 ; CHECK: stack:
 ; CHECK:      - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
-; CHECK-NEXT:     stack-id: scalable-vector,
+; CHECK-NEXT:     stack-id: scalable-predicate-vector,
 ; CHECK:    [[PRED0:%[0-9]+]]:ppr = COPY $p0
 ; CHECK:    ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
 ; CHECK:    STR_PXI [[PRED0]], %stack.0, 0 :: (store (<vscale x 1 x s16>) into %stack.0)
@@ -139,7 +139,7 @@ define [4 x <vscale x 16 x i1>] @caller_with_svepred_arg_4xv16i1_4xv16i1([4 x <v
 ; CHECK: name: caller_with_svepred_arg_4xv16i1_4xv16i1
 ; CHECK: stack:
 ; CHECK:      - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
-; CHECK-NEXT:     stack-id: scalable-vector,
+; CHECK-NEXT:     stack-id: scalable-predicate-vector,
 ; CHECK:    [[PRED3:%[0-9]+]]:ppr = COPY $p3
 ; CHECK:    [[PRED2:%[0-9]+]]:ppr = COPY $p2
 ; CHECK:    [[PRED1:%[0-9]+]]:ppr = COPY $p1
@@ -200,7 +200,7 @@ define [2 x <vscale x 32 x i1>] @caller_with_svepred_arg_2xv32i1_1xv16i1([2 x <v
 ; CHECK: name: caller_with_svepred_arg_2xv32i1_1xv16i1
 ; CHECK: stack:
 ; CHECK:      - { id: 0, name: '', type: default, offset: 0, size: 8, alignment: 2,
-; CHECK-NEXT:     stack-id: scalable-vector,
+; CHECK-NEXT:     stack-id: scalable-predicate-vector,
 ; CHECK:    [[PRED3:%[0-9]+]]:ppr = COPY $p3
 ; CHECK:    [[PRED2:%[0-9]+]]:ppr = COPY $p2
 ; CHECK:    [[PRED1:%[0-9]+]]:ppr = COPY $p1


