[llvm] 0f45eaf - [RISCV] Add a scavenge spill slot when using ADDI to compute a scalable stack offset

via llvm-commits llvm-commits@lists.llvm.org
Sun Jul 3 06:03:57 PDT 2022


Author: luxufan
Date: 2022-07-03T20:18:13+08:00
New Revision: 0f45eaf0da1f28a744f7dd24536e6d1a25b7d6cf

URL: https://github.com/llvm/llvm-project/commit/0f45eaf0da1f28a744f7dd24536e6d1a25b7d6cf
DIFF: https://github.com/llvm/llvm-project/commit/0f45eaf0da1f28a744f7dd24536e6d1a25b7d6cf.diff

LOG: [RISCV] Add a scavenge spill slot when using ADDI to compute a scalable stack offset

Computing a scalable stack offset needs up to two scratch registers. We
add scavenge spill slots according to the result of `RISCV::isRVVSpill`
and `RVVStackSize`. Since ADDI is not included in `RISCV::isRVVSpill`,
PEI doesn't add scavenge spill slots for scratch registers when ADDI is
used to compute scalable stack offsets.

The ADDI instruction has a destination register which can be used as a
scratch register, so one scavenge spill slot is sufficient for
computing scalable stack offsets.
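
As a minimal standalone sketch of the per-instruction rule the patch
implements (the function name `scavSlotsForInstr` and its boolean
parameters are illustrative only, not LLVM API; the in-tree logic is
`getScavSlotsNumForRVV` in RISCVFrameLowering.cpp):

    #include <cassert>

    // Slot counts as used by the patch: an RVV spill of a scalable
    // stack object may need two scratch registers (one to materialize
    // the scaled VLENB offset, one to form the final address); a
    // non-scalable RVV spill needs one; an ADDI addressing a scalable
    // object can reuse its own destination register, so one suffices.
    static unsigned scavSlotsForInstr(bool IsRVVSpill, bool IsADDI,
                                      bool IsScalableObject) {
      if (IsRVVSpill)
        return IsScalableObject ? 2 : 1;
      if (IsADDI && IsScalableObject)
        return 1;
      return 0;
    }

    int main() {
      // The case this patch fixes: an ADDI computing a scalable stack
      // offset previously got no emergency slot at all.
      assert(scavSlotsForInstr(false, true, true) == 1);
      assert(scavSlotsForInstr(true, false, true) == 2); // worst case
      assert(scavSlotsForInstr(true, false, false) == 1);
      return 0;
    }

The function-level count is the maximum of this value over all
frame-index operands, and processFunctionBeforeFrameFinalized reserves
that many emergency spill slots for the register scavenger.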

Differential Revision: https://reviews.llvm.org/D128188

Added: 
    llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir

Modified: 
    llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.h
    llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
    llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
    llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 0a9a9efec323c..57d8ba6f01618 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -21,6 +21,8 @@
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/MC/MCDwarf.h"
 
+#include <algorithm>
+
 using namespace llvm;
 
 // For now we use x18, a.k.a s2, as pointer to shadow call stack.
@@ -941,14 +943,48 @@ RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFrameInfo &MFI) const {
   return std::make_pair(StackSize, RVVStackAlign);
 }
 
-static bool hasRVVSpillWithFIs(MachineFunction &MF) {
+static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
+  // For RVV spill, scalable stack offsets computing requires up to two scratch
+  // registers
+  static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
+
+  // For RVV spill, non-scalable stack offsets computing requires up to one
+  // scratch register.
+  static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
+
+  // ADDI instruction's destination register can be used for computing
+  // offsets. So Scalable stack offsets require up to one scratch register.
+  static constexpr unsigned ScavSlotsADDIScalableObject = 1;
+
+  static constexpr unsigned MaxScavSlotsNumKnown =
+      std::max({ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
+                ScavSlotsNumRVVSpillNonScalableObject});
+
+  unsigned MaxScavSlotsNum = 0;
   if (!MF.getSubtarget<RISCVSubtarget>().hasVInstructions())
     return false;
   for (const MachineBasicBlock &MBB : MF)
-    for (const MachineInstr &MI : MBB)
-      if (RISCV::isRVVSpill(MI, /*CheckFIs*/ true))
-        return true;
-  return false;
+    for (const MachineInstr &MI : MBB) {
+      bool IsRVVSpill = RISCV::isRVVSpill(MI);
+      for (auto &MO : MI.operands()) {
+        if (!MO.isFI())
+          continue;
+        bool IsScalableVectorID = MF.getFrameInfo().getStackID(MO.getIndex()) ==
+                                  TargetStackID::ScalableVector;
+        if (IsRVVSpill) {
+          MaxScavSlotsNum = std::max(
+              MaxScavSlotsNum, IsScalableVectorID
+                                   ? ScavSlotsNumRVVSpillScalableObject
+                                   : ScavSlotsNumRVVSpillNonScalableObject);
+        } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
+          MaxScavSlotsNum =
+              std::max(MaxScavSlotsNum, ScavSlotsADDIScalableObject);
+        }
+      }
+      if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
+        return MaxScavSlotsNumKnown;
+    }
+  return MaxScavSlotsNum;
 }
 
 void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
@@ -980,17 +1016,14 @@ void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
   // RVV loads & stores have no capacity to hold the immediate address offsets
   // so we must always reserve an emergency spill slot if the MachineFunction
   // contains any RVV spills.
-  if (!isInt<11>(MFI.estimateStackSize(MF)) || hasRVVSpillWithFIs(MF)) {
-    int RegScavFI = MFI.CreateStackObject(RegInfo->getSpillSize(*RC),
-                                          RegInfo->getSpillAlign(*RC), false);
-    RS->addScavengingFrameIndex(RegScavFI);
-    // For RVV, scalable stack offsets require up to two scratch registers to
-    // compute the final offset. Reserve an additional emergency spill slot.
-    if (RVVStackSize != 0) {
-      int RVVRegScavFI = MFI.CreateStackObject(
-          RegInfo->getSpillSize(*RC), RegInfo->getSpillAlign(*RC), false);
-      RS->addScavengingFrameIndex(RVVRegScavFI);
-    }
+  unsigned ScavSlotsNum = 0;
+  if (!isInt<11>(MFI.estimateStackSize(MF)))
+    ScavSlotsNum = 1;
+
+  ScavSlotsNum = std::max(ScavSlotsNum, getScavSlotsNumForRVV(MF));
+  for (unsigned i = 0; i < ScavSlotsNum; i++) {
+    RS->addScavengingFrameIndex(MFI.CreateStackObject(
+        RegInfo->getSpillSize(*RC), RegInfo->getSpillAlign(*RC), false));
   }
 
   if (MFI.getCalleeSavedInfo().empty() || RVFI->useSaveRestoreLibCalls(MF)) {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 951ab3cabc4a2..685604ad9a59b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1880,16 +1880,14 @@ static bool isRVVWholeLoadStore(unsigned Opcode) {
   }
 }
 
-bool RISCV::isRVVSpill(const MachineInstr &MI, bool CheckFIs) {
+bool RISCV::isRVVSpill(const MachineInstr &MI) {
   // RVV lacks any support for immediate addressing for stack addresses, so be
   // conservative.
   unsigned Opcode = MI.getOpcode();
   if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
       !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
     return false;
-  return !CheckFIs || any_of(MI.operands(), [](const MachineOperand &MO) {
-    return MO.isFI();
-  });
+  return true;
 }
 
 Optional<std::pair<unsigned, unsigned>>

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 369d66a55262f..5368437618bd6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -184,9 +184,8 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
 namespace RISCV {
 
 // Returns true if the given MI is an RVV instruction opcode for which we may
-// expect to see a FrameIndex operand. When CheckFIs is true, the instruction
-// must contain at least one FrameIndex operand.
-bool isRVVSpill(const MachineInstr &MI, bool CheckFIs);
+// expect to see a FrameIndex operand.
+bool isRVVSpill(const MachineInstr &MI);
 
 Optional<std::pair<unsigned, unsigned>> isRVVSpillForZvlsseg(unsigned Opcode);
 

diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 0f69b55d6ad20..0c92190764987 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -174,7 +174,7 @@ void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   Register FrameReg;
   StackOffset Offset =
       getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
-  bool IsRVVSpill = RISCV::isRVVSpill(MI, /*CheckFIs*/ false);
+  bool IsRVVSpill = RISCV::isRVVSpill(MI);
   if (!IsRVVSpill)
     Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
new file mode 100644
index 0000000000000..f807c7693332a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/addi-rvv-stack-object.mir
@@ -0,0 +1,60 @@
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass prologepilog %s -o - | FileCheck %s
+--- |
+  declare void @extern(<vscale x 16 x i8>*) #0
+  
+  define void @addi_rvv_stack_object() #0 {
+    %local0 = alloca <vscale x 16 x i8>, align 16
+    call void @extern(<vscale x 16 x i8>* %local0)
+    ret void
+  }
+  
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            addi_rvv_stack_object
+alignment:       4
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       0
+  offsetAdjustment: 0
+  maxAlignment:    16
+  adjustsStack:    false
+  hasCalls:        true
+  stackProtector:  ''
+  functionContext: ''
+  maxCallFrameSize: 4294967295
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  hasTailCall:     false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: local0, type: default, offset: 0, size: 16, alignment: 16, 
+      stack-id: scalable-vector, callee-saved-register: '', callee-saved-restored: true, 
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+# CHECK: - { id: 2, name: '', type: default, offset: -16, size: 8, alignment: 8,
+# CHECK:     stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+# CHECK:     debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+debugValueSubstitutions: []
+constants:       []
+machineFunctionInfo:
+  varArgsFrameIndex: 0
+  varArgsSaveSize: 0
+body:             |
+  bb.0 (%ir-block.0):
+    ADJCALLSTACKDOWN 0, 0, implicit-def dead $x2, implicit $x2
+    $x10 = ADDI %stack.0.local0, 0
+    PseudoCALL target-flags(riscv-plt) @extern, csr_ilp32d_lp64d, implicit-def dead $x1, implicit killed $x10, implicit-def $x2
+    ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2
+    PseudoRET
+
+...

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
index 2d77024872004..686401da41a4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-stack-align.mir
@@ -122,22 +122,22 @@
   ;
   ; RV64-LABEL: rvv_stack_align32:
   ; RV64:       # %bb.0:
-  ; RV64-NEXT:    addi sp, sp, -48
-  ; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
-  ; RV64-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
-  ; RV64-NEXT:    addi s0, sp, 48
+  ; RV64-NEXT:    addi sp, sp, -80
+  ; RV64-NEXT:    sd ra, 72(sp) # 8-byte Folded Spill
+  ; RV64-NEXT:    sd s0, 64(sp) # 8-byte Folded Spill
+  ; RV64-NEXT:    addi s0, sp, 80
   ; RV64-NEXT:    csrr a0, vlenb
   ; RV64-NEXT:    slli a0, a0, 2
   ; RV64-NEXT:    sub sp, sp, a0
   ; RV64-NEXT:    andi sp, sp, -32
-  ; RV64-NEXT:    addi a0, sp, 32
-  ; RV64-NEXT:    addi a1, sp, 8
-  ; RV64-NEXT:    mv a2, sp
+  ; RV64-NEXT:    addi a0, sp, 64
+  ; RV64-NEXT:    addi a1, sp, 40
+  ; RV64-NEXT:    addi a2, sp, 32
   ; RV64-NEXT:    call extern@plt
-  ; RV64-NEXT:    addi sp, s0, -48
-  ; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
-  ; RV64-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
-  ; RV64-NEXT:    addi sp, sp, 48
+  ; RV64-NEXT:    addi sp, s0, -80
+  ; RV64-NEXT:    ld ra, 72(sp) # 8-byte Folded Reload
+  ; RV64-NEXT:    ld s0, 64(sp) # 8-byte Folded Reload
+  ; RV64-NEXT:    addi sp, sp, 80
   ; RV64-NEXT:    ret
     %a = alloca <vscale x 4 x i32>, align 32
     %b = alloca i64

diff --git a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
index 44ad494eed0c7..c13232106040d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/scalar-stack-align.ll
@@ -13,36 +13,36 @@
 define i64* @scalar_stack_align16() nounwind {
 ; RV32-LABEL: scalar_stack_align16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    addi sp, sp, -48
+; RV32-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    csrr a0, vlenb
 ; RV32-NEXT:    slli a0, a0, 1
 ; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    addi a0, sp, 32
 ; RV32-NEXT:    call extern@plt
-; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    addi a0, sp, 16
 ; RV32-NEXT:    csrr a1, vlenb
 ; RV32-NEXT:    slli a1, a1, 1
 ; RV32-NEXT:    add sp, sp, a1
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 48
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: scalar_stack_align16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -32
-; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    addi sp, sp, -48
+; RV64-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    csrr a0, vlenb
 ; RV64-NEXT:    slli a0, a0, 1
 ; RV64-NEXT:    sub sp, sp, a0
-; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    addi a0, sp, 32
 ; RV64-NEXT:    call extern@plt
-; RV64-NEXT:    mv a0, sp
+; RV64-NEXT:    addi a0, sp, 16
 ; RV64-NEXT:    csrr a1, vlenb
 ; RV64-NEXT:    slli a1, a1, 1
 ; RV64-NEXT:    add sp, sp, a1
-; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 48
 ; RV64-NEXT:    ret
   %a = alloca <vscale x 2 x i32>
   %c = alloca i64, align 16

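For reference, the assembly deltas above are consistent with the newly
reserved emergency slots: in scalar_stack_align16 the frame grows by 16
bytes on both RV32 and RV64 (one extra spill slot rounded up to the
16-byte RISC-V stack alignment, e.g. 32 + 16 = 48 on RV64), while in
rvv_stack_align32 it grows by 32 bytes (48 + 32 = 80), the larger
rounding following from that function's 32-byte stack realignment.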