[llvm] 0ddf38c - [Hexagon] Improve stack address base reuse for HVX spills

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 17 19:32:46 PDT 2021


Author: Krzysztof Parzyszek
Date: 2021-03-17T21:22:56-05:00
New Revision: 0ddf38c99ebbcec48cb4bce450a59d805e827fe6

URL: https://github.com/llvm/llvm-project/commit/0ddf38c99ebbcec48cb4bce450a59d805e827fe6
DIFF: https://github.com/llvm/llvm-project/commit/0ddf38c99ebbcec48cb4bce450a59d805e827fe6.diff

LOG: [Hexagon] Improve stack address base reuse for HVX spills

The offset in HVX loads/stores is only 4 bits long, so often an
extra register is needed to hold the address. Minimize the number
of such registers by "standardizing" the base addresses and reusing
preexisting base registers when replacing frame indices.
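
A worked example of the arithmetic (a minimal standalone sketch of the
standardization logic added below in HexagonRegisterInfo.cpp; the concrete
slot offset of 1152 bytes is only an illustration): with 128-byte HVX
vectors the 4-bit signed offset reaches -8..7 vectors around the base, so
the new code biases the vector offset by 8 and rounds it down to a multiple
of 16 vectors, letting up to 16 consecutive vector slots share one addi base.

    // Sketch only, assuming HwLen = 128 (128-byte HVX vectors).
    #include <cstdio>
    int main() {
      int HwLen = 128;
      int RealOffset = 1152;                  // spill slot, 9 vectors from SP
      int VecOffset = RealOffset / HwLen + 8; // bias by 8 -> 17
      int Base = (VecOffset & -16) * HwLen;          // 2048: shared addi base
      int InstOffset = (VecOffset % 16 - 8) * HwLen; // -896 bytes = -7 vectors
      std::printf("base=%d inst=%d\n", Base, InstOffset); // 2048 - 896 = 1152
      return 0;
    }

The numbers match the new test: the slot 1152 bytes from r29 is addressed
as vmem(r2+#-7) from the shared base r2 = add(r29,#2048).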

Added: 
    llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll

Modified: 
    llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index 5ece577e8285..db3fb93d0b11 100644
--- a/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -19,6 +19,7 @@
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/LiveRegUnits.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -30,6 +31,7 @@
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Type.h"
 #include "llvm/MC/MachineLocation.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
@@ -41,6 +43,10 @@
 
 using namespace llvm;
 
+static cl::opt<unsigned> FrameIndexSearchLimit(
+    "hexagon-frame-index-search-limit", cl::init(32), cl::Hidden,
+    cl::desc("Limit on instruction search in frame index elimination"));
+
 HexagonRegisterInfo::HexagonRegisterInfo(unsigned HwMode)
     : HexagonGenRegisterInfo(Hexagon::R31, 0/*DwarfFlavor*/, 0/*EHFlavor*/,
                              0/*PC*/, HwMode) {}
@@ -133,7 +139,7 @@ const uint32_t *HexagonRegisterInfo::getCallPreservedMask(
 
 
 BitVector HexagonRegisterInfo::getReservedRegs(const MachineFunction &MF)
-  const {
+      const {
   BitVector Reserved(getNumRegs());
   Reserved.set(Hexagon::R29);
   Reserved.set(Hexagon::R30);
@@ -188,7 +194,6 @@ BitVector HexagonRegisterInfo::getReservedRegs(const MachineFunction &MF)
   return Reserved;
 }
 
-
 void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                               int SPAdj, unsigned FIOp,
                                               RegScavenger *RS) const {
@@ -210,7 +215,6 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   int Offset = HFI.getFrameIndexReference(MF, FI, BP).getFixed();
   // Add the offset from the instruction.
   int RealOffset = Offset + MI.getOperand(FIOp+1).getImm();
-  bool IsKill = false;
 
   unsigned Opc = MI.getOpcode();
   switch (Opc) {
@@ -228,18 +232,92 @@ void HexagonRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   if (!HII.isValidOffset(Opc, RealOffset, this)) {
     // If the offset is not valid, calculate the address in a temporary
     // register and use it with offset 0.
+    int InstOffset = 0;
+    // The actual base register (BP) is typically shared between many
+    // instructions where frame indices are being replaced. In scalar
+    // instructions the offset range is large, and the need for an extra
+    // add instruction is infrequent. Vector loads/stores, however, have
+    // a much smaller offset range: [-8, 7), or #s4. In those cases it
+    // makes sense to "standardize" the immediate in the "addi" instruction
+    // so that multiple loads/stores could be based on it.
+    bool IsPair = false;
+    switch (MI.getOpcode()) {
+      // All of these instructions have the same format: base+#s4.
+      case Hexagon::PS_vloadrw_ai:
+      case Hexagon::PS_vloadrw_nt_ai:
+      case Hexagon::PS_vstorerw_ai:
+      case Hexagon::PS_vstorerw_nt_ai:
+        IsPair = true;
+        LLVM_FALLTHROUGH;
+      case Hexagon::PS_vloadrv_ai:
+      case Hexagon::PS_vloadrv_nt_ai:
+      case Hexagon::PS_vstorerv_ai:
+      case Hexagon::PS_vstorerv_nt_ai:
+      case Hexagon::V6_vL32b_ai:
+      case Hexagon::V6_vS32b_ai: {
+        unsigned HwLen = HST.getVectorLength();
+        if (RealOffset % HwLen == 0) {
+          int VecOffset = RealOffset / HwLen;
+          // Rewrite the offset as "base + [-8, 7)".
+          VecOffset += 8;
+          // Pairs are expanded into two instructions: make sure that both
+          // can use the same base (i.e. VecOffset+1 is not a different
+          // multiple of 16 than VecOffset).
+          if (!IsPair || (VecOffset + 1) % 16 != 0) {
+            RealOffset = (VecOffset & -16) * HwLen;
+            InstOffset = (VecOffset % 16 - 8) * HwLen;
+          }
+        }
+      }
+    }
+
+    // Search backwards in the block for "Reg = A2_addi BP, RealOffset".
+    // This will give us a chance to avoid creating a new register.
+    Register ReuseBP;
+    unsigned SearchCount = 0, SearchLimit = FrameIndexSearchLimit;
+    bool PassedCall = false;
+    LiveRegUnits Defs(*this), Uses(*this);
+
+    for (auto I = std::next(II.getReverse()), E = MB.rend(); I != E; ++I) {
+      if (SearchCount == SearchLimit)
+        break;
+      ++SearchCount;
+      const MachineInstr &BI = *I;
+      LiveRegUnits::accumulateUsedDefed(BI, Defs, Uses, this);
+      PassedCall |= BI.isCall();
+
+      if (BI.getOpcode() != Hexagon::A2_addi)
+        continue;
+      if (BI.getOperand(1).getReg() != BP)
+        continue;
+      const auto &Op2 = BI.getOperand(2);
+      if (!Op2.isImm() || Op2.getImm() != RealOffset)
+        continue;
+
+      Register R = BI.getOperand(0).getReg();
+      if (R.isPhysical()) {
+        if (Defs.available(R))
+          ReuseBP = R;
+      } else if (R.isVirtual()) {
+        if (!PassedCall)
+          ReuseBP = R;
+      }
+      break;
+    }
+
     auto &MRI = MF.getRegInfo();
-    Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
-    const DebugLoc &DL = MI.getDebugLoc();
-    BuildMI(MB, II, DL, HII.get(Hexagon::A2_addi), TmpR)
-      .addReg(BP)
-      .addImm(RealOffset);
-    BP = TmpR;
-    RealOffset = 0;
-    IsKill = true;
+    if (!ReuseBP) {
+      ReuseBP = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
+      const DebugLoc &DL = MI.getDebugLoc();
+      BuildMI(MB, II, DL, HII.get(Hexagon::A2_addi), ReuseBP)
+        .addReg(BP)
+        .addImm(RealOffset);
+    }
+    BP = ReuseBP;
+    RealOffset = InstOffset;
   }
 
-  MI.getOperand(FIOp).ChangeToRegister(BP, false, false, IsKill);
+  MI.getOperand(FIOp).ChangeToRegister(BP, false, false, false);
   MI.getOperand(FIOp+1).ChangeToImmediate(RealOffset);
 }
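
The backward search added above is easier to see in isolation. Below is a
simplified, self-contained model of it; the plain structs stand in for
MachineInstr and LiveRegUnits, so every type and helper name here is an
illustrative assumption rather than LLVM API.

    // Toy model: walk backwards over the instructions preceding the
    // frame-index user, looking for an existing "Dst = addi BP, RealOffset".
    #include <optional>
    #include <set>
    #include <vector>

    struct Instr {
      bool IsCall = false;
      bool IsAddi = false;             // stands in for Hexagon::A2_addi
      int Dst = -1, Src = -1, Imm = 0;
      bool DstIsVirtual = false;
    };

    std::optional<int> findReusableBase(const std::vector<Instr> &Preceding,
                                        int BP, int RealOffset,
                                        unsigned SearchLimit = 32) {
      std::set<int> RedefinedSince;    // stand-in for LiveRegUnits tracking
      bool PassedCall = false;
      unsigned Count = 0;
      for (auto I = Preceding.rbegin(); I != Preceding.rend(); ++I) {
        if (Count++ == SearchLimit)
          break;
        if (I->IsAddi && I->Src == BP && I->Imm == RealOffset) {
          // A physical register is reusable only if nothing redefined it
          // since; a virtual register only if no call was crossed, mirroring
          // the PassedCall check in the patch.
          bool Ok = I->DstIsVirtual ? !PassedCall
                                    : !RedefinedSince.count(I->Dst);
          if (Ok)
            return I->Dst;
          return std::nullopt;
        }
        PassedCall |= I->IsCall;
        if (I->Dst >= 0)
          RedefinedSince.insert(I->Dst); // defined between addi and the use
      }
      return std::nullopt;               // nothing usable: emit a fresh addi
    }

The scan length is capped by the new hidden llc option
-hexagon-frame-index-search-limit (default 32), so the search stays cheap
even in large basic blocks.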
 

diff --git a/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll b/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll
new file mode 100644
index 000000000000..73da83b921e2
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/hvx-reuse-fi-base.ll
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=hexagon < %s | FileCheck %s
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon"
+
+@g0 = external dso_local global <64 x i32>, align 128
+@g1 = external hidden unnamed_addr constant [110 x i8], align 1
+@g2 = external hidden unnamed_addr constant [102 x i8], align 1
+@g3 = external hidden unnamed_addr constant [110 x i8], align 1
+
+declare dso_local void @f0() #0
+
+declare dso_local void @f1(i8*, ...) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32>, i32, i32 immarg) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32>, <32 x i32>) #1
+
+define dso_local void @f2() #0 {
+; CHECK-LABEL: f2:
+; CHECK:       // %bb.0: // %b0
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r1:0 = combine(#2,##16843009)
+; CHECK-NEXT:     allocframe(r29,#1536):raw
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1 = vsplat(r1)
+; CHECK-NEXT:     r17:16 = combine(#-1,#1)
+; CHECK-NEXT:     r29 = and(r29,#-256)
+; CHECK-NEXT:     memd(r30+#-8) = r17:16
+; CHECK-NEXT:    } // 8-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vsplat(r16)
+; CHECK-NEXT:     r2 = add(r29,#2048)
+; CHECK-NEXT:     memd(r30+#-16) = r19:18
+; CHECK-NEXT:    } // 8-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     q0 = vand(v0,r0)
+; CHECK-NEXT:     r18 = ##-2147483648
+; CHECK-NEXT:     vmem(r2+#-7) = v0
+; CHECK-NEXT:    } // 128-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vand(q0,r17)
+; CHECK-NEXT:     r0 = ##g1
+; CHECK-NEXT:     memd(r30+#-24) = r21:20
+; CHECK-NEXT:    } // 8-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r19 = ##g0+128
+; CHECK-NEXT:     vmem(r2+#-6) = v0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v3:2.h = vadd(v0.ub,v1.ub)
+; CHECK-NEXT:     r20 = ##g0
+; CHECK-NEXT:     vmem(r29+#5) = v1
+; CHECK-NEXT:    } // 128-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     vmem(r29+#6) = v2
+; CHECK-NEXT:    } // 256-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v31:30.uw = vrmpy(v3:2.ub,r18.ub,#0)
+; CHECK-NEXT:     vmem(r29+#7) = v3
+; CHECK-NEXT:    } // 256-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     vmem(r19+#0) = v31
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f1
+; CHECK-NEXT:     vmem(r20+#0) = v30
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = add(r29,#2048)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vmem(r0+#-7)
+; CHECK-NEXT:    } // 128-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.h = vadd(v0.ub,v0.ub)
+; CHECK-NEXT:     r0 = ##g2
+; CHECK-NEXT:     vmem(r29+#2) = v0.new
+; CHECK-NEXT:    } // 256-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     vmem(r29+#3) = v1
+; CHECK-NEXT:    } // 256-byte Folded Spill
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.uw = vrmpy(v1:0.ub,r17.ub,#0)
+; CHECK-NEXT:     vmem(r19+#0) = v1.new
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f1
+; CHECK-NEXT:     vmem(r20+#0) = v0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = ##2147483647
+; CHECK-NEXT:     v0 = vmem(r29+#2)
+; CHECK-NEXT:    } // 256-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1 = vmem(r29+#3)
+; CHECK-NEXT:    } // 256-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.uw = vrmpy(v1:0.ub,r0.ub,#1)
+; CHECK-NEXT:     r0 = ##g3
+; CHECK-NEXT:     vmem(r19+#0) = v1.new
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f1
+; CHECK-NEXT:     vmem(r20+#0) = v0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vmem(r29+#6)
+; CHECK-NEXT:    } // 256-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1 = vmem(r29+#7)
+; CHECK-NEXT:    } // 256-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.uw = vrmpy(v1:0.ub,r18.ub,#1)
+; CHECK-NEXT:     vmem(r19+#0) = v1.new
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f0
+; CHECK-NEXT:     vmem(r20+#0) = v0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = #0
+; CHECK-NEXT:     v0 = vmem(r29+#6)
+; CHECK-NEXT:    } // 256-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1 = vmem(r29+#7)
+; CHECK-NEXT:    } // 256-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.uw = vrmpy(v1:0.ub,r0.ub,#1)
+; CHECK-NEXT:     vmem(r19+#0) = v1.new
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     call f0
+; CHECK-NEXT:     vmem(r20+#0) = v0
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r0 = add(r29,#2048)
+; CHECK-NEXT:     v1 = vmem(r29+#5)
+; CHECK-NEXT:    } // 128-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v0 = vmem(r0+#-7)
+; CHECK-NEXT:    } // 128-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.h = vadd(v0.ub,v1.ub)
+; CHECK-NEXT:    }
+; CHECK-NEXT:    {
+; CHECK-NEXT:     v1:0.uw = vrmpy(v1:0.ub,r16.ub,#1)
+; CHECK-NEXT:     r17:16 = memd(r30+#-8)
+; CHECK-NEXT:     vmem(r19+#0) = v1.new
+; CHECK-NEXT:    } // 8-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r19:18 = memd(r30+#-16)
+; CHECK-NEXT:     vmem(r20+#0) = v0
+; CHECK-NEXT:    } // 8-byte Folded Reload
+; CHECK-NEXT:    {
+; CHECK-NEXT:     r21:20 = memd(r30+#-24)
+; CHECK-NEXT:     r31:30 = dealloc_return(r30):raw
+; CHECK-NEXT:    } // 8-byte Folded Reload
+b0:
+  %v0 = alloca <32 x i32>, align 128
+  %v1 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
+  %v2 = call <128 x i1> @llvm.hexagon.V6.vandvrt.128B(<32 x i32> %v1, i32 16843009)
+  %v3 = call <32 x i32> @llvm.hexagon.V6.vandqrt.128B(<128 x i1> %v2, i32 -1)
+  store <32 x i32> %v3, <32 x i32>* %v0, align 128
+  %v4 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
+  %v5 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> undef, <32 x i32> %v4)
+  %v6 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v5, i32 -2147483648, i32 0)
+  store <64 x i32> %v6, <64 x i32>* @g0, align 128
+  call void (i8*, ...) @f1(i8* getelementptr inbounds ([110 x i8], [110 x i8]* @g1, i32 0, i32 0)) #2
+  %v7 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
+  %v8 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v7, <32 x i32> undef)
+  %v9 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v8, i32 -1, i32 0)
+  store <64 x i32> %v9, <64 x i32>* @g0, align 128
+  call void (i8*, ...) @f1(i8* getelementptr inbounds ([102 x i8], [102 x i8]* @g2, i32 0, i32 0)) #2
+  %v10 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
+  %v11 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v10, <32 x i32> undef)
+  %v12 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v11, i32 2147483647, i32 1)
+  store <64 x i32> %v12, <64 x i32>* @g0, align 128
+  call void (i8*, ...) @f1(i8* getelementptr inbounds ([110 x i8], [110 x i8]* @g3, i32 0, i32 0)) #2
+  %v13 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
+  %v14 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> undef, <32 x i32> %v13)
+  %v15 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v14, i32 -2147483648, i32 1)
+  store <64 x i32> %v15, <64 x i32>* @g0, align 128
+  call void @f0() #2
+  %v16 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
+  %v17 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> undef, <32 x i32> %v16)
+  %v18 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v17, i32 0, i32 1)
+  store <64 x i32> %v18, <64 x i32>* @g0, align 128
+  call void @f0() #2
+  %v19 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 1)
+  %v20 = call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 2)
+  %v21 = call <64 x i32> @llvm.hexagon.V6.vaddubh.128B(<32 x i32> %v19, <32 x i32> %v20)
+  %v22 = call <64 x i32> @llvm.hexagon.V6.vrmpyubi.128B(<64 x i32> %v21, i32 1, i32 1)
+  store <64 x i32> %v22, <64 x i32>* @g0, align 128
+  ret void
+}
+
+attributes #0 = { nounwind "use-soft-float"="false" "target-cpu"="hexagonv66" "target-features"="+hvxv66,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind optsize }

More information about the llvm-commits mailing list