[llvm] e52c558 - [RISCV] Narrow indices of fixed vector gather/scatter nodes

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 18 11:49:21 PDT 2023


Author: Philip Reames
Date: 2023-09-18T11:49:14-07:00
New Revision: e52c55881358e60abbc4811e84ff48eff8f64fbb

URL: https://github.com/llvm/llvm-project/commit/e52c55881358e60abbc4811e84ff48eff8f64fbb
DIFF: https://github.com/llvm/llvm-project/commit/e52c55881358e60abbc4811e84ff48eff8f64fbb.diff

LOG: [RISCV] Narrow indices of fixed vector gather/scatter nodes

Doing so allows the use of smaller constants overall, and for some small vector constants may let us avoid the constant pool entirely. This can result in extra VTYPE toggles if we get unlucky.

This was reviewed under PR #66405.
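
As a rough illustration (a hypothetical reduced input, not copied from the test file), consider a gather whose scaled constant byte offsets all fit in 8 bits. With this change the constant index build_vector is truncated to i8 elements, so the indices can be loaded with vle8.v and consumed by vluxei8.v instead of e32/e64 indexed loads:

  declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32 immarg, <8 x i1>, <8 x i16>)

  define <8 x i16> @gather_small_const_offsets(ptr %base) {
    ; Byte offsets are 2 * {0,1,4,5,8,9,12,13}, all well under 256, so the
    ; i64 index vector can be narrowed to i8 before forming the indexed load.
    %ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 0, i64 1, i64 4, i64 5, i64 8, i64 9, i64 12, i64 13>
    %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %ptrs, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> poison)
    ret <8 x i16> %v
  }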

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index de58335b435651c..e74d184c0a35d04 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11633,15 +11633,39 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
 }
 
-// According to the property that indexed load/store instructions
-// zero-extended their indices, \p narrowIndex tries to narrow the type of index
-// operand if it is matched to pattern (shl (zext x to ty), C) and bits(x) + C <
-// bits(ty).
+/// According to the property that indexed load/store instructions zero-extend
+/// their indices, try to narrow the type of index operand.
 static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG) {
   if (isIndexTypeSigned(IndexType))
     return false;
 
-  if (N.getOpcode() != ISD::SHL || !N->hasOneUse())
+  if (!N->hasOneUse())
+    return false;
+
+  EVT VT = N.getValueType();
+  SDLoc DL(N);
+
+  // In general, what we're doing here is seeing if we can sink a truncate to
+  // a smaller element type into the expression tree building our index.
+  // TODO: We can generalize this and handle a bunch more cases if useful.
+
+  // Narrow a buildvector to the narrowest element type.  This requires less
+  // work and less register pressure at high LMUL, and creates smaller constants
+  // which may be cheaper to materialize.
+  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) {
+    KnownBits Known = DAG.computeKnownBits(N);
+    unsigned ActiveBits = std::max(8u, Known.countMaxActiveBits());
+    LLVMContext &C = *DAG.getContext();
+    EVT ResultVT = EVT::getIntegerVT(C, ActiveBits).getRoundIntegerType(C);
+    if (ResultVT.bitsLT(VT.getVectorElementType())) {
+      N = DAG.getNode(ISD::TRUNCATE, DL,
+                      VT.changeVectorElementType(ResultVT), N);
+      return true;
+    }
+  }
+
+  // Handle the pattern (shl (zext x to ty), C) and bits(x) + C < bits(ty).
+  if (N.getOpcode() != ISD::SHL)
     return false;
 
   SDValue N0 = N.getOperand(0);
@@ -11656,7 +11680,6 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
   if (!ISD::isConstantSplatVector(N1.getNode(), ShAmt))
     return false;;
 
-  SDLoc DL(N);
   SDValue Src = N0.getOperand(0);
   EVT SrcVT = Src.getValueType();
   unsigned SrcElen = SrcVT.getScalarSizeInBits();

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 3711f014e06478b..ac5c11ca88df51a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -13027,8 +13027,8 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ; RV32-NEXT:    lui a1, %hi(.LCPI107_0)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI107_0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vle32.v v10, (a1)
-; RV32-NEXT:    vluxei32.v v8, (a0), v10
+; RV32-NEXT:    vle8.v v9, (a1)
+; RV32-NEXT:    vluxei8.v v8, (a0), v9
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: mgather_strided_2xSEW:
@@ -13036,8 +13036,8 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ; RV64V-NEXT:    lui a1, %hi(.LCPI107_0)
 ; RV64V-NEXT:    addi a1, a1, %lo(.LCPI107_0)
 ; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64V-NEXT:    vle64.v v12, (a1)
-; RV64V-NEXT:    vluxei64.v v8, (a0), v12
+; RV64V-NEXT:    vle8.v v9, (a1)
+; RV64V-NEXT:    vluxei8.v v8, (a0), v9
 ; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
@@ -13144,8 +13144,8 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ; RV32-NEXT:    lui a1, %hi(.LCPI108_0)
 ; RV32-NEXT:    addi a1, a1, %lo(.LCPI108_0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vle32.v v10, (a1)
-; RV32-NEXT:    vluxei32.v v8, (a0), v10
+; RV32-NEXT:    vle8.v v9, (a1)
+; RV32-NEXT:    vluxei8.v v8, (a0), v9
 ; RV32-NEXT:    ret
 ;
 ; RV64V-LABEL: mgather_gather_2xSEW:
@@ -13153,8 +13153,8 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ; RV64V-NEXT:    lui a1, %hi(.LCPI108_0)
 ; RV64V-NEXT:    addi a1, a1, %lo(.LCPI108_0)
 ; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV64V-NEXT:    vle64.v v12, (a1)
-; RV64V-NEXT:    vluxei64.v v8, (a0), v12
+; RV64V-NEXT:    vle8.v v9, (a1)
+; RV64V-NEXT:    vluxei8.v v8, (a0), v9
 ; RV64V-NEXT:    ret
 ;
 ; RV64ZVE32F-LABEL: mgather_gather_2xSEW:


        

