[llvm] 354e045 - [RISCV] Make custom isel for (add X, imm) used by load/stores more selective.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 30 14:21:24 PDT 2022


Author: Craig Topper
Date: 2022-06-30T14:20:11-07:00
New Revision: 354e04554a35c1007c7bbf0c7477bdea08642cfc

URL: https://github.com/llvm/llvm-project/commit/354e04554a35c1007c7bbf0c7477bdea08642cfc
DIFF: https://github.com/llvm/llvm-project/commit/354e04554a35c1007c7bbf0c7477bdea08642cfc.diff

LOG: [RISCV] Make custom isel for (add X, imm) used by load/stores more selective.

Only handle immediates that would produce an ADDI or ADDIW of Lo12
as the final instruction in their materialization.

As the test changes show, this excludes immediates whose materialization
ends in lui+addiw where the addiw is not equivalent to an addi.
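
For illustration only, here is a minimal standalone C++ sketch of the ADDIW
half of the new gate (the lo12 and hiFitsAfterPeel helpers are hypothetical,
not LLVM API; the actual patch inspects the sequence returned by
RISCVMatInt::generateInstSeq and accepts a trailing ADDI unconditionally):

#include <cstdint>
#include <cstdio>

// Sign-extend the low 12 bits of Imm, mirroring how Lo12 is computed.
static int64_t lo12(int64_t Imm) {
  int64_t Lo = Imm & 0xFFF;
  return Lo >= 0x800 ? Lo - 0x1000 : Lo;
}

// Models only the ADDIW case of the gate: after peeling off Lo12, the
// remaining Hi part must still fit in a signed 32-bit value so that a
// trailing ADDIW behaves the same as an ADDI.
static bool hiFitsAfterPeel(int64_t Imm, bool IsRV64) {
  int64_t Hi = Imm - lo12(Imm);
  if (!IsRV64)
    Hi = (int64_t)(int32_t)Hi; // restore sign bits on RV32
  return Hi >= INT32_MIN && Hi <= INT32_MAX;
}

int main() {
  // 268435200 * 8 = 0x7ffff800, the byte offset in lw_really_far_local below.
  int64_t Imm = 268435200LL * 8;
  printf("Lo12 = %lld, Hi fits in int32: %d\n", (long long)lo12(Imm),
         (int)hiFitsAfterPeel(Imm, /*IsRV64=*/true));
  return 0;
}

For that offset, Lo12 is -2048 and the remaining Hi part is 0x80000000,
which does not fit in a signed 32-bit value, so the fold is rejected; the
constant is instead materialized in full with lui+addiw and the memory
access uses offset 0, as the updated checks in mem64.ll show.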

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/test/CodeGen/RISCV/mem64.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 92f49d0b8ed44..6ce8f61e9e090 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -187,11 +187,8 @@ static bool hasMemOffset(SDNode *N, unsigned &BaseOpIdx,
   return false;
 }
 
-static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
-                         int64_t Imm, const RISCVSubtarget &Subtarget) {
-  RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
-
+static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
+                            RISCVMatInt::InstSeq &Seq) {
   SDNode *Result = nullptr;
   SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
   for (RISCVMatInt::Inst &Inst : Seq) {
@@ -219,6 +216,14 @@ static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
   return Result;
 }
 
+static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
+                         int64_t Imm, const RISCVSubtarget &Subtarget) {
+  RISCVMatInt::InstSeq Seq =
+      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+
+  return selectImmSeq(CurDAG, DL, VT, Seq);
+}
+
 static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                            unsigned NF, RISCVII::VLMUL LMUL) {
   static const unsigned M1TupleRegClassIDs[] = {
@@ -667,6 +672,24 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (isInt<12>(Offset / 2) && isInt<12>(Offset - Offset / 2))
       break;
 
+    RISCVMatInt::InstSeq Seq =
+        RISCVMatInt::generateInstSeq(Offset, Subtarget->getFeatureBits());
+
+    Offset -= Lo12;
+    // Restore sign bits for RV32.
+    if (!Subtarget->is64Bit())
+      Offset = SignExtend64<32>(Offset);
+
+    // We can fold if the last operation is an ADDI or it's an ADDIW that could
+    // be treated as an ADDI.
+    if (Seq.back().Opc != RISCV::ADDI &&
+        !(Seq.back().Opc == RISCV::ADDIW && isInt<32>(Offset)))
+      break;
+    assert(Seq.back().Imm == Lo12 && "Expected immediate to match Lo12");
+    // Drop the last operation.
+    Seq.pop_back();
+    assert(!Seq.empty() && "Expected more instructions in sequence");
+
     bool AllPointerUses = true;
     for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
       SDNode *User = *UI;
@@ -695,13 +718,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (!AllPointerUses)
       break;
 
-    Offset -= Lo12;
-    // Restore sign bits for RV32.
-    if (!Subtarget->is64Bit())
-      Offset = SignExtend64<32>(Offset);
-
     // Emit (ADDI (ADD X, Hi), Lo)
-    SDNode *Imm = selectImm(CurDAG, DL, VT, Offset, *Subtarget);
+    SDNode *Imm = selectImmSeq(CurDAG, DL, VT, Seq);
     SDNode *ADD = CurDAG->getMachineNode(RISCV::ADD, DL, VT,
                                          Node->getOperand(0), SDValue(Imm, 0));
     SDNode *ADDI =

diff  --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll
index 018a9565f562e..e1339e8ddd385 100644
--- a/llvm/test/CodeGen/RISCV/mem64.ll
+++ b/llvm/test/CodeGen/RISCV/mem64.ll
@@ -307,10 +307,10 @@ define i64 @lw_sw_far_local(i64* %a, i64 %b)  {
 define i64 @lw_really_far_local(i64* %a)  {
 ; RV64I-LABEL: lw_really_far_local:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a1, 1
-; RV64I-NEXT:    slli a1, a1, 31
+; RV64I-NEXT:    lui a1, 524288
+; RV64I-NEXT:    addiw a1, a1, -2048
 ; RV64I-NEXT:    add a0, a0, a1
-; RV64I-NEXT:    ld a0, -2048(a0)
+; RV64I-NEXT:    ld a0, 0(a0)
 ; RV64I-NEXT:    ret
   %1 = getelementptr inbounds i64, i64* %a, i64 268435200
   %2 = load volatile i64, i64* %1
@@ -322,10 +322,10 @@ define i64 @lw_really_far_local(i64* %a)  {
 define void @st_really_far_local(i64* %a, i64 %b)  {
 ; RV64I-LABEL: st_really_far_local:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 1
-; RV64I-NEXT:    slli a2, a2, 31
+; RV64I-NEXT:    lui a2, 524288
+; RV64I-NEXT:    addiw a2, a2, -2048
 ; RV64I-NEXT:    add a0, a0, a2
-; RV64I-NEXT:    sd a1, -2048(a0)
+; RV64I-NEXT:    sd a1, 0(a0)
 ; RV64I-NEXT:    ret
   %1 = getelementptr inbounds i64, i64* %a, i64 268435200
   store i64 %b, i64* %1
@@ -337,11 +337,11 @@ define void @st_really_far_local(i64* %a, i64 %b)  {
 define i64 @lw_sw_really_far_local(i64* %a, i64 %b)  {
 ; RV64I-LABEL: lw_sw_really_far_local:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    li a2, 1
-; RV64I-NEXT:    slli a2, a2, 31
+; RV64I-NEXT:    lui a2, 524288
+; RV64I-NEXT:    addiw a2, a2, -2048
 ; RV64I-NEXT:    add a2, a0, a2
-; RV64I-NEXT:    ld a0, -2048(a2)
-; RV64I-NEXT:    sd a1, -2048(a2)
+; RV64I-NEXT:    ld a0, 0(a2)
+; RV64I-NEXT:    sd a1, 0(a2)
 ; RV64I-NEXT:    ret
   %1 = getelementptr inbounds i64, i64* %a, i64 268435200
   %2 = load volatile i64, i64* %1


        


More information about the llvm-commits mailing list