[llvm] [RISCV] Extend InstSeq (used in constant mat) to support multiple live regs (PR #67159)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 22 08:52:18 PDT 2023


https://github.com/preames created https://github.com/llvm/llvm-project/pull/67159

Posted mostly for discussion.  This turned out to be more invasive and fiddly than I'd expected when I first started.  My guess is that we probably won't land this, but I wanted to show what the option looked like.  

This moves the logic for constant materialization with an additional vreg into common code.  It does so by extending the InstSeq construct to support multiple live values in the sequence.  This converts an InstSeq from being a linear chain of instructions to being a DAG of instructions.

Note that the emergency-slot.mir test change is in principle a bug fix; we're setting kill flags on a register that is used later. It's probably an uninteresting bug in practice since it only happens on X0, which is a constant physreg and thus the kill flag is fairly meaningless.

>From 2b60707b447c390e5d515cb64060396b9197a238 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Thu, 21 Sep 2023 12:46:33 -0700
Subject: [PATCH] [RISCV] Extend InstSeq (used in constant mat) to support
 multiple live regs

This moves the logic for constant materialization with an additional vreg into common code.  It does so by extending the InstSeq construct to support multiple live values in the sequence.  This converts an InstSeq from being a linear chain of instructions to being a DAG of instructions.

Note that the emergency-slot.mir test change is in principle a bug fix; we're setting kill flags on a register that is used later. It's probably an uninteresting bug in practice since it only happens on X0, which is a constant physreg and thus the kill flag is fairly meaningless.
---
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 33 +++++----
 .../RISCV/GISel/RISCVInstructionSelector.cpp  | 23 +++----
 .../Target/RISCV/MCTargetDesc/RISCVMatInt.cpp | 68 +++++++++++++------
 .../Target/RISCV/MCTargetDesc/RISCVMatInt.h   | 25 +++++--
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp   | 54 ++++-----------
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 20 +-----
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp      | 28 ++++----
 .../test/CodeGen/RISCV/rvv/emergency-slot.mir |  6 +-
 8 files changed, 124 insertions(+), 133 deletions(-)

diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 7d8d82e381313bf..5438173da7f2957 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2978,32 +2978,31 @@ void RISCVAsmParser::emitLoadImm(MCRegister DestReg, int64_t Value,
   RISCVMatInt::InstSeq Seq =
       RISCVMatInt::generateInstSeq(Value, getSTI().getFeatureBits());
 
-  MCRegister SrcReg = RISCV::X0;
+  SmallVector<MCRegister> Results;
+  Results.push_back(RISCV::X0);
   for (const RISCVMatInt::Inst &Inst : Seq) {
+    const unsigned Reg0Off = (unsigned)Inst.getReg0() + 1;
+    const unsigned Reg1Off = (unsigned)Inst.getReg1() + 1;
+    const MCRegister SrcReg0 = Results[Results.size()-Reg0Off];
+    const MCRegister SrcReg1 = Results[Results.size()-Reg1Off];
+    // Note: We only support sequences with a single live register here.
+    assert(SrcReg0 == RISCV::X0 || SrcReg0 == DestReg);
+    assert(SrcReg1 == RISCV::X0 || SrcReg1 == DestReg);
     switch (Inst.getOpndKind()) {
     case RISCVMatInt::Imm:
-      emitToStreamer(Out,
-                     MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addImm(Inst.getImm()));
-      break;
-    case RISCVMatInt::RegX0:
-      emitToStreamer(
-          Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addReg(SrcReg).addReg(
-                   RISCV::X0));
+      emitToStreamer(Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg)
+                            .addImm(Inst.getImm()));
       break;
     case RISCVMatInt::RegReg:
-      emitToStreamer(
-          Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addReg(SrcReg).addReg(
-                   SrcReg));
+      emitToStreamer(Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg)
+                            .addReg(SrcReg0).addReg(SrcReg1));
       break;
     case RISCVMatInt::RegImm:
-      emitToStreamer(
-          Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg).addReg(SrcReg).addImm(
-                   Inst.getImm()));
+      emitToStreamer(Out, MCInstBuilder(Inst.getOpcode()).addReg(DestReg)
+                            .addReg(SrcReg0).addImm(Inst.getImm()));
       break;
     }
-
-    // Only the first instruction has X0 as its source.
-    SrcReg = DestReg;
+    Results.push_back(DestReg);
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 4c246d7de1da952..7b7d6c4bbae2e91 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -281,15 +281,20 @@ bool RISCVInstructionSelector::selectConstant(MachineInstr &MI,
   RISCVMatInt::InstSeq Seq =
       RISCVMatInt::generateInstSeq(Imm, Subtarget->getFeatureBits());
   unsigned NumInsts = Seq.size();
-  Register SrcReg = RISCV::X0;
 
+  SmallVector<Register> Results;
+  Results.push_back(RISCV::X0);
   for (unsigned i = 0; i < NumInsts; i++) {
     Register DstReg = i < NumInsts - 1
                           ? MRI.createVirtualRegister(&RISCV::GPRRegClass)
                           : FinalReg;
     const RISCVMatInt::Inst &I = Seq[i];
-    MachineInstr *Result;
+    const unsigned Reg0Off = (unsigned)I.getReg0() + 1;
+    const unsigned Reg1Off = (unsigned)I.getReg1() + 1;
+    const Register SrcReg0 = Results[Results.size()-Reg0Off];
+    const Register SrcReg1 = Results[Results.size()-Reg1Off];
 
+    MachineInstr *Result;
     switch (I.getOpndKind()) {
     case RISCVMatInt::Imm:
       // clang-format off
@@ -298,22 +303,16 @@ bool RISCVInstructionSelector::selectConstant(MachineInstr &MI,
                    .addImm(I.getImm());
       // clang-format on
       break;
-    case RISCVMatInt::RegX0:
-      Result = MIB.buildInstr(I.getOpcode())
-                   .addDef(DstReg)
-                   .addReg(SrcReg)
-                   .addReg(RISCV::X0);
-      break;
     case RISCVMatInt::RegReg:
       Result = MIB.buildInstr(I.getOpcode())
                    .addDef(DstReg)
-                   .addReg(SrcReg)
-                   .addReg(SrcReg);
+                   .addReg(SrcReg0)
+                   .addReg(SrcReg1);
       break;
     case RISCVMatInt::RegImm:
       Result = MIB.buildInstr(I.getOpcode())
                    .addDef(DstReg)
-                   .addReg(SrcReg)
+                   .addReg(SrcReg0)
                    .addImm(I.getImm());
       break;
     }
@@ -321,7 +320,7 @@ bool RISCVInstructionSelector::selectConstant(MachineInstr &MI,
     if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
       return false;
 
-    SrcReg = DstReg;
+    Results.push_back(DstReg);
   }
 
   return true;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index f659779e9772055..eb859ff78b1b7e2 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -53,7 +53,7 @@ static void generateInstSeqImpl(int64_t Val,
   // Use BSETI for a single bit that can't be expressed by a single LUI or ADDI.
   if (ActiveFeatures[RISCV::FeatureStdExtZbs] && isPowerOf2_64(Val) &&
       (!isInt<32>(Val) || Val == 0x800)) {
-    Res.emplace_back(RISCV::BSETI, Log2_64(Val));
+    Res.emplace_back(RISCV::BSETI, Log2_64(Val), 0, 0);
     return;
   }
 
@@ -69,11 +69,11 @@ static void generateInstSeqImpl(int64_t Val,
     int64_t Lo12 = SignExtend64<12>(Val);
 
     if (Hi20)
-      Res.emplace_back(RISCV::LUI, Hi20);
+      Res.emplace_back(RISCV::LUI, Hi20, 0, 0);
 
     if (Lo12 || Hi20 == 0) {
       unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
-      Res.emplace_back(AddiOpc, Lo12);
+      Res.emplace_back(AddiOpc, Lo12, 0, 0);
     }
     return;
   }
@@ -146,11 +146,11 @@ static void generateInstSeqImpl(int64_t Val,
   // Skip shift if we were able to use LUI directly.
   if (ShiftAmount) {
     unsigned Opc = Unsigned ? RISCV::SLLI_UW : RISCV::SLLI;
-    Res.emplace_back(Opc, ShiftAmount);
+    Res.emplace_back(Opc, ShiftAmount, 0, 0);
   }
 
   if (Lo12)
-    Res.emplace_back(RISCV::ADDI, Lo12);
+    Res.emplace_back(RISCV::ADDI, Lo12, 0, 0);
 }
 
 static unsigned extractRotateInfo(int64_t Val) {
@@ -172,7 +172,8 @@ static unsigned extractRotateInfo(int64_t Val) {
 }
 
 namespace llvm::RISCVMatInt {
-InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
+InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures,
+                        bool AllowMultipleVRegs) {
   RISCVMatInt::InstSeq Res;
   generateInstSeqImpl(Val, ActiveFeatures, Res);
 
@@ -193,7 +194,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
 
     // Keep the new sequence if it is an improvement.
     if ((TmpSeq.size() + 1) < Res.size() || IsShiftedCompressible) {
-      TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros);
+      TmpSeq.emplace_back(RISCV::SLLI, TrailingZeros, 0, 0);
       Res = TmpSeq;
     }
   }
@@ -222,7 +223,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
 
     // Keep the new sequence if it is an improvement.
     if ((TmpSeq.size() + 1) < Res.size()) {
-      TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
+      TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros, 0, 0);
       Res = TmpSeq;
     }
 
@@ -233,7 +234,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
 
     // Keep the new sequence if it is an improvement.
     if ((TmpSeq.size() + 1) < Res.size()) {
-      TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros);
+      TmpSeq.emplace_back(RISCV::SRLI, LeadingZeros, 0, 0);
       Res = TmpSeq;
     }
 
@@ -247,7 +248,8 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
 
       // Keep the new sequence if it is an improvement.
       if ((TmpSeq.size() + 1) < Res.size()) {
-        TmpSeq.emplace_back(RISCV::ADD_UW, 0);
+        unsigned X0Off = TmpSeq.size();
+        TmpSeq.emplace_back(RISCV::ADD_UW, 0, 0, X0Off);
         Res = TmpSeq;
       }
     }
@@ -263,7 +265,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
       RISCVMatInt::InstSeq TmpSeq;
       generateInstSeqImpl(LoVal, ActiveFeatures, TmpSeq);
       if ((TmpSeq.size() + 1) < Res.size()) {
-        TmpSeq.emplace_back(RISCV::PACK, 0);
+        TmpSeq.emplace_back(RISCV::PACK, 0, 0, 0);
         Res = TmpSeq;
       }
     }
@@ -290,7 +292,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
       RISCVMatInt::InstSeq TmpSeq;
       generateInstSeqImpl(NewVal, ActiveFeatures, TmpSeq);
       if ((TmpSeq.size() + 1) < Res.size()) {
-        TmpSeq.emplace_back(Opc, 31);
+        TmpSeq.emplace_back(Opc, 31, 0, 0);
         Res = TmpSeq;
       }
     }
@@ -314,7 +316,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     if (Opc > 0) {
       while (Hi != 0) {
         unsigned Bit = llvm::countr_zero(Hi);
-        TmpSeq.emplace_back(Opc, Bit + 32);
+        TmpSeq.emplace_back(Opc, Bit + 32, 0, 0);
         Hi &= (Hi - 1); // Clear lowest set bit.
       }
       if (TmpSeq.size() < Res.size())
@@ -342,7 +344,7 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
     if (Div > 0) {
       generateInstSeqImpl(Val / Div, ActiveFeatures, TmpSeq);
       if ((TmpSeq.size() + 1) < Res.size()) {
-        TmpSeq.emplace_back(Opc, 0);
+        TmpSeq.emplace_back(Opc, 0, 0, 0);
         Res = TmpSeq;
       }
     } else {
@@ -369,8 +371,8 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
         assert(TmpSeq.empty() && "Expected empty TmpSeq");
         generateInstSeqImpl(Hi52 / Div, ActiveFeatures, TmpSeq);
         if ((TmpSeq.size() + 2) < Res.size()) {
-          TmpSeq.emplace_back(Opc, 0);
-          TmpSeq.emplace_back(RISCV::ADDI, Lo12);
+          TmpSeq.emplace_back(Opc, 0, 0, 0);
+          TmpSeq.emplace_back(RISCV::ADDI, Lo12, 0, 0);
           Res = TmpSeq;
         }
       }
@@ -385,19 +387,42 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures) {
       RISCVMatInt::InstSeq TmpSeq;
       uint64_t NegImm12 = llvm::rotl<uint64_t>(Val, Rotate);
       assert(isInt<12>(NegImm12));
-      TmpSeq.emplace_back(RISCV::ADDI, NegImm12);
+      TmpSeq.emplace_back(RISCV::ADDI, NegImm12, 0, 0);
       TmpSeq.emplace_back(ActiveFeatures[RISCV::FeatureStdExtZbb]
                               ? RISCV::RORI
                               : RISCV::TH_SRRI,
-                          Rotate);
+                          Rotate, 0, 0);
       Res = TmpSeq;
     }
   }
+
+  // See if we can create this constant as (ADD (SLLI X, 32), X) where X is at
+  // worst an LUI+ADDIW. This will require an extra register, but avoids a
+  // constant pool.
+  // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
+  // low and high 32 bits are the same and bit 31 and 63 are set.
+  if (Res.size() > 3 && AllowMultipleVRegs) {
+    int64_t LoVal = SignExtend64<32>(Val);
+    int64_t HiVal = SignExtend64<32>(((uint64_t)Val - (uint64_t)LoVal) >> 32);
+    if (LoVal == HiVal ||
+        (ActiveFeatures[RISCV::FeatureStdExtZba] && Lo_32(Val) == Hi_32(Val))) {
+      RISCVMatInt::InstSeq TmpSeq =
+        RISCVMatInt::generateInstSeq(LoVal, ActiveFeatures);
+      if ((TmpSeq.size() + 2) < Res.size()) {
+        TmpSeq.emplace_back(RISCV::SLLI, 32, 0, 0);
+        unsigned AddOpc = (LoVal == HiVal) ? RISCV::ADD : RISCV::ADD_UW;
+        TmpSeq.emplace_back(AddOpc, 0, 1, 0);
+        Res = TmpSeq;
+      }
+    }
+  }
+
   return Res;
 }
 
 int getIntMatCost(const APInt &Val, unsigned Size,
-                  const FeatureBitset &ActiveFeatures, bool CompressionCost) {
+                  const FeatureBitset &ActiveFeatures, bool CompressionCost,
+                  bool AllowMultipleVRegs) {
   bool IsRV64 = ActiveFeatures[RISCV::Feature64Bit];
   bool HasRVC = CompressionCost && (ActiveFeatures[RISCV::FeatureStdExtC] ||
                                     ActiveFeatures[RISCV::FeatureStdExtZca]);
@@ -408,7 +433,8 @@ int getIntMatCost(const APInt &Val, unsigned Size,
   int Cost = 0;
   for (unsigned ShiftVal = 0; ShiftVal < Size; ShiftVal += PlatRegSize) {
     APInt Chunk = Val.ashr(ShiftVal).sextOrTrunc(PlatRegSize);
-    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures);
+    InstSeq MatSeq = generateInstSeq(Chunk.getSExtValue(), ActiveFeatures,
+                                     AllowMultipleVRegs);
     Cost += getInstSeqCost(MatSeq, HasRVC);
   }
   return std::max(1, Cost);
@@ -420,8 +446,8 @@ OpndKind Inst::getOpndKind() const {
     llvm_unreachable("Unexpected opcode!");
   case RISCV::LUI:
     return RISCVMatInt::Imm;
+  case RISCV::ADD:
   case RISCV::ADD_UW:
-    return RISCVMatInt::RegX0;
   case RISCV::SH1ADD:
   case RISCV::SH2ADD:
   case RISCV::SH3ADD:
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
index ae7b8d402184d9c..b90da69d4a938d7 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.h
@@ -21,24 +21,35 @@ namespace RISCVMatInt {
 enum OpndKind {
   RegImm, // ADDI/ADDIW/SLLI/SRLI/BSETI/BCLRI
   Imm,    // LUI
-  RegReg, // SH1ADD/SH2ADD/SH3ADD
-  RegX0,  // ADD_UW
+  RegReg // SH1ADD/SH2ADD/SH3ADD
 };
 
 class Inst {
   unsigned Opc;
-  int32_t Imm; // The largest value we need to store is 20 bits.
+  // Reg0 and Reg1 are the offset in the containing sequence which
+  // define the vreg used as the respective operand (if any).  Note
+  // that a sequence implicitly starts with X0, so an offset one
+  // past the start of the sequence is valid, and means X0.
+  uint8_t Reg0 : 4;
+  uint8_t Reg1 : 4;
+  int32_t Imm : 24; // The largest value we need to store is 20 bits.
 
 public:
-  Inst(unsigned Opc, int64_t I) : Opc(Opc), Imm(I) {
+  Inst(unsigned Opc, int64_t I, uint8_t R0, uint8_t R1)
+    : Opc(Opc), Reg0(R0), Reg1(R1), Imm(I) {
     assert(I == Imm && "truncated");
+    assert(Reg0 == R0 && Reg1 == R1 && "truncated");
   }
 
   unsigned getOpcode() const { return Opc; }
   int64_t getImm() const { return Imm; }
+  uint8_t getReg0() const { return Reg0; }
+  uint8_t getReg1() const { return Reg1; }
 
   OpndKind getOpndKind() const;
 };
+static_assert(sizeof(Inst) == 8);
+
 using InstSeq = SmallVector<Inst, 8>;
 
 // Helper to generate an instruction sequence that will materialise the given
@@ -46,7 +57,8 @@ using InstSeq = SmallVector<Inst, 8>;
 // simple struct is produced rather than directly emitting the instructions in
 // order to allow this helper to be used from both the MC layer and during
 // instruction selection.
-InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures);
+InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures,
+                        bool AllowMultipleVRegs = false);
 
 // Helper to estimate the number of instructions required to materialise the
 // given immediate value into a register. This estimate does not account for
@@ -60,7 +72,8 @@ InstSeq generateInstSeq(int64_t Val, const FeatureBitset &ActiveFeatures);
 // which is more compressible.
 int getIntMatCost(const APInt &Val, unsigned Size,
                   const FeatureBitset &ActiveFeatures,
-                  bool CompressionCost = false);
+                  bool CompressionCost = false,
+                  bool AllowMultipleVRegs = false);
 } // namespace RISCVMatInt
 } // namespace llvm
 #endif
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index e6d0346c45e8d54..5b14dc50a7d726c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -163,65 +163,35 @@ void RISCVDAGToDAGISel::PostprocessISelDAG() {
 
 static SDValue selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                             RISCVMatInt::InstSeq &Seq) {
-  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
+  SmallVector<SDValue> Results;
+  Results.push_back(CurDAG->getRegister(RISCV::X0, VT));
   for (const RISCVMatInt::Inst &Inst : Seq) {
-    SDValue SDImm = CurDAG->getTargetConstant(Inst.getImm(), DL, VT);
+    const unsigned Reg0Off = (unsigned)Inst.getReg0() + 1;
+    const unsigned Reg1Off = (unsigned)Inst.getReg1() + 1;
+    const SDValue SrcReg0 = Results[Results.size()-Reg0Off];
+    const SDValue SrcReg1 = Results[Results.size()-Reg1Off];
+    const SDValue SDImm = CurDAG->getTargetConstant(Inst.getImm(), DL, VT);
     SDNode *Result = nullptr;
     switch (Inst.getOpndKind()) {
     case RISCVMatInt::Imm:
       Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SDImm);
       break;
-    case RISCVMatInt::RegX0:
-      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg,
-                                      CurDAG->getRegister(RISCV::X0, VT));
-      break;
     case RISCVMatInt::RegReg:
-      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SrcReg);
+      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg0, SrcReg1);
       break;
     case RISCVMatInt::RegImm:
-      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg, SDImm);
+      Result = CurDAG->getMachineNode(Inst.getOpcode(), DL, VT, SrcReg0, SDImm);
       break;
     }
-
-    // Only the first instruction has X0 as its source.
-    SrcReg = SDValue(Result, 0);
+    Results.push_back(SDValue(Result, 0));
   }
-
-  return SrcReg;
+  return Results.back();
 }
 
 static SDValue selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                          int64_t Imm, const RISCVSubtarget &Subtarget) {
   RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
-
-  // See if we can create this constant as (ADD (SLLI X, 32), X) where X is at
-  // worst an LUI+ADDIW. This will require an extra register, but avoids a
-  // constant pool.
-  // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
-  // low and high 32 bits are the same and bit 31 and 63 are set.
-  if (Seq.size() > 3) {
-    int64_t LoVal = SignExtend64<32>(Imm);
-    int64_t HiVal = SignExtend64<32>(((uint64_t)Imm - (uint64_t)LoVal) >> 32);
-    if (LoVal == HiVal ||
-        (Subtarget.hasStdExtZba() && Lo_32(Imm) == Hi_32(Imm))) {
-      RISCVMatInt::InstSeq SeqLo =
-          RISCVMatInt::generateInstSeq(LoVal, Subtarget.getFeatureBits());
-      if ((SeqLo.size() + 2) < Seq.size()) {
-        SDValue Lo = selectImmSeq(CurDAG, DL, VT, SeqLo);
-
-        SDValue SLLI = SDValue(
-            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, Lo,
-                                   CurDAG->getTargetConstant(32, DL, VT)),
-            0);
-        // Prefer ADD when possible.
-        unsigned AddOpc = (LoVal == HiVal) ? RISCV::ADD : RISCV::ADD_UW;
-        return SDValue(CurDAG->getMachineNode(AddOpc, DL, VT, Lo, SLLI), 0);
-      }
-    }
-  }
-
-  // Otherwise, use the original sequence.
+    RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits(), true);
   return selectImmSeq(CurDAG, DL, VT, Seq);
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f1cea6c6756f4fc..bdd182a9e680c46 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4950,27 +4950,11 @@ static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
     return Op;
 
   RISCVMatInt::InstSeq Seq =
-      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
+    RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits(),
+                                 !DAG.shouldOptForSize());
   if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
     return Op;
 
-  // Special case. See if we can build the constant as (ADD (SLLI X, 32), X) do
-  // that if it will avoid a constant pool.
-  // It will require an extra temporary register though.
-  // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
-  // low and high 32 bits are the same and bit 31 and 63 are set.
-  if (!DAG.shouldOptForSize()) {
-    int64_t LoVal = SignExtend64<32>(Imm);
-    int64_t HiVal = SignExtend64<32>(((uint64_t)Imm - (uint64_t)LoVal) >> 32);
-    if (LoVal == HiVal ||
-        (Subtarget.hasStdExtZba() && Lo_32(Imm) == Hi_32(Imm))) {
-      RISCVMatInt::InstSeq SeqLo =
-          RISCVMatInt::generateInstSeq(LoVal, Subtarget.getFeatureBits());
-      if ((SeqLo.size() + 2) <= Subtarget.getMaxBuildIntsCost())
-        return Op;
-    }
-  }
-
   // Expand to a constant pool using the default expansion code.
   return SDValue();
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 816ceaf95607e71..464d1b79c387c7f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -738,8 +738,6 @@ void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MBBI,
                             const DebugLoc &DL, Register DstReg, uint64_t Val,
                             MachineInstr::MIFlag Flag) const {
-  Register SrcReg = RISCV::X0;
-
   if (!STI.is64Bit() && !isInt<32>(Val))
     report_fatal_error("Should only materialize 32-bit constants for RV32");
 
@@ -747,35 +745,37 @@ void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
       RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
   assert(!Seq.empty());
 
+  SmallVector<Register> Results;
+  Results.push_back(RISCV::X0);
   for (const RISCVMatInt::Inst &Inst : Seq) {
+    const unsigned Reg0Off = (unsigned)Inst.getReg0() + 1;
+    const unsigned Reg1Off = (unsigned)Inst.getReg1() + 1;
+    const Register SrcReg0 = Results[Results.size()-Reg0Off];
+    const Register SrcReg1 = Results[Results.size()-Reg1Off];
+    // Note: We only support sequences with a single live register here.
+    assert(SrcReg0 == RISCV::X0 || SrcReg0 == DstReg);
+    assert(SrcReg1 == RISCV::X0 || SrcReg1 == DstReg);
+
     switch (Inst.getOpndKind()) {
     case RISCVMatInt::Imm:
       BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
           .addImm(Inst.getImm())
           .setMIFlag(Flag);
       break;
-    case RISCVMatInt::RegX0:
-      BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
-          .addReg(SrcReg, RegState::Kill)
-          .addReg(RISCV::X0)
-          .setMIFlag(Flag);
-      break;
     case RISCVMatInt::RegReg:
       BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
-          .addReg(SrcReg, RegState::Kill)
-          .addReg(SrcReg, RegState::Kill)
+          .addReg(SrcReg0, getKillRegState(SrcReg0 != RISCV::X0))
+          .addReg(SrcReg1, getKillRegState(SrcReg1 != RISCV::X0))
           .setMIFlag(Flag);
       break;
     case RISCVMatInt::RegImm:
       BuildMI(MBB, MBBI, DL, get(Inst.getOpcode()), DstReg)
-          .addReg(SrcReg, RegState::Kill)
+          .addReg(SrcReg0, getKillRegState(SrcReg0 != RISCV::X0))
           .addImm(Inst.getImm())
           .setMIFlag(Flag);
       break;
     }
-
-    // Only the first instruction has X0 as its source.
-    SrcReg = DstReg;
+    Results.push_back(DstReg);
   }
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 5294214ec6630aa..8fb4be6b49ed648 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -83,14 +83,14 @@ body:             |
   ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa $x8, 0
   ; CHECK-NEXT:   $x2 = frame-setup ADDI $x2, -272
   ; CHECK-NEXT:   $x10 = frame-setup PseudoReadVLENB
-  ; CHECK-NEXT:   $x11 = frame-setup ADDI killed $x0, 52
+  ; CHECK-NEXT:   $x11 = frame-setup ADDI $x0, 52
   ; CHECK-NEXT:   $x10 = frame-setup MUL killed $x10, killed $x11
   ; CHECK-NEXT:   $x2 = frame-setup SUB $x2, killed $x10
   ; CHECK-NEXT:   $x2 = frame-setup ANDI $x2, -128
   ; CHECK-NEXT:   dead renamable $x15 = PseudoVSETIVLI 1, 72 /* e16, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
   ; CHECK-NEXT:   renamable $v25 = PseudoVMV_V_X_M1 undef $v25, killed renamable $x12, $noreg, 4 /* e16 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
   ; CHECK-NEXT:   $x10 = PseudoReadVLENB
-  ; CHECK-NEXT:   $x11 = ADDI killed $x0, 50
+  ; CHECK-NEXT:   $x11 = ADDI $x0, 50
   ; CHECK-NEXT:   $x10 = MUL killed $x10, killed $x11
   ; CHECK-NEXT:   $x10 = ADD $x2, killed $x10
   ; CHECK-NEXT:   $x10 = ADDI killed $x10, 2047
@@ -130,7 +130,7 @@ body:             |
   ; CHECK-NEXT:   SD killed $x10, $x2, 8 :: (store (s64) into %stack.15)
   ; CHECK-NEXT:   $x10 = PseudoReadVLENB
   ; CHECK-NEXT:   SD killed $x12, $x2, 0 :: (store (s64) into %stack.16)
-  ; CHECK-NEXT:   $x12 = ADDI killed $x0, 50
+  ; CHECK-NEXT:   $x12 = ADDI $x0, 50
   ; CHECK-NEXT:   $x10 = MUL killed $x10, killed $x12
   ; CHECK-NEXT:   $x12 = LD $x2, 0 :: (load (s64) from %stack.16)
   ; CHECK-NEXT:   $x10 = ADD $x2, killed $x10



More information about the llvm-commits mailing list