[llvm] a664e75 - [RISCV] Refactor DAG-to-DAG Selection: Port lowering code for `qc.insb/qc.insbi` to RISCVISelLowering.cpp (#157618)

via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 23 20:03:03 PDT 2025


Author: quic_hchandel
Date: 2025-09-24T08:32:59+05:30
New Revision: a664e7599390a5bf79cf8e45551774ec83444205

URL: https://github.com/llvm/llvm-project/commit/a664e7599390a5bf79cf8e45551774ec83444205
DIFF: https://github.com/llvm/llvm-project/commit/a664e7599390a5bf79cf8e45551774ec83444205.diff

LOG: [RISCV] Refactor DAG-to-DAG Selection: Port lowering code for `qc.insb/qc.insbi` to RISCVISelLowering.cpp (#157618)

This is a follow-up to #154135 and makes similar changes for
`qc.insb/qc.insbi`: the bitfield-insert patterns are now formed as
RISCVISD::QC_INSB nodes in performORCombine in RISCVISelLowering.cpp
instead of being matched directly during instruction selection in
RISCVISelDAGToDAG.cpp.
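
For illustration, a minimal C sketch of the first pattern being moved
(the function name is hypothetical and the expected instruction is
inferred from the transform comment in the patch, not taken from a
checked-in test):

    /* ORing with 0xF000, a shifted mask (width 4, shift 12) that does
       not fit ORI's signed 12-bit immediate, is the case the new
       combineOrToBitfieldInsert is meant to catch; on RV32 with Xqcibm
       enabled it is expected to select something like
       qc.insbi a0, -1, 4, 12. */
    unsigned int set_bits_12_to_15(unsigned int x) {
        return x | 0xF000u;
    }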

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index dda6023b37f7b..437022f5cde9f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -677,95 +677,6 @@ bool RISCVDAGToDAGISel::trySignedBitfieldExtract(SDNode *Node) {
   return false;
 }
 
-bool RISCVDAGToDAGISel::trySignedBitfieldInsertInMask(SDNode *Node) {
-  // Supported only in Xqcibm for now.
-  if (!Subtarget->hasVendorXqcibm())
-    return false;
-
-  using namespace SDPatternMatch;
-
-  SDValue X;
-  APInt MaskImm;
-  if (!sd_match(Node, m_Or(m_OneUse(m_Value(X)), m_ConstInt(MaskImm))))
-    return false;
-
-  unsigned ShAmt, Width;
-  if (!MaskImm.isShiftedMask(ShAmt, Width) || MaskImm.isSignedIntN(12))
-    return false;
-
-  // If Zbs is enabled and it is a single bit set we can use BSETI which
-  // can be compressed to C_BSETI when Xqcibm in enabled.
-  if (Width == 1 && Subtarget->hasStdExtZbs())
-    return false;
-
-  // If C1 is a shifted mask (but can't be formed as an ORI),
-  // use a bitfield insert of -1.
-  // Transform (or x, C1)
-  //        -> (qc.insbi x, -1, width, shift)
-  SDLoc DL(Node);
-  MVT VT = Node->getSimpleValueType(0);
-
-  SDValue Ops[] = {X, CurDAG->getSignedTargetConstant(-1, DL, VT),
-                   CurDAG->getTargetConstant(Width, DL, VT),
-                   CurDAG->getTargetConstant(ShAmt, DL, VT)};
-  SDNode *BitIns = CurDAG->getMachineNode(RISCV::QC_INSBI, DL, VT, Ops);
-  ReplaceNode(Node, BitIns);
-  return true;
-}
-
-// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value
-// being inserted only sets known zero bits.
-bool RISCVDAGToDAGISel::tryBitfieldInsertOpFromOrAndImm(SDNode *Node) {
-  // Supported only in Xqcibm for now.
-  if (!Subtarget->hasVendorXqcibm())
-    return false;
-
-  using namespace SDPatternMatch;
-
-  SDValue And;
-  APInt MaskImm, OrImm;
-  if (!sd_match(Node, m_Or(m_OneUse(m_And(m_Value(And), m_ConstInt(MaskImm))),
-                           m_ConstInt(OrImm))))
-    return false;
-
-  // Compute the Known Zero for the AND as this allows us to catch more general
-  // cases than just looking for AND with imm.
-  KnownBits Known = CurDAG->computeKnownBits(Node->getOperand(0));
-
-  // The bits being inserted must only set those bits that are known to be zero.
-  if (!OrImm.isSubsetOf(Known.Zero)) {
-    // FIXME:  It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
-    // currently handle this case.
-    return false;
-  }
-
-  unsigned ShAmt, Width;
-  // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
-  if (!Known.Zero.isShiftedMask(ShAmt, Width))
-    return false;
-
-  // QC_INSB(I) dst, src, #width, #shamt.
-  SDLoc DL(Node);
-  MVT VT = Node->getSimpleValueType(0);
-  SDValue ImmNode;
-  auto Opc = RISCV::QC_INSB;
-
-  int32_t LIImm = OrImm.getSExtValue() >> ShAmt;
-
-  if (isInt<5>(LIImm)) {
-    Opc = RISCV::QC_INSBI;
-    ImmNode = CurDAG->getSignedTargetConstant(LIImm, DL, MVT::i32);
-  } else {
-    ImmNode = selectImm(CurDAG, DL, MVT::i32, LIImm, *Subtarget);
-  }
-
-  SDValue Ops[] = {And, ImmNode, CurDAG->getTargetConstant(Width, DL, VT),
-                   CurDAG->getTargetConstant(ShAmt, DL, VT)};
-  SDNode *BitIns = CurDAG->getMachineNode(Opc, DL, VT, Ops);
-  ReplaceNode(Node, BitIns);
-  return true;
-}
-
 bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
   // Only supported with XAndesPerf at the moment.
   if (!Subtarget->hasVendorXAndesPerf())
@@ -1384,12 +1295,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     return;
   }
   case ISD::OR: {
-    if (trySignedBitfieldInsertInMask(Node))
-      return;
-
-    if (tryBitfieldInsertOpFromOrAndImm(Node))
-      return;
-
     if (tryShrinkShlLogicImm(Node))
       return;
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index cf2f763abc063..f03b44c875cab 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -76,8 +76,6 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
   bool tryShrinkShlLogicImm(SDNode *Node);
   bool trySignedBitfieldExtract(SDNode *Node);
   bool trySignedBitfieldInsertInSign(SDNode *Node);
-  bool trySignedBitfieldInsertInMask(SDNode *Node);
-  bool tryBitfieldInsertOpFromOrAndImm(SDNode *Node);
   bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT,
                                   SDValue X, unsigned Msb, unsigned Lsb);
   bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT,

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 542be1c20efc0..937213bfddfad 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16203,7 +16203,6 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
     return SDValue();
 
   using namespace SDPatternMatch;
-
   SDValue Base, Inserted;
   APInt CMask;
   if (!sd_match(N, m_Xor(m_Value(Base),
@@ -16214,7 +16213,6 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
 
   if (N->getValueType(0) != MVT::i32)
     return SDValue();
-
   unsigned Width, ShAmt;
   if (!CMask.isShiftedMask(ShAmt, Width))
     return SDValue();
@@ -16235,10 +16233,96 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
   return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
 }
 
+static SDValue combineOrToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
+                                         const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasVendorXqcibm())
+    return SDValue();
+
+  using namespace SDPatternMatch;
+
+  SDValue X;
+  APInt MaskImm;
+  if (!sd_match(N, m_Or(m_OneUse(m_Value(X)), m_ConstInt(MaskImm))))
+    return SDValue();
+
+  unsigned ShAmt, Width;
+  if (!MaskImm.isShiftedMask(ShAmt, Width) || MaskImm.isSignedIntN(12))
+    return SDValue();
+
+  if (N->getValueType(0) != MVT::i32)
+    return SDValue();
+
+  // If Zbs is enabled and the mask is a single set bit, we can use BSETI,
+  // which can be compressed to C_BSETI when Xqcibm is enabled.
+  if (Width == 1 && Subtarget.hasStdExtZbs())
+    return SDValue();
+
+  // If C1 is a shifted mask (but can't be formed as an ORI),
+  // use a bitfield insert of -1.
+  // Transform (or x, C1)
+  //        -> (qc.insbi x, -1, width, shift)
+  SDLoc DL(N);
+
+  SDValue Ops[] = {X, DAG.getSignedConstant(-1, DL, MVT::i32),
+                   DAG.getConstant(Width, DL, MVT::i32),
+                   DAG.getConstant(ShAmt, DL, MVT::i32)};
+  return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
+}
+
+// Generate a QC_INSB/QC_INSBI from 'or (and X, MaskImm), OrImm' iff the value
+// being inserted only sets known zero bits.
+static SDValue combineOrAndToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
+                                            const RISCVSubtarget &Subtarget) {
+  // Supported only in Xqcibm for now.
+  if (!Subtarget.hasVendorXqcibm())
+    return SDValue();
+
+  using namespace SDPatternMatch;
+
+  SDValue Inserted;
+  APInt MaskImm, OrImm;
+  if (!sd_match(
+          N, m_SpecificVT(MVT::i32, m_Or(m_OneUse(m_And(m_Value(Inserted),
+                                                        m_ConstInt(MaskImm))),
+                                         m_ConstInt(OrImm)))))
+    return SDValue();
+
+  // Compute the Known Zero for the AND as this allows us to catch more general
+  // cases than just looking for AND with imm.
+  KnownBits Known = DAG.computeKnownBits(N->getOperand(0));
+
+  // The bits being inserted must only set those bits that are known to be
+  // zero.
+  if (!OrImm.isSubsetOf(Known.Zero)) {
+    // FIXME:  It's okay if the OrImm sets NotKnownZero bits to 1, but we don't
+    // currently handle this case.
+    return SDValue();
+  }
+
+  unsigned ShAmt, Width;
+  // The KnownZero mask must be a shifted mask (e.g., 1110..011, 11100..00).
+  if (!Known.Zero.isShiftedMask(ShAmt, Width))
+    return SDValue();
+
+  // QC_INSB(I) dst, src, #width, #shamt.
+  SDLoc DL(N);
+
+  SDValue ImmNode =
+      DAG.getSignedConstant(OrImm.getSExtValue() >> ShAmt, DL, MVT::i32);
+
+  SDValue Ops[] = {Inserted, ImmNode, DAG.getConstant(Width, DL, MVT::i32),
+                   DAG.getConstant(ShAmt, DL, MVT::i32)};
+  return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
+}
+
 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {
   SelectionDAG &DAG = DCI.DAG;
 
+  if (SDValue V = combineOrToBitfieldInsert(N, DAG, Subtarget))
+    return V;
+  if (SDValue V = combineOrAndToBitfieldInsert(N, DAG, Subtarget))
+    return V;
   if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
     return V;
   if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))


        

