[llvm] [RISCV] Add changes to have better coverage for qc.insb and qc.insbi (PR #154135)

via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 18 08:22:26 PDT 2025


https://github.com/hchandel created https://github.com/llvm/llvm-project/pull/154135

Co-authored-by: @lenary 

>From 8a7393339e05d0256fbd0ad6dafd7d57c1bbfc9c Mon Sep 17 00:00:00 2001
From: Harsh Chandel <hchandel at qti.qualcomm.com>
Date: Mon, 18 Aug 2025 16:12:21 +0530
Subject: [PATCH 1/2] [RISCV] Add changes to have better coverage for qc.insb
 and qc.insbi

Change-Id: I364e2a81ffd358c4f8250bae120a35fbbbd32cac
---
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp |  46 ----
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h   |   1 -
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |  49 ++++
 llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td |  12 +
 llvm/test/CodeGen/RISCV/xqcibm-insbi.ll     | 268 ++++++++++++++++++++
 5 files changed, 329 insertions(+), 47 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9e1530a2d00f4..28598bcce2624 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -773,49 +773,6 @@ bool RISCVDAGToDAGISel::trySignedBitfieldInsertInSign(SDNode *Node) {
   return false;
 }
 
-// (xor X, (and (xor X, C1), C2))
-// -> (qc.insbi X, (C1 >> ShAmt), Width, ShAmt)
-// where C2 is a shifted mask with width=Width and shift=ShAmt
-bool RISCVDAGToDAGISel::tryBitfieldInsertOpFromXor(SDNode *Node) {
-
-  if (!Subtarget->hasVendorXqcibm())
-    return false;
-
-  using namespace SDPatternMatch;
-
-  SDValue X;
-  APInt CImm, CMask;
-  if (!sd_match(
-          Node,
-          m_Xor(m_Value(X),
-                m_OneUse(m_And(m_OneUse(m_Xor(m_Deferred(X), m_ConstInt(CImm))),
-                               m_ConstInt(CMask))))))
-    return false;
-
-  unsigned Width, ShAmt;
-  if (!CMask.isShiftedMask(ShAmt, Width))
-    return false;
-
-  int64_t Imm = CImm.getSExtValue();
-  Imm >>= ShAmt;
-
-  SDLoc DL(Node);
-  SDValue ImmNode;
-  auto Opc = RISCV::QC_INSB;
-
-  if (isInt<5>(Imm)) {
-    Opc = RISCV::QC_INSBI;
-    ImmNode = CurDAG->getSignedTargetConstant(Imm, DL, MVT::i32);
-  } else {
-    ImmNode = selectImm(CurDAG, DL, MVT::i32, Imm, *Subtarget);
-  }
-  SDValue Ops[] = {X, ImmNode, CurDAG->getTargetConstant(Width, DL, MVT::i32),
-                   CurDAG->getTargetConstant(ShAmt, DL, MVT::i32)};
-  ReplaceNode(Node, CurDAG->getMachineNode(Opc, DL, MVT::i32, Ops));
-
-  return true;
-}
-
 bool RISCVDAGToDAGISel::tryUnsignedBitfieldExtract(SDNode *Node,
                                                    const SDLoc &DL, MVT VT,
                                                    SDValue X, unsigned Msb,
@@ -1393,9 +1350,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (tryShrinkShlLogicImm(Node))
       return;
 
-    if (tryBitfieldInsertOpFromXor(Node))
-      return;
-
     break;
   case ISD::AND: {
     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 9d4cd0e6e3393..ee3a86e25add0 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -75,7 +75,6 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
   bool trySignedBitfieldExtract(SDNode *Node);
   bool trySignedBitfieldInsertInSign(SDNode *Node);
   bool trySignedBitfieldInsertInMask(SDNode *Node);
-  bool tryBitfieldInsertOpFromXor(SDNode *Node);
   bool tryUnsignedBitfieldExtract(SDNode *Node, const SDLoc &DL, MVT VT,
                                   SDValue X, unsigned Msb, unsigned Lsb);
   bool tryUnsignedBitfieldInsertInZero(SDNode *Node, const SDLoc &DL, MVT VT,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index aedba7e52e3ab..5b40ec4ee1f75 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16059,6 +16059,52 @@ static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1,
   return DAG.getNode(ISD::XOR, DL, VT, NewOr, TrueV.getOperand(1));
 }
 
+// (xor X, (and (xor X, Y), C2))
+// -> (qc_insb X, (sra Y, ShAmt), Width, ShAmt)
+// where C2 is a shifted mask with width = Width and shift = ShAmt
+// qc_insb might become qc.insb or qc.insbi depending on the operands.
+static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
+                                          const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasVendorXqcibm())
+    return SDValue();
+
+  using namespace SDPatternMatch;
+
+  SDValue Base, Inserted;
+  APInt CMask;
+  if (!sd_match(N, m_Xor(m_Value(Base),
+                         m_OneUse(m_And(m_OneUse(m_Xor(m_Deferred(Base),
+                                                       m_Value(Inserted))),
+                                        m_ConstInt(CMask))))))
+    return SDValue();
+
+  if (N->getValueType(0) != MVT::i32 || Base.getValueType() != MVT::i32 ||
+      Inserted.getValueType() != MVT::i32)
+    return SDValue();
+
+  unsigned Width, ShAmt;
+  if (!CMask.isShiftedMask(ShAmt, Width))
+    return SDValue();
+
+  // Width must be in 1..32 (inclusive).
+  if (Width > 32 || Width == 0)
+    return SDValue();
+
+  if (!isUInt<5>(ShAmt))
+    return SDValue();
+
+  SDLoc DL(N);
+
+  // `Inserted` needs to be right-shifted before it is put into the
+  // instruction.
+  Inserted = DAG.getNode(ISD::SRA, DL, MVT::i32, Inserted,
+                         DAG.getShiftAmountConstant(ShAmt, MVT::i32, DL));
+
+  SDValue Ops[] = {Base, Inserted, DAG.getConstant(Width, DL, MVT::i32),
+                   DAG.getConstant(ShAmt, DL, MVT::i32)};
+  return DAG.getNode(RISCVISD::QC_INSB, DL, MVT::i32, Ops);
+}
+
 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {
   SelectionDAG &DAG = DCI.DAG;
@@ -16131,6 +16177,9 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
     }
   }
 
+  if (SDValue V = combineXorToBitfieldInsert(N, DAG, Subtarget))
+    return V;
+
   if (SDValue V = combineBinOpToReduce(N, DAG, Subtarget))
     return V;
   if (SDValue V = combineBinOpOfExtractToReduceTree(N, DAG, Subtarget))
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
index 2c64b0c220fba..dc58f1e2c0d61 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td
@@ -22,6 +22,13 @@ def SDT_SetMultiple : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
 def qc_setwmi : RVSDNode<"QC_SETWMI", SDT_SetMultiple,
                          [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
 
+def qc_insb : RVSDNode<"QC_INSB", SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
+                                                       SDTCisSameAs<0, 2>,
+                                                       SDTCisVT<0, i32>,
+                                                       SDTCisInt<3>,
+                                                       SDTCisInt<4>]>,
+                       []>;
+
 def uimm5nonzero : RISCVOp<XLenVT>,
                    ImmLeaf<XLenVT, [{return (Imm != 0) && isUInt<5>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<5, "NonZero">;
@@ -1508,6 +1515,11 @@ def : Pat<(i32 (and GPRNoX0:$rs, 1023)), (QC_EXTU GPRNoX0:$rs, 10, 0)>;
 def : Pat<(i32 (and GPRNoX0:$rs, 2047)), (QC_EXTU GPRNoX0:$rs, 11, 0)>;
 
 def : Pat<(i32 (bitreverse GPRNoX0:$rs1)), (QC_BREV32 GPRNoX0:$rs1)>;
+
+def : Pat<(qc_insb GPRNoX0:$rd, simm5:$imm5, uimm5_plus1:$width, uimm5:$shamt),
+          (QC_INSBI GPRNoX0:$rd, simm5:$imm5, uimm5_plus1:$width, uimm5:$shamt)>;
+def : Pat<(qc_insb GPRNoX0:$rd, GPR:$rs1, uimm5_plus1:$width, uimm5:$shamt),
+          (QC_INSB GPRNoX0:$rd, GPR:$rs1, uimm5_plus1:$width, uimm5:$shamt)>;
 } // Predicates = [HasVendorXqcibm, IsRV32]
 
 // If Zbb is enabled sext.b/h is preferred since they are compressible
diff --git a/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll b/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll
index e4a545169d210..9ea4bce968af2 100644
--- a/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll
+++ b/llvm/test/CodeGen/RISCV/xqcibm-insbi.ll
@@ -260,3 +260,271 @@ define i64 @insbi_i64_large_mask(i64 %in1) nounwind {
   %xor2 = xor i64 %and1, %in1
   ret i64 %xor2
 }
+
+define i32 @insb(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a1, a1, a0
+; RV32I-NEXT:    andi a1, a1, -2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT:    qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %shl1, %in1
+  %and1 = and i32 -2, %xor1
+  %xor2 = xor i32 %in1, %and1
+  ret i32 %xor2
+}
+
+define i32 @insb_and_mul(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_and_mul:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a1, a1, a0
+; RV32I-NEXT:    andi a1, a1, -2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_and_mul:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    slli a1, a1, 1
+; RV32XQCIBM-NEXT:    xor a1, a1, a0
+; RV32XQCIBM-NEXT:    andi a1, a1, -2
+; RV32XQCIBM-NEXT:    xor a0, a0, a1
+; RV32XQCIBM-NEXT:    add a0, a0, a1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %shl1, %in1
+  %and1 = and i32 -2, %xor1
+  %xor2 = xor i32 %in1, %and1
+  %add1 = add i32 %xor2, %and1
+  ret i32 %add1
+}
+
+define i32 @insb_xor_mul(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_xor_mul:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a1, a1, a0
+; RV32I-NEXT:    andi a2, a1, -2
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_xor_mul:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    slli a1, a1, 1
+; RV32XQCIBM-NEXT:    xor a1, a1, a0
+; RV32XQCIBM-NEXT:    andi a2, a1, -2
+; RV32XQCIBM-NEXT:    xor a0, a0, a2
+; RV32XQCIBM-NEXT:    add a0, a0, a1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %shl1, %in1
+  %and1 = and i32 -2, %xor1
+  %xor2 = xor i32 %in1, %and1
+  %add1 = add i32 %xor2, %xor1
+  ret i32 %add1
+}
+
+define i32 @insb_shl_mul(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_shl_mul:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a2, a1, a0
+; RV32I-NEXT:    andi a2, a2, -2
+; RV32I-NEXT:    xor a0, a0, a2
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_shl_mul:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    slli a1, a1, 1
+; RV32XQCIBM-NEXT:    srai a2, a1, 1
+; RV32XQCIBM-NEXT:    qc.insb a0, a2, 31, 1
+; RV32XQCIBM-NEXT:    add a0, a0, a1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %shl1, %in1
+  %and1 = and i32 -2, %xor1
+  %xor2 = xor i32 %in1, %and1
+  %add1 = add i32 %xor2, %shl1
+  ret i32 %add1
+}
+
+define i32 @insb_comm(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_comm:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a1, a0, a1
+; RV32I-NEXT:    andi a1, a1, -2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_comm:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT:    qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %in1, %shl1
+  %and1 = and i32 -2, %xor1
+  %xor2 = xor i32 %in1, %and1
+  ret i32 %xor2
+}
+
+define i32 @insb_comm1(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_comm1:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a1, a0, a1
+; RV32I-NEXT:    andi a1, a1, -2
+; RV32I-NEXT:    xor a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_comm1:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT:    qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %in1, %shl1
+  %and1 = and i32 -2, %xor1
+  %xor2 = xor i32 %and1, %in1
+  ret i32 %xor2
+}
+
+define i32 @insb_comm2(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_comm2:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    xor a1, a0, a1
+; RV32I-NEXT:    andi a1, a1, -2
+; RV32I-NEXT:    xor a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_comm2:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    qc.ext a1, a1, 31, 0
+; RV32XQCIBM-NEXT:    qc.insb a0, a1, 31, 1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 1
+  %xor1 = xor i32 %in1, %shl1
+  %and1 = and i32 %xor1, -2
+  %xor2 = xor i32 %and1, %in1
+  ret i32 %xor2
+}
+
+define i32 @insb_not_shifted_mask(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_not_shifted_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 18
+; RV32I-NEXT:    xor a1, a0, a1
+; RV32I-NEXT:    lui a2, 320
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_not_shifted_mask:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    slli a1, a1, 18
+; RV32XQCIBM-NEXT:    xor a1, a1, a0
+; RV32XQCIBM-NEXT:    lui a2, 320
+; RV32XQCIBM-NEXT:    and a1, a1, a2
+; RV32XQCIBM-NEXT:    xor a0, a0, a1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 18
+  %xor1 = xor i32 %in1, %shl1
+  %and1 = and i32 1310720, %xor1
+  %xor2 = xor i32 %in1, %and1
+  ret i32 %xor2
+}
+
+define i32 @insb_shift_diffrom_mask(i32 %in1, i32 %in2) nounwind {
+; RV32I-LABEL: insb_shift_diffrom_mask:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 16
+; RV32I-NEXT:    xor a1, a0, a1
+; RV32I-NEXT:    lui a2, 192
+; RV32I-NEXT:    and a1, a1, a2
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_shift_diffrom_mask:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    qc.ext a1, a1, 14, 2
+; RV32XQCIBM-NEXT:    qc.insb a0, a1, 2, 18
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i32 %in2, 16
+  %xor1 = xor i32 %in1, %shl1
+  %and1 = and i32 786432, %xor1
+  %xor2 = xor i32 %in1, %and1
+  ret i32 %xor2
+}
+
+define i64 @insb_i64(i64 %in1, i64 %in2) nounwind {
+; RV32I-LABEL: insb_i64:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a1, a2, 31
+; RV32I-NEXT:    slli a3, a3, 1
+; RV32I-NEXT:    slli a2, a2, 1
+; RV32I-NEXT:    or a1, a3, a1
+; RV32I-NEXT:    xor a2, a0, a2
+; RV32I-NEXT:    andi a2, a2, -2
+; RV32I-NEXT:    xor a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_i64:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    srli a1, a2, 31
+; RV32XQCIBM-NEXT:    slli a3, a3, 1
+; RV32XQCIBM-NEXT:    qc.ext a2, a2, 31, 0
+; RV32XQCIBM-NEXT:    or a1, a1, a3
+; RV32XQCIBM-NEXT:    qc.insb a0, a2, 31, 1
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i64 %in2, 1
+  %xor1 = xor i64 %in1, %shl1
+  %and1 = and i64 %xor1, -2
+  %xor2 = xor i64 %and1, %in1
+  ret i64 %xor2
+}
+
+define i64 @insb_i64_only(i64 %in1, i64 %in2) nounwind {
+; RV32I-LABEL: insb_i64_only:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a4, a2, 31
+; RV32I-NEXT:    slli a3, a3, 1
+; RV32I-NEXT:    slli a2, a2, 1
+; RV32I-NEXT:    or a3, a3, a4
+; RV32I-NEXT:    lui a4, 524288
+; RV32I-NEXT:    xor a2, a0, a2
+; RV32I-NEXT:    xor a3, a1, a3
+; RV32I-NEXT:    and a2, a2, a4
+; RV32I-NEXT:    andi a3, a3, 7
+; RV32I-NEXT:    xor a0, a2, a0
+; RV32I-NEXT:    xor a1, a3, a1
+; RV32I-NEXT:    ret
+;
+; RV32XQCIBM-LABEL: insb_i64_only:
+; RV32XQCIBM:       # %bb.0:
+; RV32XQCIBM-NEXT:    srli a4, a2, 31
+; RV32XQCIBM-NEXT:    slli a3, a3, 1
+; RV32XQCIBM-NEXT:    qc.ext a2, a2, 1, 30
+; RV32XQCIBM-NEXT:    or a3, a3, a4
+; RV32XQCIBM-NEXT:    qc.insb a0, a2, 1, 31
+; RV32XQCIBM-NEXT:    qc.insb a1, a3, 3, 0
+; RV32XQCIBM-NEXT:    ret
+  %shl1 = shl i64 %in2, 1
+  %xor1 = xor i64 %in1, %shl1
+  %and1 = and i64 %xor1, 32212254720
+  %xor2 = xor i64 %and1, %in1
+  ret i64 %xor2
+}
+

>From e4f25a414ddc6dd3a08c2a01b3bae4ae661d6aa3 Mon Sep 17 00:00:00 2001
From: Harsh Chandel <hchandel at qti.qualcomm.com>
Date: Mon, 18 Aug 2025 20:47:28 +0530
Subject: [PATCH 2/2] fixup! Remove Redundant checks

Change-Id: Ie1e465530bdf7c1a10def0e3a5bb995f4e055b7e
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5b40ec4ee1f75..cac21d9372878 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16086,13 +16086,6 @@ static SDValue combineXorToBitfieldInsert(SDNode *N, SelectionDAG &DAG,
   if (!CMask.isShiftedMask(ShAmt, Width))
     return SDValue();
 
-  // Width must be in 1..32 (inclusive).
-  if (Width > 32 || Width == 0)
-    return SDValue();
-
-  if (!isUInt<5>(ShAmt))
-    return SDValue();
-
   SDLoc DL(N);
 
   // `Inserted` needs to be right - shifted before it is put into the



More information about the llvm-commits mailing list